Was in the process of finishing my allocator when I realized that NO, THIS WHOLE API IS TOTALLY DUMB. Why would I take a pointer to a place to place a pointer to a node, and then update that pointer to a node through a pointer to a pointer, which is somehow stored in the node? That does not make any sense at all. I am so bad at programming, oh my god AAAAAAAA, I am going insane. I am actually gonna go insane. I am rewatching Parkour Civilization for the third time already instead of writing a doubly-linked list. Yes. I am gonna store blocks in a doubly-linked list. And oh yeah, I am gonna learn how to transfer allocated RB nodes between RB trees, yes.
This commit is contained in: parent 98af159dbc, commit 2ea49d61d7
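Not part of the diff below, just a rough sketch of the "store blocks in a doubly-linked list" idea from the message above. Every name here (MABlockLink, ma_block_insert_after, ma_block_unlink) is made up for illustration and does not exist in the repo:

/* Hypothetical intrusive doubly-linked list for allocator blocks.
 * Purely illustrative; none of these names exist in this codebase. */
#include <stddef.h>

typedef struct MABlockLink {
    struct MABlockLink* prev;
    struct MABlockLink* next;
} MABlockLink;

/* Insert `node` right after `pos`, O(1). */
static void ma_block_insert_after(MABlockLink* pos, MABlockLink* node) {
    node->prev = pos;
    node->next = pos->next;
    if (pos->next) pos->next->prev = node;
    pos->next = node;
}

/* Unlink `node` from whatever list it is in, also O(1). The node itself
 * is not freed, so it can be re-linked somewhere else later; the same
 * "detach without freeing, then re-attach" idea is what transferring an
 * allocated RB node between RB trees would need. */
static void ma_block_unlink(MABlockLink* node) {
    if (node->prev) node->prev->next = node->next;
    if (node->next) node->next->prev = node->prev;
    node->prev = node->next = NULL;
}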
@@ -8,24 +8,6 @@ typedef struct {
    U64 len;
} U64Segment;

// bool U64Segment_equal_by_start(const U64Segment* A, const U64Segment* B) {
//     return A->start == B->start;
// }

// bool U64Segment_less_by_start(const U64Segment* A, const U64Segment* B) {
//     return A->start < B->start;
// }

// bool U64Segment_equal_U64Segment(const U64Segment* A, const U64Segment* B) {
//     return A->start == B->start && A->len == B->len;
// }

// bool U64Segment_less_by_len_and_start(const U64Segment* A, const U64Segment* B) {
//     if (A->len == B->len)
//         return A->start < B->start;
//     return A->len < B->len;
// }

U64 U64Segment_get_length_resp_alignment(U64Segment self, U8 alignment_exp) {
    if (self.start & ((1ull << alignment_exp) - 1)) {
        U64 pad_left = (1ull << alignment_exp) - (self.start & ((1ull << alignment_exp) - 1));
@@ -34,18 +16,4 @@ U64 U64Segment_get_length_resp_alignment(U64Segment self, U8 alignment_exp) {
    return self.len;
}

// bool U64Segment_equal_U64Segment_resp_align(const U64Segment* A, const U64Segment* B, U8 alignment_exp) {
//     U64 len_A = U64Segment_get_length_resp_alignment(A, alignment_exp);
//     U64 len_B = U64Segment_get_length_resp_alignment(B, alignment_exp);
//     return len_A == len_B && A->start == B->start;
// }

// bool U64Segment_less_by_len_and_start_resp_align(const U64Segment* A, const U64Segment* B, U8 alignment_exp) {
//     U64 len_A = U64Segment_get_length_resp_alignment(A, alignment_exp);
//     U64 len_B = U64Segment_get_length_resp_alignment(B, alignment_exp);
//     if (len_A == len_B)
//         return A->start < B->start;
//     return len_A < len_B;
// }

#endif
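As a side note (not from the repo): the padding arithmetic in U64Segment_get_length_resp_alignment can be checked with a tiny standalone program. The clamp-to-zero behaviour when the padding exceeds the length is an assumption here, since the middle of the function is cut out of the hunk above:

#include <stdint.h>
#include <stdio.h>

/* Same padding arithmetic as U64Segment_get_length_resp_alignment above:
 * how much of [start, start+len) is usable if the usable part must begin
 * on a (1 << alignment_exp)-aligned address. */
static uint64_t usable_len(uint64_t start, uint64_t len, uint8_t alignment_exp) {
    uint64_t mask = (1ull << alignment_exp) - 1;
    if ((start & mask) == 0)
        return len;                                  /* already aligned */
    uint64_t pad_left = (1ull << alignment_exp) - (start & mask);
    return pad_left >= len ? 0 : len - pad_left;     /* clamp is an assumption */
}

int main(void) {
    /* start = 0x1003, len = 64, alignment 2^4 = 16: pad_left = 13, usable = 51 */
    printf("%llu\n", (unsigned long long)usable_len(0x1003, 64, 4));
    return 0;
}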
@@ -18,7 +18,7 @@ void generate_l1_5_template_instantiations_for_margaret(){
        .guest_data_T = cstr("U8"),
    });
    generate_rbtree_Map_templ_inst_eve_header(l, ns, (map_instantiation_op){
        .K = cstr("U64"), .k_integer = true, .V = cstr("MargaretMemoryOccupation"), .v_primitive = true,
        .K = cstr("U64"), .k_integer = true, .V = cstr("MargaretMAOccupation"), .v_primitive = true,
    }, true /* We want RBTreeNode_KVPU64ToMargaretMemoryOccupation to be generated here for us */ );
}


@@ -180,7 +180,7 @@ typedef U8 MargaretMemAllocatorDemands;
 * position-structures, that it filled, will be updated. If these values
 * (buffer/image handlers + sub-buffer positions) are dependencies of other
 * objects, these objects need to be updated (or rebuilt) */
#define MARGARET_MEM_ALLOCATOR_DEMANDS_DEFRAGMENTATION_BITS 1
#define MARGARET_MA_DEMANDS_DEFRAGMENTATION_BIT 1
/* If for some set of requests MargaretMemAllocator needs to execute some Vulkan copying commands,
 * it will demand you to actually execute the command buffer that you gave it. If this is `true` it does
 * not necessarily mean that defragmentation is happening right now, no, defragmentation is indicated by
@@ -190,7 +190,9 @@ typedef U8 MargaretMemAllocatorDemands;
 * It won't affect other data structures in your memory,
 * of course, (still, notice that position of your sub-buffer will be updated).
 */
#define MARGARET_MEM_ALLOCATOR_DEMANDS_CMD_BUFFER 2
#define MARGARET_MA_DEMANDS_CMD_BUFFER_BIT 2

#define MARGARET_MA_DEMANDS_DEFRAGMENTATION (MARGARET_MA_DEMANDS_CMD_BUFFER_BIT | MARGARET_MA_DEMANDS_DEFRAGMENTATION_BIT)

#define MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP 21
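For context, a rough sketch of how a caller might react to these demand bits. submit_and_wait and rebuild_dependent_objects are placeholders, not functions from this repo, and VkQueue is assumed to be available from the Vulkan (or mock) header:

/* Sketch only: caller-side handling of MargaretMemAllocatorDemands. */
extern void submit_and_wait(VkQueue queue, VkCommandBuffer cmd_buff);   /* hypothetical */
extern void rebuild_dependent_objects(void);                            /* hypothetical */

static void handle_allocator_demands(MargaretMemAllocator* alloc, VkQueue queue,
                                     VkCommandBuffer cmd_buff, MargaretMemAllocatorRequests* requests) {
    MargaretMemAllocatorDemands demands =
        MargaretMemAllocator_carry_out_request(alloc, cmd_buff, requests);
    if (demands & MARGARET_MA_DEMANDS_CMD_BUFFER_BIT) {
        /* The allocator recorded copy commands into cmd_buff; they must actually run. */
        submit_and_wait(queue, cmd_buff);
    }
    if (demands & MARGARET_MA_DEMANDS_DEFRAGMENTATION_BIT) {
        /* Buffer/image handles and sub-buffer positions were rewritten;
         * anything built on top of them needs to be refreshed or rebuilt. */
        rebuild_dependent_objects();
    }
}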
@@ -210,7 +212,6 @@ typedef struct {
    VkImageUsageFlags usage_flags;
    bool preserve_at_quiet;
    VkImage image;
    MargaretMemAllocatorOccupantPosition* ans;
} MargaretMemoryOccupationImage;

/* primitive */
@@ -227,21 +228,25 @@ typedef enum {
} MargaretMemoryOccupation_variant;

typedef struct {
    U64 taken_size;
    MargaretMemAllocatorOccupantPosition* ans;
    MargaretMemoryOccupation_variant variant;
    union {
        MargaretMemoryOccupationImage img;
        MargaretMemoryOccupationBuffer buf;
    };
} MargaretMemoryOccupation;
} MargaretMAOccupant;

#include "../../../gen/l1_5/eve/margaret/RBTree_MapU64ToMargaretMemoryOccupation.h"
typedef struct {
    U64 taken_size;
    MargaretMemAllocatorOccupantPosition* ans;
    MargaretMAOccupant me;
} MargaretMAOccupation;

#include "../../../gen/l1_5/eve/margaret/RBTree_MapU64ToMargaretMAOccupation.h"

/* Not primitive */
typedef struct {
    RBTree_MapU64ToMargaretMemoryOccupation occupied_memory;
    U64 length;
    RBTree_MapU64ToMargaretMAOccupation occupied_memory;
    U64 capacity;
    /* I am 100% sure that this field is useless rn. You might use it to show cool infographics on F3 screen */
    U64 occupation_counter;
    VkDeviceMemory mem_hand;
@@ -250,14 +255,14 @@ typedef struct {


void MargaretMemAllocatorOneBlock_drop(MargaretMemAllocatorOneBlock self){
    RBTree_MapU64ToMargaretMemoryOccupation_drop(self.occupied_memory);
    RBTree_MapU64ToMargaretMAOccupation_drop(self.occupied_memory);
}

#include "../../../gen/l1/eve/margaret/VecMargaretMemAllocatorOneBlock.h"

struct MargaretMemAllocatorOccupantPosition{
    U64 device_mem_ind;
    RBTreeNode_KVPU64ToMargaretMemoryOccupation* occ_it;
    RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it;
};

typedef MargaretMemAllocatorOccupantPosition* MargaretMemAllocatorRequestFreeOccupant;
@@ -273,6 +278,7 @@ typedef struct{
#include "../../../gen/l1/eve/margaret/VecMargaretMemAllocatorRequestResizeBuffer.h"

typedef struct {
    U64 allocation_size;
    VkBufferUsageFlags usage;
    bool preserve_at_quiet;
    MargaretMemAllocatorOccupantPosition* ans;
@@ -341,7 +347,7 @@ bool MargaretFreeMemSegment_less_resp_align(
typedef struct {
    U64 mem_block_ind;
    U64 old_capacity;
    RBTreeNode_KVPU64ToMargaretMemoryOccupation* occ_it;
    RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it;
} MargaretMABufferExpansionRecord;
#include "../../../gen/l1/eve/margaret/VecMargaretMABufferExpansionRecord.h"

@@ -408,7 +414,8 @@ void MargaretMemFreeSpaceManager_insert(MargaretMemFreeSpaceManager* man, U64 st
    }
}

OptionMargaretFreeMemSegment MargaretMemFreeSpaceManager_search(MargaretMemFreeSpaceManager* man, U64 len, U8 alignment_exp) {
OptionMargaretFreeMemSegment MargaretMemFreeSpaceManager_search(
        MargaretMemFreeSpaceManager* man, U8 alignment_exp, U64 req_size) {
    check(alignment_exp < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP);
    if (man->free_space_in_memory[alignment_exp].variant == Option_None) {
        assert(man->set_present.len > 0);
@@ -423,7 +430,7 @@ OptionMargaretFreeMemSegment MargaretMemFreeSpaceManager_search(MargaretMemFreeS
    }
    assert(man->free_space_in_memory[alignment_exp].variant == Option_Some);
    U64 sit = BufRBTreeByLenRespAlign_SetMargaretFreeMemSegment_find_min_grtr_or_eq(&man->free_space_in_memory[alignment_exp].some,
        &(MargaretFreeMemSegment){.start = 0, .len = len, .dev_mem_block = 0});
        &(MargaretFreeMemSegment){.start = 0, .len = req_size, .dev_mem_block = 0});
    if (sit == 0)
        return None_MargaretFreeMemSegment();
    return Some_MargaretFreeMemSegment(*BufRBTreeByLenRespAlign_SetMargaretFreeMemSegment_at_iter(
@@ -468,12 +475,117 @@ MargaretMemAllocator MargaretMemAllocator_new(
    return self;
}

void MargaretMemAllocator__erase_gap(MargaretMemAllocator* self, U32 dev_mem_block, U64Segment gap){
    MargaretMemFreeSpaceManager_erase(&self->mem_free_space, gap.start, gap.len, dev_mem_block);
    MargaretMemAllocatorOneBlock* block = VecMargaretMemAllocatorOneBlock_mat(&self->blocks, dev_mem_block);
    block->occupation_counter += gap.len;
    assert(block->occupation_counter <= block->capacity);
}

void MargaretMemAllocator__insert_gap(MargaretMemAllocator* self, U32 dev_mem_block, U64 start, U64 len){
    MargaretMemFreeSpaceManager_insert(&self->mem_free_space, start, len, dev_mem_block);
    MargaretMemAllocatorOneBlock* block = VecMargaretMemAllocatorOneBlock_mat(&self->blocks, dev_mem_block);
    assert(len <= block->occupation_counter);
    block->occupation_counter -= len;
}

bool MargaretMemAllocator__add_new_occupant_any_type(
    MargaretMemAllocator* self, MargaretMAOccupant occ, const VkMemoryRequirements* requirements,
    MargaretMemAllocatorOccupantPosition* ans
){
    check(U64_is_2pow(requirements->alignment));
    U8 alignment_exp = U64_2pow_log(requirements->alignment);
    OptionMargaretFreeMemSegment free_gap =
        MargaretMemFreeSpaceManager_search(&self->mem_free_space, alignment_exp, requirements->size);
    if (free_gap.variant == Option_None)
        return false;
    U32 dev_mem_block = free_gap.some.dev_mem_block;
    MargaretMemAllocatorOneBlock* block = VecMargaretMemAllocatorOneBlock_mat(&self->blocks, dev_mem_block);
    U64 gap_start = free_gap.some.start;
    U64 gap_len = free_gap.some.len;

    U64 hit = gap_start & ((1ull << alignment_exp) - 1);
    U64 af = (hit ? (1ull << alignment_exp) - hit : 0);
    U64 aligned_start = gap_start + af;
    assert(aligned_start + requirements->size <= gap_start + gap_len);
    MargaretMemAllocator__erase_gap(self, dev_mem_block, (U64Segment){.start = gap_start, .len = gap_len});
    MargaretMemAllocator__insert_gap(self, dev_mem_block, gap_start, af);
    MargaretMemAllocator__insert_gap(self, dev_mem_block, aligned_start + requirements->size,
        gap_start + gap_len - (aligned_start + requirements->size));

    /* We are doing a dumb crutch here where we first insert key+value, then search for the iterator */
    check(RBTree_MapU64ToMargaretMAOccupation_insert(&block->occupied_memory, aligned_start, (MargaretMAOccupation){
        .taken_size = requirements->size, .ans = ans, .me = occ}));
    /* Lord forgive me */
    RBTreeNode_KVPU64ToMargaretMAOccupation* new_it =
        RBTree_MapU64ToMargaretMAOccupation_find(&block->occupied_memory, aligned_start);
    assert(new_it);
    /* Updating answer. occ->ans may be already filled, or it may not. I don't care */
    ans->device_mem_ind = dev_mem_block;
    ans->occ_it = new_it;
    return true;
}
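Regarding the "dumb crutch" comment above: a thin wrapper like the one below would at least keep the insert-then-find workaround in one place. It is hypothetical (as far as this diff shows, the generated map API has no insert that returns the new node); the real fix would be to have the generated _insert hand back the node it links in:

/* Hypothetical helper, not present in this repo: insert and return the
 * freshly created node, or NULL if the key already existed. Internally it
 * still does the double lookup; it only hides the workaround from callers. */
RBTreeNode_KVPU64ToMargaretMAOccupation* RBTree_MapU64ToMargaretMAOccupation_insert_get_node(
        RBTree_MapU64ToMargaretMAOccupation* map, U64 key, MargaretMAOccupation value) {
    if (!RBTree_MapU64ToMargaretMAOccupation_insert(map, key, value))
        return NULL;
    return RBTree_MapU64ToMargaretMAOccupation_find(map, key);
}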

bool MargaretMemAllocator__add_new_buffer_occ(
    MargaretMemAllocator* self, MargaretMemAllocatorOccupantPosition* ans,
    U64 size, VkBufferUsageFlags usage_flags, bool preserve_at_quiet){
    VkBuffer buf;
    check(vkCreateBuffer(self->device, &(VkBufferCreateInfo){
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .size = size,
        .usage = usage_flags,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
    }, NULL, &buf) == VK_SUCCESS);
    VkMemoryRequirements memory_requirements;
    vkGetBufferMemoryRequirements(self->device, buf, &memory_requirements);
    bool success = MargaretMemAllocator__add_new_occupant_any_type(self, (MargaretMAOccupant){
        .variant = MargaretMemoryOccupation_Buffer,
        .buf = (MargaretMemoryOccupationBuffer){
            .buffer = buf, .capacity = size, .usage_flags = usage_flags, .preserve_at_quiet = preserve_at_quiet
        }}, &memory_requirements, ans);
    if (!success)
        vkDestroyBuffer(self->device, buf, NULL);
    return success;
}

bool MargaretMemAllocator__add_new_image_occ(
    MargaretMemAllocator* self, MargaretMemAllocatorOccupantPosition* ans,
    U64 width, U64 height, VkFormat format, VkImageUsageFlags usage_flags, bool preserve_at_quiet){
    VkImage img;
    check(vkCreateImage(self->device, &(VkImageCreateInfo){
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .imageType = VK_IMAGE_TYPE_2D,
        .format = format,
        .extent = (VkExtent3D){
            .width = width,
            .height = height,
            .depth = 1,
        },
        .mipLevels = 1,
        .arrayLayers = 1,
        .samples = VK_SAMPLE_COUNT_1_BIT,
        .tiling = VK_IMAGE_TILING_OPTIMAL,
        .usage = usage_flags,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
    }, NULL, &img) == VK_SUCCESS);
    VkMemoryRequirements memory_requirements;
    vkGetImageMemoryRequirements(self->device, img, &memory_requirements);
    return MargaretMemAllocator__add_new_occupant_any_type(self, (MargaretMAOccupant){
        .variant = MargaretMemoryOccupation_Image,
        .img = (MargaretMemoryOccupationImage){
            .image = img, .width = width, .height = height, .current_layout = VK_IMAGE_LAYOUT_UNDEFINED,
            .usage_flags = usage_flags, .preserve_at_quiet = preserve_at_quiet, .format = format,
            .tiling = VK_IMAGE_TILING_OPTIMAL
        }}, &memory_requirements, ans);
}

U64Segment MargaretMemAllocatorOneBlock_get_left_free_space(
    const MargaretMemAllocatorOneBlock* self, RBTreeNode_KVPU64ToMargaretMemoryOccupation* occ_it){
    const MargaretMemAllocatorOneBlock* self, RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it){
    U64 occ_start = occ_it->key;

    RBTreeNode_KVPU64ToMargaretMemoryOccupation* prev_occ_it =
        RBTree_MapU64ToMargaretMemoryOccupation_find_prev(&self->occupied_memory, occ_it);
    RBTreeNode_KVPU64ToMargaretMAOccupation* prev_occ_it =
        RBTree_MapU64ToMargaretMAOccupation_find_prev(&self->occupied_memory, occ_it);
    if (prev_occ_it != NULL) {
        U64 prev_occ_start = prev_occ_it->key;
        U64 prev_occ_taken_size = prev_occ_it->value.taken_size;
@@ -485,30 +597,27 @@ U64Segment MargaretMemAllocatorOneBlock_get_left_free_space(
}

U64Segment MargaretMemAllocatorOneBlock_get_right_free_space(
    const MargaretMemAllocatorOneBlock* self, RBTreeNode_KVPU64ToMargaretMemoryOccupation* occ_it){
    const MargaretMemAllocatorOneBlock* self, RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it){
    U64 occ_start = occ_it->key;
    U64 occ_taken_size = occ_it->value.taken_size;

    RBTreeNode_KVPU64ToMargaretMemoryOccupation* next_occ_it =
        RBTree_MapU64ToMargaretMemoryOccupation_find_next(&self->occupied_memory, occ_it);
    RBTreeNode_KVPU64ToMargaretMAOccupation* next_occ_it =
        RBTree_MapU64ToMargaretMAOccupation_find_next(&self->occupied_memory, occ_it);
    if (next_occ_it != NULL) {
        U64 next_occ_start = next_occ_it->key;
        assert(occ_start + occ_taken_size <= next_occ_start);
        return (U64Segment){.start = occ_start + occ_taken_size, .len = next_occ_start - (occ_start + occ_taken_size)};
    }
    return (U64Segment){.start = occ_start + occ_taken_size, .len = self->length - (occ_start + occ_taken_size)};
    return (U64Segment){.start = occ_start + occ_taken_size, .len = self->capacity - (occ_start + occ_taken_size)};
}

/* If mem occupant in question is VkBuffer, it won't delete anything from the set of available free mem segments
 * for that buffer kindred. It is your job to remove free buffer subsegments from this set*/
void MargaretMemAllocator__get_rid_of_memory_occupant(
    MargaretMemAllocator* self, U32 mem_block_id, RBTreeNode_KVPU64ToMargaretMemoryOccupation* occ_it){
    MargaretMemAllocator* self, U32 mem_block_id, RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it){
    MargaretMemAllocatorOneBlock* block = VecMargaretMemAllocatorOneBlock_mat(&self->blocks, mem_block_id);

    const MargaretMemoryOccupation* occ = &occ_it->value;

    /* Updating block usage counter TODO: do it everywhere */
    block->occupation_counter -= occ->taken_size;
    const MargaretMAOccupant* occ = &occ_it->value.me;

    U64Segment left_free_space = MargaretMemAllocatorOneBlock_get_left_free_space(block, occ_it);
    U64Segment right_free_space = MargaretMemAllocatorOneBlock_get_right_free_space(block, occ_it);
@@ -519,13 +628,12 @@ void MargaretMemAllocator__get_rid_of_memory_occupant(
        vkDestroyImage(self->device, occ->img.image, NULL);
    }

    RBTree_MapU64ToMargaretMemoryOccupation_erase_by_iter(&block->occupied_memory, occ_it);
    RBTree_MapU64ToMargaretMAOccupation_erase_by_iter(&block->occupied_memory, occ_it);

    MargaretMemFreeSpaceManager_erase(&self->mem_free_space, left_free_space.start, left_free_space.len, mem_block_id);
    MargaretMemFreeSpaceManager_erase(&self->mem_free_space, right_free_space.start, right_free_space.len, mem_block_id);

    MargaretMemFreeSpaceManager_insert(&self->mem_free_space, left_free_space.start,
        right_free_space.start + right_free_space.len - left_free_space.start, mem_block_id);
    MargaretMemAllocator__erase_gap(self, mem_block_id, left_free_space);
    MargaretMemAllocator__erase_gap(self, mem_block_id, right_free_space);
    MargaretMemAllocator__insert_gap(self, mem_block_id, left_free_space.start,
        right_free_space.start + right_free_space.len - left_free_space.start);
}

@@ -534,15 +642,15 @@ void MargaretMemAllocator__clean_handlers_in_block(const MargaretMemAllocator* s
        (block->mapped_memory != NULL));
    if (block->mapped_memory)
        vkUnmapMemory(self->device, block->mapped_memory);
    for (RBTreeNode_KVPU64ToMargaretMemoryOccupation* i =
        RBTree_MapU64ToMargaretMemoryOccupation_find_min(&block->occupied_memory); i;
        i = RBTree_MapU64ToMargaretMemoryOccupation_find_next(&block->occupied_memory, i))
    for (RBTreeNode_KVPU64ToMargaretMAOccupation* i =
        RBTree_MapU64ToMargaretMAOccupation_find_min(&block->occupied_memory); i;
        i = RBTree_MapU64ToMargaretMAOccupation_find_next(&block->occupied_memory, i))
    {
        if (i->value.variant == MargaretMemoryOccupation_Buffer) {
            vkDestroyBuffer(self->device, i->value.buf.buffer, NULL);
        if (i->value.me.variant == MargaretMemoryOccupation_Buffer) {
            vkDestroyBuffer(self->device, i->value.me.buf.buffer, NULL);
        } else {
            assert(i->value.variant == MargaretMemoryOccupation_Image);
            vkDestroyImage(self->device, i->value.img.image, NULL);
            assert(i->value.me.variant == MargaretMemoryOccupation_Image);
            vkDestroyImage(self->device, i->value.me.img.image, NULL);
        }
    }
    vkFreeMemory(self->device, block->mem_hand, NULL);
@@ -569,81 +677,97 @@ void MargaretMemAllocator_wipe_old(MargaretMemAllocator* self){
    VecMargaretMemAllocatorOneBlock_sink(&self->old_blocks, 0);
    for (U64 ri = 0; ri < self->old_moved_buffers.len; ri++) {
        MargaretMANewMovedBufRecord moved = self->old_moved_buffers.buf[ri];
        assert(moved.old_pos.occ_it->value.variant == MargaretMemoryOccupation_Buffer);
        assert(moved.old_pos.occ_it->value.me.variant == MargaretMemoryOccupation_Buffer);
        MargaretMemAllocator__get_rid_of_memory_occupant(self, moved.old_pos.device_mem_ind, moved.old_pos.occ_it);
    }
    VecMargaretMANewMovedBufRecord_sink(&self->old_moved_buffers, 0);
}

void MargaretMemAllocator__shrink_some_buffer(
    MargaretMemAllocator* self, MargaretMemAllocatorOccupantPosition pos, size_t smaller_size
){
    MargaretMemAllocatorOneBlock* block = VecMargaretMemAllocatorOneBlock_mat(&self->blocks, pos.device_mem_ind);
    MargaretMAOccupation* occ = &pos.occ_it->value;
    assert(occ->me.variant == MargaretMemoryOccupation_Buffer);
    assert(occ->me.buf.capacity >= smaller_size);
    U64 buf_start = pos.occ_it->key;
    U64 buf_taken_size = occ->taken_size;
    VkBuffer shorter_buf;
    check(vkCreateBuffer(self->device, &(VkBufferCreateInfo){
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .size = smaller_size,
        .usage = occ->me.buf.usage_flags,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
    }, NULL, &shorter_buf) == VK_SUCCESS);
    VkMemoryRequirements shorter_buf_req;
    vkGetBufferMemoryRequirements(self->device, shorter_buf, &shorter_buf_req);
    check(U64_is_2pow(shorter_buf_req.alignment));
    check((shorter_buf_req.memoryTypeBits & self->memory_type_id));
    check((buf_start & (shorter_buf_req.alignment - 1)) == 0);
    check(shorter_buf_req.size <= buf_taken_size);

    U64Segment right_free_space = MargaretMemAllocatorOneBlock_get_right_free_space(block, pos.occ_it);
    MargaretMemAllocator__erase_gap(self, pos.device_mem_ind, right_free_space);
    MargaretMemAllocator__insert_gap(self, pos.device_mem_ind,
        buf_start + shorter_buf_req.size,
        right_free_space.len + (buf_taken_size - shorter_buf_req.size));

    vkDestroyBuffer(self->device, occ->me.buf.buffer, NULL);
    occ->taken_size = shorter_buf_req.size;
    occ->me.buf.buffer = shorter_buf;
    occ->me.buf.capacity = smaller_size;
}

void MargaretMemAllocator_request_needs_defragmentation(
    MargaretMemAllocator* self, VkCommandBuffer cmd_buff, MargaretMemAllocatorRequests* requests,
    VecMargaretMABufferExpansionRecord buffer_expansion_record,
    size_t alloc_buf_requests_require_cancel, size_t alloc_img_requests_require_cancel){

    // VkPhysicalDeviceMaintenance4Properties maintenance4_properties = {
    //     .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES,
    // };
    VkPhysicalDeviceMaintenance3Properties maintenance3_properties = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
        // .pNext = &maintenance4_properties
    };
    VkPhysicalDeviceProperties2 properties = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
        .pNext = &maintenance3_properties,
    };
    vkGetPhysicalDeviceProperties2(self->physical_device, &properties);
    check(vkResetCommandBuffer(cmd_buff, 0) == VK_SUCCESS);
    check(vkBeginCommandBuffer(cmd_buff, &(VkCommandBufferBeginInfo){
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO
    }) == VK_SUCCESS);
    // todo: do

    VecMargaretMABufferExpansionRecord_drop(buffer_expansion_record);
}

MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
    MargaretMemAllocator* self, VkCommandBuffer cmd_buff, MargaretMemAllocatorRequests* requests
){
    MargaretMemAllocator_wipe_old(self);
    for (size_t i = 0; i < requests->free_buf.len; i++) {
        MargaretMemAllocatorOccupantPosition pos = *(requests->free_buf.buf[i]);
        assert(pos.occ_it->value.variant == MargaretMemoryOccupation_Buffer);
        assert(pos.occ_it->value.me.variant == MargaretMemoryOccupation_Buffer);
        MargaretMemAllocator__get_rid_of_memory_occupant(self, pos.device_mem_ind, pos.occ_it);
    }
    for (size_t i = 0; i < requests->free_image.len; i++) {
        MargaretMemAllocatorOccupantPosition pos = *(requests->free_buf.buf[i]);
        assert(pos.occ_it->value.variant == MargaretMemoryOccupation_Image);
        assert(pos.occ_it->value.me.variant == MargaretMemoryOccupation_Image);
        MargaretMemAllocator__get_rid_of_memory_occupant(self, pos.device_mem_ind, pos.occ_it);
    }
    for (size_t i = 0; i < requests->shrink_buf.len; i++) {
        MargaretMemAllocatorRequestResizeBuffer req = (requests->shrink_buf.buf[i]);
        MargaretMemAllocatorOccupantPosition pos = *req.ans;
        assert(pos.occ_it->value.ans == req.ans);
        assert(pos.occ_it->value.variant == MargaretMemoryOccupation_Buffer);
        assert(pos.occ_it->value.buf.capacity >= req.new_size);
        U64 buf_start = pos.occ_it->key;
        U64 buf_taken_size = buf_taken_size;
        MargaretMemAllocatorOneBlock* block = VecMargaretMemAllocatorOneBlock_mat(&self->blocks, pos.device_mem_ind);

        VkBuffer shorter_buf;
        VkBufferCreateInfo shorter_buf_crinfo = {
            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
            .size = req.new_size,
            .usage = pos.occ_it->value.buf.usage_flags,
            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        };
        if (vkCreateBuffer(self->device, &shorter_buf_crinfo, NULL, &shorter_buf) != VK_SUCCESS)
            abortf("vkCreateBuffer\n");
        VkMemoryRequirements shorter_buf_req;
        vkGetBufferMemoryRequirements(&self->device, shorter_buf, &shorter_buf_req);
        check(U64_is_2pow(shorter_buf_req.alignment));
        check((shorter_buf_req.memoryTypeBits & self->memory_type_id));
        check((buf_start & (shorter_buf_req.alignment - 1)) == 0)
        check(shorter_buf_req.size <= buf_taken_size);

        U64Segment right_free_space = MargaretMemAllocatorOneBlock_get_right_free_space(block, pos.occ_it);
        MargaretMemFreeSpaceManager_erase(&self->mem_free_space, right_free_space.start, right_free_space.len, pos.device_mem_ind);
        MargaretMemFreeSpaceManager_insert(&self->mem_free_space,
            buf_start + shorter_buf_req.size,
            right_free_space.len + (buf_taken_size - shorter_buf_req.size), pos.device_mem_ind);

        vkDestroyBuffer(self->device, pos.occ_it->value.buf.buffer, NULL);
        pos.occ_it->value.taken_size = shorter_buf_req.size;
        pos.occ_it->value.buf.buffer = shorter_buf;
        pos.occ_it->value.buf.capacity = req.new_size;
        assert(req.ans->occ_it->value.ans == req.ans);
        MargaretMemAllocator__shrink_some_buffer(self, *req.ans, req.new_size);
    }

    VkPhysicalDeviceMaintenance4Properties maintenance4_properties = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES,
    };
    VkPhysicalDeviceMaintenance3Properties maintenance3_properties = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
        .pNext = &maintenance4_properties
    };
    VkPhysicalDeviceProperties2 properties = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
        .pNext = &maintenance3_properties,
    };
    vkGetPhysicalDeviceProperties2(self->physical_device, &properties);

    VecMargaretMABufferExpansionRecord buffer_expansion_record = VecMargaretMABufferExpansionRecord_new();
    assert(self->old_moved_buffers.len == 0);

    /* We first try to do all the resize requests, that COULD be done using method 1 */
    /* We first try to do all the expand_buf requests, that COULD be done using method 1 */
    for (U64 rr = 0; rr < requests->expand_buf.len;) {
        U64 new_size = requests->expand_buf.buf[rr].new_size;
        MargaretMemAllocatorOccupantPosition* ans = requests->expand_buf.buf[rr].ans;
@@ -652,21 +776,19 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
        MargaretMemAllocatorOneBlock* block = VecMargaretMemAllocatorOneBlock_mat(&self->blocks, ans->device_mem_ind);

        U64 occ_start = ans->occ_it->key;
        assert(ans->occ_it->value.variant == MargaretMemoryOccupation_Buffer);
        MargaretMemoryOccupationBuffer* buf = &ans->occ_it->value.buf;
        assert(ans->occ_it->value.me.variant == MargaretMemoryOccupation_Buffer);
        MargaretMemoryOccupationBuffer* buf = &ans->occ_it->value.me.buf;

        /* Method 1 */
        U64Segment right_free_space = MargaretMemAllocatorOneBlock_get_right_free_space(block, ans->occ_it);

        VkBuffer temp_buf_extension;
        VkBufferCreateInfo temp_buf_extension_crinfo = {
        check (vkCreateBuffer(self->device, &(VkBufferCreateInfo){
            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
            .size = new_size,
            .usage = buf->usage_flags,
            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        };
        if (vkCreateBuffer(self->device, &temp_buf_extension_crinfo, NULL, &temp_buf_extension) != VK_SUCCESS)
            abortf("vkCreateBuffer");
        }, NULL, &temp_buf_extension) == VK_SUCCESS);
        VkMemoryRequirements temp_buf_extension_req;
        vkGetBufferMemoryRequirements(self->device, temp_buf_extension, &temp_buf_extension_req);
        check(U64_is_2pow(temp_buf_extension_req.alignment));
@@ -678,11 +800,11 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
            rr++;
            continue;
        }
        MargaretMemFreeSpaceManager_erase(&self->mem_free_space, right_free_space.start, right_free_space.len, ans->device_mem_ind);
        MargaretMemFreeSpaceManager_insert(&self->mem_free_space,
        vkDestroyBuffer(self->device, temp_buf_extension, NULL);
        MargaretMemAllocator__erase_gap(self, ans->device_mem_ind, right_free_space);
        MargaretMemAllocator__insert_gap(self, ans->device_mem_ind,
            occ_start + temp_buf_extension_req.size,
            right_free_space.start + right_free_space.len - (occ_start + temp_buf_extension_req.size),
            ans->device_mem_ind);
            right_free_space.start + right_free_space.len - (occ_start + temp_buf_extension_req.size));
        VecMargaretMABufferExpansionRecord_append(&buffer_expansion_record, (MargaretMABufferExpansionRecord){
            .mem_block_ind = ans->device_mem_ind, .old_capacity = buf->capacity, .occ_it = ans->occ_it
        });
@@ -695,21 +817,73 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
    }

    check(vkResetCommandBuffer(cmd_buff, 0) == VK_SUCCESS);

    check(vkBeginCommandBuffer(cmd_buff, &(VkCommandBufferBeginInfo){
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO
    }) == VK_SUCCESS);
    MargaretMemAllocatorDemands demands = 0;

    for (U64 ri = 0; ri < requests->expand_buf.len; ri++) {

        U64 new_size = requests->expand_buf.buf[ri].new_size;
        MargaretMemAllocatorOccupantPosition* ans = requests->expand_buf.buf[ri].ans;
        MargaretMemAllocatorOccupantPosition old_ans = *ans;
        assert(old_ans.occ_it->value.ans == ans);
        assert(ans->occ_it->value.me.variant == MargaretMemoryOccupation_Buffer);
        assert(new_size >= old_ans.occ_it->value.me.buf.capacity);
        MargaretMemoryOccupationBuffer* old_buf = &old_ans.occ_it->value.me.buf;
        bool success = MargaretMemAllocator__add_new_buffer_occ(self, ans, )
        // todo: finish this call: add the new (bigger) buffer occupant via __add_new_buffer_occ, reusing the old buffer's parameters
        if (!success) {
            MargaretMemAllocator_request_needs_defragmentation(self, cmd_buff, requests, buffer_expansion_record, 0, 0);
            assert(self->old_moved_buffers.len == 0);
            return MARGARET_MA_DEMANDS_DEFRAGMENTATION;
        }
        VecMargaretMANewMovedBufRecord_append(&self->old_moved_buffers,
            (MargaretMANewMovedBufRecord){.ans = ans, .old_pos = old_ans});
        if (old_buf->preserve_at_quiet) {
            demands = MARGARET_MA_DEMANDS_CMD_BUFFER_BIT;
            vkCmdCopyBuffer(cmd_buff, old_buf->buffer, temp_buf, 1, &(VkBufferCopy){0, 0, old_buf->capacity});
        }
    }

    // todo: write
    for (U64 ri = 0; ri < requests->alloc_buf.len; ri++) {
        MargaretMemAllocatorRequestAllocBuffer* req = &requests->alloc_buf.buf[ri];

        VkBuffer fresh_buf;
        check(vkCreateBuffer(self->device, &(VkBufferCreateInfo){
            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
            .size = req->allocation_size,
            .usage = req->usage,
            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        }, NULL, &fresh_buf) == VK_SUCCESS);
        VkMemoryRequirements mem_requires;
        vkGetBufferMemoryRequirements(self->device, fresh_buf, &mem_requires);
        bool success = MargaretMemAllocator__add_new_occupant(self, (MargaretMemoryOccupation){
            .taken_size = mem_requires.size, .ans = req->ans, .variant = MargaretMemoryOccupation_Buffer,
            .buf.capacity = req->allocation_size, .buf.buffer = fresh_buf, .buf.preserve_at_quiet = req->preserve_at_quiet,
            .buf.usage_flags = req->usage
        }, &mem_requires);
        if (!success) {
            vkDestroyBuffer(self->device, fresh_buf, NULL);
            MargaretMemAllocator_request_needs_defragmentation(self, cmd_buff, requests, buffer_expansion_record, ri, 0);
            assert(self->old_moved_buffers.len == 0);
            return MARGARET_MA_DEMANDS_DEFRAGMENTATION;
        }
    }

    for (U64 ri = 0; ri < requests->alloc_image.len; ri++) {
        MargaretMemAllocatorRequestAllocImage* req = &requests->alloc_image.buf[ri];

        // todo: handle the alloc_image requests
    }

    return demands;

    return 0;
}

char* MargaretMemAllocator_get_host_visible_buffer_ptr(
    const MargaretMemAllocator* self, const MargaretMemAllocatorOccupantPosition* pos){
    const MargaretMemAllocatorOneBlock* bl = VecMargaretMemAllocatorOneBlock_at(&self->blocks, pos->device_mem_ind);
    assert(pos->occ_it->value.variant == MargaretMemoryOccupation_Buffer);
    assert(pos->occ_it->value.me.variant == MargaretMemoryOccupation_Buffer);
    check((self->mem_properties & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
    assert(bl->mapped_memory);
    return (char*)bl->mapped_memory + pos->occ_it->key;
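A usage sketch for the host-visible pointer above, assuming the block's memory type is HOST_VISIBLE and HOST_COHERENT so no explicit flush is needed; upload_bytes is illustrative, not part of the repo:

#include <string.h>

/* Sketch only: copy CPU-side data into a buffer occupant through the
 * mapped pointer returned by MargaretMemAllocator_get_host_visible_buffer_ptr. */
static void upload_bytes(const MargaretMemAllocator* alloc,
                         const MargaretMemAllocatorOccupantPosition* pos,
                         const void* src, U64 n_bytes) {
    char* dst = MargaretMemAllocator_get_host_visible_buffer_ptr(alloc, pos);
    memcpy(dst, src, n_bytes);
}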
@@ -12,6 +12,11 @@ typedef int VkStructureType;
const VkStructureType VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 100;
const VkStructureType VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 200;
const VkStructureType VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 3000;
const VkStructureType VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 645484;
const VkStructureType VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES = 14542;
const VkStructureType VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 145;
const VkStructureType VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 5324;


typedef int VkBufferCreateFlags;
typedef int VkBufferUsageFlags;
@@ -206,8 +211,6 @@ typedef struct VkPhysicalDeviceMaintenance4Properties {
    VkDeviceSize maxBufferSize;
} VkPhysicalDeviceMaintenance4Properties;

const VkStructureType VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES = 14542;

typedef struct VkPhysicalDeviceMaintenance3Properties {
    VkStructureType sType;
    void* pNext;
@@ -215,16 +218,12 @@ typedef struct VkPhysicalDeviceMaintenance3Properties {
    VkDeviceSize maxMemoryAllocationSize;
} VkPhysicalDeviceMaintenance3Properties;

const VkStructureType VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 145;

typedef struct VkPhysicalDeviceProperties2 {
    VkStructureType sType;
    void* pNext;
    VkPhysicalDeviceProperties properties;
} VkPhysicalDeviceProperties2;

const VkStructureType VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 5324;

void vkGetPhysicalDeviceProperties2(
    VkPhysicalDevice physicalDevice,
    VkPhysicalDeviceProperties2* pProperties);