What I have been doing for two months could actually have been done in one day, effortlessly

This commit is contained in:
Андреев Григорий 2025-12-11 02:12:21 +03:00
parent 6f418827dc
commit aadc346f43
8 changed files with 372 additions and 192 deletions


@@ -56,7 +56,7 @@ target_link_libraries(0_render_test -lvulkan -lwayland-client -lm -lxkbcommon -l
#add_executable(l2t0_2 src/l2/tests/data_structures/t0_2.c) // todo: I will get back to this
add_executable(l2t0 src/l2/tests/data_structures/t0.c)
add_executable(l2t0_3 src/l2/tests/data_structures/t0_3.c)
add_executable(l2t2 src/l2/tests/data_structures/t2.c)
#add_executable(l2t2 src/l2/tests/data_structures/t2.c)
#add_executable(l2t0 src/l2/tests/data_structures/t0.c)
#add_executable(l2t1 src/l2/tests/data_structures/t1.c)


@@ -22,6 +22,7 @@ void generate_margaret_eve_for_vulkan_utils() {
.T = cstr("BufRBTreeByLenRespAlign_SetMargaretFreeSegment")});
generate_eve_span_company_for_non_primitive_non_clonable(l, ns, cstr("MargaretImgAllocatorOneBlock"), true, false);
generate_eve_span_company_for_non_primitive_non_clonable(l, ns, cstr("MargaretBufAllocatorOneBlock"), true, false);
}


@@ -22,7 +22,6 @@ void generate_l1_5_template_instantiations_for_margaret(){
/* comparison takes additional U8 parameter */
.alternative_less = cstr("MargaretFreeSegment_less_len"),
.alternative_comp_set_name_embed = cstr("Len"),
.guest_data_T = cstr("U8"),
});
}


@@ -38,4 +38,10 @@ bool MargaretFreeSegment_less_resp_align(const MargaretFreeSegment* A, const Mar
#include "../../../gen/l1/eve/margaret/VecMargaretFreeSegment.h"
#include "../../../gen/l1/eve/margaret/OptionMargaretFreeSegment.h"
U64 margaret_bump_buffer_size_to_alignment(U64 A, U8 alignment_exp){
if (A & ((1ull << alignment_exp) - 1))
A = A - (A & ((1ull << alignment_exp) - 1)) + (1ull << alignment_exp);
return A;
}
#endif


@@ -8,187 +8,244 @@
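/* Growable buffer sub-allocator: each block owns one VkBuffer bound to its own VkDeviceMemory.
 * Free space across all blocks lives in one red-black set of MargaretFreeSegment ordered by length,
 * and every block keeps an occupants map from start offset to taken size. */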
typedef struct {
U64 block;
U64 start;
U64 len;
} MargaretBufAllocation;
typedef struct {
BufRBTree_MapU64ToU64 occupants;
U64 capacity;
U64 occupation_counter;
VkDeviceMemory mem_hand;
VkBuffer buf_hand;
void* mapped_memory;
} MargaretBufAllocatorOneBlock;
void MargaretBufAllocatorOneBlock_drop(MargaretBufAllocatorOneBlock self){
BufRBTree_MapU64ToU64_drop(self.occupants);
}
#include "../../../gen/l1/eve/margaret/VecMargaretBufAllocatorOneBlock.h"
#include "../../../gen/l1/VecAndSpan_U8.h"
#include "../../../gen/l1_5/eve/margaret/BufRBTreeByLen_SetMargaretFreeSegment.h"
typedef struct{
VecMargaretBufAllocatorOneBlock blocks;
BufRBTreeByLen_SetMargaretFreeSegment mem_free_space;
VkDevice device;
VkPhysicalDevice physical_device;
VkBufferUsageFlags usage;
U8 memory_type_id;
U8 alignment_exp;
bool host_visible;
} MargaretBufAllocator;
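/* Remove a known free segment from the free-space set; a zero-length segment is ignored. */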
void MargaretBufAllocator__erase_gap(MargaretBufAllocator* self, U64 block_id, U64 start, U64 len){
if (len == 0)
return;
bool eret = BufRBTreeByLen_SetMargaretFreeSegment_erase(&self->mem_free_space,
&(MargaretFreeSegment){.block = block_id, .start = start, .len = len});
assert(eret);
MargaretBufAllocatorOneBlock* BLOCK = VecMargaretBufAllocatorOneBlock_mat(&self->blocks, block_id);
BLOCK->occupation_counter += len;
assert(BLOCK->occupation_counter <= BLOCK->capacity);
}
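/* Record a range as a free segment in the free-space set; a zero-length range is ignored. */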
void MargaretBufAllocator__insert_gap(MargaretBufAllocator* self, U64 block_id, U64 start, U64 len){
if (len == 0)
return;
bool iret = BufRBTreeByLen_SetMargaretFreeSegment_insert(&self->mem_free_space,
(MargaretFreeSegment){.block = block_id, .start = start, .len = len});
assert(iret);
MargaretBufAllocatorOneBlock* BLOCK = VecMargaretBufAllocatorOneBlock_mat(&self->blocks, block_id);
assert(len <= BLOCK->occupation_counter);
BLOCK->occupation_counter -= len;
}
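/* Best-fit lookup: returns the smallest free segment whose length is at least req_size (req_size must already be aligned). */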
OptionMargaretFreeSegment MargaretBufAllocator__search_gap(MargaretBufAllocator* self, U64 req_size){
assert(req_size % (1ull << self->alignment_exp) == 0);
U64 sit = BufRBTreeByLen_SetMargaretFreeSegment_find_min_grtr_or_eq(&self->mem_free_space,
&(MargaretFreeSegment){.len = req_size});
if (sit == 0)
return None_MargaretFreeSegment();
return Some_MargaretFreeSegment(*BufRBTreeByLen_SetMargaretFreeSegment_at_iter(&self->mem_free_space, sit));
}
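/* Create a VkBuffer of the requested capacity backed by its own VkDeviceMemory, bind it, map it when host_visible, and append the result as a new block. */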
void MargaretBufAllocator__add_block(MargaretBufAllocator* self, U64 capacity){
VkBuffer buffer;
check(vkCreateBuffer(self->device, &(VkBufferCreateInfo){
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.size = capacity,
.usage = self->usage,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE
}, NULL, &buffer) == VK_SUCCESS);
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(self->device, buffer, &memory_requirements);
VkDeviceMemory memory;
check(vkAllocateMemory(self->device, &(VkMemoryAllocateInfo){
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = memory_requirements.size,
.memoryTypeIndex = self->memory_type_id
}, NULL, &memory) == VK_SUCCESS);
check(vkBindBufferMemory(self->device, buffer, memory, 0) == VK_SUCCESS);
void* mapped_memory = NULL;
if (self->host_visible) {
check(vkMapMemory(self->device, memory, 0, capacity, 0, &mapped_memory) == VK_SUCCESS);
}
VecMargaretBufAllocatorOneBlock_append(&self->blocks, (MargaretBufAllocatorOneBlock){
.occupants = BufRBTree_MapU64ToU64_new_reserved(1),
.capacity = capacity,
.occupation_counter = 0,
.mem_hand = memory, .buf_hand = buffer, .mapped_memory = mapped_memory
});
}
MargaretBufAllocator MargaretBufAllocator_new(
VkDevice device, VkPhysicalDevice physical_device,
VkBufferUsageFlags usage, U8 memory_type_id, U8 alignment_exp, bool host_visible, U64 initial_block_size
){
MargaretBufAllocator self = {
.blocks = VecMargaretBufAllocatorOneBlock_new(),
.mem_free_space = BufRBTreeByLen_SetMargaretFreeSegment_new_reserved(1),
.device = device, .physical_device = physical_device, .usage = usage, .memory_type_id = memory_type_id,
.alignment_exp = alignment_exp, .host_visible = host_visible
};
MargaretBufAllocator__add_block(&self, initial_block_size);
MargaretBufAllocator__insert_gap(&self, 0, 0, initial_block_size);
return self;
}
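/* Place an allocation of req_size bytes at the start of the given free segment: the gap shrinks accordingly and the occupied range is recorded in the block's occupants map. */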
void MargaretBufAllocator__put_buf_to_a_gap(MargaretBufAllocator* self, MargaretFreeSegment segment, U64 req_size){
assert(req_size <= segment.len);
MargaretBufAllocator__erase_gap(self, segment.block, segment.start, segment.len);
MargaretBufAllocator__insert_gap(self, segment.block,
segment.start + req_size, segment.start + segment.len - req_size);
BufRBTree_MapU64ToU64* images = &VecMargaretBufAllocatorOneBlock_mat(&self->blocks, segment.block)->occupants;
bool iret = BufRBTree_MapU64ToU64_insert(images, segment.start, req_size);
assert(iret);
}
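/* Unused space between this allocation and the previous occupant in the same block (or the block start). */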
U64Segment MargaretBufAllocator__get_left_free_space(
const MargaretBufAllocator* self, const MargaretBufAllocation* allocation){
const MargaretBufAllocatorOneBlock* block = VecMargaretBufAllocatorOneBlock_at(&self->blocks, allocation->block);
U64 occ_start = allocation->start;
U64 prev_occ_it = BufRBTree_MapU64ToU64_find_max_less(&block->occupants, allocation->start);
if (prev_occ_it != 0) {
U64 prev_occ_start;
U64 prev_occ_taken_size;
BufRBTree_MapU64ToU64_at_iter(&block->occupants, prev_occ_it, &prev_occ_start, &prev_occ_taken_size);
assert(prev_occ_start + prev_occ_taken_size <= occ_start);
return (U64Segment){
.start = prev_occ_start + prev_occ_taken_size,
.len = occ_start - (prev_occ_start + prev_occ_taken_size)};
}
return (U64Segment){.start = 0, .len = occ_start};
}
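/* Unused space between this allocation and the next occupant in the same block (or the block end). */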
U64Segment MargaretBufAllocator__get_right_free_space(
const MargaretBufAllocator* self, const MargaretBufAllocation* allocation){
const MargaretBufAllocatorOneBlock* block = VecMargaretBufAllocatorOneBlock_at(&self->blocks, allocation->block);
U64 occ_start = allocation->start;
U64 occ_taken_size = allocation->len;
U64 next_occ_it = BufRBTree_MapU64ToU64_find_min_grtr(&block->occupants, allocation->start);
if (next_occ_it != 0) {
U64 next_occ_start;
U64 next_occ_taken_size;
BufRBTree_MapU64ToU64_at_iter(&block->occupants, next_occ_it, &next_occ_start, &next_occ_taken_size);
assert(occ_start + occ_taken_size <= next_occ_start);
return (U64Segment){.start = occ_start + occ_taken_size, .len = next_occ_start - (occ_start + occ_taken_size)};
}
return (U64Segment){.start = occ_start + occ_taken_size, .len = block->capacity - (occ_start + occ_taken_size)};
}
void MargaretBufAllocator_drop(MargaretBufAllocator self){
for (size_t bi = 0; bi < self.blocks.len; bi++) {
vkDestroyBuffer(self.device, self.blocks.buf[bi].buf_hand, NULL);
vkFreeMemory(self.device, self.blocks.buf[bi].mem_hand, NULL);
}
VecMargaretBufAllocatorOneBlock_drop(self.blocks);
BufRBTreeByLen_SetMargaretFreeSegment_drop(self.mem_free_space);
}
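/* Give an allocation back: its bytes are merged with the free space directly to its left and right into one gap. */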
void MargaretBufAllocator_free(MargaretBufAllocator* self, MargaretBufAllocation allocation){
U64Segment left_free_space = MargaretBufAllocator__get_left_free_space(self, &allocation);
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, &allocation);
MargaretBufAllocator__erase_gap(self, allocation.block, left_free_space.start, left_free_space.len);
MargaretBufAllocator__erase_gap(self, allocation.block, right_free_space.start, right_free_space.len);
MargaretBufAllocator__insert_gap(self, allocation.block,
left_free_space.start,
right_free_space.start + right_free_space.len - left_free_space.start);
}
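/* Allocate req_size bytes (rounded up to the allocator alignment); if no existing gap is large enough, a new and larger block is created first. */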
NODISCARD MargaretBufAllocation MargaretBufAllocator_alloc(MargaretBufAllocator* self, U64 req_size){
req_size = margaret_bump_buffer_size_to_alignment(req_size, self->alignment_exp);
// void MargaretMemAllocator__shrink_some_buffer(
// MargaretMemAllocator* self, RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it, size_t smaller_size
// ){
// ListNodeMargaretMemAllocatorOneBlock* block_it = occ_it->value.block;
// MargaretMAOccupant* occ_me = &occ_it->value.me;
// assert(occ_me->variant == MargaretMemoryOccupation_Buffer);
// assert(occ_me->buf.capacity >= smaller_size);
// U64 buf_start = occ_it->key;
// U64 buf_taken_size = occ_it->value.taken_size;
// VkBuffer shorter_buf;
// check(vkCreateBuffer(self->device, &(VkBufferCreateInfo){
// .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
// .size = smaller_size,
// .usage = occ_me->buf.usage_flags,
// .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
// }, NULL, &shorter_buf) == VK_SUCCESS);
// VkMemoryRequirements shorter_buf_req;
// vkGetBufferMemoryRequirements(self->device, shorter_buf, &shorter_buf_req);
// check(U64_is_2pow(shorter_buf_req.alignment));
// check((shorter_buf_req.memoryTypeBits & self->memory_type_id));
// check((buf_start & (shorter_buf_req.alignment - 1)) == 0)
// check(shorter_buf_req.size <= buf_taken_size);
//
// U64Segment right_free_space = MargaretMemAllocatorOneBlock_get_right_free_space(&block_it->el, occ_it);
// MargaretMemAllocator__erase_gap(self, block_it, right_free_space.start, right_free_space.len);
// MargaretMemAllocator__insert_gap(self, block_it,
// buf_start + shorter_buf_req.size,
// right_free_space.len + (buf_taken_size - shorter_buf_req.size));
//
// vkDestroyBuffer(self->device, occ_me->buf.buffer, NULL);
// occ_it->value.taken_size = shorter_buf_req.size;
// occ_me->buf.buffer = shorter_buf;
// occ_me->buf.capacity = smaller_size;
// }
VkPhysicalDeviceMaintenance3Properties maintenance3_properties = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
};
VkPhysicalDeviceProperties2 properties = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
.pNext = &maintenance3_properties,
};
vkGetPhysicalDeviceProperties2(self->physical_device, &properties);
// MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
// MargaretMemAllocator* self, MargaretMemAllocatorRequests* requests
// ){
check(req_size <= maintenance3_properties.maxMemoryAllocationSize);
OptionMargaretFreeSegment free_gap = MargaretBufAllocator__search_gap(self, req_size);
if (free_gap.variant == Option_None) {
assert(self->blocks.len > 0);
U64 pitch = self->blocks.buf[self->blocks.len - 1].capacity;
// Old blocks remain intact
U64 new_capacity = MAX_U64(req_size, MIN_U64(2 * pitch, maintenance3_properties.maxMemoryAllocationSize));
MargaretBufAllocator__add_block(self, new_capacity);
U64 bid = self->blocks.len;
MargaretBufAllocator__insert_gap(self, bid, req_size, new_capacity - req_size);
MargaretBufAllocatorOneBlock* block = VecMargaretBufAllocatorOneBlock_mat(&self->blocks, free_gap.some.block);
block->occupation_counter = req_size;
bool iret = BufRBTree_MapU64ToU64_insert(&block->occupants, 0, req_size);
assert(iret);
}
//
// /* We first try to do all the expand_buf requests, that COULD be done using method 1 */
// for (U64 rr = 0; rr < requests->expand_buf.len;) {
// U64 new_size = requests->expand_buf.buf[rr].new_size;
// RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it = requests->expand_buf.buf[rr].occ_it;
//
// U64 occ_start = occ_it->key;
// assert(occ_it->value.me.variant == MargaretMemoryOccupation_Buffer);
// MargaretMemoryOccupationBuffer* buf = &occ_it->value.me.buf;
//
// /* Method 1 */
// U64Segment right_free_space = MargaretMemAllocatorOneBlock_get_right_free_space(
// &occ_it->value.block->el, occ_it);
//
// VkBuffer temp_buf_extension;
// check (vkCreateBuffer(self->device, &(VkBufferCreateInfo){
// .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
// .size = new_size,
// .usage = buf->usage_flags,
// .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
// }, NULL, &temp_buf_extension) == VK_SUCCESS);
// VkMemoryRequirements temp_buf_extension_req;
// vkGetBufferMemoryRequirements(self->device, temp_buf_extension, &temp_buf_extension_req);
// check(U64_is_2pow(temp_buf_extension_req.alignment));
// check((temp_buf_extension_req.memoryTypeBits & (1ull << self->memory_type_id)) > 0)
// if ((occ_start + temp_buf_extension_req.size > right_free_space.start + right_free_space.len) ||
// ((occ_start & (temp_buf_extension_req.alignment - 1)) != 0)
// ){
// vkDestroyBuffer(self->device, temp_buf_extension, NULL);
// rr++;
// continue;
// }
// MargaretMemAllocator__erase_gap(self, occ_it->value.block, right_free_space.start, right_free_space.len);
// MargaretMemAllocator__insert_gap(self, occ_it->value.block,
// occ_start + temp_buf_extension_req.size,
// right_free_space.start + right_free_space.len - (occ_start + temp_buf_extension_req.size));
// VecMargaretMABufferExpansionRecord_append(&buffer_expansion_record, (MargaretMABufferExpansionRecord){
// .old_capacity = buf->capacity, .occ_it = occ_it
// });
// /* Success */
// vkDestroyBuffer(self->device, buf->buffer, NULL);
// buf->capacity = new_size;
// buf->buffer = temp_buf_extension;
// occ_it->value.taken_size = temp_buf_extension_req.size;
// VecMargaretMemAllocatorRequestResizeBuffer_unordered_pop(&requests->expand_buf, rr);
// MargaretMemAllocator__bind_buffer_memory(self, occ_it);
// }
//
// check(vkResetCommandBuffer(self->command_buffer, 0) == VK_SUCCESS);
// check(vkBeginCommandBuffer(self->command_buffer, &(VkCommandBufferBeginInfo){
// .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO
// }) == VK_SUCCESS);
// MargaretMemAllocatorDemands demands = 0;
//
// for (U64 ri = 0; ri < requests->expand_buf.len; ri++) {
// U64 larger_size = requests->expand_buf.buf[ri].new_size;
// RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it = requests->expand_buf.buf[ri].occ_it;
// assert(occ_it->value.me.variant == MargaretMemoryOccupation_Buffer);
// assert(larger_size >= occ_it->value.me.buf.capacity);
//
// VkBuffer bigger_buffer;
// check(vkCreateBuffer(self->device, &(VkBufferCreateInfo){
// .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
// .size = larger_size,
// .usage = occ_it->value.me.buf.usage_flags,
// .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
// }, NULL, &bigger_buffer) == VK_SUCCESS);
// VkMemoryRequirements mem_requirements;
// vkGetBufferMemoryRequirements(self->device, bigger_buffer, &mem_requirements);
//
// check(U64_is_2pow(mem_requirements.alignment));
// U8 alignment_exp = U64_2pow_log(mem_requirements.alignment);
// OptionMargaretFreeMemSegment free_gap =
// MargaretMemFreeSpaceManager_search(&self->mem_free_space, alignment_exp, mem_requirements.size);
// if (free_gap.variant == Option_None) {
// vkDestroyBuffer(self->device, bigger_buffer, NULL);
// return MargaretMemAllocator_request_needs_defragmentation(self, requests, buffer_expansion_record, 0, 0);
// }
//
// RBTreeNode_KVPU64ToMargaretMAOccupation* replacer = safe_malloc(sizeof(RBTreeNode_KVPU64ToMargaretMAOccupation));
// RBTree_MapU64ToMargaretMAOccupation* OLD_TREE = &occ_it->value.block->el.occupied_memory;
// RBTree_steal_neighbours(&OLD_TREE->root, OLD_TREE->NIL, &occ_it->base, &replacer->base);
// replacer->key = occ_it->key;
// replacer->value = occ_it->value;
// assert(replacer->value.me.variant == MargaretMemoryOccupation_Buffer);
// occ_it->value.me.buf.buffer = bigger_buffer;
// occ_it->value.me.buf.capacity = larger_size;
//
// MargaretMemAllocator__add_occupant_node_given_gap_any_type(self, occ_it, free_gap.some, mem_requirements.size, alignment_exp);
// MargaretMemAllocator__bind_buffer_memory(self, occ_it);
//
// VecMargaretMANewMovedBufRecord_append(&self->old_moved_buffers,
// (MargaretMANewMovedBufRecord){.replacement = replacer, .my_occ_it = occ_it});
// if (replacer->value.me.buf.preserve_at_quiet) {
// demands = MARGARET_MA_DEMANDS_CMD_BUFFER_BIT;
// vkCmdCopyBuffer(self->command_buffer, replacer->value.me.buf.buffer, bigger_buffer,
// 1, &(VkBufferCopy){0, 0, replacer->value.me.buf.capacity});
// }
// }
//
// for (U64 ri = 0; ri < requests->alloc_buf.len; ri++) {
// MargaretMemAllocatorRequestAllocBuffer* req = &requests->alloc_buf.buf[ri];
//
// VkBuffer fresh_buf;
// check(vkCreateBuffer(self->device, &(VkBufferCreateInfo){
// .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
// .size = req->allocation_size,
// .usage = req->usage,
// .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
// }, NULL, &fresh_buf) == VK_SUCCESS);
// VkMemoryRequirements mem_requirements;
// vkGetBufferMemoryRequirements(self->device, fresh_buf, &mem_requirements);
// check(U64_is_2pow(mem_requirements.alignment));
// U8 alignment_exp = U64_2pow_log(mem_requirements.alignment);
// OptionMargaretFreeMemSegment free_gap =
// MargaretMemFreeSpaceManager_search(&self->mem_free_space, alignment_exp, mem_requirements.size);
// if (free_gap.variant == Option_None) {
// vkDestroyBuffer(self->device, fresh_buf, NULL);
// return MargaretMemAllocator_request_needs_defragmentation(self, requests, buffer_expansion_record, 0, 0);
// }
//
// RBTreeNode_KVPU64ToMargaretMAOccupation* new_node = req->new_node; /* It was allocated for us */
// new_node->value.me = (MargaretMAOccupant){.variant = MargaretMemoryOccupation_Buffer, .buf = {
// .buffer = fresh_buf, .capacity = req->allocation_size, .preserve_at_quiet = req->allocation_size,
// .usage_flags = req->usage
// }};
// MargaretMemAllocator__add_occupant_node_given_gap_any_type(self, new_node, free_gap.some, mem_requirements.size, alignment_exp);
// MargaretMemAllocator__bind_buffer_memory(self, new_node);
// }
//
// MargaretMemAllocatorRequests_sink(requests);
// return demands;
// }
MargaretBufAllocator__put_buf_to_a_gap(self, free_gap.some, req_size);
return (MargaretBufAllocation){.block = free_gap.some.block, .start = free_gap.some.start, .len = req_size};
}
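/* Shrink an allocation in place; the released tail is merged into the free space to its right. */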
void MargaretBufAllocator_shrink(MargaretBufAllocator* self, MargaretBufAllocation* allocation, U64 smaller_size){
smaller_size = margaret_bump_buffer_size_to_alignment(smaller_size, self->alignment_exp);
assert(smaller_size > 0);
assert(smaller_size <= allocation->len);
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, allocation);
MargaretBufAllocator__erase_gap(self, allocation->block, right_free_space.start, right_free_space.len);
MargaretBufAllocator__insert_gap(self, allocation->block,
allocation->start + smaller_size,
right_free_space.len + (allocation->len - smaller_size));
allocation->len = smaller_size;
}
/* This may actually return a 'null' MargaretBufAllocation: if the .len field of the return value is zero,
 * the expansion was done in place, the `allocation` argument was updated with the new size, and nothing new
 * was allocated. If the .len field is non-zero, a valid new MargaretBufAllocation was returned and the
 * `allocation` argument was left untouched; it remains a valid object that you must free yourself.
 */
NODISCARD MargaretBufAllocation MargaretBufAllocator_expand(MargaretBufAllocator* self, MargaretBufAllocation* allocation, U64 bigger_size){
bigger_size = margaret_bump_buffer_size_to_alignment(bigger_size, self->alignment_exp);
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, allocation);
if (allocation->start + bigger_size > right_free_space.start + right_free_space.len){
return MargaretBufAllocator_alloc(self, bigger_size);
}
MargaretBufAllocator__erase_gap(self, allocation->block, right_free_space.start, right_free_space.len);
MargaretBufAllocator__insert_gap(self, allocation->block,
allocation->start + bigger_size,
right_free_space.len + (allocation->len - bigger_size));
allocation->len = bigger_size;
return (MargaretBufAllocation){0};
}
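/* Illustrative caller-side sketch (not part of this header; `allocator`, `vertex_buf` and the size 4096
 * are hypothetical names/values) showing how the return value of MargaretBufAllocator_expand is meant
 * to be consumed:
 *
 *     MargaretBufAllocation grown = MargaretBufAllocator_expand(&allocator, &vertex_buf, 4096);
 *     if (grown.len != 0) {
 *         // In-place growth was impossible: `grown` is a fresh allocation elsewhere.
 *         // Copy or re-upload the old contents as needed, then release the old range.
 *         MargaretBufAllocator_free(&allocator, vertex_buf);
 *         vertex_buf = grown;
 *     }
 *     // else: vertex_buf.len was already bumped to the (aligned) new size in place.
 */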
#endif


@@ -230,8 +230,7 @@ void MargaretMemFreeSpaceManager_drop(MargaretMemFreeSpaceManager self){
VecU8_drop(self.set_present);
}
void MargaretMemFreeSpaceManager_erase(
MargaretMemFreeSpaceManager* man, U64 block, U64 start, U64 len){
void MargaretMemFreeSpaceManager_erase(MargaretMemFreeSpaceManager* man, U64 block, U64 start, U64 len){
if (len == 0)
return;
assert(man->set_present.len > 0);
@@ -246,8 +245,7 @@ void MargaretMemFreeSpaceManager_erase(
}
}
void MargaretMemFreeSpaceManager_insert(
MargaretMemFreeSpaceManager* man, U64 block, U64 start, U64 len){
void MargaretMemFreeSpaceManager_insert(MargaretMemFreeSpaceManager* man, U64 block, U64 start, U64 len){
if (len == 0)
return;
assert(man->set_present.len > 0); /* MargaretMemFreeSpaceManager will do that for us with 2^3 */
@@ -286,7 +284,6 @@ OptionMargaretFreeSegment MargaretMemFreeSpaceManager_search(
/* The VkDevice and VkPhysicalDevice handles are stored here and kept for the allocator's lifetime. Don't forget that, please */
typedef struct {
U64 total_capacity;
VecMargaretImgAllocatorOneBlock blocks;
MargaretMemFreeSpaceManager mem_free_space;
VkDevice device;
@@ -308,26 +305,23 @@ void MargaretImgAllocator__insert_gap(MargaretImgAllocator* self, U64 block_id,
BLOCK->occupation_counter -= len;
}
/* Returns id of the new block */
U64 MargaretImgAllocator__add_block(MargaretImgAllocator* self, U64 capacity){
void MargaretImgAllocator__add_block(MargaretImgAllocator* self, U64 capacity){
VkDeviceMemory memory;
check(vkAllocateMemory(self->device, &(VkMemoryAllocateInfo){
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = capacity, .memoryTypeIndex = self->memory_type_id
}, NULL, &memory) == VK_SUCCESS);
U64 bid = self->blocks.len;
VecMargaretImgAllocatorOneBlock_append(&self->blocks, (MargaretImgAllocatorOneBlock){
.images = BufRBTree_MapU64ToU64_new_reserved(1),
.capacity = capacity,
.occupation_counter = 0,
.mem_hand = memory,
.mapped_memory = NULL /* not supported */});
return bid;
}
MargaretImgAllocator MargaretImgAllocator_new(
VkDevice device, VkPhysicalDevice physical_device, U8 memory_type_id, U64 initial_block_size){
VkDevice device, VkPhysicalDevice physical_device, U8 memory_type_id, U64 initial_block_size
){
MargaretImgAllocator self = {
.blocks = VecMargaretImgAllocatorOneBlock_new(),
.mem_free_space = MargaretMemFreeSpaceManager_new(),
@@ -404,6 +398,9 @@ U64Segment MargaretImgAllocator__get_right_free_space(
}
void MargaretImgAllocator_drop(MargaretImgAllocator self){
for (size_t bi = 0; bi < self.blocks.len; bi++) {
vkFreeMemory(self.device, self.blocks.buf[bi].mem_hand, NULL);
}
VecMargaretImgAllocatorOneBlock_drop(self.blocks);
MargaretMemFreeSpaceManager_drop(self.mem_free_space);
}
@@ -421,7 +418,7 @@ void MargaretImgAllocator_free(MargaretImgAllocator* self, MargaretImgAllocation
right_free_space.start + right_free_space.len - left_free_space.start);
}
MargaretImgAllocation MargaretImgAllocator_alloc(
NODISCARD MargaretImgAllocation MargaretImgAllocator_alloc(
MargaretImgAllocator* self, U64 width, U64 height, VkFormat format,
VkImageUsageFlags usage_flags, VkImageLayout current_layout
){
@@ -462,13 +459,14 @@ MargaretImgAllocation MargaretImgAllocator_alloc(
U64 pitch = self->blocks.buf[self->blocks.len - 1].capacity;
// Old blocks remain intact
U64 new_capacity = MAX_U64(mem_requirements.size, MIN_U64(2 * pitch, maintenance3_properties.maxMemoryAllocationSize));
U64 bid = MargaretImgAllocator__add_block(self, new_capacity);
MargaretImgAllocator__add_img_given_gap(self,
(MargaretFreeSegment){.block = bid, .start = 0, .len = new_capacity}, mem_requirements.size, alignment_exp);
MargaretImgAllocator__add_block(self, new_capacity);
U64 bid = self->blocks.len;
MargaretImgAllocator__insert_gap(self, bid, mem_requirements.size, new_capacity - mem_requirements.size);
bool iret = BufRBTree_MapU64ToU64_insert(&self->blocks.buf[bid].images, 0, mem_requirements.size);
VkDeviceMemory memory = VecMargaretImgAllocatorOneBlock_at(&self->blocks, free_gap.some.block)->mem_hand;
check(vkBindImageMemory(self->device, fresh_img, memory, 0) == VK_SUCCESS);
MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, free_gap.some.block);
block->occupation_counter = mem_requirements.size;
bool iret = BufRBTree_MapU64ToU64_insert(&block->images, 0, mem_requirements.size);
assert(iret);
check(vkBindImageMemory(self->device, fresh_img, block->mem_hand, 0) == VK_SUCCESS);
}
U64 aligned_pos = MargaretImgAllocator__add_img_given_gap(self, free_gap.some, mem_requirements.size, alignment_exp);


@@ -2,6 +2,7 @@
#define prototype1_src_l2_margaret_vulkan_memory_h
#include <vulkan/vulkan.h>
#include "vulkan_memory_claire.h"
#include "vulkan_images_claire.h"
#include "vulkan_buffer_claire.h"
#endif


@@ -235,6 +235,23 @@ void vkGetPhysicalDeviceProperties2(
typedef int VkCommandBufferResetFlags;
VkResult vkResetCommandBuffer(
VkCommandBuffer commandBuffer,
VkCommandBufferResetFlags flags);
typedef int VkCommandBufferUsageFlags;
typedef struct VkCommandBufferBeginInfo {
VkStructureType sType;
const void* pNext;
VkCommandBufferUsageFlags flags;
const void* pInheritanceInfo; /* will be NULL */
} VkCommandBufferBeginInfo;
VkResult vkBeginCommandBuffer(
VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo* pBeginInfo);
typedef int VkAccessFlags;
const VkAccessFlags VK_ACCESS_TRANSFER_READ_BIT = 0x100;
const VkAccessFlags VK_ACCESS_TRANSFER_WRITE_BIT = 0x100000;
@@ -242,9 +259,110 @@ const VkAccessFlags VK_ACCESS_TRANSFER_WRITE_BIT = 0x100000;
typedef int VkImageAspectFlags;
const VkImageAspectFlags VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001;
typedef struct VkImageSubresourceRange {
VkImageAspectFlags aspectMask;
uint32_t baseMipLevel;
uint32_t levelCount;
uint32_t baseArrayLayer;
uint32_t layerCount;
} VkImageSubresourceRange;
const uint32_t VK_QUEUE_FAMILY_IGNORED = (~0U);
#include "../../margaret/vulkan_memory_claire.h"
typedef struct VkImageMemoryBarrier {
VkStructureType sType;
const void* pNext;
VkAccessFlags srcAccessMask;
VkAccessFlags dstAccessMask;
VkImageLayout oldLayout;
VkImageLayout newLayout;
uint32_t srcQueueFamilyIndex;
uint32_t dstQueueFamilyIndex;
VkImage image;
VkImageSubresourceRange subresourceRange;
} VkImageMemoryBarrier;
typedef int VkPipelineStageFlags;
const VkPipelineStageFlags VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x1;
const VkPipelineStageFlags VK_PIPELINE_STAGE_TRANSFER_BIT = 0x2;
typedef int VkDependencyFlags;
void vkCmdPipelineBarrier(
VkCommandBuffer commandBuffer,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount,
const void* /* VkMemoryBarrier */ pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const void* /* VkBufferMemoryBarrier */ pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier* pImageMemoryBarriers);
typedef struct VkImageSubresourceLayers {
VkImageAspectFlags aspectMask;
uint32_t mipLevel;
uint32_t baseArrayLayer;
uint32_t layerCount;
} VkImageSubresourceLayers;
typedef struct VkOffset3D {
int32_t x;
int32_t y;
int32_t z;
} VkOffset3D;
typedef struct VkBufferImageCopy {
VkDeviceSize bufferOffset;
uint32_t bufferRowLength;
uint32_t bufferImageHeight;
VkImageSubresourceLayers imageSubresource;
VkOffset3D imageOffset;
VkExtent3D imageExtent;
} VkBufferImageCopy;
void vkCmdCopyBufferToImage(
VkCommandBuffer commandBuffer,
VkBuffer srcBuffer,
VkImage dstImage,
VkImageLayout dstImageLayout,
uint32_t regionCount,
const VkBufferImageCopy* pRegions);
typedef struct VkBufferCopy {
VkDeviceSize srcOffset;
VkDeviceSize dstOffset;
VkDeviceSize size;
} VkBufferCopy;
void vkCmdCopyBuffer(
VkCommandBuffer commandBuffer,
VkBuffer srcBuffer,
VkBuffer dstBuffer,
uint32_t regionCount,
const VkBufferCopy* pRegions);
typedef struct VkImageCopy {
VkImageSubresourceLayers srcSubresource;
VkOffset3D srcOffset;
VkImageSubresourceLayers dstSubresource;
VkOffset3D dstOffset;
VkExtent3D extent;
} VkImageCopy;
void vkCmdCopyImage(
VkCommandBuffer commandBuffer,
VkImage srcImage,
VkImageLayout srcImageLayout,
VkImage dstImage,
VkImageLayout dstImageLayout,
uint32_t regionCount,
const VkImageCopy* pRegions);
// #include "../../margaret/vulkan_memory_claire.h"
// #include "../../margaret/vulkan_me"
int main(){
return 0;