Saving progress. I am going insane just looking at this crap. November is over and the engine is still not done. I can't take this anymore.
parent 438015b842
commit dc67475e7a
@ -13,7 +13,7 @@ void generate_headers_for_r0_r1_r2_r3() {
// generate_eve_span_company_for_primitive(l, ns, cstr("GenericMeshInstance"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyMeshVertex"), true, true);
// generate_eve_span_company_for_primitive(l, ns, cstr("ShinyMeshInstance"), true, false);
generate_eve_span_company_for_non_primitive_clonable(l, ns, cstr("ShinyMeshInSceneTemplate"), true, false);
generate_eve_span_company_for_non_primitive_clonable(l, ns, cstr("ShinyMeshTopology"), true, false);
// generate_eve_span_company_for_primitive(l, ns, cstr("Pipeline0Spotlight"), true, false);
// generate_eve_span_company_for_primitive(l, ns, cstr("Pipeline0PointLight"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("Wimbzle"), true, false);

@ -39,7 +39,6 @@ void generate_util_templ_inst_for_vulkan_headers() {
generate_guarded_span_company_for_primitive(l, ns, cstr("VkImageView"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkFramebuffer"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkSemaphore"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkDescriptorPoolSize"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkBufferCopy"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkImageMemoryBarrier"), vulkan_dep, true, false);
}

@ -172,6 +172,9 @@
* const VkAllocationCallbacks* pAllocator)
*/

// todo: get rid of this whole VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT crap. MargaretMA is for non-host-visible
// todo: for staging buffers you better use MargaretBufferAllocator. Oh, yeah, I have yet to write them
#include "../../l1/core/int_primitives.h"

/* Demands + Warnings */
@ -360,7 +363,7 @@ typedef struct {
typedef struct {
RBTreeNode_KVPU64ToMargaretMAOccupation* replacement;
/* Pointer points to the same address, but the neighbours+key+value at that address are different.
* We relocated out node, replacing it with a NEW node that holds the old buffer value and tyhe old start U64
* We relocated out node, replacing it with a NEW node that holds the old buffer value and the old start U64
*/
RBTreeNode_KVPU64ToMargaretMAOccupation* my_occ_it;
} MargaretMANewMovedBufRecord;
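For context, a record of this shape is filed whenever a buffer is relocated during request handling; a minimal sketch of the append site (mirroring the code further down in this diff, with replacer holding the old node's contents and occ_it being the node now living at its new offset):

VecMargaretMANewMovedBufRecord_append(&self->old_moved_buffers,
    (MargaretMANewMovedBufRecord){.replacement = replacer, .my_occ_it = occ_it});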
@ -390,12 +393,6 @@ void MargaretMemFreeSpaceManager_drop(MargaretMemFreeSpaceManager self){
VecU8_drop(self.set_present);
}

// void MargaretMemFreeSpaceManager_sink(MargaretMemFreeSpaceManager* self){
// for (U8 ae = 0; ae < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP; ae++)
// if (self->free_space_in_memory[ae].variant == Option_Some)
// BufRBTreeByLenRespAlign_SetMargaretFreeMemSegment_sink(&self->free_space_in_memory[ae].some);
// }

void MargaretMemFreeSpaceManager_erase(
MargaretMemFreeSpaceManager* man, ListNodeMargaretMemAllocatorOneBlock* dev_mem_block, U64 start, U64 len){
if (len == 0)
@ -466,24 +463,26 @@ typedef struct {

MargaretMemFreeSpaceManager mem_free_space;

VkMemoryPropertyFlags mem_properties;
U8 memory_type_id;
VkDevice device;
VkPhysicalDevice physical_device;
VkCommandBuffer command_buffer;
VkMemoryPropertyFlags mem_properties;
U8 memory_type_id;
} MargaretMemAllocator;

MargaretMemAllocator MargaretMemAllocator_new(
VkDevice device, VkPhysicalDevice physical_device, VkMemoryPropertyFlags mem_properties, U8 memory_type_id
){
VkDevice device, VkPhysicalDevice physical_device, VkCommandBuffer command_buffer,
VkMemoryPropertyFlags mem_properties, U8 memory_type_id){
MargaretMemAllocator self = {
.blocks = ListMargaretMemAllocatorOneBlock_new(),
.old_blocks = ListMargaretMemAllocatorOneBlock_new(),
.old_moved_buffers = VecMargaretMANewMovedBufRecord_new(),
.mem_free_space = MargaretMemFreeSpaceManager_new(),
.device = device,
.physical_device = physical_device,
.command_buffer = command_buffer,
.memory_type_id = memory_type_id,
.mem_properties = mem_properties,
.device = device,
.physical_device = physical_device
};

return self;
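For reference, the constructor now takes the command buffer up front. A minimal call under the new signature might look as follows (names mirror the main() changes later in this diff; the memory-type index is assumed to have been picked already):

MargaretMemAllocator device_local_mem = MargaretMemAllocator_new(
    device, physical_device, device_local_mem_mv_command_buf,
    VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, memory_type_id);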
@ -725,42 +724,52 @@ void MargaretMemAllocator__keep_building_up_cur_block(
|
||||
VkMemoryRequirements mem_requirements, RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it,
|
||||
U64 maxMemoryAllocationSize
|
||||
){
|
||||
check(U64_is_2pow(mem_requirements.alignment));
|
||||
if (mem_requirements.size > maxMemoryAllocationSize)
|
||||
abortf("Your object asks too much :(\n");
|
||||
U64 af = margaret_get_alignment_left_padding(*cur_block_size_needed, U64_2pow_log(mem_requirements.alignment));
|
||||
if (*cur_block_size_needed + af + mem_requirements.size > maxMemoryAllocationSize) {
|
||||
MargaretMemAllocator__insert_gap(self, *cur_block,
|
||||
*cur_block_size_needed, maxMemoryAllocationSize - *cur_block_size_needed);
|
||||
vkAllocateMemory(self->device, &(VkMemoryAllocateInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
|
||||
.memoryTypeIndex = self->memory_type_id,
|
||||
.allocationSize = maxMemoryAllocationSize}, NULL, &((*cur_block)->el.mem_hand));
|
||||
(*cur_block)->el.capacity = maxMemoryAllocationSize;
|
||||
*updated_total_capacity += maxMemoryAllocationSize;
|
||||
if (self->mem_properties & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
|
||||
vkMapMemory(self->device, (*cur_block)->el.mem_hand, 0, VK_WHOLE_SIZE, 0, &(*cur_block)->el.mapped_memory);
|
||||
}
|
||||
|
||||
*cur_block = (ListNodeMargaretMemAllocatorOneBlock*)safe_calloc(1, sizeof(ListNodeMargaretMemAllocatorOneBlock));
|
||||
(*cur_block)->el.occupied_memory = RBTree_MapU64ToMargaretMAOccupation_new();
|
||||
ListMargaretMemAllocatorOneBlock_insert_node(&self->blocks, *cur_block);
|
||||
|
||||
*cur_block_size_needed = 0;
|
||||
af = 0;
|
||||
check(U64_is_2pow(mem_requirements.alignment));
|
||||
if (mem_requirements.size > maxMemoryAllocationSize)
|
||||
abortf("Your object asks too much :(\n");
|
||||
U64 af = margaret_get_alignment_left_padding(*cur_block_size_needed, U64_2pow_log(mem_requirements.alignment));
|
||||
if (*cur_block_size_needed + af + mem_requirements.size > maxMemoryAllocationSize) {
|
||||
MargaretMemAllocator__insert_gap(self, *cur_block,
|
||||
*cur_block_size_needed, maxMemoryAllocationSize - *cur_block_size_needed);
|
||||
vkAllocateMemory(self->device, &(VkMemoryAllocateInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
|
||||
.memoryTypeIndex = self->memory_type_id,
|
||||
.allocationSize = maxMemoryAllocationSize}, NULL, &((*cur_block)->el.mem_hand));
|
||||
(*cur_block)->el.capacity = maxMemoryAllocationSize;
|
||||
*updated_total_capacity += maxMemoryAllocationSize;
|
||||
if (self->mem_properties & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
|
||||
vkMapMemory(self->device, (*cur_block)->el.mem_hand, 0, VK_WHOLE_SIZE, 0, &(*cur_block)->el.mapped_memory);
|
||||
}
|
||||
MargaretMemAllocator__insert_gap(self, *cur_block, *cur_block_size_needed, af);
|
||||
occ_it->key = *cur_block_size_needed + af;
|
||||
occ_it->value.block = *cur_block;
|
||||
occ_it->value.taken_size = mem_requirements.size;
|
||||
RBTree_MapU64ToMargaretMAOccupation_insert_node(&(*cur_block)->el.occupied_memory, occ_it);
|
||||
|
||||
/* Updating important counter */
|
||||
*cur_block_size_needed = *cur_block_size_needed + af + mem_requirements.size;
|
||||
*cur_block = (ListNodeMargaretMemAllocatorOneBlock*)safe_calloc(1, sizeof(ListNodeMargaretMemAllocatorOneBlock));
|
||||
(*cur_block)->el.occupied_memory = RBTree_MapU64ToMargaretMAOccupation_new();
|
||||
ListMargaretMemAllocatorOneBlock_insert_node(&self->blocks, *cur_block);
|
||||
|
||||
*cur_block_size_needed = 0;
|
||||
af = 0;
|
||||
}
|
||||
MargaretMemAllocator__insert_gap(self, *cur_block, *cur_block_size_needed, af);
|
||||
occ_it->key = *cur_block_size_needed + af;
|
||||
occ_it->value.block = *cur_block;
|
||||
occ_it->value.taken_size = mem_requirements.size;
|
||||
RBTree_MapU64ToMargaretMAOccupation_insert_node(&(*cur_block)->el.occupied_memory, occ_it);
|
||||
|
||||
/* Updating important counter */
|
||||
*cur_block_size_needed = *cur_block_size_needed + af + mem_requirements.size;
|
||||
}
|
||||
|
||||
void MargaretMemAllocator_request_needs_defragmentation(
|
||||
MargaretMemAllocator* self, VkCommandBuffer cmd_buff, MargaretMemAllocatorRequests* requests,
|
||||
void MargaretMemAllocator__bind_buffer_memory(const MargaretMemAllocator* self, RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it){
|
||||
assert(occ_it->value.me.variant == MargaretMemoryOccupation_Buffer);
|
||||
check(vkBindBufferMemory(self->device, occ_it->value.me.buf.buffer, occ_it->value.block->el.mem_hand, occ_it->key) == VK_SUCCESS);
|
||||
}
|
||||
|
||||
void MargaretMemAllocator__bind_image_memory(const MargaretMemAllocator* self, RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it){
|
||||
assert(occ_it->value.me.variant == MargaretMemoryOccupation_Image);
|
||||
check(vkBindImageMemory(self->device, occ_it->value.me.img.image, occ_it->value.block->el.mem_hand, occ_it->key) == VK_SUCCESS);
|
||||
}
|
||||
|
||||
MargaretMemAllocatorDemands MargaretMemAllocator_request_needs_defragmentation(
|
||||
MargaretMemAllocator* self, MargaretMemAllocatorRequests* requests,
|
||||
VecMargaretMABufferExpansionRecord buffer_expansion_record,
|
||||
size_t alloc_buf_requests_require_cancel, size_t alloc_img_requests_require_cancel){
|
||||
|
||||
@ -772,8 +781,8 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
.pNext = &maintenance3_properties,
|
||||
};
|
||||
vkGetPhysicalDeviceProperties2(self->physical_device, &properties);
|
||||
check(vkResetCommandBuffer(cmd_buff, 0) == VK_SUCCESS);
|
||||
check(vkBeginCommandBuffer(cmd_buff, &(VkCommandBufferBeginInfo){
|
||||
check(vkResetCommandBuffer(self->command_buffer, 0) == VK_SUCCESS);
|
||||
check(vkBeginCommandBuffer(self->command_buffer, &(VkCommandBufferBeginInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO
|
||||
}) == VK_SUCCESS);
|
||||
|
||||
@ -786,6 +795,7 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
assert(occ_it);
|
||||
U64 needed_capacity = occ_it->value.me.buf.capacity;
|
||||
MargaretMemAllocator__shrink_some_buffer(self, occ_it, old_capacity);
|
||||
MargaretMemAllocator__bind_buffer_memory(self, occ_it);
|
||||
VecMargaretMemAllocatorRequestResizeBuffer_append(&requests->expand_buf,
|
||||
(MargaretMemAllocatorRequestResizeBuffer){.new_size = needed_capacity, .occ_it = occ_it});
|
||||
}
|
||||
@ -862,6 +872,8 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
cur_block->el.occupied_memory = RBTree_MapU64ToMargaretMAOccupation_new();
|
||||
ListMargaretMemAllocatorOneBlock_insert_node(&self->blocks, cur_block);
|
||||
|
||||
MargaretMemAllocatorDemands demands = requests->expand_buf.len ? MARGARET_MA_DEMANDS_CMD_BUFFER_BIT : 0;
|
||||
|
||||
for (size_t i = 0; i < requests->expand_buf.len; i++) {
|
||||
U64 needed_buf_capacity = requests->expand_buf.buf[i].new_size;
|
||||
RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it = requests->expand_buf.buf[i].occ_it;
|
||||
@ -892,9 +904,10 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
MargaretMemAllocator__keep_building_up_cur_block(self,
|
||||
&cur_block_size_needed, &cur_block, &updated_total_capacity, mem_requirements,
|
||||
occ_it, maintenance3_properties.maxMemoryAllocationSize);
|
||||
MargaretMemAllocator__bind_buffer_memory(self, occ_it);
|
||||
|
||||
if (occ_it->value.me.buf.preserve_at_quiet) {
|
||||
vkCmdCopyBuffer(cmd_buff, replacer->value.me.buf.buffer, occ_it->value.me.buf.buffer, 1, &(VkBufferCopy){
|
||||
vkCmdCopyBuffer(self->command_buffer, replacer->value.me.buf.buffer, occ_it->value.me.buf.buffer, 1, &(VkBufferCopy){
|
||||
.srcOffset = 0, .dstOffset = 0, .size = replacer->value.me.buf.capacity});
|
||||
}
|
||||
}
|
||||
@ -924,6 +937,7 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
MargaretMemAllocator__keep_building_up_cur_block(self, &cur_block_size_needed,
|
||||
&cur_block, &updated_total_capacity, mem_requirements,
|
||||
occ_it, maintenance3_properties.maxMemoryAllocationSize);
|
||||
MargaretMemAllocator__bind_buffer_memory(self, occ_it);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < requests->alloc_image.len; i++) {
|
||||
@ -959,12 +973,15 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
MargaretMemAllocator__keep_building_up_cur_block(self, &cur_block_size_needed,
|
||||
&cur_block, &updated_total_capacity, mem_requirements,
|
||||
occ_it, maintenance3_properties.maxMemoryAllocationSize);
|
||||
MargaretMemAllocator__bind_image_memory(self, occ_it);
|
||||
}
|
||||
|
||||
/* We move blocks here, but not because some request asked, no, we migrate all unmoved blocks from */
|
||||
for (ListNodeMargaretMemAllocatorOneBlock* block_it = self->old_blocks.first; block_it; block_it = block_it->next) {
|
||||
RBTree_MapU64ToMargaretMAOccupation* OLD_TREE = &block_it->el.occupied_memory;
|
||||
RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it = RBTree_MapU64ToMargaretMAOccupation_find_min(OLD_TREE);
|
||||
if (occ_it)
|
||||
demands |= MARGARET_MA_DEMANDS_CMD_BUFFER_BIT;
|
||||
while (occ_it) {
|
||||
if (occ_it->value.me.variant == MargaretMemoryOccupation_Buffer && occ_it->value.me.buf.moved_already) {
|
||||
occ_it = RBTree_MapU64ToMargaretMAOccupation_find_next(OLD_TREE, occ_it);
|
||||
@ -992,9 +1009,10 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
MargaretMemAllocator__keep_building_up_cur_block(self, &cur_block_size_needed,
|
||||
&cur_block, &updated_total_capacity, mem_requirements,
|
||||
occ_it, maintenance3_properties.maxMemoryAllocationSize);
|
||||
MargaretMemAllocator__bind_buffer_memory(self, occ_it);
|
||||
|
||||
if (occ_it->value.me.buf.preserve_at_quiet) {
|
||||
vkCmdCopyBuffer(cmd_buff, replacer->value.me.buf.buffer, occ_it->value.me.buf.buffer, 1, &(VkBufferCopy){
|
||||
vkCmdCopyBuffer(self->command_buffer, replacer->value.me.buf.buffer, occ_it->value.me.buf.buffer, 1, &(VkBufferCopy){
|
||||
.srcOffset = 0, .dstOffset = 0, .size = replacer->value.me.buf.capacity});
|
||||
}
|
||||
} else {
|
||||
@ -1021,6 +1039,7 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
MargaretMemAllocator__keep_building_up_cur_block(self, &cur_block_size_needed,
|
||||
&cur_block, &updated_total_capacity, mem_requirements,
|
||||
occ_it, maintenance3_properties.maxMemoryAllocationSize);
|
||||
MargaretMemAllocator__bind_image_memory(self, occ_it);
|
||||
|
||||
if (occ_it->value.me.img.preserve_at_quiet) {
|
||||
VkImageMemoryBarrier first_barriers[2] = {
|
||||
@ -1053,10 +1072,10 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
},
|
||||
}
|
||||
};
|
||||
vkCmdPipelineBarrier(cmd_buff, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
|
||||
vkCmdPipelineBarrier(self->command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
|
||||
0 /* Flags */, 0, NULL /* not here */, 0, NULL /* not here */,
|
||||
2, first_barriers);
|
||||
vkCmdCopyImage(cmd_buff, replacer->value.me.img.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
|
||||
vkCmdCopyImage(self->command_buffer, replacer->value.me.img.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
|
||||
fresh_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &(VkImageCopy){
|
||||
.srcSubresource = (VkImageSubresourceLayers){
|
||||
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel = 0,
|
||||
@ -1087,7 +1106,7 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
.levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
|
||||
},
|
||||
};
|
||||
vkCmdPipelineBarrier(cmd_buff,
|
||||
vkCmdPipelineBarrier(self->command_buffer,
|
||||
VK_PIPELINE_STAGE_TRANSFER_BIT, occ_it->value.me.img.current_dest_stage_mask,
|
||||
0 /* Flags*/, 0, NULL, 0, NULL, 1, &finishing_barrier);
|
||||
}
|
||||
@ -1114,10 +1133,12 @@ void MargaretMemAllocator_request_needs_defragmentation(
|
||||
}
|
||||
self->total_capacity = updated_total_capacity;
|
||||
MargaretMemAllocatorRequests_sink(requests);
|
||||
assert(self->old_moved_buffers.len == 0);
|
||||
return demands | MARGARET_MA_DEMANDS_DEFRAGMENTATION_BIT;
|
||||
}
|
||||
|
||||
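Since these functions now return a MargaretMemAllocatorDemands bitmask instead of taking a VkCommandBuffer, the caller is expected to inspect the returned bits. A hedged sketch of what that check might look like (the meaning of each bit is inferred from this diff, not spelled out in it):

MargaretMemAllocatorDemands d = MargaretMemAllocator_carry_out_request(&allocator, &requests);
if (d & MARGARET_MA_DEMANDS_CMD_BUFFER_BIT) {
    /* presumably: the allocator recorded transfer work into its own command buffer; submit it before using the resources */
}
if (d & MARGARET_MA_DEMANDS_DEFRAGMENTATION_BIT) {
    /* presumably: a defragmentation pass ran and resources were rebound */
}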
MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
|
||||
MargaretMemAllocator* self, VkCommandBuffer cmd_buff, MargaretMemAllocatorRequests* requests
|
||||
MargaretMemAllocator* self, MargaretMemAllocatorRequests* requests
|
||||
){
|
||||
MargaretMemAllocator_wipe_old(self);
|
||||
for (size_t i = 0; i < requests->free_buf.len; i++) {
|
||||
@ -1133,6 +1154,7 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
|
||||
for (size_t i = 0; i < requests->shrink_buf.len; i++) {
|
||||
MargaretMemAllocatorRequestResizeBuffer req = (requests->shrink_buf.buf[i]);
|
||||
MargaretMemAllocator__shrink_some_buffer(self, req.occ_it, req.new_size);
|
||||
MargaretMemAllocator__bind_buffer_memory(self, req.occ_it);
|
||||
}
|
||||
|
||||
VecMargaretMABufferExpansionRecord buffer_expansion_record = VecMargaretMABufferExpansionRecord_new();
|
||||
@ -1182,10 +1204,11 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
|
||||
buf->buffer = temp_buf_extension;
|
||||
occ_it->value.taken_size = temp_buf_extension_req.size;
|
||||
VecMargaretMemAllocatorRequestResizeBuffer_unordered_pop(&requests->expand_buf, rr);
|
||||
MargaretMemAllocator__bind_buffer_memory(self, occ_it);
|
||||
}
|
||||
|
||||
check(vkResetCommandBuffer(cmd_buff, 0) == VK_SUCCESS);
|
||||
check(vkBeginCommandBuffer(cmd_buff, &(VkCommandBufferBeginInfo){
|
||||
check(vkResetCommandBuffer(self->command_buffer, 0) == VK_SUCCESS);
|
||||
check(vkBeginCommandBuffer(self->command_buffer, &(VkCommandBufferBeginInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO
|
||||
}) == VK_SUCCESS);
|
||||
MargaretMemAllocatorDemands demands = 0;
|
||||
@ -1212,9 +1235,7 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
|
||||
MargaretMemFreeSpaceManager_search(&self->mem_free_space, alignment_exp, mem_requirements.size);
|
||||
if (free_gap.variant == Option_None) {
|
||||
vkDestroyBuffer(self->device, bigger_buffer, NULL);
|
||||
MargaretMemAllocator_request_needs_defragmentation(self, cmd_buff, requests, buffer_expansion_record, 0, 0);
|
||||
assert(self->old_moved_buffers.len == 0);
|
||||
return MARGARET_MA_DEMANDS_DEFRAGMENTATION;
|
||||
return MargaretMemAllocator_request_needs_defragmentation(self, requests, buffer_expansion_record, 0, 0);
|
||||
}
|
||||
|
||||
RBTreeNode_KVPU64ToMargaretMAOccupation* replacer = safe_malloc(sizeof(RBTreeNode_KVPU64ToMargaretMAOccupation));
|
||||
@ -1227,12 +1248,13 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
|
||||
occ_it->value.me.buf.capacity = larger_size;
|
||||
|
||||
MargaretMemAllocator__add_occupant_node_given_gap_any_type(self, occ_it, free_gap.some, mem_requirements.size, alignment_exp);
|
||||
MargaretMemAllocator__bind_buffer_memory(self, occ_it);
|
||||
|
||||
VecMargaretMANewMovedBufRecord_append(&self->old_moved_buffers,
|
||||
(MargaretMANewMovedBufRecord){.replacement = replacer, .my_occ_it = occ_it});
|
||||
if (replacer->value.me.buf.preserve_at_quiet) {
|
||||
demands = MARGARET_MA_DEMANDS_CMD_BUFFER_BIT;
|
||||
vkCmdCopyBuffer(cmd_buff, replacer->value.me.buf.buffer, bigger_buffer,
|
||||
vkCmdCopyBuffer(self->command_buffer, replacer->value.me.buf.buffer, bigger_buffer,
|
||||
1, &(VkBufferCopy){0, 0, replacer->value.me.buf.capacity});
|
||||
}
|
||||
}
|
||||
@ -1255,9 +1277,7 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
|
||||
MargaretMemFreeSpaceManager_search(&self->mem_free_space, alignment_exp, mem_requirements.size);
|
||||
if (free_gap.variant == Option_None) {
|
||||
vkDestroyBuffer(self->device, fresh_buf, NULL);
|
||||
MargaretMemAllocator_request_needs_defragmentation(self, cmd_buff, requests, buffer_expansion_record, 0, 0);
|
||||
assert(self->old_moved_buffers.len == 0);
|
||||
return MARGARET_MA_DEMANDS_DEFRAGMENTATION;
|
||||
return MargaretMemAllocator_request_needs_defragmentation(self, requests, buffer_expansion_record, 0, 0);
|
||||
}
|
||||
|
||||
RBTreeNode_KVPU64ToMargaretMAOccupation* new_node = safe_malloc(sizeof(RBTreeNode_KVPU64ToMargaretMAOccupation));
|
||||
@ -1266,6 +1286,7 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
|
||||
.usage_flags = req->usage
|
||||
}};
|
||||
MargaretMemAllocator__add_occupant_node_given_gap_any_type(self, new_node, free_gap.some, mem_requirements.size, alignment_exp);
|
||||
MargaretMemAllocator__bind_buffer_memory(self, new_node);
|
||||
}
|
||||
|
||||
for (U64 ri = 0; ri < requests->alloc_image.len; ri++) {
|
||||
@ -1294,9 +1315,7 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
|
||||
MargaretMemFreeSpaceManager_search(&self->mem_free_space, alignment_exp, mem_requirements.size);
|
||||
if (free_gap.variant == Option_None) {
|
||||
vkDestroyImage(self->device, fresh_img, NULL);
|
||||
MargaretMemAllocator_request_needs_defragmentation(self, cmd_buff, requests, buffer_expansion_record, 0, 0);
|
||||
assert(self->old_moved_buffers.len == 0);
|
||||
return MARGARET_MA_DEMANDS_DEFRAGMENTATION;
|
||||
return MargaretMemAllocator_request_needs_defragmentation(self, requests, buffer_expansion_record, 0, 0);
|
||||
}
|
||||
|
||||
RBTreeNode_KVPU64ToMargaretMAOccupation* new_node = safe_malloc(sizeof(RBTreeNode_KVPU64ToMargaretMAOccupation));
|
||||
@ -1307,6 +1326,7 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
|
||||
.current_dest_access_mask = req->current_dest_access_mask, .preserve_at_quiet = req->preserve_at_quiet
|
||||
}};
|
||||
MargaretMemAllocator__add_occupant_node_given_gap_any_type(self, new_node, free_gap.some, mem_requirements.size, alignment_exp);
|
||||
MargaretMemAllocator__bind_image_memory(self, new_node);
|
||||
}
|
||||
|
||||
MargaretMemAllocatorRequests_sink(requests);
|
||||
|
||||
@ -18,7 +18,6 @@
|
||||
#include "../../../gen/l1/vulkan/VecVkPhysicalDevice.h"
|
||||
#include "../../../gen/l1/vulkan/SpanVkFormat.h"
|
||||
#include "../../../gen/l1/vulkan/OptionVkFormat.h"
|
||||
#include "../../../gen/l1/vulkan/VecVkDescriptorPoolSize.h"
|
||||
#include "../../../gen/l1/vulkan/VecVkQueueFamilyProperties.h"
|
||||
#include "../../../gen/l1/vulkan/OptionVkCompositeAlphaFlagBitsKHR.h"
|
||||
#include "../../../gen/l1/vulkan/VecVkPresentModeKHR.h"
|
||||
@ -936,6 +935,7 @@ typedef struct {
|
||||
|
||||
typedef MargaretImageInMemoryInfo* PtrMargaretImageInMemoryInfo;
|
||||
|
||||
// todo: remove all this useless crap for sissies
|
||||
#include "../../../gen/l1/eve/margaret/VecMargaretBufferInMemoryInfo.h"
|
||||
#include "../../../gen/l1/eve/margaret/VecAndSpan_PtrMargaretBufferInMemoryInfo.h"
|
||||
#include "../../../gen/l1/eve/margaret/VecMargaretImageInMemoryInfo.h"
|
||||
@ -1025,6 +1025,7 @@ VkDeviceMemory margaret_initialize_buffers_and_images(
|
||||
}
|
||||
|
||||
|
||||
// todo: bleh, delete this fucking bullshit
|
||||
#define margaret_prep_buffer_mem_info_of_gpu_vbo_Definition(TV) \
|
||||
MargaretBufferInMemoryInfo TV##_buffer_crinfo_of_gpu_vbo(size_t n) { \
|
||||
return (MargaretBufferInMemoryInfo){ \
|
||||
@ -1033,89 +1034,60 @@ MargaretBufferInMemoryInfo TV##_buffer_crinfo_of_gpu_vbo(size_t n) { \
|
||||
}; \
|
||||
}
|
||||
|
||||
// todo: delete this fucking crap
|
||||
MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_gpu_ebo(size_t n) {
|
||||
return (MargaretBufferInMemoryInfo){ .sz = sizeof(uint32_t) * n,
|
||||
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT };
|
||||
}
|
||||
|
||||
// Not very useful (but I used it anyway)
|
||||
|
||||
// todo: wow 'Not very useful (but I used it anyway)' actually meant something
|
||||
MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_small_local_ubo(size_t struct_sz) {
|
||||
return (MargaretBufferInMemoryInfo){ .sz = struct_sz, .usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT };
|
||||
}
|
||||
|
||||
// todo: get rid of this disgusting crap
|
||||
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_srgba(uint32_t w, uint32_t h) {
|
||||
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8G8B8A8_SRGB,
|
||||
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
|
||||
}
|
||||
|
||||
// todo: remove fucking bullshit
|
||||
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_unorm_8(uint32_t w, uint32_t h){
|
||||
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8_UNORM,
|
||||
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
|
||||
}
|
||||
|
||||
// todo: delete this crap
|
||||
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_unorm_32(uint32_t w, uint32_t h) {
|
||||
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8G8B8A8_UNORM,
|
||||
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
|
||||
}
|
||||
|
||||
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_zbuffer(uint32_t max_width, uint32_t max_height, VkFormat zbuf_format) {
|
||||
// todo: delete this
|
||||
MargaretImageInMemoryInfo
|
||||
margaret_prep_image_mem_info_of_zbuffer(uint32_t max_width, uint32_t max_height, VkFormat zbuf_format) {
|
||||
return (MargaretImageInMemoryInfo){ .width = max_width, .height = max_height, .format = zbuf_format,
|
||||
.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT };
|
||||
}
|
||||
|
||||
/* Used both for sampling and as a color attachment */
|
||||
/* todo: delete this */
|
||||
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_colorbuffer(U32 width, U32 height, VkFormat format) {
|
||||
return (MargaretImageInMemoryInfo){.width = width, .height = height, .format = format,
|
||||
.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT};
|
||||
}
|
||||
|
||||
// todo: delete this
|
||||
MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_gpu_ubo(size_t struct_sz) {
|
||||
return (MargaretBufferInMemoryInfo){ .sz = struct_sz,
|
||||
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT };
|
||||
}
|
||||
|
||||
// Crutch for vulkan
|
||||
VkCommandBuffer margaret_alloc_and_begin_single_use_command_buffer(VkDevice device, VkCommandPool command_pool) {
|
||||
VkCommandBuffer command_buffers[1];
|
||||
VkCommandBufferAllocateInfo alloc_info = {
|
||||
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
|
||||
.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
|
||||
.commandPool = command_pool,
|
||||
.commandBufferCount = ARRAY_SIZE(command_buffers),
|
||||
};
|
||||
|
||||
if (vkAllocateCommandBuffers(device, &alloc_info, command_buffers) != VK_SUCCESS)
|
||||
abortf("vkAllocateCommandBuffers");
|
||||
VkCommandBuffer copying_command_buffer = command_buffers[0];
|
||||
VkCommandBufferBeginInfo beginfo = {
|
||||
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
|
||||
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
|
||||
};
|
||||
|
||||
if (vkBeginCommandBuffer(copying_command_buffer, &beginfo) != VK_SUCCESS)
|
||||
abortf("vkBeginCommandBuffer");
|
||||
return command_buffers[0];
|
||||
}
|
||||
|
||||
void margaret_end_and_submit_and_free_command_buffer(
|
||||
VkDevice device, VkCommandPool command_pool, VkQueue graphics_queue,
|
||||
VkCommandBuffer cmd_buffer
|
||||
) {
|
||||
if (vkEndCommandBuffer(cmd_buffer) != VK_SUCCESS)
|
||||
abortf("vkEndCommandBuffer");
|
||||
|
||||
VkSubmitInfo submits_info[1] = {(VkSubmitInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
|
||||
.commandBufferCount = 1,
|
||||
.pCommandBuffers = &cmd_buffer,
|
||||
}};
|
||||
if (vkQueueSubmit(graphics_queue, ARRAY_SIZE(submits_info), submits_info, VK_NULL_HANDLE) != VK_SUCCESS)
|
||||
abortf("vkQueueSubmit");
|
||||
if (vkQueueWaitIdle(graphics_queue) != VK_SUCCESS)
|
||||
abortf("vkQueueWaitIdle");
|
||||
vkFreeCommandBuffers(device, command_pool, 1, &cmd_buffer);
|
||||
}
|
||||
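A minimal usage sketch for the pair of helpers above (the recording step in the middle is a placeholder):

VkCommandBuffer cb = margaret_alloc_and_begin_single_use_command_buffer(device, command_pool);
/* record one-off transfer commands into cb here */
margaret_end_and_submit_and_free_command_buffer(device, command_pool, graphics_queue, cb);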
|
||||
// todo: rename this shit appropriately
|
||||
typedef struct {
|
||||
size_t host_mem_buff_offset;
|
||||
const MargaretImageInMemoryInfo* dst_image;
|
||||
@ -1219,24 +1191,19 @@ void margaret_rerecord_cmd_buff_for_texture_init (
|
||||
|
||||
// For texture
|
||||
VkImageView margaret_create_view_for_image (
|
||||
VkDevice device, const MargaretImageInMemoryInfo* image, VkImageAspectFlags aspect_flags
|
||||
) {
|
||||
VkImageViewCreateInfo crinfo = {
|
||||
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
|
||||
.image = image->image,
|
||||
.viewType = VK_IMAGE_VIEW_TYPE_2D,
|
||||
.format = image->format,
|
||||
.subresourceRange = (VkImageSubresourceRange){
|
||||
.aspectMask = aspect_flags,
|
||||
.baseMipLevel = 0,
|
||||
.levelCount = 1,
|
||||
.baseArrayLayer = 0,
|
||||
.layerCount = 1,
|
||||
},
|
||||
};
|
||||
VkDevice device, VkImage image, VkFormat format, VkImageAspectFlags aspect_flags
|
||||
){
|
||||
VkImageView view;
|
||||
if (vkCreateImageView(device, &crinfo, NULL, &view) != VK_SUCCESS)
|
||||
abortf("vkCreateImageView");
|
||||
check(vkCreateImageView(device, &(VkImageViewCreateInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
|
||||
.image = image,
|
||||
.viewType = VK_IMAGE_VIEW_TYPE_2D,
|
||||
.format = format,
|
||||
.subresourceRange = (VkImageSubresourceRange){
|
||||
.aspectMask = aspect_flags, .baseMipLevel = 0, .levelCount = 1,
|
||||
.baseArrayLayer = 0, .layerCount = 1,
|
||||
},
|
||||
}, NULL, &view) == VK_SUCCESS);
|
||||
return view;
|
||||
}
|
||||
|
||||
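With the signature change above, callers now pass the raw handle and format instead of the whole in-memory-info struct. A minimal sketch of the new call (info stands in for any MargaretImageInMemoryInfo, which per the old code carries .image and .format):

VkImageView view = margaret_create_view_for_image(
    device, info.image, info.format, VK_IMAGE_ASPECT_COLOR_BIT);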
@ -1246,11 +1213,12 @@ VkSampler margaret_create_sampler(VkPhysicalDevice physical_device, VkDevice dev
|
||||
vkGetPhysicalDeviceProperties(physical_device, &physical_device_properties);
|
||||
VkPhysicalDeviceFeatures physical_device_features;
|
||||
vkGetPhysicalDeviceFeatures(physical_device, &physical_device_features);
|
||||
VkSamplerCreateInfo crinfo = {
|
||||
VkSampler sampler;
|
||||
check(vkCreateSampler(device, &(VkSamplerCreateInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
|
||||
.magFilter = make_linear ? VK_FILTER_LINEAR : VK_FILTER_NEAREST,
|
||||
.minFilter = make_linear ? VK_FILTER_LINEAR : VK_FILTER_NEAREST,
|
||||
.mipmapMode = make_linear? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST,
|
||||
.mipmapMode = make_linear ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST,
|
||||
.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
|
||||
.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
|
||||
.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
|
||||
@ -1264,37 +1232,37 @@ VkSampler margaret_create_sampler(VkPhysicalDevice physical_device, VkDevice dev
|
||||
.maxLod = 0.f,
|
||||
.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK,
|
||||
.unnormalizedCoordinates = VK_FALSE,
|
||||
};
|
||||
VkSampler sampler;
|
||||
if (vkCreateSampler(device, &crinfo, NULL, &sampler) != VK_SUCCESS)
|
||||
abortf("vkCreateSampler");
|
||||
}, NULL, &sampler) == VK_SUCCESS);
|
||||
|
||||
return sampler;
|
||||
}
|
||||
|
||||
VkDescriptorPool margaret_create_descriptor_set_pool(VkDevice device,
|
||||
uint32_t ubo_descriptor_count, uint32_t image_sampler_descriptor_count, uint32_t max_sets
|
||||
VkDescriptorPool margaret_create_descriptor_set_pool(
|
||||
VkDevice device, uint32_t ubo_descriptor_count, uint32_t image_sampler_descriptor_count, uint32_t max_sets
|
||||
) {
|
||||
VecVkDescriptorPoolSize sizes = VecVkDescriptorPoolSize_new_reserved(2);
|
||||
if (ubo_descriptor_count > 0)
|
||||
VecVkDescriptorPoolSize_append(&sizes, (VkDescriptorPoolSize){
|
||||
VkDescriptorPoolSize sizes[2];
|
||||
int sizes_c = 0;
|
||||
if (ubo_descriptor_count > 0) {
|
||||
sizes[sizes_c] = (VkDescriptorPoolSize){
|
||||
.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
|
||||
.descriptorCount = ubo_descriptor_count
|
||||
});
|
||||
if (image_sampler_descriptor_count > 0)
|
||||
VecVkDescriptorPoolSize_append(&sizes, (VkDescriptorPoolSize){
|
||||
};
|
||||
sizes_c++;
|
||||
}
|
||||
if (image_sampler_descriptor_count > 0) {
|
||||
sizes[sizes_c] = (VkDescriptorPoolSize){
|
||||
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
|
||||
.descriptorCount = image_sampler_descriptor_count
|
||||
});
|
||||
VkDescriptorPoolCreateInfo crinfo = {
|
||||
};
|
||||
sizes_c++;
|
||||
}
|
||||
VkDescriptorPool descriptor_pool;
|
||||
check(vkCreateDescriptorPool(device, &(VkDescriptorPoolCreateInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
|
||||
.maxSets = max_sets,
|
||||
.poolSizeCount = sizes.len,
|
||||
.pPoolSizes = sizes.buf,
|
||||
};
|
||||
VkDescriptorPool descriptor_pool;
|
||||
if (vkCreateDescriptorPool(device, &crinfo, NULL, &descriptor_pool) != VK_SUCCESS)
|
||||
abortf("vkCreateDescriptorPool");
|
||||
VecVkDescriptorPoolSize_drop(sizes);
|
||||
.poolSizeCount = sizes_c,
|
||||
.pPoolSizes = sizes,
|
||||
}, NULL, &descriptor_pool) == VK_SUCCESS);
|
||||
return descriptor_pool;
|
||||
}
|
||||
|
||||
|
||||
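The pool-size list is now a fixed two-element stack array instead of a VecVkDescriptorPoolSize, since at most one UBO entry and one combined-image-sampler entry are ever produced. Call sites stay the same, e.g. (counts taken from the main() change later in this diff):

VkDescriptorPool pool = margaret_create_descriptor_set_pool(device, 100, 100, 100);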
@ -1010,13 +1010,21 @@ typedef struct {
|
||||
PipelineHands pipeline_hands_1;
|
||||
VkSampler linear_sampler;
|
||||
VkSampler nearest_sampler;
|
||||
|
||||
VkCommandPool command_pool;
|
||||
VkCommandBuffer rendering_command_buf_0;
|
||||
VkCommandBuffer rendering_command_buf_1;
|
||||
VkCommandBuffer transfer_command_buf;
|
||||
VkCommandBuffer device_local_mem_mv_command_buf;
|
||||
// todo: use MargaretBufAllocator for staging buffers.
|
||||
// todo: but first, write the damn thing
|
||||
VkCommandBuffer host_visible_mem_mv_command_buf; /* This is just pure blasphemy: todo: remove it for good */
|
||||
VkDescriptorPool descriptor_pool; // todo: write dynamic allocator wrapper for descriptor pools
|
||||
|
||||
SceneTemplate scene_template;
|
||||
|
||||
MargaretMemAllocator host_visible_coherent_mem;
|
||||
MargaretMemAllocator device_local_mem;
|
||||
Jane_r0 jane; // todo: figure out my own design
|
||||
MargaretSwapchainBundle swfb;
|
||||
|
||||
SceneTemplate scene_template;
|
||||
|
||||
Scene scene;
|
||||
|
||||
@ -1032,9 +1040,6 @@ typedef struct {
|
||||
VkDescriptorSet descriptor_set_for_pipeline_1;
|
||||
// Descriptor sets for pipeline_0a are stored in generic_model_tex_vulk_pointers
|
||||
|
||||
Jane_r0 jane; // todo: figure out my own design
|
||||
MargaretSwapchainBundle swfb;
|
||||
|
||||
bool dt_transfer_required;
|
||||
} vulkan_ctx_r0;
|
||||
|
||||
@ -1141,28 +1146,24 @@ void update_state(state_r0* state, uint32_t dur) {
|
||||
}
|
||||
}
|
||||
|
||||
void vulkan_frame_drawing(state_r0* state) {
|
||||
and_again:
|
||||
vkWaitForFences(state->vk.device, 1, &state->vk.jane.in_flight_fence, VK_TRUE, UINT64_MAX);
|
||||
void vulkano_frame_drawing(state_r0* state) {
|
||||
check(vkWaitForFences(state->vk.device, 1, &state->vk.jane.in_flight_fence, VK_TRUE, UINT64_MAX) == VK_SUCCESS);
|
||||
check(vkResetFences(state->vk.device, 1, &state->vk.jane.in_flight_fence) == VK_SUCCESS);
|
||||
uint32_t ij;
|
||||
VkResult aq_ret = vkAcquireNextImageKHR(
|
||||
VkResult aq_ret;
|
||||
and_again:
|
||||
aq_ret = vkAcquireNextImageKHR(
|
||||
state->vk.device, state->vk.swfb.swapchain,
|
||||
UINT64_MAX, state->vk.jane.image_available_semaphore, VK_NULL_HANDLE, &ij
|
||||
);
|
||||
if (aq_ret == VK_ERROR_OUT_OF_DATE_KHR) {
|
||||
fprintf(stderr, "vkAcquireNextImageKHR: VK_ERROR_OUT_OF_DATE_KHR\n");
|
||||
recreate_swapchain(state);
|
||||
goto and_again;
|
||||
} else if (aq_ret == VK_SUBOPTIMAL_KHR) {
|
||||
fprintf(stderr, "vkAcquireNextImageKHR: VK_SUBOPTIMAL_KHR\n");
|
||||
if (aq_ret == VK_ERROR_OUT_OF_DATE_KHR || aq_ret == VK_SUBOPTIMAL_KHR) {
|
||||
fprintf(stderr, "vkAcquireNextImageKHR in { VK_ERROR_OUT_OF_DATE_KHR, VK_SUBOPTIMAL_KHR }\n");
|
||||
recreate_swapchain(state);
|
||||
goto and_again;
|
||||
} else if (aq_ret != VK_SUCCESS) {
|
||||
abortf("vkAcquireNextImageKHR");
|
||||
}
|
||||
|
||||
vkResetFences(state->vk.device, 1, &state->vk.jane.in_flight_fence);
|
||||
|
||||
state->vk.scene.color = (VkClearColorValue){{0, 0.5f, 0.9f, 1}};
|
||||
mat4 projection_matrix = marie_perspective_projection_fov_mat4(
|
||||
(float)state->width_confirmed, (float)state->height_confirmed,
|
||||
@ -1172,137 +1173,93 @@ void vulkan_frame_drawing(state_r0* state) {
|
||||
mat4 t_mat = mat4_mul_mat4(projection_matrix, mat4_mul_mat4(camera_rotation_matrix, camera_translation_matrix));
|
||||
|
||||
if (state->vk.dt_transfer_required){
|
||||
assert(state->vk.scene.spotlights.len < pipeline_0_ubo_spotlight_max_count);
|
||||
assert(state->vk.scene.point_lights.len < pipeline_0_ubo_point_light_max_count);
|
||||
copy_scene_info_to_buffer_and_rerecord_full_copy_command_buffer(
|
||||
state->vk.transfer_command_buffer, state->vk.host_mem_buffer.buffer,
|
||||
state->vk.host_mem_buffer_mem, &state->vk.scene, state->vk.device_lighting_ubo.buffer,
|
||||
state->vk.device_instance_attrs_for_models.buffer);
|
||||
VkCommandBuffer command_buffers[1] = { state->vk.transfer_command_buffer };
|
||||
VkSemaphore signaling_semaphores[1] = { state->vk.jane.in_frame_transfer_complete };
|
||||
VkSubmitInfo submit_info = {
|
||||
record_copying_entire_scene_from_staging_to_device_local(state->vk.transfer_command_buf, &state->vk.scene);
|
||||
check(vkQueueSubmit(state->vk.queues.graphics_queue, 1, &(VkSubmitInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
|
||||
.commandBufferCount = ARRAY_SIZE(command_buffers),
|
||||
.pCommandBuffers = command_buffers,
|
||||
.signalSemaphoreCount = ARRAY_SIZE(signaling_semaphores),
|
||||
.pSignalSemaphores = signaling_semaphores,
|
||||
};
|
||||
if (vkQueueSubmit(state->vk.queues.graphics_queue, 1, &submit_info, NULL) != VK_SUCCESS)
|
||||
abortf("vkQueueSubmit\n");
|
||||
.commandBufferCount = 1,
|
||||
.pCommandBuffers = (VkCommandBuffer[]){ state->vk.transfer_command_buf },
|
||||
.signalSemaphoreCount = 1,
|
||||
.pSignalSemaphores = (VkSemaphore[]){ state->vk.jane.in_frame_transfer_complete },
|
||||
// todo: add waiting for device_local_movement command buffer
|
||||
// todo: but first: write a use case for it
|
||||
}, NULL) == VK_SUCCESS);
|
||||
}
|
||||
|
||||
reset_and_record_command_buffer_0(
|
||||
state->vk.rendering_command_buffer_0, state->vk.render_pass_0,
|
||||
state->vk.rendering_command_buf_0, state->vk.render_pass_0,
|
||||
&state->vk.pipeline_hands_0a, &state->vk.pipeline_hands_0b,
|
||||
state->vk.IT1_framebuffer, state->vk.swfb.extent,
|
||||
&state->vk.scene,
|
||||
&state->vk.device_generic_models_top_and_tex, /* Needed just to get descriptor sets for generic models */
|
||||
&state->vk.generic_model_tex_vulk_pointers, /* Needed just to get descriptor sets for generic models */
|
||||
state->vk.descriptor_set_for_pipeline_0b,
|
||||
t_mat, state->vk.scene.cam.pos);
|
||||
|
||||
reset_and_record_command_buffer_1(state->vk.rendering_command_buffer_1, state->vk.render_pass_1,
|
||||
reset_and_record_command_buffer_1(state->vk.rendering_command_buf_1, state->vk.render_pass_1,
|
||||
&state->vk.pipeline_hands_1,
|
||||
*VecVkFramebuffer_at(&state->vk.swfb.framebuffers, ij),
|
||||
state->vk.swfb.extent,
|
||||
state->sane_image_extent_limit, &state->vk.scene, state->vk.descriptor_set_for_pipeline_1);
|
||||
|
||||
{
|
||||
VkSemaphore waiting_for_semaphores_if_dt_transfer_required[1] = {
|
||||
state->vk.jane.in_frame_transfer_complete
|
||||
};
|
||||
VkPipelineStageFlags waiting_stages_if_dt_transfer_required[1] = {
|
||||
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
|
||||
};
|
||||
assert(ARRAY_SIZE(waiting_for_semaphores_if_dt_transfer_required) ==
|
||||
ARRAY_SIZE(waiting_stages_if_dt_transfer_required));
|
||||
VkCommandBuffer command_buffers[1] = {state->vk.rendering_command_buffer_0};
|
||||
VkSemaphore signaling_semaphores[1] = { state->vk.jane.rendered_to_IT1_semaphore };
|
||||
VkSubmitInfo submit_info = {
|
||||
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
|
||||
VkSemaphore waiting_for_semaphores_if_dt_transfer_required[1] = {
|
||||
state->vk.jane.in_frame_transfer_complete
|
||||
};
|
||||
VkPipelineStageFlags waiting_stages_if_dt_transfer_required[1] = {
|
||||
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
|
||||
};
|
||||
check(vkQueueSubmit(state->vk.queues.graphics_queue, 1, &(VkSubmitInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
|
||||
|
||||
// We wait for `waiting_for_semaphores` before THESE stages
|
||||
// waitSemaphoreCount specifies size for both pWaitSemaphores and pWaitDstStageMask
|
||||
.waitSemaphoreCount = state->vk.dt_transfer_required ?
|
||||
ARRAY_SIZE(waiting_for_semaphores_if_dt_transfer_required) : 0,
|
||||
.pWaitSemaphores = state->vk.dt_transfer_required ?
|
||||
waiting_for_semaphores_if_dt_transfer_required : NULL,
|
||||
.pWaitDstStageMask = state->vk.dt_transfer_required ?
|
||||
waiting_stages_if_dt_transfer_required : NULL,
|
||||
// We wait for `waiting_for_semaphores` before THESE stages
|
||||
// waitSemaphoreCount specifies size for both pWaitSemaphores and pWaitDstStageMask
|
||||
.waitSemaphoreCount = state->vk.dt_transfer_required ?
|
||||
ARRAY_SIZE(waiting_for_semaphores_if_dt_transfer_required) : 0,
|
||||
.pWaitSemaphores = state->vk.dt_transfer_required ?
|
||||
waiting_for_semaphores_if_dt_transfer_required : NULL,
|
||||
.pWaitDstStageMask = state->vk.dt_transfer_required ?
|
||||
waiting_stages_if_dt_transfer_required : NULL,
|
||||
|
||||
.commandBufferCount = ARRAY_SIZE(command_buffers),
|
||||
.pCommandBuffers = command_buffers,
|
||||
.commandBufferCount = 1,
|
||||
.pCommandBuffers = (VkCommandBuffer[]){ state->vk.rendering_command_buf_0 },
|
||||
|
||||
.signalSemaphoreCount = ARRAY_SIZE(signaling_semaphores),
|
||||
.pSignalSemaphores = signaling_semaphores,
|
||||
};
|
||||
if (vkQueueSubmit(state->vk.queues.graphics_queue, 1, &submit_info, NULL) != VK_SUCCESS)
|
||||
abortf("vkQueueSubmit");
|
||||
}
|
||||
{
|
||||
VkSemaphore waiting_for_semaphores[2] = {
|
||||
state->vk.jane.image_available_semaphore,
|
||||
state->vk.jane.rendered_to_IT1_semaphore };
|
||||
VkPipelineStageFlags waiting_stages[2] = {
|
||||
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
|
||||
// VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
|
||||
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
|
||||
};
|
||||
assert(ARRAY_SIZE(waiting_for_semaphores) == ARRAY_SIZE(waiting_stages));
|
||||
VkCommandBuffer command_buffers[1] = { state->vk.rendering_command_buffer_1 };
|
||||
VkSemaphore signaling_semaphores[1] = {
|
||||
*VecVkSemaphore_at(&state->vk.swfb.rendering_finished_here_semaphores, ij)
|
||||
};
|
||||
VkSubmitInfo cmd_submit_info = {
|
||||
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
|
||||
.signalSemaphoreCount = 1,
|
||||
.pSignalSemaphores = (VkSemaphore[]){ state->vk.jane.rendered_to_IT1_semaphore },
|
||||
}, NULL) == VK_SUCCESS);
|
||||
|
||||
.waitSemaphoreCount = ARRAY_SIZE(waiting_for_semaphores),
|
||||
.pWaitSemaphores = waiting_for_semaphores,
|
||||
.pWaitDstStageMask = waiting_stages,
|
||||
check(vkQueueSubmit(state->vk.queues.graphics_queue, 1, &(VkSubmitInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
|
||||
.waitSemaphoreCount = 2,
|
||||
.pWaitSemaphores = (VkSemaphore[]){
|
||||
state->vk.jane.image_available_semaphore, state->vk.jane.rendered_to_IT1_semaphore },
|
||||
.pWaitDstStageMask = (VkPipelineStageFlags[]){
|
||||
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT },
|
||||
.commandBufferCount = 1,
|
||||
.pCommandBuffers = (VkCommandBuffer[]){ state->vk.rendering_command_buf_1 },
|
||||
.signalSemaphoreCount = 1,
|
||||
.pSignalSemaphores = (VkSemaphore[]){
|
||||
*VecVkSemaphore_at(&state->vk.swfb.rendering_finished_here_semaphores, ij) },
|
||||
}, state->vk.jane.in_flight_fence) == VK_SUCCESS);
|
||||
|
||||
.commandBufferCount = ARRAY_SIZE(command_buffers),
|
||||
.pCommandBuffers = command_buffers,
|
||||
|
||||
.signalSemaphoreCount = ARRAY_SIZE(signaling_semaphores),
|
||||
.pSignalSemaphores = signaling_semaphores,
|
||||
};
|
||||
|
||||
if (vkQueueSubmit(state->vk.queues.graphics_queue, 1, &cmd_submit_info, state->vk.jane.in_flight_fence) != VK_SUCCESS)
|
||||
abortf("vkQueueSubmit");
|
||||
VkPresentInfoKHR present_info = {
|
||||
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
|
||||
.waitSemaphoreCount = 1,
|
||||
.pWaitSemaphores = (VkSemaphore[]){
|
||||
*VecVkSemaphore_at(&state->vk.swfb.rendering_finished_here_semaphores, ij) },
|
||||
.swapchainCount = 1,
|
||||
.pSwapchains = (VkSwapchainKHR[]){ state->vk.swfb.swapchain },
|
||||
.pImageIndices = (U32[]){ ij },
|
||||
.pResults = NULL,
|
||||
};
|
||||
VkResult present_ret = vkQueuePresentKHR(state->vk.queues.presentation_queue, &present_info);
|
||||
if (present_ret == VK_ERROR_OUT_OF_DATE_KHR || present_ret == VK_SUBOPTIMAL_KHR) {
|
||||
fprintf(stderr, "vkQueuePresentKHR: VK_ERROR_OUT_OF_DATE_KHR or \n");
|
||||
check(vkWaitForFences(state->vk.device, 1, &state->vk.jane.in_flight_fence, VK_TRUE, UINT64_MAX) == VK_SUCCESS);
|
||||
check(vkResetFences(state->vk.device, 1, &state->vk.jane.in_flight_fence) == VK_SUCCESS);
|
||||
recreate_swapchain(state);
|
||||
goto and_again;
|
||||
} else if (present_ret != VK_SUCCESS) {
|
||||
abortf("vkQueuePresentKHR");
|
||||
}
|
||||
|
||||
{
|
||||
VkSemaphore waiting_for_semaphores[] = {
|
||||
*VecVkSemaphore_at(&state->vk.swfb.rendering_finished_here_semaphores, ij)
|
||||
};
|
||||
VkSwapchainKHR swapchains[] = { state->vk.swfb.swapchain };
|
||||
uint32_t image_indices[] = { ij };
|
||||
assert( ARRAY_SIZE(swapchains) == ARRAY_SIZE(image_indices) );
|
||||
|
||||
VkPresentInfoKHR present_info = {
|
||||
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
|
||||
.waitSemaphoreCount = ARRAY_SIZE(waiting_for_semaphores),
|
||||
.pWaitSemaphores = waiting_for_semaphores,
|
||||
|
||||
.swapchainCount = ARRAY_SIZE(swapchains),
|
||||
.pSwapchains = swapchains,
|
||||
.pImageIndices = image_indices,
|
||||
.pResults = NULL,
|
||||
};
|
||||
|
||||
VkResult pres_ret = vkQueuePresentKHR(state->vk.queues.presentation_queue, &present_info);
|
||||
// todo: ponder more over this
|
||||
if (pres_ret == VK_ERROR_OUT_OF_DATE_KHR) {
|
||||
fprintf(stderr, "vkQueuePresentKHR: VK_ERROR_OUT_OF_DATE_KHR\n");
|
||||
recreate_swapchain(state);
|
||||
goto and_again;
|
||||
} else if (pres_ret == VK_SUBOPTIMAL_KHR) {
|
||||
fprintf(stderr, "vkQueuePresentKHR: VK_SUBOPTIMAL_KHR\n");
|
||||
recreate_swapchain(state);
|
||||
goto and_again;
|
||||
} else if (pres_ret != VK_SUCCESS) {
|
||||
abortf("vkQueuePresentKHR");
|
||||
}
|
||||
}
|
||||
state->vk.dt_transfer_required = false;
|
||||
margaret_ns_time frame_B0 = margaret_clock_gettime_monotonic_raw();
|
||||
state->frame_count_since_key++;
|
||||
@ -1421,7 +1378,11 @@ static void main_h_wl_keyboard_key(
|
||||
if (key_action == WL_KEYBOARD_KEY_STATE_RELEASED) {
|
||||
if (keysym == XKB_KEY_1) {
|
||||
vec3 p = state->vk.scene.cam.pos;
|
||||
VecPipeline0PointLight_mat(&state->vk.scene.point_lights, 0)->pos = p;
|
||||
ShinyModelOnSceneMem* model = VecShinyModelOnSceneMem_mat(&state->vk.scene.shiny_models, 0);
|
||||
assert(model->instance_vec_len == 1);
|
||||
ShinyMeshInstance* instances = (ShinyMeshInstance*)MargaretMAIterator_get_mapped(model->instance_attr_buf);
|
||||
instances[0].model_t = marie_translation_mat4(p);
|
||||
state->vk.dt_transfer_required = true;
|
||||
printf("Point light source pos set to %f %f %f\n", p.x, p.y, p.z);
|
||||
state->vk.dt_transfer_required = true;
|
||||
} else if (keysym == XKB_KEY_2) {
|
||||
@ -1470,7 +1431,7 @@ static void main_h_wl_pointer_leave(
|
||||
}
|
||||
|
||||
static void main_h_wl_pointer_motion(
|
||||
void *data,struct wl_pointer *wl_pointer, uint32_t time, wl_fixed_t surface_x, wl_fixed_t surface_y
|
||||
void *data, struct wl_pointer *wl_pointer, uint32_t time, wl_fixed_t surface_x, wl_fixed_t surface_y
|
||||
) {
|
||||
state_r0 *state = data;
|
||||
CamControlInfo_update_direction(&state->vk.scene.cam,
|
||||
@ -1583,8 +1544,6 @@ static const struct wl_callback_listener main_h_wl_surface_frame_listener = {
|
||||
.done = main_h_wl_surface_frame_done,
|
||||
};
|
||||
|
||||
|
||||
|
||||
void compile_shader_dir(SpanU8 name) {
|
||||
mkdir_nofail("shaders/spv");
|
||||
VecU8 spv_shader_dir_name = VecU8_fmt("shaders/spv/%s%c", name, 0);
|
||||
@ -1701,141 +1660,67 @@ int main() {
|
||||
vk->render_pass_1 = create_render_pass_1(vk->device, swapchain_details_res.ok.surface_format.format);
|
||||
vk->pipeline_hands_1 = create_graphics_pipeline_1(vk->device, vk->render_pass_1, 0);
|
||||
|
||||
// These samplers are global for a lot of my future textures
|
||||
vk->linear_sampler = margaret_create_sampler(vk->physical_device, vk->device, true);
|
||||
vk->nearest_sampler = margaret_create_sampler(vk->physical_device, vk->device, false);
|
||||
|
||||
vk->command_pool = margaret_create_resettable_command_pool(vk->device, vk->queue_fam.for_graphics);
|
||||
vk->rendering_command_buf_0 = margaret_allocate_command_buffer(vk->device, vk->command_pool);
|
||||
vk->rendering_command_buf_1 = margaret_allocate_command_buffer(vk->device, vk->command_pool);
|
||||
vk->transfer_command_buf = margaret_allocate_command_buffer(vk->device, vk->command_pool);
|
||||
vk->device_local_mem_mv_command_buf = margaret_allocate_command_buffer(vk->device, vk->command_pool);
|
||||
// todo: get rid of this line. Right now it won't even be used, like, at all... I won't actually use it in my test.
|
||||
vk->host_visible_mem_mv_command_buf = margaret_allocate_command_buffer(vk->device, vk->command_pool);
|
||||
|
||||
// todo: write a descriptor set allocator (in Margaret) that manages dynamic descriptor pool allocations
|
||||
vk->descriptor_pool = margaret_create_descriptor_set_pool(vk->device, 100 ,100, 100);
|
||||
|
||||
/* Here we search physical device memory types for the one with host-visible flag and the other with device-local flag */
|
||||
VkPhysicalDeviceMemoryProperties mem_properties;
|
||||
vkGetPhysicalDeviceMemoryProperties(vk->physical_device, &mem_properties);
|
||||
const VkMemoryPropertyFlags host_visible_coherent = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
|
||||
const VkMemoryPropertyFlags device_local = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
|
||||
assert(mem_properties.memoryTypeCount < 32);
|
||||
int ham_sandwich = 0;
|
||||
for (U32 i = 0; i < mem_properties.memoryTypeCount; i++) {
|
||||
if ((mem_properties.memoryTypes[i].propertyFlags & host_visible_coherent) == host_visible_coherent) {
|
||||
// todo: replace this MargaretMemAllocator with MargaretBufferAllocator
|
||||
vk->host_visible_coherent_mem = MargaretMemAllocator_new(vk->device, vk->physical_device,
|
||||
vk->host_visible_mem_mv_command_buf, host_visible_coherent, i);
|
||||
}
|
||||
if ((mem_properties.memoryTypes[i].propertyFlags & device_local) == device_local) {
|
||||
vk->device_local_mem = MargaretMemAllocator_new(vk->device, vk->physical_device,
|
||||
vk->device_local_mem_mv_command_buf, host_visible_coherent, i);
|
||||
}
|
||||
}
|
||||
if (ham_sandwich != 2)
|
||||
abortf("Can't find memory types\n");
|
||||
|
||||
vk->jane = Jane_r0_create(vk->device);
|
||||
/* Luckily, swapchain image allocation is not managed by me */
|
||||
vk->swfb = MargaretSwapchainBundle_new(
|
||||
vk->device, vk->queue_fam, swapchain_details_res.ok,
|
||||
vk->surface, vk->render_pass_1, NULL);
|
||||
|
||||
vk->scene_template = (SceneTemplate){
|
||||
.generic_models = VecGenericMeshInSceneTemplate_new(),
|
||||
.shiny_models = VecShinyMeshInSceneTemplate_new(),
|
||||
.point_lights_max_count = pipeline_0_ubo_point_light_max_count,
|
||||
.spotlights_max_count = pipeline_0_ubo_spotlight_max_count};
|
||||
VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
|
||||
GenericMeshInSceneTemplate_for_log(10, 2, 6, 100));
|
||||
VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
|
||||
GenericMeshInSceneTemplate_for_log(5, 5, 10, 1));
|
||||
VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
|
||||
GenericMeshInSceneTemplate_for_log(1, 10, 4, 2));
|
||||
VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
|
||||
GenericMeshInSceneTemplate_for_log(2, 1, 6, 2));
|
||||
VecShinyMeshInSceneTemplate_append(&vk->scene_template.shiny_models, (ShinyMeshInSceneTemplate){
|
||||
.topology = generate_shiny_rhombicuboctahedron(0.5f), .max_instance_count = 5
|
||||
});
|
||||
VecShinyMeshInSceneTemplate_append(&vk->scene_template.shiny_models, (ShinyMeshInSceneTemplate){
|
||||
.topology = generate_shiny_cube(0.5f), .max_instance_count = 5
|
||||
});
|
||||
|
||||
vk->device_generic_models_top_and_tex = VecGenericModelTopAndTexInMemoryInfo_new_reserved(vk->scene_template.generic_models.len);
|
||||
for (size_t i = 0; i < vk->scene_template.generic_models.len; i++) {
|
||||
const GenericMeshInSceneTemplate* M = VecGenericMeshInSceneTemplate_at(&vk->scene_template.generic_models, i);
|
||||
TextureDataR8G8B8A8 reading_diffuse = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&M->diffuse_texture_path));
|
||||
TextureDataR8G8B8A8 reading_normal = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&M->normal_texture_path));
|
||||
TextureDataR8 reading_specular = TextureDataR8_read_from_png_nofail(VecU8_to_span(&M->specular_texture_path));
|
||||
VecGenericModelTopAndTexInMemoryInfo_append(&vk->device_generic_models_top_and_tex,
|
||||
(GenericModelTopAndTexInMemoryInfo){
|
||||
.vbo = GenericMeshVertex_buffer_crinfo_of_gpu_vbo(M->topology.vertices.len),
|
||||
.ebo = margaret_prep_buffer_mem_info_of_gpu_ebo(M->topology.indexes.len),
|
||||
.reading_diffuse = reading_diffuse, .reading_normal = reading_normal, .reading_specular = reading_specular,
|
||||
.diffuse = margaret_prep_image_mem_info_of_gpu_texture_srgba(reading_diffuse.width, reading_diffuse.height),
|
||||
.normal = margaret_prep_image_mem_info_of_gpu_texture_unorm_32(reading_normal.width, reading_normal.height),
|
||||
.specular = margaret_prep_image_mem_info_of_gpu_texture_unorm_8(reading_specular.width, reading_specular.height),
|
||||
/* image views will be created after the images are allocated */
|
||||
/* descriptor set for each model will be allocated later */
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
VecU64 offset_of_image_in_host_mem_buff_during_init = VecU64_new_zeroinit(vk->device_generic_models_top_and_tex.len * 3);
|
||||
U64 grand_total_texture_size_in_host_mem = 0;
|
||||
{
|
||||
U64 offset = 0;
|
||||
for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
|
||||
offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 0] = offset;
|
||||
offset += TextureDataR8G8B8A8_get_size_in_bytes(&vk->device_generic_models_top_and_tex.buf[i].reading_diffuse);
|
||||
offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 1] = offset;
|
||||
offset += TextureDataR8G8B8A8_get_size_in_bytes(&vk->device_generic_models_top_and_tex.buf[i].reading_normal);
|
||||
offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 2] = offset;
|
||||
offset += TextureDataR8_get_size_in_bytes(&vk->device_generic_models_top_and_tex.buf[i].reading_specular);
|
||||
}
|
||||
grand_total_texture_size_in_host_mem = offset;
|
||||
}
|
||||
|
||||
vk->device_shiny_models_top = VecShinyModelTopInMemoryInfo_new_reserved(vk->scene_template.shiny_models.len);
|
||||
for (size_t i = 0; i < vk->scene_template.shiny_models.len; i++) {
|
||||
const ShinyMeshInSceneTemplate* M = VecShinyMeshInSceneTemplate_at(&vk->scene_template.shiny_models, i);
|
||||
VecShinyModelTopInMemoryInfo_append(&vk->device_shiny_models_top,
|
||||
(ShinyModelTopInMemoryInfo){
|
||||
.vbo = ShinyMeshVertex_buffer_crinfo_of_gpu_vbo(M->topology.vertices.len),
|
||||
.ebo = margaret_prep_buffer_mem_info_of_gpu_ebo(M->topology.indexes.len),
|
||||
});
|
||||
}
|
||||
|
||||
// todo: this is a mess (update: still WIP (update: it became growing technical debt))
|
||||
|
||||
// We have only one staging buffer in host memory (because we don't really need more)
vk->host_mem_buffer = (MargaretBufferInMemoryInfo){ .sz =
MAX_U64(SceneTemplate_get_space_for_initial_model_topology_transfer(&vk->scene_template),
MAX_U64(SceneTemplate_get_space_needed_for_widest_state_transfer(&vk->scene_template),
MAX_U64(grand_total_texture_size_in_host_mem, 0)))
, .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT };
PtrMargaretBufferInMemoryInfo host_mem_buffer_SPAN[1] = {&vk->host_mem_buffer};
vk->host_mem = margaret_initialize_buffers_and_images(vk->physical_device, vk->device,
(MutSpanPtrMargaretBufferInMemoryInfo){.data = host_mem_buffer_SPAN, .len = 1},
(MutSpanPtrMargaretImageInMemoryInfo){ 0 },
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
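/* Editor's note: a rough sketch (an assumption, not part of the diff) of the standard Vulkan
 * sequence a helper like margaret_initialize_buffers_and_images has to perform for a single
 * buffer: create, query requirements, allocate a VkDeviceMemory of a compatible memory type,
 * then bind. The `buffer`/`memory` locals, error handling, and memory-type selection are
 * elided here. */
// VkBufferCreateInfo bci = { .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
//                            .size = vk->host_mem_buffer.sz,
//                            .usage = vk->host_mem_buffer.usage,
//                            .sharingMode = VK_SHARING_MODE_EXCLUSIVE };
// vkCreateBuffer(vk->device, &bci, NULL, &buffer);
// VkMemoryRequirements req;
// vkGetBufferMemoryRequirements(vk->device, buffer, &req);
// VkMemoryAllocateInfo mai = { .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
//                              .allocationSize = req.size,
//                              .memoryTypeIndex = /* a HOST_VISIBLE|HOST_COHERENT type */ 0 };
// vkAllocateMemory(vk->device, &mai, NULL, &memory);
// vkBindBufferMemory(vk->device, buffer, memory, 0);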

vk->device_lighting_ubo = margaret_prep_buffer_mem_info_of_gpu_ubo(sizeof(Pipeline0UBO));
vk->device_instance_attrs_for_models = (MargaretBufferInMemoryInfo){
.sz = SceneTemplate_get_space_needed_for_all_instance_attributes(&vk->scene_template),
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT
.shiny_models = VecShinyMeshTopology_new()
};

VecPtrMargaretBufferInMemoryInfo device_mem_buffers_SPAN = VecPtrMargaretBufferInMemoryInfo_new_reserved(
vk->device_generic_models_top_and_tex.len + vk->device_shiny_models_top.len);

VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &vk->device_lighting_ubo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &vk->device_instance_attrs_for_models);

vk->device_IT1_image = margaret_prep_image_mem_info_of_colorbuffer(
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, IT1_format.some);
vk->device_zbuffer_image = margaret_prep_image_mem_info_of_zbuffer(
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, zbuffer_format.some);

VecPtrMargaretImageInMemoryInfo device_mem_images_SPAN =
VecPtrMargaretImageInMemoryInfo_new_reserved(2 + 3 * vk->scene_template.generic_models.len);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &vk->device_IT1_image);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &vk->device_zbuffer_image);

for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = &vk->device_generic_models_top_and_tex.buf[i];
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->vbo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->ebo);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->diffuse);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->normal);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->specular);
}

for (size_t i = 0; i < vk->device_shiny_models_top.len; i++) {
ShinyModelTopInMemoryInfo* M = &vk->device_shiny_models_top.buf[i];
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->vbo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->ebo);
}

vk->device_mem = margaret_initialize_buffers_and_images(vk->physical_device, vk->device,
VecPtrMargaretBufferInMemoryInfo_to_mspan(&device_mem_buffers_SPAN),
VecPtrMargaretImageInMemoryInfo_to_mspan(&device_mem_images_SPAN),
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

/* device_mem_buffers_SPAN, device_mem_images_SPAN invalidated */
VecPtrMargaretBufferInMemoryInfo_drop(device_mem_buffers_SPAN);
VecPtrMargaretImageInMemoryInfo_drop(device_mem_images_SPAN);

vk->command_pool = margaret_create_resettable_command_pool(vk->device, vk->queue_fam.for_graphics);
vk->rendering_command_buffer_0 = margaret_allocate_command_buffer(vk->device, vk->command_pool);
vk->rendering_command_buffer_1 = margaret_allocate_command_buffer(vk->device, vk->command_pool);
vk->transfer_command_buffer = margaret_allocate_command_buffer(vk->device, vk->command_pool);
VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
GenericMeshInSceneTemplate_for_log(10, 2, 6));
// VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
// GenericMeshInSceneTemplate_for_log(5, 5, 10));
// VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
// GenericMeshInSceneTemplate_for_log(1, 10, 4));
// VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
// GenericMeshInSceneTemplate_for_log(2, 1, 6));
VecShinyMeshTopology_append(&vk->scene_template.shiny_models, generate_shiny_rhombicuboctahedron(0.5f));
// VecShinyMeshInSceneTemplate_append(&vk->scene_template.shiny_models, (ShinyMeshInSceneTemplate){
// .topology = generate_shiny_cube(0.5f), .max_instance_count = 5
// });

// todo: continue from here
vk->my_cam_control_info = CamControlInfo_new();
vk->Buba_control_info = (vec3){0};

@ -1905,17 +1790,117 @@ int main() {
(ShinyMeshInstance){ .model_t = marie_translation_mat4((vec3){-5, 0, 3}), .color_off = (vec3){0.3f, 0.5f, 0.5f}});
// todo: synchronize them with my cool light sources)

if (vkMapMemory(vk->device, vk->host_mem, 0, VK_WHOLE_SIZE, 0, &vk->host_mem_buffer_mem) != VK_SUCCESS)
abortf("vkMapMemory");
// vk->device_generic_models_top_and_tex = VecGenericModelTopAndTexInMemoryInfo_new_reserved(vk->scene_template.generic_models.len);
// for (size_t i = 0; i < vk->scene_template.generic_models.len; i++) {
// const GenericMeshInSceneTemplate* M = VecGenericMeshInSceneTemplate_at(&vk->scene_template.generic_models, i);
// TextureDataR8G8B8A8 reading_diffuse = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&M->diffuse_texture_path));
// TextureDataR8G8B8A8 reading_normal = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&M->normal_texture_path));
// TextureDataR8 reading_specular = TextureDataR8_read_from_png_nofail(VecU8_to_span(&M->specular_texture_path));
// VecGenericModelTopAndTexInMemoryInfo_append(&vk->device_generic_models_top_and_tex,
// (GenericModelTopAndTexInMemoryInfo){
// .vbo = GenericMeshVertex_buffer_crinfo_of_gpu_vbo(M->topology.vertices.len),
// .ebo = margaret_prep_buffer_mem_info_of_gpu_ebo(M->topology.indexes.len),
// .reading_diffuse = reading_diffuse, .reading_normal = reading_normal, .reading_specular = reading_specular,
// .diffuse = margaret_prep_image_mem_info_of_gpu_texture_srgba(reading_diffuse.width, reading_diffuse.height),
// .normal = margaret_prep_image_mem_info_of_gpu_texture_unorm_32(reading_normal.width, reading_normal.height),
// .specular = margaret_prep_image_mem_info_of_gpu_texture_unorm_8(reading_specular.width, reading_specular.height),
// /* image views will be created after the images are allocated */
// /* descriptor set for each model will be allocated later */
// });
// }
//
//
// VecU64 offset_of_image_in_host_mem_buff_during_init = VecU64_new_zeroinit(vk->device_generic_models_top_and_tex.len * 3);
// U64 grand_total_texture_size_in_host_mem = 0;
// {
// U64 offset = 0;
// for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
// offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 0] = offset;
// offset += TextureDataR8G8B8A8_get_size_in_bytes(&vk->device_generic_models_top_and_tex.buf[i].reading_diffuse);
// offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 1] = offset;
// offset += TextureDataR8G8B8A8_get_size_in_bytes(&vk->device_generic_models_top_and_tex.buf[i].reading_normal);
// offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 2] = offset;
// offset += TextureDataR8_get_size_in_bytes(&vk->device_generic_models_top_and_tex.buf[i].reading_specular);
// }
// grand_total_texture_size_in_host_mem = offset;
// }

vk->device_shiny_models_top = VecShinyModelTopInMemoryInfo_new_reserved(vk->scene_template.shiny_models.len);
for (size_t i = 0; i < vk->scene_template.shiny_models.len; i++) {
const ShinyMeshInSceneTemplate* M = VecShinyMeshInSceneTemplate_at(&vk->scene_template.shiny_models, i);
VecShinyModelTopInMemoryInfo_append(&vk->device_shiny_models_top,
(ShinyModelTopInMemoryInfo){
.vbo = ShinyMeshVertex_buffer_crinfo_of_gpu_vbo(M->topology.vertices.len),
.ebo = margaret_prep_buffer_mem_info_of_gpu_ebo(M->topology.indexes.len),
});
}

// We have only one staging buffer in host memory (because we don't really need more)
vk->host_mem_buffer = (MargaretBufferInMemoryInfo){ .sz =
MAX_U64(SceneTemplate_get_space_for_initial_model_topology_transfer(&vk->scene_template),
MAX_U64(SceneTemplate_get_space_needed_for_widest_state_transfer(&vk->scene_template),
MAX_U64(grand_total_texture_size_in_host_mem, 0)))
, .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT };
PtrMargaretBufferInMemoryInfo host_mem_buffer_SPAN[1] = {&vk->host_mem_buffer};
vk->host_mem = margaret_initialize_buffers_and_images(vk->physical_device, vk->device,
(MutSpanPtrMargaretBufferInMemoryInfo){.data = host_mem_buffer_SPAN, .len = 1},
(MutSpanPtrMargaretImageInMemoryInfo){ 0 },
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);

vk->device_lighting_ubo = margaret_prep_buffer_mem_info_of_gpu_ubo(sizeof(Pipeline0UBO));
vk->device_instance_attrs_for_models = (MargaretBufferInMemoryInfo){
.sz = SceneTemplate_get_space_needed_for_all_instance_attributes(&vk->scene_template),
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT
};

VecPtrMargaretBufferInMemoryInfo device_mem_buffers_SPAN = VecPtrMargaretBufferInMemoryInfo_new_reserved(
vk->device_generic_models_top_and_tex.len + vk->device_shiny_models_top.len);

VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &vk->device_lighting_ubo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &vk->device_instance_attrs_for_models);

vk->device_IT1_image = margaret_prep_image_mem_info_of_colorbuffer(
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, IT1_format.some);
vk->device_zbuffer_image = margaret_prep_image_mem_info_of_zbuffer(
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, zbuffer_format.some);

VecPtrMargaretImageInMemoryInfo device_mem_images_SPAN =
VecPtrMargaretImageInMemoryInfo_new_reserved(2 + 3 * vk->scene_template.generic_models.len);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &vk->device_IT1_image);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &vk->device_zbuffer_image);

for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = &vk->device_generic_models_top_and_tex.buf[i];
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->vbo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->ebo);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->diffuse);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->normal);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->specular);
}

for (size_t i = 0; i < vk->device_shiny_models_top.len; i++) {
ShinyModelTopInMemoryInfo* M = &vk->device_shiny_models_top.buf[i];
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->vbo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->ebo);
}

vk->device_mem = margaret_initialize_buffers_and_images(vk->physical_device, vk->device,
VecPtrMargaretBufferInMemoryInfo_to_mspan(&device_mem_buffers_SPAN),
VecPtrMargaretImageInMemoryInfo_to_mspan(&device_mem_images_SPAN),
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

/* device_mem_buffers_SPAN, device_mem_images_SPAN invalidated */
VecPtrMargaretBufferInMemoryInfo_drop(device_mem_buffers_SPAN);
VecPtrMargaretImageInMemoryInfo_drop(device_mem_images_SPAN);

{
SceneTemplate_copy_initial_model_topology_and_rerecord_transfer_cmd(
&vk->scene_template, &vk->scene, vk->host_mem_buffer_mem,
vk->transfer_command_buffer, vk->host_mem_buffer.buffer);
vk->transfer_command_buf, vk->host_mem_buffer.buffer);

VkSubmitInfo submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1, .pCommandBuffers = &vk->transfer_command_buffer,
.commandBufferCount = 1, .pCommandBuffers = &vk->transfer_command_buf,
};
if (vkQueueSubmit(vk->queues.graphics_queue, 1, &submit_info, NULL) != VK_SUCCESS)
abortf("vkQueueSubmit\n");
@ -1944,14 +1929,14 @@ int main() {
VecMargaretCommandForImageCopying_append(&commands, (MargaretCommandForImageCopying){
.dst_image = &M->specular, .host_mem_buff_offset = specular_offset});
}
margaret_rerecord_cmd_buff_for_texture_init(vk->transfer_command_buffer, vk->host_mem_buffer.buffer,
margaret_rerecord_cmd_buff_for_texture_init(vk->transfer_command_buf, vk->host_mem_buffer.buffer,
VecMargaretCommandForImageCopying_to_span(&commands),
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
VecMargaretCommandForImageCopying_drop(commands);

VkSubmitInfo submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1, .pCommandBuffers = &vk->transfer_command_buffer,
.commandBufferCount = 1, .pCommandBuffers = &vk->transfer_command_buf,
};
if (vkQueueSubmit(vk->queues.graphics_queue, 1, &submit_info, NULL) != VK_SUCCESS)
abortf("vkQueueSubmit\n");
@ -1980,15 +1965,9 @@ int main() {
// Right now I only have one light source
VecPipeline0PointLight_append(&vk->scene.point_lights, (Pipeline0PointLight){.pos = {0}, .color = {100, 100, 100}});

// These samplers are global for a lot of my future textures
vk->linear_sampler = margaret_create_sampler(vk->physical_device, vk->device, true);
vk->nearest_sampler = margaret_create_sampler(vk->physical_device, vk->device, false);

vk->descriptor_pool = margaret_create_descriptor_set_pool(vk->device,
1 + 1 * vk->device_generic_models_top_and_tex.len,
1 + 3 * vk->device_generic_models_top_and_tex.len,
2 + 1 * vk->device_generic_models_top_and_tex.len);
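/* Editor's note: a sketch of the vkCreateDescriptorPool call a helper like
 * margaret_create_descriptor_set_pool presumably wraps. Reading the three counts as
 * uniform-buffer descriptors, combined image samplers (three textures per generic model),
 * and maxSets is the editor's guess, not something this diff states; `ubo_count`,
 * `sampler_count`, `max_sets`, and `pool` are hypothetical locals. */
// VkDescriptorPoolSize pool_sizes[2] = {
//     { .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,         .descriptorCount = ubo_count },
//     { .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, .descriptorCount = sampler_count },
// };
// VkDescriptorPoolCreateInfo dpci = { .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
//                                     .maxSets = max_sets,
//                                     .poolSizeCount = 2, .pPoolSizes = pool_sizes };
// vkCreateDescriptorPool(vk->device, &dpci, NULL, &pool);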
// todo: create descriptor sets for generic model textures
// todo: and then fill them up (using writes) after each defragmentation
// todo: right now there are no defragmentations, but I will create a function for this stuff later
for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = &vk->device_generic_models_top_and_tex.buf[i];
M->p_0a_set_0 = margaret_allocate_descriptor_set(
@ -2098,8 +2077,6 @@ int main() {
};
vkUpdateDescriptorSets(vk->device, ARRAY_SIZE(writes_in_descriptor_sets), writes_in_descriptor_sets, 0, NULL);

vk->jane = Jane_r0_create(vk->device);

state.prev_key_frame_time = margaret_clock_gettime_monotonic_raw();
state.frame_count_since_key = 0;
/* Will happen mid-frame */

@ -78,21 +78,7 @@ ShinyMeshTopology ShinyMeshTopology_clone(const ShinyMeshTopology* self) {
return (ShinyMeshTopology){.vertices = VecShinyMeshVertex_clone(&self->vertices), .indexes = VecU32_clone(&self->indexes)};
}

typedef struct {
ShinyMeshTopology topology;
U32 max_instance_count;
} ShinyMeshInSceneTemplate;

void ShinyMeshInSceneTemplate_drop(ShinyMeshInSceneTemplate self) {
ShinyMeshTopology_drop(self.topology);
}

ShinyMeshInSceneTemplate ShinyMeshInSceneTemplate_clone(const ShinyMeshInSceneTemplate* self) {
return (ShinyMeshInSceneTemplate){.topology = ShinyMeshTopology_clone(&self->topology),
.max_instance_count = self->max_instance_count};
}

#include "../../../../gen/l1/eve/r0/VecShinyMeshInSceneTemplate.h"
#include "../../../../gen/l1/eve/r0/VecShinyMeshTopology.h"

typedef struct {
mat4 model_t;
@ -131,7 +117,7 @@ typedef struct {

typedef struct {
VecGenericMeshInSceneTemplate generic_models;
VecShinyMeshInSceneTemplate shiny_models;
VecShinyMeshTopology shiny_models;
} SceneTemplate;

void SceneTemplate_drop(SceneTemplate self) {