Finished rewriting r0 to MargaretMemAllocator. Changed the MMA interface during the refactoring. It compiles — finally; it took only 5 weeks to get a compiling version. Unfortunately, it crashed before even starting. Today is going to be a long day

This commit is contained in:
Андреев Григорий 2025-12-02 04:52:06 +03:00
parent dc67475e7a
commit fac2fde22b
7 changed files with 504 additions and 709 deletions

View File

@ -13,20 +13,6 @@ void generate_margaret_eve_for_vulkan_utils() {
.mut_span = true, .collab_vec_span = true, .span_sort = true
});
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretCommandForImageCopying"), true, true);
/* Slated for demolition (to be removed) */
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretBufferInMemoryInfo"), true, false);
generate_util_templ_inst_eve_header(l, ns, (util_templates_instantiation_options){
.T = cstr("PtrMargaretBufferInMemoryInfo"), .t_primitive = true, .vec = true, .span = true, .mut_span = true,
.collab_vec_span = true
});
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretImageInMemoryInfo"), true, false);
generate_util_templ_inst_eve_header(l, ns, (util_templates_instantiation_options){
.T = cstr("PtrMargaretImageInMemoryInfo"), .t_primitive = true, .vec = true, .span = true, .mut_span = true,
.collab_vec_span = true
});
/* For l2/margaret/vulkan_memory_claire.h */
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){.T = cstr("MargaretMemAllocatorOneBlock")}, false);

View File

@ -23,6 +23,7 @@ void generate_headers_for_r0_r1_r2_r3() {
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyModelOnSceneMem"), true, false);
/* r0 */
generate_eve_span_company_for_primitive(l, ns, cstr("GenericModelTexVulkPointers"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("CommandForImageCopying"), true, true);
}
mkdir_nofail("l1/eve/r2");
{ /* r2 */

View File

@ -55,6 +55,14 @@ void generate_util_temp_very_base_headers() {
cstr(""), cstr("VecU8"), cstr("#include \"VecAndSpan_U8.h\""), true, false);
generate_ResultType_templ_inst_guarded_header(cstr("l1"), cstr(""),
cstr(""), cstr("SpanU8"), cstr("#include \"VecAndSpan_U8.h\""), true, true);
/* Not very basic but definitely very common */
generate_guarded_span_company_for_non_primitive_clonable(cstr("l1"), cstr(""), cstr("TextureDataR8G8B8A8"),
cstr("#include \"../../gen/l1/pixel_masses.h\"\n"), true, false);
generate_guarded_span_company_for_non_primitive_clonable(cstr("l1"), cstr(""), cstr("TextureDataR8G8B8"),
cstr("#include \"../../gen/l1/pixel_masses.h\"\n"), true, false);
generate_guarded_span_company_for_non_primitive_clonable(cstr("l1"), cstr(""), cstr("TextureDataR8"),
cstr("#include \"../../gen/l1/pixel_masses.h\"\n"), true, false);
}
#endif

View File

@ -246,6 +246,7 @@ typedef struct {
typedef struct ListNodeMargaretMemAllocatorOneBlock ListNodeMargaretMemAllocatorOneBlock;
typedef struct {
// todo: remove this field, replace it's getter with a 'virtual' method that queries VkBuffer/VkImage VkMemoryRequirements
U64 taken_size;
ListNodeMargaretMemAllocatorOneBlock* block;
MargaretMAOccupant me;
@ -285,7 +286,8 @@ typedef struct {
U64 allocation_size;
VkBufferUsageFlags usage;
bool preserve_at_quiet;
RBTreeNode_KVPU64ToMargaretMAOccupation** ret_ans;
/* Memory block owned by request. Doesn't contain information or even base rbtree structure */
RBTreeNode_KVPU64ToMargaretMAOccupation* new_node;
} MargaretMemAllocatorRequestAllocBuffer;
#include "../../../gen/l1/eve/margaret/VecMargaretMemAllocatorRequestAllocBuffer.h"
@ -298,7 +300,8 @@ typedef struct {
VkPipelineStageFlags current_dest_stage_mask;
VkAccessFlags current_dest_access_mask;
bool preserve_at_quiet;
RBTreeNode_KVPU64ToMargaretMAOccupation** ret_ans;
/* Memory block owned by request. Doesn't contain information or even base rbtree structure */
RBTreeNode_KVPU64ToMargaretMAOccupation* new_node;
} MargaretMemAllocatorRequestAllocImage;
#include "../../../gen/l1/eve/margaret/VecMargaretMemAllocatorRequestAllocImage.h"
@ -311,13 +314,24 @@ typedef struct {
VecMargaretMemAllocatorRequestAllocImage alloc_image;
} MargaretMemAllocatorRequests;
#define MargaretMemAllocatorRequests_new() \
(MargaretMemAllocatorRequests){ \
VecMargaretMemAllocatorRequestFreeOccupant_new(), VecMargaretMemAllocatorRequestFreeOccupant_new(), \
VecMargaretMemAllocatorRequestResizeBuffer_new(), VecMargaretMemAllocatorRequestResizeBuffer_new(), \
VecMargaretMemAllocatorRequestAllocBuffer_new(), VecMargaretMemAllocatorRequestAllocImage_new() }
void MargaretMemAllocatorRequests_sink(MargaretMemAllocatorRequests* self){
VecMargaretMemAllocatorRequestFreeOccupant_sink(&self->free_buf, 0);
VecMargaretMemAllocatorRequestFreeOccupant_sink(&self->free_image, 0);
VecMargaretMemAllocatorRequestResizeBuffer_sink(&self->shrink_buf, 0);
VecMargaretMemAllocatorRequestResizeBuffer_sink(&self->expand_buf, 0);
VecMargaretMemAllocatorRequestAllocBuffer_sink(&self->alloc_buf, 0);
VecMargaretMemAllocatorRequestAllocImage_sink(&self->alloc_image, 0);
for (size_t i = 0; i < self->alloc_buf.len; i++)
free(self->alloc_buf.buf[i].new_node);
self->alloc_buf.len = 0;
for (size_t i = 0; i < self->alloc_image.len; i++)
free(self->alloc_image.buf[i].new_node);
self->alloc_image.len = 0;
}
void MargaretMemAllocatorRequests_drop(MargaretMemAllocatorRequests self){
@ -325,10 +339,39 @@ void MargaretMemAllocatorRequests_drop(MargaretMemAllocatorRequests self){
VecMargaretMemAllocatorRequestFreeOccupant_drop(self.free_image);
VecMargaretMemAllocatorRequestResizeBuffer_drop(self.shrink_buf);
VecMargaretMemAllocatorRequestResizeBuffer_drop(self.expand_buf);
for (size_t i = 0; i < self.alloc_buf.len; i++)
free(self.alloc_buf.buf[i].new_node);
for (size_t i = 0; i < self.alloc_image.len; i++)
free(self.alloc_image.buf[i].new_node);
VecMargaretMemAllocatorRequestAllocBuffer_drop(self.alloc_buf);
VecMargaretMemAllocatorRequestAllocImage_drop(self.alloc_image);
}
/* Queue a buffer-allocation request.
 * Allocates the rbtree node that will eventually describe the occupation and
 * returns it to the caller; the request keeps ownership of the node until the
 * allocator consumes it (unconsumed nodes are freed by _sink/_drop). */
RBTreeNode_KVPU64ToMargaretMAOccupation* MargaretMemAllocatorRequests_alloc_buf(
    MargaretMemAllocatorRequests* self,
    U64 allocation_size, VkBufferUsageFlags usage, bool preserve_at_quiet
){
    RBTreeNode_KVPU64ToMargaretMAOccupation* node = safe_malloc(sizeof(*node));
    MargaretMemAllocatorRequestAllocBuffer request = {
        .allocation_size = allocation_size,
        .usage = usage,
        .preserve_at_quiet = preserve_at_quiet,
        .new_node = node,
    };
    VecMargaretMemAllocatorRequestAllocBuffer_append(&self->alloc_buf, request);
    return node;
}
/* Queue an image-allocation request.
 * Allocates the rbtree node that will eventually describe the occupation and
 * returns it to the caller; the request keeps ownership of the node until the
 * allocator consumes it (unconsumed nodes are freed by _sink/_drop).
 * Fix: store the node in the request. The designated initializer previously
 * omitted `.new_node`, leaving the field zeroed — the allocator later read
 * `req->new_node` as NULL (crash) and the malloc'd node leaked. Compare with
 * MargaretMemAllocatorRequests_alloc_buf, which does set it. */
RBTreeNode_KVPU64ToMargaretMAOccupation* MargaretMemAllocatorRequests_alloc_image(
    MargaretMemAllocatorRequests* self, U64 width, U64 height, VkFormat format, VkImageUsageFlags usage,
    VkImageLayout current_layout,
    VkPipelineStageFlags current_dest_stage_mask, VkAccessFlags current_dest_access_mask,
    bool preserve_at_quiet){
    RBTreeNode_KVPU64ToMargaretMAOccupation* new_node = safe_malloc(sizeof(RBTreeNode_KVPU64ToMargaretMAOccupation));
    VecMargaretMemAllocatorRequestAllocImage_append(&self->alloc_image, (MargaretMemAllocatorRequestAllocImage){
        .width = width, .height = height, .format = format, .usage_flags = usage, .current_layout = current_layout,
        .current_dest_stage_mask = current_dest_stage_mask, .current_dest_access_mask = current_dest_access_mask,
        .preserve_at_quiet = preserve_at_quiet, .new_node = new_node
    });
    return new_node;
}
typedef struct {
U64 start;
U64 len;
@ -699,7 +742,7 @@ void MargaretMemAllocator__shrink_some_buffer(
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
}, NULL, &shorter_buf) == VK_SUCCESS);
VkMemoryRequirements shorter_buf_req;
vkGetBufferMemoryRequirements(&self->device, shorter_buf, &shorter_buf_req);
vkGetBufferMemoryRequirements(self->device, shorter_buf, &shorter_buf_req);
check(U64_is_2pow(shorter_buf_req.alignment));
check((shorter_buf_req.memoryTypeBits & self->memory_type_id));
check((buf_start & (shorter_buf_req.alignment - 1)) == 0)
@ -832,12 +875,12 @@ MargaretMemAllocatorDemands MargaretMemAllocator_request_needs_defragmentation(
self->old_moved_buffers.len = 0;
for (size_t i = 0; i < alloc_buf_requests_require_cancel; i++) {
RBTreeNode_KVPU64ToMargaretMAOccupation* given_occ_it = *requests->alloc_buf.buf[i].ret_ans;
RBTreeNode_KVPU64ToMargaretMAOccupation* given_occ_it = requests->alloc_buf.buf[i].new_node;
assert(given_occ_it && given_occ_it->value.me.variant == MargaretMemoryOccupation_Buffer);
MargaretMemAllocator__get_rid_of_memory_occupant_and_node(self, given_occ_it);
}
for (size_t i = 0; i < alloc_img_requests_require_cancel; i++) {
RBTreeNode_KVPU64ToMargaretMAOccupation* given_occ_it = *requests->alloc_image.buf[i].ret_ans;
RBTreeNode_KVPU64ToMargaretMAOccupation* given_occ_it = requests->alloc_image.buf[i].new_node;
assert(given_occ_it && given_occ_it->value.me.variant == MargaretMemoryOccupation_Image);
MargaretMemAllocator__get_rid_of_memory_occupant_and_node(self, given_occ_it);
}
@ -926,8 +969,7 @@ MargaretMemAllocatorDemands MargaretMemAllocator_request_needs_defragmentation(
VkMemoryRequirements mem_requirements;
vkGetBufferMemoryRequirements(self->device, fresh_buf, &mem_requirements);
RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it = safe_calloc(1, sizeof(RBTreeNode_KVPU64ToMargaretMAOccupation));
*(req->ret_ans) = occ_it;
RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it = req->new_node; /* It was allocated for us */
occ_it->value.me.variant = MargaretMemoryOccupation_Buffer;
occ_it->value.me.buf.buffer = fresh_buf;
occ_it->value.me.buf.capacity = needed_buf_capacity;
@ -960,8 +1002,7 @@ MargaretMemAllocatorDemands MargaretMemAllocator_request_needs_defragmentation(
VkMemoryRequirements mem_requirements;
vkGetImageMemoryRequirements(self->device, fresh_image, &mem_requirements);
RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it = safe_calloc(1, sizeof(RBTreeNode_KVPU64ToMargaretMAOccupation));
*(req->ret_ans) = occ_it;
RBTreeNode_KVPU64ToMargaretMAOccupation* occ_it = req->new_node;
occ_it->value.me.variant = MargaretMemoryOccupation_Image;
occ_it->value.me.img = (MargaretMemoryOccupationImage){
.width = req->width, .height = req->height, .usage_flags = req->usage_flags,
@ -1280,7 +1321,7 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
return MargaretMemAllocator_request_needs_defragmentation(self, requests, buffer_expansion_record, 0, 0);
}
RBTreeNode_KVPU64ToMargaretMAOccupation* new_node = safe_malloc(sizeof(RBTreeNode_KVPU64ToMargaretMAOccupation));
RBTreeNode_KVPU64ToMargaretMAOccupation* new_node = req->new_node; /* It was allocated for us */
new_node->value.me = (MargaretMAOccupant){.variant = MargaretMemoryOccupation_Buffer, .buf = {
.buffer = fresh_buf, .capacity = req->allocation_size, .preserve_at_quiet = req->allocation_size,
.usage_flags = req->usage
@ -1318,7 +1359,7 @@ MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
return MargaretMemAllocator_request_needs_defragmentation(self, requests, buffer_expansion_record, 0, 0);
}
RBTreeNode_KVPU64ToMargaretMAOccupation* new_node = safe_malloc(sizeof(RBTreeNode_KVPU64ToMargaretMAOccupation));
RBTreeNode_KVPU64ToMargaretMAOccupation* new_node = req->new_node; /* MMARequests class allocated it for us */
new_node->value.me = (MargaretMAOccupant){.variant = MargaretMemoryOccupation_Image, .img = {
.image = fresh_img, .width = req->width, .height = req->height, .format = req->format,
.usage_flags = req->usage_flags, .current_layout = req->current_layout,

View File

@ -27,6 +27,7 @@
#include "../../../gen/l1/vulkan/VecVkSurfaceFormatKHR.h"
#include "../../../gen/l1/vulkan/OptionVkSurfaceFormatKHR.h"
#include <vulkan/vulkan_wayland.h>
#include "../../../gen/l1/vulkan/VecVkImageMemoryBarrier.h"
void margaret_create_debug_utils_messenger_EXT(
VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
@ -908,287 +909,6 @@ VkDeviceSize margaret_align_start_of_buffer(VkDeviceSize was, VkDeviceSize align
return was % alignment ? (was + alignment - was % alignment) : was;
}
// We first specify the necessary fields (`sz`, `usage`) and then the Snow White creation function fills the remaining fields (`offset`, `buffer`)
// Used in autogenerated code.
// Caller fills the "necessary" fields; margaret_initialize_buffers_and_images
// creates the VkBuffer and fills the "filled" fields (the offset within the
// shared VkDeviceMemory allocation and the created buffer handle).
typedef struct {
    // necessary
    VkDeviceSize sz;
    VkBufferUsageFlags usage;
    // filled
    VkDeviceSize offset;
    VkBuffer buffer;
} MargaretBufferInMemoryInfo;
typedef MargaretBufferInMemoryInfo* PtrMargaretBufferInMemoryInfo;
// Used in autogenerated code.
// Same contract as MargaretBufferInMemoryInfo, but for 2D images: the caller
// sets the "necessary" fields; margaret_initialize_buffers_and_images creates
// the VkImage and fills `offset` and `image`.
typedef struct {
    // necessary
    uint32_t width;
    uint32_t height;
    VkFormat format;
    VkImageUsageFlags usage;
    // filled
    VkDeviceSize offset;
    VkImage image;
} MargaretImageInMemoryInfo;
typedef MargaretImageInMemoryInfo* PtrMargaretImageInMemoryInfo;
// todo: remove all this useless crap for sissies
#include "../../../gen/l1/eve/margaret/VecMargaretBufferInMemoryInfo.h"
#include "../../../gen/l1/eve/margaret/VecAndSpan_PtrMargaretBufferInMemoryInfo.h"
#include "../../../gen/l1/eve/margaret/VecMargaretImageInMemoryInfo.h"
#include "../../../gen/l1/eve/margaret/VecAndSpan_PtrMargaretImageInMemoryInfo.h"
// A handy function to initialize buffers and images (attaching them to allocated memory).
// For each "hand": creates the VkBuffer/VkImage, queries its memory requirements,
// assigns it an aligned offset in one packed layout, then makes a SINGLE
// VkDeviceMemory allocation covering everything and binds each resource at its
// offset. Fills `offset` and `buffer`/`image` in every hand.
// Returns the VkDeviceMemory; the caller owns it and the created resources.
// Aborts on any Vulkan failure.
VkDeviceMemory margaret_initialize_buffers_and_images(
    VkPhysicalDevice physical_device, VkDevice device,
    MutSpanPtrMargaretBufferInMemoryInfo buffer_hands, MutSpanPtrMargaretImageInMemoryInfo image_hands,
    VkMemoryPropertyFlags properties
) {
    // Start with all bits set; AND-ed with every resource's memoryTypeBits so
    // only memory types acceptable to ALL resources survive.
    uint32_t memory_types_allowed = -1;
    VkDeviceSize offset = 0; // running end of the packed layout == final allocation size
    for (size_t i = 0; i < buffer_hands.len; i++) {
        MargaretBufferInMemoryInfo* buf_hand = *MutSpanPtrMargaretBufferInMemoryInfo_at(buffer_hands, i);
        VkBufferCreateInfo create_info = {
            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
            .size = buf_hand->sz,
            .usage = buf_hand->usage,
            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        };
        if (vkCreateBuffer(device, &create_info, NULL, &buf_hand->buffer) != VK_SUCCESS) {
            abortf("vkCreateBuffer");
        }
        VkMemoryRequirements memory_requirements;
        vkGetBufferMemoryRequirements(device, buf_hand->buffer, &memory_requirements);
        memory_types_allowed &= memory_requirements.memoryTypeBits;
        // Round the running offset up to this buffer's required alignment.
        offset = margaret_align_start_of_buffer(offset, memory_requirements.alignment);
        buf_hand->offset = offset;
        offset = offset + memory_requirements.size;
    }
    for (size_t i = 0; i < image_hands.len; i++) {
        MargaretImageInMemoryInfo* img_hand = *MutSpanPtrMargaretImageInMemoryInfo_at(image_hands, i);
        // Fixed choices for this engine: single-mip, single-layer, optimally
        // tiled 2D images starting in UNDEFINED layout.
        VkImageCreateInfo crinfo = {
            .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
            .imageType = VK_IMAGE_TYPE_2D,
            .format = img_hand->format,
            .extent = (VkExtent3D){
                .width = img_hand->width,
                .height = img_hand->height,
                .depth = 1,
            },
            .mipLevels = 1,
            .arrayLayers = 1,
            .samples = VK_SAMPLE_COUNT_1_BIT,
            .tiling = VK_IMAGE_TILING_OPTIMAL,
            .usage = img_hand->usage,
            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
            .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
        };
        if (vkCreateImage(device, &crinfo, NULL, &img_hand->image) != VK_SUCCESS)
            abortf("vkCreateImage");
        VkMemoryRequirements memory_requirements;
        vkGetImageMemoryRequirements(device, img_hand->image, &memory_requirements);
        memory_types_allowed &= memory_requirements.memoryTypeBits;
        offset = margaret_align_start_of_buffer(offset, memory_requirements.alignment);
        img_hand->offset = offset;
        offset = offset + memory_requirements.size;
    }
    // One allocation that satisfies every resource's size/alignment/type needs.
    VkMemoryAllocateInfo alloc_info = {
        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        .allocationSize = offset,
        .memoryTypeIndex = margaret_find_memory_type (physical_device, memory_types_allowed, properties),
    };
    VkDeviceMemory memory;
    if (vkAllocateMemory(device, &alloc_info, NULL, &memory) != VK_SUCCESS) {
        abortf("Having trouble allocating %lu bytes with memory type %u\n", alloc_info.allocationSize, alloc_info.memoryTypeIndex);
    }
    for (size_t i = 0; i < buffer_hands.len; i++) {
        MargaretBufferInMemoryInfo* buf_hand = *MutSpanPtrMargaretBufferInMemoryInfo_at(buffer_hands, i);
        if (vkBindBufferMemory(device, buf_hand->buffer, memory, buf_hand->offset) != VK_SUCCESS)
            abortf("vkBindBufferMemory");
    }
    for (size_t i = 0; i < image_hands.len; i++) {
        MargaretImageInMemoryInfo* img_hand = *MutSpanPtrMargaretImageInMemoryInfo_at(image_hands, i);
        if (vkBindImageMemory(device, img_hand->image, memory, img_hand->offset) != VK_SUCCESS)
            abortf("vkBindImageMemory");
    }
    return memory;
}
// todo: remove (superseded by MargaretMemAllocator).
// Generates a per-vertex-type helper `TV_buffer_crinfo_of_gpu_vbo(n)` that
// prepares creation info for a device-local vertex buffer holding `n`
// elements of type TV, filled via a transfer.
#define margaret_prep_buffer_mem_info_of_gpu_vbo_Definition(TV) \
MargaretBufferInMemoryInfo TV##_buffer_crinfo_of_gpu_vbo(size_t n) { \
    return (MargaretBufferInMemoryInfo){ \
        .sz = sizeof(TV) * n, \
        .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT \
    }; \
}
// todo: delete this fucking crap
MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_gpu_ebo(size_t n) {
return (MargaretBufferInMemoryInfo){ .sz = sizeof(uint32_t) * n,
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT };
}
// todo: wow 'Not very useful (but I used it anyway)' actually meant something
MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_small_local_ubo(size_t struct_sz) {
return (MargaretBufferInMemoryInfo){ .sz = struct_sz, .usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT };
}
// todo: get rid of this disgusting crap
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_srgba(uint32_t w, uint32_t h) {
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8G8B8A8_SRGB,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
}
// todo: remove fucking bullshit
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_unorm_8(uint32_t w, uint32_t h){
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8_UNORM,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
}
// todo: delete this crap
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_unorm_32(uint32_t w, uint32_t h) {
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8G8B8A8_UNORM,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
}
// todo: remove (superseded by MargaretMemAllocator).
// Creation info for a depth/stencil attachment of the given maximal extent and format.
MargaretImageInMemoryInfo
margaret_prep_image_mem_info_of_zbuffer(uint32_t max_width, uint32_t max_height, VkFormat zbuf_format) {
    MargaretImageInMemoryInfo info = {0};
    info.width = max_width;
    info.height = max_height;
    info.format = zbuf_format;
    info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
    return info;
}
/* todo: remove (superseded by MargaretMemAllocator).
 * Creation info for a color attachment that is also sampled later (e.g. an
 * intermediate render target). */
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_colorbuffer(U32 width, U32 height, VkFormat format) {
    MargaretImageInMemoryInfo info = {0};
    info.width = width;
    info.height = height;
    info.format = format;
    info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    return info;
}
// todo: delete
// todo: remove (superseded by MargaretMemAllocator).
/* Creation info for a device-local uniform buffer filled via transfer.
 * Fix: the transfer-destination flag must come from VkBufferUsageFlagBits
 * (VK_BUFFER_USAGE_TRANSFER_DST_BIT), not VkImageUsageFlagBits. The old
 * VK_IMAGE_USAGE_TRANSFER_DST_BIT only worked because both enums happen to
 * use the same bit value (0x2). */
MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_gpu_ubo(size_t struct_sz) {
    return (MargaretBufferInMemoryInfo){ .sz = struct_sz,
        .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT };
}
// todo: rename this appropriately.
// One copy job for margaret_rerecord_cmd_buff_for_texture_init: take the pixel
// data at `host_mem_buff_offset` inside the shared host-memory staging buffer
// and copy it into `dst_image` (full extent, mip 0, layer 0).
typedef struct {
    size_t host_mem_buff_offset;
    const MargaretImageInMemoryInfo* dst_image;
} MargaretCommandForImageCopying;
#include "../../../gen/l1/eve/margaret/VecAndSpan_MargaretCommandForImageCopying.h"
#include "../../../gen/l1/vulkan/VecVkImageMemoryBarrier.h"
/* Re-records `command_buffer` with a texture-upload sequence:
 *   1) barrier every destination image UNDEFINED -> TRANSFER_DST_OPTIMAL;
 *   2) vkCmdCopyBufferToImage each command's pixels from `host_mem_buffer`;
 *   3) barrier the images TRANSFER_DST_OPTIMAL -> READ_ONLY_OPTIMAL, handing
 *      them to (destination_stage_mask, destination_access_mask).
 * Nothing here submits or waits on the GPU; the caller does that.
 * (destination_stage_mask, destination_access_mask) are probably
 * (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT) */
void margaret_rerecord_cmd_buff_for_texture_init (
    VkCommandBuffer command_buffer, VkBuffer host_mem_buffer,
    SpanMargaretCommandForImageCopying commands,
    VkPipelineStageFlags destination_stage_mask, VkAccessFlags destination_access_mask
){
    if (vkResetCommandBuffer(command_buffer, 0) != VK_SUCCESS)
        abortf("vkResetCommandBuffer\n");
    VkCommandBufferBeginInfo begin_info = {.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,};
    if (vkBeginCommandBuffer(command_buffer, &begin_info) != VK_SUCCESS)
        abortf("vkBeginCommandBuffer\n");
    // One barrier per image; the Vec is reused for both barrier batches.
    VecVkImageMemoryBarrier barriers = VecVkImageMemoryBarrier_new_reserved(commands.len);
    for (size_t i = 0; i < commands.len; i++) {
        MargaretCommandForImageCopying img = commands.data[i];
        VecVkImageMemoryBarrier_append(&barriers, (VkImageMemoryBarrier){
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .srcAccessMask = 0,
            .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED, // discards any previous contents
            .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = img.dst_image->image,
            .subresourceRange = (VkImageSubresourceRange){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                .baseMipLevel = 0,
                .levelCount = 1,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
        });
    }
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
        0, /* Flags */
        0, NULL, 0, NULL,
        barriers.len, barriers.buf);
    barriers.len = 0; /* It's ok, VkImageMemoryBarrier is primitive */
    for (size_t i = 0; i < commands.len; i++) {
        MargaretCommandForImageCopying img = commands.data[i];
        VkBufferImageCopy region = {
            .bufferOffset = img.host_mem_buff_offset,
            .bufferRowLength = 0, // 0 => rows tightly packed at image width
            .bufferImageHeight = 0,
            .imageSubresource = (VkImageSubresourceLayers){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                .mipLevel = 0,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
            .imageOffset = {0, 0, 0},
            .imageExtent = {
                .width = img.dst_image->width,
                .height = img.dst_image->height,
                .depth = 1
            },
        };
        vkCmdCopyBufferToImage(command_buffer, host_mem_buffer, img.dst_image->image,
            // We assume that image was already transitioned to optimal layout transition_image_layout
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
    }
    /* filling buffers Vec again */
    for (size_t i = 0; i < commands.len; i++) {
        MargaretCommandForImageCopying img = commands.data[i];
        VecVkImageMemoryBarrier_append(&barriers, (VkImageMemoryBarrier){
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .dstAccessMask = destination_access_mask,
            .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            // NOTE(review): VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL needs Vulkan 1.3
            // (or VK_KHR_synchronization2) — confirm the device enables it;
            // VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL is the 1.0 equivalent.
            .newLayout = VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = img.dst_image->image,
            .subresourceRange = (VkImageSubresourceRange){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                .baseMipLevel = 0,
                .levelCount = 1,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
        });
    }
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, destination_stage_mask,
        0, /* Flags */
        0, NULL, 0, NULL,
        barriers.len, barriers.buf
    );
    VecVkImageMemoryBarrier_drop(barriers);
    if (vkEndCommandBuffer(command_buffer) != VK_SUCCESS)
        abortf("vkEndCommandBuffer");
}
// For texture
VkImageView margaret_create_view_for_image (
VkDevice device, VkImage image, VkFormat format, VkImageAspectFlags aspect_flags

View File

@ -97,15 +97,6 @@ VkRenderPass create_render_pass_0(VkDevice logical_device, VkFormat colorbuffer_
return render_pass;
}
MargaretBufferInMemoryInfo GenericMeshVertex_buffer_crinfo_of_gpu_vbo(size_t n) {
return (MargaretBufferInMemoryInfo){ .sz = sizeof(GenericMeshVertex) * n,
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT };
}
MargaretBufferInMemoryInfo ShinyMeshVertex_buffer_crinfo_of_gpu_vbo(size_t n) {
return (MargaretBufferInMemoryInfo){ .sz = sizeof(ShinyMeshVertex) * n,
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT };
}
PipelineHands create_graphics_pipeline_0(
VkDevice device, VkRenderPass render_pass, uint32_t subpass
) {
@ -789,6 +780,8 @@ typedef struct {
VkSemaphore image_available_semaphore;
VkSemaphore rendered_to_IT1_semaphore;
VkFence in_flight_fence;
// For miscellaneous uses (for init)
VkFence roxy;
} Jane_r0;
NODISCARD Jane_r0 Jane_r0_create(VkDevice device) {
@ -796,15 +789,17 @@ NODISCARD Jane_r0 Jane_r0_create(VkDevice device) {
.in_frame_transfer_complete = margaret_create_semaphore(device),
.image_available_semaphore = margaret_create_semaphore(device),
.rendered_to_IT1_semaphore = margaret_create_semaphore(device),
.in_flight_fence = margaret_create_fence(device, true)
.in_flight_fence = margaret_create_fence(device, true),
.roxy = margaret_create_fence(device, false),
};
}
void Jane_r0_destroy(VkDevice device, Jane_r0 jane) {
vkDestroyFence(device, jane.in_flight_fence, NULL);
vkDestroySemaphore(device, jane.rendered_to_IT1_semaphore, NULL);
vkDestroySemaphore(device, jane.image_available_semaphore, NULL);
vkDestroySemaphore(device, jane.in_frame_transfer_complete, NULL);
vkDestroyFence(device, jane.in_flight_fence, NULL);
vkDestroyFence(device, jane.roxy, NULL);
}
// todo: handle case where presentation and graphics are from the same family
@ -1027,15 +1022,13 @@ typedef struct {
SceneTemplate scene_template;
Scene scene;
MargaretMAIterator device_IT1_image;
MargaretMAIterator device_zbuffer_image;
VecGenericModelTexVulkPointers generic_model_tex_vulk_pointers;
VkImageView zbuffer_view;
VkImageView IT1_view;
VkFramebuffer IT1_framebuffer;
/* Descriptor sets */
VkDescriptorSet descriptor_set_for_pipeline_0b;
VkDescriptorSet descriptor_set_for_pipeline_1;
// Descriptor sets for pipeline_0a are stored in generic_model_tex_vulk_pointers
@ -1146,6 +1139,136 @@ void update_state(state_r0* state, uint32_t dur) {
}
}
/* It recreates image views, descriptor sets, framebuffers.
 * Everything (re)created here references memory occupants owned by the
 * MargaretMemAllocator (vk->device_IT1_image, vk->device_zbuffer_image, the
 * per-model textures, vk->scene.pipeline0_ubo), so it must be re-run whenever
 * the allocator moves/recreates those occupants. Counterpart teardown is
 * destroy_vulkan_reference_objects (currently a stub). */
void recreate_vulkan_references_objects(state_r0* state){
    vulkan_ctx_r0* vk = &state->vk;
    // Views + framebuffer for the intermediate render target (IT1) and z-buffer.
    vk->zbuffer_view = margaret_create_view_for_image(vk->device, vk->device_zbuffer_image->value.me.img.image,
        vk->zbuffer_format, VK_IMAGE_ASPECT_DEPTH_BIT);
    vk->IT1_view = margaret_create_view_for_image(vk->device, vk->device_IT1_image->value.me.img.image,
        vk->IT1_format, VK_IMAGE_ASPECT_COLOR_BIT);
    vk->IT1_framebuffer = create_IT1_framebuffer(vk->device,
        vk->IT1_view, vk->zbuffer_view, vk->render_pass_0,
        state->sane_image_extent_limit.width, state->sane_image_extent_limit.height);
    /* Busy creating views for all my textures */
    vk->generic_model_tex_vulk_pointers = VecGenericModelTexVulkPointers_new_reserved(vk->scene.generic_models.len);
    for (size_t i = 0; i < vk->scene.generic_models.len; i++) {
        GenericModelOnSceneMem* model = &vk->scene.generic_models.buf[i];
        // Per-model views (diffuse/normal/specular) + one pipeline-0a descriptor set.
        GenericModelTexVulkPointers P = (GenericModelTexVulkPointers){
            .diffuse_view = margaret_create_view_for_image(vk->device, model->diffuse_texture->value.me.img.image,
                model->diffuse_texture->value.me.img.format, VK_IMAGE_ASPECT_COLOR_BIT),
            .normal_view = margaret_create_view_for_image(vk->device, model->normal_texture->value.me.img.image,
                model->normal_texture->value.me.img.format, VK_IMAGE_ASPECT_COLOR_BIT),
            .specular_view = margaret_create_view_for_image(vk->device, model->specular_texture->value.me.img.image,
                model->specular_texture->value.me.img.format, VK_IMAGE_ASPECT_COLOR_BIT),
            .p_0a_set_0 = margaret_allocate_descriptor_set( vk->device,
                vk->descriptor_pool, vk->pipeline_hands_0a.descriptor_set_layout),
        };
        VecGenericModelTexVulkPointers_append(&vk->generic_model_tex_vulk_pointers, P);
        // Configuring my descriptor sets, that I just allocated:
        // binding 0 = shared pipeline-0 UBO, bindings 1..3 = the three texture samplers.
        VkDescriptorBufferInfo buffer_info_for_descriptor_0_in_set_0a = {
            .buffer = vk->scene.pipeline0_ubo->value.me.buf.buffer,
            .offset = 0, .range = sizeof(Pipeline0UBO),
        };
        VkDescriptorImageInfo image_info_for_descriptor_1_in_set_0a = {
            .sampler = vk->linear_sampler, .imageView = P.diffuse_view,
            .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
        };
        VkDescriptorImageInfo image_info_for_descriptor_2_in_set_0a = {
            .sampler = vk->nearest_sampler, .imageView = P.normal_view,
            .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
        };
        VkDescriptorImageInfo image_info_for_descriptor_3_in_set_0a = {
            .sampler = vk->nearest_sampler, .imageView = P.specular_view,
            .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
        };
        VkWriteDescriptorSet writes_in_descriptor_set[] = {
            {
                .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                .dstSet = P.p_0a_set_0,
                .dstBinding = 0,
                .dstArrayElement = 0,
                .descriptorCount = 1,
                .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                .pBufferInfo = &buffer_info_for_descriptor_0_in_set_0a,
            },
            {
                .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                .dstSet = P.p_0a_set_0,
                .dstBinding = 1,
                .dstArrayElement = 0,
                .descriptorCount = 1,
                .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                .pImageInfo = &image_info_for_descriptor_1_in_set_0a,
            },
            {
                .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                .dstSet = P.p_0a_set_0,
                .dstBinding = 2,
                .dstArrayElement = 0,
                .descriptorCount = 1,
                .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                .pImageInfo = &image_info_for_descriptor_2_in_set_0a,
            },
            {
                .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                .dstSet = P.p_0a_set_0,
                .dstBinding = 3,
                .dstArrayElement = 0,
                .descriptorCount = 1,
                .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                .pImageInfo = &image_info_for_descriptor_3_in_set_0a,
            },
        };
        vkUpdateDescriptorSets(vk->device, ARRAY_SIZE(writes_in_descriptor_set), writes_in_descriptor_set, 0, NULL);
    }
    vk->descriptor_set_for_pipeline_0b = margaret_allocate_descriptor_set(
        vk->device, vk->descriptor_pool, vk->pipeline_hands_0b.descriptor_set_layout);
    vk->descriptor_set_for_pipeline_1 = margaret_allocate_descriptor_set(
        vk->device, vk->descriptor_pool, vk->pipeline_hands_1.descriptor_set_layout);
    // todo: update the others + actually carry out the device-local allocation
    // MargaretMemAllocatorRequests request
    VkDescriptorBufferInfo buffer_info_for_descriptor_0_in_set_0b = {
        .buffer = vk->scene.pipeline0_ubo->value.me.buf.buffer,
        .offset = 0, .range = sizeof(Pipeline0UBO),
    };
    // Pipeline 1 samples the IT1 render target produced by pass 0.
    VkDescriptorImageInfo image_info_for_descriptor_0_in_set_1 = {
        .sampler = vk->nearest_sampler, .imageView = vk->IT1_view,
        .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
    };
    VkWriteDescriptorSet writes_in_descriptor_sets[] = {
        {
            .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
            .dstSet = vk->descriptor_set_for_pipeline_0b,
            .dstBinding = 0,
            .dstArrayElement = 0,
            .descriptorCount = 1,
            .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
            .pBufferInfo = &buffer_info_for_descriptor_0_in_set_0b,
        },
        {
            .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
            .dstSet = vk->descriptor_set_for_pipeline_1,
            .dstBinding = 0,
            .dstArrayElement = 0,
            .descriptorCount = 1,
            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
            .pImageInfo = &image_info_for_descriptor_0_in_set_1,
        },
    };
    vkUpdateDescriptorSets(vk->device, ARRAY_SIZE(writes_in_descriptor_sets), writes_in_descriptor_sets, 0, NULL);
}
/* Teardown counterpart of recreate_vulkan_references_objects.
 * NOTE(review): currently an unimplemented stub — the views, framebuffer and
 * the generic_model_tex_vulk_pointers Vec created there are never destroyed,
 * so repeated recreate calls leak Vulkan objects until this is written. */
void destroy_vulkan_reference_objects(state_r0* state){
    vulkan_ctx_r0* vk = &state->vk; // unused until the todos below are implemented
    // todo: vkDestroy all the views and all the framebuffers
    // todo: drop everything
}
void vulkano_frame_drawing(state_r0* state) {
check(vkWaitForFences(state->vk.device, 1, &state->vk.jane.in_flight_fence, VK_TRUE, UINT64_MAX) == VK_SUCCESS);
check(vkResetFences(state->vk.device, 1, &state->vk.jane.in_flight_fence) == VK_SUCCESS);
@ -1271,6 +1394,86 @@ void vulkano_frame_drawing(state_r0* state) {
}
}
// One copy job for copying_buffer_to_image_color_aspect_record_cmd_buf:
// copy the whole staging buffer occupant into the image occupant.
typedef struct {
    // todo: this iterator better be MargaretBA instead of MargaretMA
    MargaretMAIterator staging_buffer; // occupant must be the Buffer variant (asserted by the user)
    MargaretMAIterator image;          // occupant must be the Image variant (asserted by the user)
} CommandForImageCopying;
#include "../../../../gen/l1/eve/r0/VecAndSpan_CommandForImageCopying.h"
/* (destination_stage_mask, destination_access_mask) are probably
* (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT) */
/* Records, into command_buffer, the commands that upload every staging buffer in
 * `commands` into its destination image (color aspect, mip 0, array layer 0) and
 * transition each image UNDEFINED -> TRANSFER_DST_OPTIMAL -> SHADER_READ_ONLY_OPTIMAL.
 *
 * (destination_stage_mask, destination_access_mask) are probably
 * (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT) */
void copying_buffer_to_image_color_aspect_record_cmd_buf (
    VkCommandBuffer command_buffer, SpanCommandForImageCopying commands,
    VkPipelineStageFlags destination_stage_mask, VkAccessFlags destination_access_mask
){
    VecVkImageMemoryBarrier barriers = VecVkImageMemoryBarrier_new_reserved(commands.len);
    /* Phase 1: move every destination image into TRANSFER_DST_OPTIMAL. */
    for (size_t i = 0; i < commands.len; i++) {
        CommandForImageCopying pair = commands.data[i];
        assert(pair.image->value.me.variant == MargaretMemoryOccupation_Image);
        assert(pair.staging_buffer->value.me.variant == MargaretMemoryOccupation_Buffer);
        VecVkImageMemoryBarrier_append(&barriers, (VkImageMemoryBarrier){
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .srcAccessMask = 0,
            .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
            .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = pair.image->value.me.img.image,
            .subresourceRange = (VkImageSubresourceRange){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0,
                .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
            },
        });
    }
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
        0 /* Flags */, 0, NULL, 0, NULL, barriers.len, barriers.buf);
    VecVkImageMemoryBarrier_sink(&barriers, 0);
    /* Phase 2: record the actual buffer -> image copies. */
    for (size_t i = 0; i < commands.len; i++) {
        CommandForImageCopying pair = commands.data[i];
        VkBufferImageCopy region = {
            .bufferOffset = 0,
            .bufferRowLength = 0,   /* tightly packed */
            .bufferImageHeight = 0, /* tightly packed */
            .imageSubresource = (VkImageSubresourceLayers){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel = 0, .baseArrayLayer = 0, .layerCount = 1,
            },
            .imageOffset = {0, 0, 0},
            .imageExtent = {
                .width = pair.image->value.me.img.width, .height = pair.image->value.me.img.height, .depth = 1
            },
        };
        /* BUG FIX: destination must be the image of `pair.image`; previously this read
         * pair.staging_buffer->value.me.img.image — the `img` member of a Buffer-variant
         * union entry (see the asserts above), i.e. garbage passed as VkImage. */
        vkCmdCopyBufferToImage(command_buffer, pair.staging_buffer->value.me.buf.buffer,
            pair.image->value.me.img.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
    }
    /* Phase 3: transition images to the layout the descriptor writes in this file
     * declare (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL). The previous
     * VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL is a Vulkan 1.3 layout and mismatched
     * the VkDescriptorImageInfo.imageLayout used elsewhere. */
    for (size_t i = 0; i < commands.len; i++) {
        CommandForImageCopying pair = commands.data[i];
        VecVkImageMemoryBarrier_append(&barriers, (VkImageMemoryBarrier){
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .dstAccessMask = destination_access_mask,
            .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = pair.image->value.me.img.image,
            .subresourceRange = (VkImageSubresourceRange){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0,
                .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
            },
        });
    }
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, destination_stage_mask,
        0 /* Flags */, 0, NULL, 0, NULL, barriers.len, barriers.buf );
    VecVkImageMemoryBarrier_drop(barriers);
}
static void main_h_xdg_surface_configure(void *data, struct xdg_surface *xdg_surface, uint32_t serial){
state_r0 *state = data;
@ -1644,16 +1847,12 @@ int main() {
if (swapchain_details_res.variant != Result_Ok)
abortf("swapchain_details_res.variant != Result_Ok");
OptionVkFormat zbuffer_format = margaret_find_supported_zbuffer_format(vk->physical_device);
if (zbuffer_format.variant != Option_Some)
abortf("Could not find supported zbuffer format\n");
vk->zbuffer_format = zbuffer_format.some;
OptionVkFormat IT1_format = margaret_find_supported_hdr_buffer_format(vk->physical_device);
if (IT1_format.variant != Option_Some)
abortf("Could not find supported hdr buffer format\n");
vk->IT1_format = IT1_format.some;
OptionVkFormat zbuffer_format_found = margaret_find_supported_zbuffer_format(vk->physical_device);
vk->zbuffer_format = OptionVkFormat_expect(zbuffer_format_found);
OptionVkFormat IT1_format_found = margaret_find_supported_hdr_buffer_format(vk->physical_device);
vk->IT1_format = OptionVkFormat_expect(IT1_format_found);
vk->render_pass_0 = create_render_pass_0(vk->device, IT1_format.some, zbuffer_format.some);
vk->render_pass_0 = create_render_pass_0(vk->device, vk->IT1_format, vk->zbuffer_format);
vk->pipeline_hands_0a = create_graphics_pipeline_0(vk->device, vk->render_pass_0, 0);
vk->pipeline_hands_0b = create_graphics_pipeline_0_b(vk->device, vk->render_pass_0, 0);
@ -1720,362 +1919,193 @@ int main() {
// .topology = generate_shiny_cube(0.5f), .max_instance_count = 5
// });
// todo: continue from here
vk->my_cam_control_info = CamControlInfo_new();
vk->Buba_control_info = (vec3){0};
// vk->my_cam_control_info = CamControlInfo_new();
// vk->Buba_control_info = (vec3){0};
vk->scene = Scene_new();
{
size_t offset_in_attr_buffer = 0;
for (size_t mi = 0; mi < vk->scene_template.generic_models.len; mi++) {
const GenericMeshInSceneTemplate* M = VecGenericMeshInSceneTemplate_at(&vk->scene_template.generic_models, mi);
const GenericModelTopAndTexInMemoryInfo* MM = VecGenericModelTopAndTexInMemoryInfo_at(&vk->device_generic_models_top_and_tex, mi);
VecUsedGenericModelOnScene_append(&vk->scene.generic_models, (UsedGenericModelOnScene){
.model = (GenericModelOnSceneMem){
.vbo = MM->vbo.buffer,
.ebo = MM->ebo.buffer,
.indexes = M->topology.indexes.len,
.instance_attr_buf = vk->device_instance_attrs_for_models.buffer,
.instance_attr_buf_offset = offset_in_attr_buffer,
.limit_max_instance_count = M->max_instance_count
// todo: remove vbo, ebo from here (we don't need it here).
// as you may see, I didn't specifid images (that's becasuse I fpn need to)
},
.instances = VecGenericMeshInstance_new(),
});
offset_in_attr_buffer += M->max_instance_count * sizeof(GenericMeshInstance);
}
for (size_t mi = 0; mi < vk->scene_template.shiny_models.len; mi++) {
const ShinyMeshInSceneTemplate* M = VecShinyMeshInSceneTemplate_at(&vk->scene_template.shiny_models, mi);
const ShinyModelTopInMemoryInfo* MM = VecShinyModelTopInMemoryInfo_at(&vk->device_shiny_models_top, mi);
VecUsedShinyModelOnScene_append(&vk->scene.shiny_models, (UsedShinyModelOnScene){
.model = (ShinyModelOnSceneMem){
.vbo = MM->vbo.buffer,
.ebo = MM->ebo.buffer,
.indexes = M->topology.indexes.len,
.instance_attr_buf = vk->device_instance_attrs_for_models.buffer,
.instance_attr_buf_offset = offset_in_attr_buffer,
.limit_max_instance_count = M->max_instance_count
},
.instances = VecShinyMeshInstance_new(),
});
offset_in_attr_buffer += M->max_instance_count * sizeof(ShinyMeshInstance);
}
MargaretMemAllocatorRequests initial_req_for_staging = MargaretMemAllocatorRequests_new();
MargaretMemAllocatorRequests initial_req_for_device_local = MargaretMemAllocatorRequests_new();
VecGenericModelOnSceneMem generic_model_mem = VecGenericModelOnSceneMem_new();
VecShinyModelOnSceneMem shiny_model_mem = VecShinyModelOnSceneMem_new();
for (size_t i = 0; i < vk->scene_template.generic_models.len; i++) {
const GenericMeshInSceneTemplate* template = &vk->scene_template.generic_models.buf[i];
// TextureDataR8G8B8A8 pixe
GenericModelOnSceneMem mem;
mem.indexes = template->topology.indexes.len;
mem.instance_vec_capacity = 100;
mem.instance_vec_len = 0;
mem.staging_vbo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
template->topology.vertices.len * sizeof(GenericMeshVertex), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_ebo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
template->topology.indexes.len * sizeof(U32), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_instance_attr_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
mem.instance_vec_capacity * sizeof(GenericMeshInstance), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true);
mem.pixels_diffuse = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&template->diffuse_texture_path)),
mem.pixels_normal = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&template->normal_texture_path)),
mem.pixels_specular = TextureDataR8_read_from_png_nofail(VecU8_to_span(&template->specular_texture_path)),
mem.staging_diffuse_tex_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
mem.pixels_diffuse.pixels.len * sizeof(cvec4), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_normal_tex_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
mem.pixels_normal.pixels.len * sizeof(cvec4), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_specular_tex_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
mem.pixels_specular.pixels.len * sizeof(U8), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.vbo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
template->topology.vertices.len * sizeof(GenericMeshVertex),
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.ebo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
template->topology.indexes.len * sizeof(U32),
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.instance_attr_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
mem.instance_vec_capacity * sizeof(GenericMeshInstance),
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.diffuse_texture = MargaretMemAllocatorRequests_alloc_image(&initial_req_for_device_local,
mem.pixels_diffuse.width, mem.pixels_diffuse.height,
VK_FORMAT_R8G8B8A8_SRGB,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT, true);
mem.normal_texture = MargaretMemAllocatorRequests_alloc_image(&initial_req_for_device_local,
mem.pixels_normal.width, mem.pixels_normal.height,
VK_FORMAT_R8G8B8A8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT, true);
mem.normal_texture = MargaretMemAllocatorRequests_alloc_image(&initial_req_for_device_local,
mem.pixels_normal.width, mem.pixels_normal.height,
VK_FORMAT_R8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT, true);
VecGenericModelOnSceneMem_append(&generic_model_mem, mem);
}
for (int X = 0; X < 10; X++) {
for (int Z = 0; Z < 10; Z++) {
VecGenericMeshInstance_append(&VecUsedGenericModelOnScene_mat(&vk->scene.generic_models, 0)->instances,
(GenericMeshInstance){ .model_t = marie_translation_mat4((vec3){11.f * (float)X, -6, 4.f * (float)Z}) });
}
}
VecGenericMeshInstance_append(&VecUsedGenericModelOnScene_mat(&vk->scene.generic_models, 1)->instances,
(GenericMeshInstance){ .model_t = marie_translation_mat4(vk->Buba_control_info)
});
VecGenericMeshInstance_append(&VecUsedGenericModelOnScene_mat(&vk->scene.generic_models, 2)->instances,
(GenericMeshInstance){ .model_t = marie_translation_mat4((vec3){5, -7, 6})});
VecGenericMeshInstance_append(&VecUsedGenericModelOnScene_mat(&vk->scene.generic_models, 3)->instances,
(GenericMeshInstance){ .model_t = marie_translation_mat4((vec3){5, -7, -6})});
for (U64 i = 0; i < 5; i++) {
VecShinyMeshInstance_append(&VecUsedShinyModelOnScene_mat(&vk->scene.shiny_models, 0)->instances,
(ShinyMeshInstance){ .model_t = marie_translation_mat4((vec3){
(float)((i * i * 10 + i * 4 + 1) % 13) + 2,
(float)((i * i * 11 + i * 2 + 2) % 13),
(float)((i * i * 9 + i * 1 + 4) % 13) + 3}), .color_off = (vec3){0.6f, 0.2f, 0.2f}});
}
VecShinyMeshInstance_append(&VecUsedShinyModelOnScene_mat(&vk->scene.shiny_models, 1)->instances,
(ShinyMeshInstance){ .model_t = marie_translation_mat4((vec3){-5, 0, 3}), .color_off = (vec3){0.3f, 0.5f, 0.5f}});
// todo: synchronize them with my cool light sources)
// vk->device_generic_models_top_and_tex = VecGenericModelTopAndTexInMemoryInfo_new_reserved(vk->scene_template.generic_models.len);
// for (size_t i = 0; i < vk->scene_template.generic_models.len; i++) {
// const GenericMeshInSceneTemplate* M = VecGenericMeshInSceneTemplate_at(&vk->scene_template.generic_models, i);
// TextureDataR8G8B8A8 reading_diffuse = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&M->diffuse_texture_path));
// TextureDataR8G8B8A8 reading_normal = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&M->normal_texture_path));
// TextureDataR8 reading_specular = TextureDataR8_read_from_png_nofail(VecU8_to_span(&M->specular_texture_path));
// VecGenericModelTopAndTexInMemoryInfo_append(&vk->device_generic_models_top_and_tex,
// (GenericModelTopAndTexInMemoryInfo){
// .vbo = GenericMeshVertex_buffer_crinfo_of_gpu_vbo(M->topology.vertices.len),
// .ebo = margaret_prep_buffer_mem_info_of_gpu_ebo(M->topology.indexes.len),
// .reading_diffuse = reading_diffuse, .reading_normal = reading_normal, .reading_specular = reading_specular,
// .diffuse = margaret_prep_image_mem_info_of_gpu_texture_srgba(reading_diffuse.width, reading_diffuse.height),
// .normal = margaret_prep_image_mem_info_of_gpu_texture_unorm_32(reading_normal.width, reading_normal.height),
// .specular = margaret_prep_image_mem_info_of_gpu_texture_unorm_8(reading_specular.width, reading_specular.height),
// /* image views will be created after the images are allocated */
// /* descriptor set for each model will be allocated later */
// });
// }
//
//
// VecU64 offset_of_image_in_host_mem_buff_during_init = VecU64_new_zeroinit(vk->device_generic_models_top_and_tex.len * 3);
// U64 grand_total_texture_size_in_host_mem = 0;
// {
// U64 offset = 0;
// for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
// offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 0] = offset;
// offset += TextureDataR8G8B8A8_get_size_in_bytes(&vk->device_generic_models_top_and_tex.buf[i].reading_diffuse);
// offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 1] = offset;
// offset += TextureDataR8G8B8A8_get_size_in_bytes(&vk->device_generic_models_top_and_tex.buf[i].reading_normal);
// offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 2] = offset;
// offset += TextureDataR8_get_size_in_bytes(&vk->device_generic_models_top_and_tex.buf[i].reading_specular);
// }
// grand_total_texture_size_in_host_mem = offset;
// }
vk->device_shiny_models_top = VecShinyModelTopInMemoryInfo_new_reserved(vk->scene_template.shiny_models.len);
for (size_t i = 0; i < vk->scene_template.shiny_models.len; i++) {
const ShinyMeshInSceneTemplate* M = VecShinyMeshInSceneTemplate_at(&vk->scene_template.shiny_models, i);
VecShinyModelTopInMemoryInfo_append(&vk->device_shiny_models_top,
(ShinyModelTopInMemoryInfo){
.vbo = ShinyMeshVertex_buffer_crinfo_of_gpu_vbo(M->topology.vertices.len),
.ebo = margaret_prep_buffer_mem_info_of_gpu_ebo(M->topology.indexes.len),
});
const ShinyMeshTopology* temp_topology = &vk->scene_template.shiny_models.buf[i];
ShinyModelOnSceneMem mem;
mem.indexes = temp_topology->indexes.len;
mem.instance_vec_capacity = 100;
mem.instance_vec_len = 0;
mem.staging_vbo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
temp_topology->vertices.len * sizeof(ShinyMeshVertex), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_ebo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
temp_topology->indexes.len * sizeof(U32), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_instance_attr_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
mem.instance_vec_capacity * sizeof(ShinyMeshInstance), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true);
mem.vbo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
temp_topology->vertices.len * sizeof(ShinyMeshVertex),
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.ebo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
temp_topology->indexes.len * sizeof(U32),
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.instance_attr_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
mem.instance_vec_capacity * sizeof(ShinyMeshInstance),
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
VecShinyModelOnSceneMem_append(&shiny_model_mem, mem);
}
// We have only one staging buffer in host memory (because we don't really need more)
vk->host_mem_buffer = (MargaretBufferInMemoryInfo){ .sz =
MAX_U64(SceneTemplate_get_space_for_initial_model_topology_transfer(&vk->scene_template),
MAX_U64(SceneTemplate_get_space_needed_for_widest_state_transfer(&vk->scene_template),
MAX_U64(grand_total_texture_size_in_host_mem, 0)))
, .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT };
PtrMargaretBufferInMemoryInfo host_mem_buffer_SPAN[1] = {&vk->host_mem_buffer};
vk->host_mem = margaret_initialize_buffers_and_images(vk->physical_device, vk->device,
(MutSpanPtrMargaretBufferInMemoryInfo){.data = host_mem_buffer_SPAN, .len = 1},
(MutSpanPtrMargaretImageInMemoryInfo){ 0 },
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
MargaretMAIterator pipeline0_staging_ubo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
sizeof(Pipeline0UBO),
VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true);
MargaretMAIterator pipeline0_ubo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
sizeof(Pipeline0UBO),
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
vk->device_lighting_ubo = margaret_prep_buffer_mem_info_of_gpu_ubo(sizeof(Pipeline0UBO));
vk->device_instance_attrs_for_models = (MargaretBufferInMemoryInfo){
.sz = SceneTemplate_get_space_needed_for_all_instance_attributes(&vk->scene_template),
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT
};
vk->scene = Scene_new(generic_model_mem, shiny_model_mem, pipeline0_staging_ubo, pipeline0_ubo);
vk->device_IT1_image = MargaretMemAllocatorRequests_alloc_image(&initial_req_for_device_local,
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, vk->IT1_format, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
/* We do layout transitions in renderpass (the easy way) + we don't copy this image */
VK_IMAGE_LAYOUT_UNDEFINED, 0, 0, false);
vk->device_zbuffer_image = MargaretMemAllocatorRequests_alloc_image(&initial_req_for_device_local,
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, vk->zbuffer_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
/* We do layout transitions in renderpass (the easy way) + we don't copy this image */
VK_IMAGE_LAYOUT_UNDEFINED, 0, 0, false);
VecPtrMargaretBufferInMemoryInfo device_mem_buffers_SPAN = VecPtrMargaretBufferInMemoryInfo_new_reserved(
vk->device_generic_models_top_and_tex.len + vk->device_shiny_models_top.len);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &vk->device_lighting_ubo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &vk->device_instance_attrs_for_models);
vk->device_IT1_image = margaret_prep_image_mem_info_of_colorbuffer(
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, IT1_format.some);
vk->device_zbuffer_image = margaret_prep_image_mem_info_of_zbuffer(
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, zbuffer_format.some);
VecPtrMargaretImageInMemoryInfo device_mem_images_SPAN =
VecPtrMargaretImageInMemoryInfo_new_reserved(2 + 3 * vk->scene_template.generic_models.len);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &vk->device_IT1_image);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &vk->device_zbuffer_image);
for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = &vk->device_generic_models_top_and_tex.buf[i];
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->vbo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->ebo);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->diffuse);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->normal);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->specular);
}
for (size_t i = 0; i < vk->device_shiny_models_top.len; i++) {
ShinyModelTopInMemoryInfo* M = &vk->device_shiny_models_top.buf[i];
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->vbo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->ebo);
}
vk->device_mem = margaret_initialize_buffers_and_images(vk->physical_device, vk->device,
VecPtrMargaretBufferInMemoryInfo_to_mspan(&device_mem_buffers_SPAN),
VecPtrMargaretImageInMemoryInfo_to_mspan(&device_mem_images_SPAN),
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
/* device_mem_buffers_SPAN, device_mem_images_SPAN invalidated */
VecPtrMargaretBufferInMemoryInfo_drop(device_mem_buffers_SPAN);
VecPtrMargaretImageInMemoryInfo_drop(device_mem_images_SPAN);
MargaretMemAllocatorDemands tunturun = MargaretMemAllocator_carry_out_request(&vk->host_visible_coherent_mem, &initial_req_for_staging);
check(tunturun == MARGARET_MA_DEMANDS_DEFRAGMENTATION_BIT);
tunturun = MargaretMemAllocator_carry_out_request(&vk->device_local_mem, &initial_req_for_device_local);
check(tunturun == MARGARET_MA_DEMANDS_DEFRAGMENTATION_BIT);
{
SceneTemplate_copy_initial_model_topology_and_rerecord_transfer_cmd(
&vk->scene_template, &vk->scene, vk->host_mem_buffer_mem,
vk->transfer_command_buf, vk->host_mem_buffer.buffer);
VkSubmitInfo submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1, .pCommandBuffers = &vk->transfer_command_buf,
};
if (vkQueueSubmit(vk->queues.graphics_queue, 1, &submit_info, NULL) != VK_SUCCESS)
abortf("vkQueueSubmit\n");
}
vkDeviceWaitIdle(vk->device);
{
VecMargaretCommandForImageCopying commands =
VecMargaretCommandForImageCopying_new_reserved(vk->device_generic_models_top_and_tex.len);
for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
const GenericModelTopAndTexInMemoryInfo* M =
VecGenericModelTopAndTexInMemoryInfo_at(&vk->device_generic_models_top_and_tex, i);
U64 diffuse_offset = *VecU64_at(&offset_of_image_in_host_mem_buff_during_init, 3 * i + 0);
U64 normal_offset = *VecU64_at(&offset_of_image_in_host_mem_buff_during_init, 3 * i + 1);
U64 specular_offset = *VecU64_at(&offset_of_image_in_host_mem_buff_during_init, 3 * i + 2);
memcpy(vk->host_mem_buffer_mem + diffuse_offset,
M->reading_diffuse.pixels.buf, TextureDataR8G8B8A8_get_size_in_bytes(&M->reading_diffuse));
memcpy(vk->host_mem_buffer_mem + normal_offset,
M->reading_normal.pixels.buf, TextureDataR8G8B8A8_get_size_in_bytes(&M->reading_normal));
memcpy(vk->host_mem_buffer_mem + specular_offset,
M->reading_specular.pixels.buf, TextureDataR8_get_size_in_bytes(&M->reading_specular));
VecMargaretCommandForImageCopying_append(&commands, (MargaretCommandForImageCopying){
.dst_image = &M->diffuse, .host_mem_buff_offset = diffuse_offset});
VecMargaretCommandForImageCopying_append(&commands, (MargaretCommandForImageCopying){
.dst_image = &M->normal, .host_mem_buff_offset = normal_offset});
VecMargaretCommandForImageCopying_append(&commands, (MargaretCommandForImageCopying){
.dst_image = &M->specular, .host_mem_buff_offset = specular_offset});
GenericModelOnSceneMem *model_g = VecGenericModelOnSceneMem_mat(&vk->scene.generic_models, 0);
GenericMeshInstance* g_instances = (GenericMeshInstance*)MargaretMAIterator_get_mapped(model_g->staging_instance_attr_buf);
assert(model_g->instance_vec_capacity == 100);
for (int X = 0; X < 10; X++) {
for (int Z = 0; Z < 10; Z++) {
g_instances[X * 10 + Z] = (GenericMeshInstance){
.model_t = marie_translation_mat4((vec3){11.f * (float)X, -6, 4.f * (float)Z}) };
}
}
margaret_rerecord_cmd_buff_for_texture_init(vk->transfer_command_buf, vk->host_mem_buffer.buffer,
VecMargaretCommandForImageCopying_to_span(&commands),
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
VecMargaretCommandForImageCopying_drop(commands);
model_g->instance_vec_len = 0;
ShinyModelOnSceneMem* model_sh = VecShinyModelOnSceneMem_mat(&vk->scene.shiny_models, 0);
ShinyMeshInstance* sh_instances = (ShinyMeshInstance*)MargaretMAIterator_get_mapped(model_sh->staging_instance_attr_buf);
assert(model_sh->instance_vec_capacity == 100);
for (int X = 0; X < 10; X++) {
for (int Z = 0; Z < 10; Z++) {
sh_instances[X * 10 + Z] = (ShinyMeshInstance){
.model_t = marie_translation_mat4((vec3){11.f * (float)X, 10, 4.f * (float)Z}),
.color_on = {0, 1, 0}, .color_off = {1, 0.4f, 0.5f} };
}
}
model_sh->instance_vec_len = 0;
VkSubmitInfo submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1, .pCommandBuffers = &vk->transfer_command_buf,
};
if (vkQueueSubmit(vk->queues.graphics_queue, 1, &submit_info, NULL) != VK_SUCCESS)
abortf("vkQueueSubmit\n");
}
vkDeviceWaitIdle(vk->device);
// We sent everything we needed. but host_mem_buffer_mem may be used later
// My zbuffer also needs a view
vk->zbuffer_view = margaret_create_view_for_image(vk->device,
&vk->device_zbuffer_image, VK_IMAGE_ASPECT_DEPTH_BIT);
/* Here we create an image view into a temporary IT1 texture and a framebuffer for scene rendering */
vk->IT1_view = margaret_create_view_for_image(vk->device,
&vk->device_IT1_image, VK_IMAGE_ASPECT_COLOR_BIT);
/* Busy creating views for all my textures */
for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = VecGenericModelTopAndTexInMemoryInfo_mat(&vk->device_generic_models_top_and_tex, i);
M->diffuse_view = margaret_create_view_for_image(vk->device, &M->diffuse, VK_IMAGE_ASPECT_COLOR_BIT);
M->normal_view = margaret_create_view_for_image(vk->device, &M->normal, VK_IMAGE_ASPECT_COLOR_BIT);
M->specular_view = margaret_create_view_for_image(vk->device, &M->specular, VK_IMAGE_ASPECT_COLOR_BIT);
Pipeline0UBO* ubo = (Pipeline0UBO*)MargaretMAIterator_get_mapped(vk->scene.pipeline0_staging_ubo);
assert(pipeline_0_ubo_point_light_max_count >= 100);
ubo->point_light_count = 100;
ubo->spotlight_count = 0;
for (int X = 0; X < 10; X++) {
for (int Z = 0; Z < 10; Z++) {
ubo->point_light_arr[X * 10 + Z] = (Pipeline0PointLight){
.pos = (vec3){11.f * (float)X, 10, 4.f * (float)Z},
.color = {0, 1, 0}
};
}
}
// todo: synchronize them with my cool light sources)
}
/* Here we both copy from topology + textures to staging buffers and record commands that will copy staging data
* to device local memory */
margaret_reset_and_begin_command_buffer(vk->transfer_command_buf);
SceneTemplate_copy_initial_model_topology_cmd_buf_recording(
&vk->scene_template, &vk->scene, vk->transfer_command_buf);
{
VecCommandForImageCopying init = VecCommandForImageCopying_new_reserved(3 * vk->scene.generic_models.len);
for (U64 i = 0; i < vk->scene.generic_models.len; i++) {
GenericModelOnSceneMem* model = &vk->scene.generic_models.buf[i];
memcpy(MargaretMAIterator_get_mapped(model->staging_diffuse_tex_buf), model->pixels_diffuse.pixels.buf,
TextureDataR8G8B8A8_get_size_in_bytes(&model->pixels_diffuse));
memcpy(MargaretMAIterator_get_mapped(model->staging_normal_tex_buf), model->pixels_normal.pixels.buf,
TextureDataR8G8B8A8_get_size_in_bytes(&model->pixels_normal));
memcpy(MargaretMAIterator_get_mapped(model->staging_specular_tex_buf), model->pixels_specular.pixels.buf,
TextureDataR8_get_size_in_bytes(&model->pixels_specular));
VecCommandForImageCopying_append(&init, (CommandForImageCopying){
.staging_buffer = model->staging_diffuse_tex_buf, .image = model->diffuse_texture });
VecCommandForImageCopying_append(&init, (CommandForImageCopying){
.staging_buffer = model->staging_normal_tex_buf, .image = model->normal_texture });
VecCommandForImageCopying_append(&init, (CommandForImageCopying){
.staging_buffer = model->staging_specular_tex_buf, .image = model->specular_texture });
}
vk->IT1_framebuffer = create_IT1_framebuffer(vk->device,
vk->IT1_view, vk->zbuffer_view, vk->render_pass_0, MAX_WIN_WIDTH, MAX_WIN_HEIGHT);
// Right now I only have one light source
VecPipeline0PointLight_append(&vk->scene.point_lights, (Pipeline0PointLight){.pos = {0}, .color = {100, 100, 100}});
// todo: create descripto sets for generic model textures
// todo: and then fill it up (using writes) after each defarmentatoon
// todo: right now there are no defaragmentations, but I will create a function for this stuff later
for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = &vk->device_generic_models_top_and_tex.buf[i];
M->p_0a_set_0 = margaret_allocate_descriptor_set(
vk->device, vk->descriptor_pool, vk->pipeline_hands_0a.descriptor_set_layout);
copying_buffer_to_image_color_aspect_record_cmd_buf(vk->transfer_command_buf,
VecCommandForImageCopying_to_span(&init), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
VecCommandForImageCopying_drop(init);
}
vk->descriptor_set_for_pipeline_0b = margaret_allocate_descriptor_set(
vk->device, vk->descriptor_pool, vk->pipeline_hands_0b.descriptor_set_layout);
vk->descriptor_set_for_pipeline_1 = margaret_allocate_descriptor_set(
vk->device, vk->descriptor_pool, vk->pipeline_hands_1.descriptor_set_layout);
margaret_end_command_buffer(vk->transfer_command_buf);
check(vkQueueSubmit(vk->queues.graphics_queue, 1, &(VkSubmitInfo){
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, .commandBufferCount = 1, .pCommandBuffers = &vk->transfer_command_buf,
}, NULL) == VK_SUCCESS);
check(vkWaitForFences(vk->device, 1, &vk->jane.roxy, VK_TRUE, UINT64_MAX));
// Configuring my descriptor sets, that I just allocated
for (size_t i = 0; i < vk->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = &vk->device_generic_models_top_and_tex.buf[i];
VkDescriptorBufferInfo buffer_info_for_descriptor_0_in_set_0a = {
.buffer = vk->device_lighting_ubo.buffer,
.offset = 0,
.range = sizeof(Pipeline0UBO),
};
VkDescriptorImageInfo image_info_for_descriptor_1_in_set_0a = {
.sampler = vk->linear_sampler,
.imageView = M->diffuse_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkDescriptorImageInfo image_info_for_descriptor_2_in_set_0a = {
.sampler = vk->nearest_sampler,
.imageView = M->normal_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkDescriptorImageInfo image_info_for_descriptor_3_in_set_0a = {
.sampler = vk->nearest_sampler,
.imageView = M->specular_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkWriteDescriptorSet writes_in_descriptor_set[] = {
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = M->p_0a_set_0,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.pBufferInfo = &buffer_info_for_descriptor_0_in_set_0a,
},
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = M->p_0a_set_0,
.dstBinding = 1,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &image_info_for_descriptor_1_in_set_0a,
},
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = M->p_0a_set_0,
.dstBinding = 2,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &image_info_for_descriptor_2_in_set_0a,
},
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = M->p_0a_set_0,
.dstBinding = 3,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &image_info_for_descriptor_3_in_set_0a,
},
};
vkUpdateDescriptorSets(vk->device, ARRAY_SIZE(writes_in_descriptor_set), writes_in_descriptor_set, 0, NULL);
}
VkDescriptorBufferInfo buffer_info_for_descriptor_0_in_set_0b = {
.buffer = vk->device_lighting_ubo.buffer,
.offset = 0,
.range = sizeof(Pipeline0UBO),
};
VkDescriptorImageInfo image_info_for_descriptor_0_in_set_1 = {
.sampler = vk->nearest_sampler,
.imageView = vk->IT1_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkWriteDescriptorSet writes_in_descriptor_sets[] = {
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = vk->descriptor_set_for_pipeline_0b,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.pBufferInfo = &buffer_info_for_descriptor_0_in_set_0b,
},
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = vk->descriptor_set_for_pipeline_1,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &image_info_for_descriptor_0_in_set_1,
},
};
vkUpdateDescriptorSets(vk->device, ARRAY_SIZE(writes_in_descriptor_sets), writes_in_descriptor_sets, 0, NULL);
recreate_vulkan_references_objects(&state);
state.prev_key_frame_time = margaret_clock_gettime_monotonic_raw();
state.frame_count_since_key = 0;
@ -2093,33 +2123,11 @@ int main() {
vkDestroySampler(vk->device, vk->linear_sampler, NULL);
vkDestroySampler(vk->device, vk->nearest_sampler, NULL);
// vkDestroyImageView(vk->device, vk->cyl_1_normal_texture_view, NULL);
// vkDestroyImageView(vk->device, vk->cyl_1_diffuse_texture_view, NULL);
vkDestroyFramebuffer(vk->device, vk->IT1_framebuffer, NULL);
vkDestroyImageView(vk->device, vk->IT1_view, NULL);
vkDestroyImageView(vk->device, vk->zbuffer_view, NULL);
Scene_drop(vk->scene);
vkDestroyCommandPool(vk->device, vk->command_pool, NULL);
vkUnmapMemory(vk->device, vk->host_mem);
vkFreeMemory(vk->device, vk->host_mem, NULL);
vkFreeMemory(vk->device, vk->device_mem, NULL);
// todo: delete all the crap
// vkDestroyImage(vk->device, vk->device_cyl_1_diffuse_texture.image, NULL);
// vkDestroyImage(vk->device, vk->device_cyl_1_normal_texture.image, NULL);
vkDestroyImage(vk->device, vk->device_IT1_image.image, NULL);
vkDestroyImage(vk->device, vk->device_zbuffer_image.image, NULL);
vkDestroyBuffer(vk->device, vk->device_lighting_ubo.buffer, NULL);
vkDestroyBuffer(vk->device, vk->device_instance_attrs_for_models.buffer, NULL);
vkDestroyBuffer(vk->device, vk->host_mem_buffer.buffer, NULL);
// TextureDataR8G8B8A8_drop(vk->cyl_1_normal_tex);
// TextureDataR8G8B8A8_drop(vk->cyl_1_diffuse_tex);
SceneTemplate_drop(vk->scene_template);
MargaretSwapchainBundle_drop_with_device(vk->device, vk->swfb);

View File

@ -7,18 +7,26 @@
#include "../../margaret/vulkan_memory_claire.h"
typedef struct {
MargaretMAIterator staging_vbo;
MargaretMAIterator staging_ebo;
MargaretMAIterator vbo;
MargaretMAIterator ebo;
size_t indexes;
MargaretMAIterator staging_instance_attr_buf;
MargaretMAIterator instance_attr_buf;
U64 instance_vec_len;
U64 instance_vec_capacity;
// todo: replace TextureDataXXX with MargaretPngPromises
MargaretMAIterator staging_vbo;
MargaretMAIterator staging_ebo;
MargaretMAIterator staging_instance_attr_buf;
TextureDataR8G8B8A8 pixels_diffuse;
TextureDataR8G8B8A8 pixels_normal;
TextureDataR8 pixels_specular;
MargaretMAIterator staging_diffuse_tex_buf;
MargaretMAIterator staging_normal_tex_buf;
MargaretMAIterator staging_specular_tex_buf;
MargaretMAIterator vbo;
MargaretMAIterator ebo;
MargaretMAIterator instance_attr_buf;
MargaretMAIterator diffuse_texture;
MargaretMAIterator normal_texture;
MargaretMAIterator specular_texture;
@ -27,15 +35,17 @@ typedef struct {
#include "../../../../gen/l1/eve/r0/VecGenericModelOnSceneMem.h"
typedef struct {
MargaretMAIterator vbo;
MargaretMAIterator ebo;
MargaretMAIterator staging_vbo;
MargaretMAIterator staging_ebo;
size_t indexes;
MargaretMAIterator instance_attr_buf;
MargaretMAIterator staging_instance_attr_buf;
U64 instance_vec_capacity;
U64 instance_vec_len;
MargaretMAIterator staging_vbo;
MargaretMAIterator staging_ebo;
MargaretMAIterator staging_instance_attr_buf;
MargaretMAIterator vbo;
MargaretMAIterator ebo;
MargaretMAIterator instance_attr_buf;
} ShinyModelOnSceneMem;
#include "../../../../gen/l1/eve/r0/VecShinyModelOnSceneMem.h"
@ -112,7 +122,8 @@ Scene Scene_new(VecGenericModelOnSceneMem generic_models, VecShinyModelOnSceneMe
return (Scene){.generic_models = generic_models, .shiny_models = shiny_models,
.color = {.float32 = {0, 0, 0, 1}},
.gamma_correction_factor = 2.2f, .hdr_factor = 1, .lsd_factor = 0, .anim_time = 0,
.pipeline0_staging_ubo = pipeline0_staging_ubo, .pipeline0_ubo = pipeline0_ubo
.pipeline0_staging_ubo = pipeline0_staging_ubo, .pipeline0_ubo = pipeline0_ubo,
.cam = CamControlInfo_new(), .funny_vector = {0, 0, 0}
};
}
@ -121,13 +132,13 @@ void Scene_drop(Scene self) {
VecShinyModelOnSceneMem_drop(self.shiny_models);
}
/* Does not reset, does not begin, does not end command_buffer */
void SceneTemplate_copy_initial_model_topology_and_record_transfer_cmd(
const SceneTemplate* scene_template, const Scene* scene, VkCommandBuffer command_buffer ) {
/* No buffer rerecording, no buffer beginning, no buffer ending */
void SceneTemplate_copy_initial_model_topology_cmd_buf_recording(
const SceneTemplate* scene_template, const Scene* scene, VkCommandBuffer command_buffer) {
assert(scene_template->generic_models.len == scene->generic_models.len);
assert(scene_template->shiny_models.len == scene->shiny_models.len);
assert(scene_template->generic_models.len == scene->generic_models.len);
for (size_t mi = 0; mi < scene_template->generic_models.len; mi++) {
const GenericMeshInSceneTemplate* mt = VecGenericMeshInSceneTemplate_at(&scene_template->generic_models, mi);
const GenericModelOnSceneMem *mm = VecGenericModelOnSceneMem_at(&scene->generic_models, mi);
@ -147,11 +158,29 @@ void SceneTemplate_copy_initial_model_topology_and_record_transfer_cmd(
}
for (size_t mi = 0; mi < scene_template->shiny_models.len; mi++) {
const ShinyMeshInSceneTemplate* mt = VecShinyMeshInSceneTemplate_at(&scene_template->shiny_models, mi);
const ShinyMeshTopology* mt = VecShinyMeshTopology_at(&scene_template->shiny_models, mi);
const ShinyModelOnSceneMem *mm = VecShinyModelOnSceneMem_at(&scene->shiny_models, mi);
size_t vbo_len = mt->topology.vertices.len * sizeof(ShinyMeshVertex);
size_t vbo_len = mt->vertices.len * sizeof(ShinyMeshVertex);
ShinyMeshVertex* staging_vbo = (ShinyMeshVertex*)MargaretMAIterator_get_mapped(mm->staging_vbo);
memcpy(staging_vbo, mt->vertices.buf, vbo_len);
vkCmdCopyBuffer(command_buffer, mm->staging_vbo->value.me.buf.buffer, mm->vbo->value.me.buf.buffer,
1, &(VkBufferCopy){ .srcOffset = 0, .dstOffset = 0, .size = vbo_len});
assert(mt->indexes.len == mm->indexes);
size_t ebo_len = mt->indexes.len * sizeof(U32);
U32* staging_ebo = (U32*)MargaretMAIterator_get_mapped(mm->staging_ebo);
memcpy(staging_ebo, mt->indexes.buf, ebo_len);
vkCmdCopyBuffer(command_buffer, mm->staging_ebo->value.me.buf.buffer, mm->ebo->value.me.buf.buffer,
1, &(VkBufferCopy){.srcOffset = 0, .dstOffset = 0, .size = ebo_len});
}
for (size_t mi = 0; mi < scene_template->generic_models.len; mi++) {
const GenericMeshInSceneTemplate* mt = VecGenericMeshInSceneTemplate_at(&scene_template->generic_models, mi);
const GenericModelOnSceneMem *mm = VecGenericModelOnSceneMem_at(&scene->generic_models, mi);
size_t vbo_len = mt->topology.vertices.len * sizeof(GenericMeshVertex);
GenericMeshVertex* staging_vbo = (GenericMeshVertex*)MargaretMAIterator_get_mapped(mm->staging_vbo);
memcpy(staging_vbo, mt->topology.vertices.buf, vbo_len);
vkCmdCopyBuffer(command_buffer, mm->staging_vbo->value.me.buf.buffer, mm->vbo->value.me.buf.buffer,
1, &(VkBufferCopy){ .srcOffset = 0, .dstOffset = 0, .size = vbo_len});
@ -163,6 +192,8 @@ void SceneTemplate_copy_initial_model_topology_and_record_transfer_cmd(
vkCmdCopyBuffer(command_buffer, mm->staging_ebo->value.me.buf.buffer, mm->ebo->value.me.buf.buffer,
1, &(VkBufferCopy){.srcOffset = 0, .dstOffset = 0, .size = ebo_len});
}
margaret_end_command_buffer(command_buffer);
}
#endif