Wrote r0 (now it uses a texture set per generic model)

This commit is contained in:
Андреев Григорий 2025-09-27 02:05:54 +03:00
parent aed4035806
commit f2f5ef5340
15 changed files with 421 additions and 334 deletions

View File

@ -27,9 +27,9 @@ add_compile_definitions(_POSIX_C_SOURCE=200112L)
add_compile_definitions(_GNU_SOURCE)
add_compile_options(-fno-trapping-math)
add_executable(codegen_l1 src/l1/anne/codegen.c)
target_compile_definitions(codegen_l1
PRIVATE PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8)
#add_executable(codegen_l1 src/l1/anne/codegen.c)
#target_compile_definitions(codegen_l1
# PRIVATE PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8)
#add_executable(0_test src/l1_4/tests/t0.c)
#add_executable(1_test src/l1_4/tests/t1.c)
@ -37,10 +37,9 @@ target_compile_definitions(codegen_l1
#add_executable(l1_4_t2 src/l1_4/tests/t2.c)
#add_executable(codegen_l1_5 src/l1_5/anne/codegen.c)
#
#add_executable(0_render_test src/l2/tests/r0/r0.c gen/l_wl_protocols/xdg-shell-private.c
# src/l1/core/rb_tree_node.h)
#target_link_libraries(0_render_test -lvulkan -lwayland-client -lm -lxkbcommon -lpng)
add_executable(0_render_test src/l2/tests/r0/r0.c gen/l_wl_protocols/xdg-shell-private.c)
target_link_libraries(0_render_test -lvulkan -lwayland-client -lm -lxkbcommon -lpng)
add_executable(0r_tex_init_prep src/l2/tests/r0/r0_tex_init_prep.c)
target_link_libraries(0r_tex_init_prep -lm -lpng)
@ -58,4 +57,4 @@ target_link_libraries(0r_tex_init_prep -lm -lpng)
#target_link_libraries(0_play_test -lncurses)
#
add_executable(l2t0 src/l2/tests/data_structures/t0.c)
#add_executable(l2t0 src/l2/tests/data_structures/t0.c)

View File

@ -21,6 +21,7 @@ void generate_margaret_eve_for_vulkan_utils() {
.T = cstr("PtrMargaretImageInMemoryInfo"), .t_primitive = true, .vec = true, .span = true, .mut_span = true,
.collab_vec_span = true
});
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretCommandForImageCopying"), true, true);
}

View File

@ -41,6 +41,7 @@ void generate_util_templ_inst_for_vulkan_headers() {
generate_guarded_span_company_for_primitive(l, ns, cstr("VkSemaphore"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkDescriptorPoolSize"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkBufferCopy"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkImageMemoryBarrier"), vulkan_dep, true, false);
}
#endif

View File

@ -1027,6 +1027,11 @@ MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_srgba(uint
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
}
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_unorm_8(uint32_t w, uint32_t h){
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8_UNORM,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
}
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_unorm_32(uint32_t w, uint32_t h) {
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8G8B8A8_UNORM,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
@ -1103,98 +1108,107 @@ void margaret_copy_buffer_imm (
margaret_end_and_submit_and_free_command_buffer(device, command_pool, graphics_queue, cmd_buffer);
}
// todo: replace this helper with a cleaner abstraction
// For application initialization purposes only
void transition_image_layout (
VkDevice device, VkCommandPool command_pool, VkQueue graphics_queue,
VkImage image, VkImageLayout old_layout, VkImageLayout new_layout,
VkPipelineStageFlags src_stage_mask, VkAccessFlags src_access_mask,
VkPipelineStageFlags dst_stage_mask, VkAccessFlags dst_access_mask
) {
VkCommandBuffer cmd_buffer = margaret_alloc_and_begin_single_use_command_buffer(device, command_pool);
VkImageMemoryBarrier barrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = src_access_mask,
.dstAccessMask = dst_access_mask,
.oldLayout = old_layout,
.newLayout = new_layout,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = image,
.subresourceRange = (VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
vkCmdPipelineBarrier(cmd_buffer, src_stage_mask, dst_stage_mask,
// Flags
0,
0, NULL,
0, NULL,
1, &barrier
/* One texture-upload command: copy the pixel data that sits at
 * host_mem_buff_offset inside the shared host-visible staging buffer
 * into dst_image. */
typedef struct {
size_t host_mem_buff_offset; /* byte offset into the host staging buffer */
const MargaretImageInMemoryInfo* dst_image; /* target image info (not owned) */
} MargaretCommandForImageCopying;
#include "../../../gen/l1/eve/margaret/VecAndSpan_MargaretCommandForImageCopying.h"
#include "../../../gen/l1/vulkan/VecVkImageMemoryBarrier.h"
/* (destination_stage_mask, destination_access_mask) are probably
* (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT) */
/*
 * Re-records `command_buffer` so that it uploads every texture described by
 * `commands` from the shared host-visible staging buffer `host_mem_buffer`
 * into its destination image. The caller submits the buffer afterwards.
 *
 * Three phases are recorded:
 *   1. one batched barrier taking each image UNDEFINED -> TRANSFER_DST_OPTIMAL,
 *   2. one vkCmdCopyBufferToImage per command, reading from its
 *      host_mem_buff_offset,
 *   3. one batched barrier taking each image TRANSFER_DST_OPTIMAL ->
 *      READ_ONLY_OPTIMAL (note: VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL requires
 *      Vulkan 1.3 / synchronization2 — TODO confirm the targeted version).
 *
 * Fix vs. previous revision: a stray leftover line called
 * margaret_end_and_submit_and_free_command_buffer with identifiers (device,
 * command_pool, graphics_queue, cmd_buffer) that do not exist in this
 * function; it has been removed. size_t -> uint32_t casts made explicit.
 */
void margaret_rerecord_cmd_buff_for_texture_init (
    VkCommandBuffer command_buffer, VkBuffer host_mem_buffer,
    SpanMargaretCommandForImageCopying commands,
    VkPipelineStageFlags destination_stage_mask, VkAccessFlags destination_access_mask
){
    if (vkResetCommandBuffer(command_buffer, 0) != VK_SUCCESS)
        abortf("vkResetCommandBuffer\n");
    VkCommandBufferBeginInfo begin_info = {.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,};
    if (vkBeginCommandBuffer(command_buffer, &begin_info) != VK_SUCCESS)
        abortf("vkBeginCommandBuffer\n");
    /* Phase 1: transition every destination image to TRANSFER_DST_OPTIMAL. */
    VecVkImageMemoryBarrier barriers = VecVkImageMemoryBarrier_new_reserved(commands.len);
    for (size_t i = 0; i < commands.len; i++) {
        MargaretCommandForImageCopying img = commands.data[i];
        VecVkImageMemoryBarrier_append(&barriers, (VkImageMemoryBarrier){
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .srcAccessMask = 0,
            .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
            .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = img.dst_image->image,
            .subresourceRange = (VkImageSubresourceRange){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                .baseMipLevel = 0,
                .levelCount = 1,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
        });
    }
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
        0, /* Flags */
        0, NULL, 0, NULL,
        (uint32_t)barriers.len, barriers.buf);
    barriers.len = 0; /* Reuse the Vec; VkImageMemoryBarrier needs no destructor */
    /* Phase 2: record one buffer->image copy per texture. */
    for (size_t i = 0; i < commands.len; i++) {
        MargaretCommandForImageCopying img = commands.data[i];
        VkBufferImageCopy region = {
            .bufferOffset = img.host_mem_buff_offset,
            .bufferRowLength = 0,   /* tightly packed */
            .bufferImageHeight = 0, /* tightly packed */
            .imageSubresource = (VkImageSubresourceLayers){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                .mipLevel = 0,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
            .imageOffset = {0, 0, 0},
            .imageExtent = {
                .width = img.dst_image->width,
                .height = img.dst_image->height,
                .depth = 1
            },
        };
        /* The phase-1 barrier already moved the image to TRANSFER_DST_OPTIMAL. */
        vkCmdCopyBufferToImage(command_buffer, host_mem_buffer, img.dst_image->image,
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
    }
    /* Phase 3: refill the barriers Vec and transition images for sampling. */
    for (size_t i = 0; i < commands.len; i++) {
        MargaretCommandForImageCopying img = commands.data[i];
        VecVkImageMemoryBarrier_append(&barriers, (VkImageMemoryBarrier){
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .dstAccessMask = destination_access_mask,
            .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .newLayout = VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = img.dst_image->image,
            .subresourceRange = (VkImageSubresourceRange){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                .baseMipLevel = 0,
                .levelCount = 1,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
        });
    }
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, destination_stage_mask,
        0, /* Flags */
        0, NULL, 0, NULL,
        (uint32_t)barriers.len, barriers.buf
    );
    VecVkImageMemoryBarrier_drop(barriers);
    if (vkEndCommandBuffer(command_buffer) != VK_SUCCESS)
        abortf("vkEndCommandBuffer");
}
/* Records, submits, and waits on a single-use command buffer that copies the
 * contents of `src_buffer` into `dst_image`, which must already be in
 * VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL. For application initialization only. */
void margaret_copy_buffer_to_trans_dst_optimal_image (
    VkDevice device, VkCommandPool command_pool, VkQueue graphics_queue,
    const MargaretImageInMemoryInfo* dst_image, VkBuffer src_buffer
) {
    VkCommandBuffer one_shot_cmds = margaret_alloc_and_begin_single_use_command_buffer(device, command_pool);
    VkImageSubresourceLayers color_layer_0 = {
        .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
        .mipLevel = 0,
        .baseArrayLayer = 0,
        .layerCount = 1,
    };
    VkBufferImageCopy whole_image = {
        .bufferOffset = 0,
        .bufferRowLength = 0,   /* tightly packed */
        .bufferImageHeight = 0, /* tightly packed */
        .imageSubresource = color_layer_0,
        .imageOffset = {0, 0, 0},
        .imageExtent = {
            .width = dst_image->width,
            .height = dst_image->height,
            .depth = 1
        },
    };
    /* The caller is responsible for the TRANSFER_DST_OPTIMAL layout transition. */
    vkCmdCopyBufferToImage(one_shot_cmds, src_buffer, dst_image->image,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &whole_image);
    margaret_end_and_submit_and_free_command_buffer(device, command_pool, graphics_queue, one_shot_cmds);
}
// todo: replace this ad-hoc path with the batched texture-init recording
// For application initialization purposes only.
// Immediately (blocking, via single-use command buffers) uploads src_buffer
// into dst_image and leaves the image ready for fragment-shader sampling.
void margaret_copy_buffer_to_texture_for_frag_shader_imm(
VkDevice device, VkCommandPool command_pool, VkQueue graphics_queue,
const MargaretImageInMemoryInfo* dst_image, VkBuffer src_buffer
) {
// 1) UNDEFINED -> TRANSFER_DST_OPTIMAL so the copy may write the image
transition_image_layout(device, command_pool, graphics_queue, dst_image->image,
// previous and new layouts
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
// src stage and access
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0,
// destination stage and access
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT
);
// 2) copy the staging buffer into the (now transfer-dst) image
margaret_copy_buffer_to_trans_dst_optimal_image(device, command_pool, graphics_queue, dst_image, src_buffer);
// 3) TRANSFER_DST_OPTIMAL -> READ_ONLY_OPTIMAL for fragment-shader reads
transition_image_layout(device, command_pool, graphics_queue, dst_image->image,
// previous and new layouts
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL,
// src stage and access
VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
// destination stage and access
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT
);
}
// todo: cjafhs WHAT IS THIS?? I need to remove this. I can do better than this
// For texture
VkImageView margaret_create_view_for_image (
VkDevice device, const MargaretImageInMemoryInfo* image, VkImageAspectFlags aspect_flags

View File

@ -10,7 +10,7 @@
#include "../../../../gen/l_wl_protocols/xdg-shell-client.h"
#include <xkbcommon/xkbcommon.h>
#include "../../../l1/system/creating_child_proc.h"
#include "../../margaret/png_pixel_masses.h"
#include "../../../../gen/l1/margaret/png_pixel_masses.h"
// todo: generate this structure in l2
typedef struct {
@ -787,12 +787,73 @@ void record_cmd_set_viewport_and_scissors(VkCommandBuffer command_buffer, VkExte
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
}
/* Per-frame synchronization objects for r0. */
typedef struct {
    VkSemaphore in_frame_transfer_complete;
    VkSemaphore image_available_semaphore;
    VkSemaphore rendered_to_IT1_semaphore;
    VkFence in_flight_fence;
} Jane_r0;
/* Creates all sync objects. The fence is created with `true`
 * (presumably: initially signalled — confirm margaret_create_fence's contract). */
NODISCARD Jane_r0 Jane_r0_create(VkDevice device) {
    Jane_r0 jane;
    jane.in_frame_transfer_complete = margaret_create_semaphore(device);
    jane.image_available_semaphore = margaret_create_semaphore(device);
    jane.rendered_to_IT1_semaphore = margaret_create_semaphore(device);
    jane.in_flight_fence = margaret_create_fence(device, true);
    return jane;
}
/* Destroys the sync objects in reverse creation order. */
void Jane_r0_destroy(VkDevice device, Jane_r0 jane) {
    vkDestroyFence(device, jane.in_flight_fence, NULL);
    vkDestroySemaphore(device, jane.rendered_to_IT1_semaphore, NULL);
    vkDestroySemaphore(device, jane.image_available_semaphore, NULL);
    vkDestroySemaphore(device, jane.in_frame_transfer_complete, NULL);
}
// todo: handle the case where the presentation and graphics queues come from the same family
/* Queue handles retrieved from the logical device. */
typedef struct {
VkQueue graphics_queue;
VkQueue presentation_queue;
} UsedVulkanQueues;
/* Per-generic-model GPU state: topology buffers, CPU-side texture data,
 * device images, their views, and the model's descriptor set. */
typedef struct {
MargaretBufferInMemoryInfo vbo;
MargaretBufferInMemoryInfo ebo;
/* We store the image in yet another intermediate buffer (will change this later) */
TextureDataR8G8B8A8 reading_diffuse;
TextureDataR8G8B8A8 reading_normal;
TextureDataR8 reading_specular;
/* Filled during the first (and only) memory init */
MargaretImageInMemoryInfo diffuse;
MargaretImageInMemoryInfo normal;
MargaretImageInMemoryInfo specular;
/* Will be filled in later */
VkImageView diffuse_view;
VkImageView normal_view;
VkImageView specular_view;
/* Each generic model has its own descriptor set because it has its own
 * textures. The set also carries copies of references to the light UBO,
 * because creating two set layouts for the generic-model pipeline is not
 * worth the effort. */
VkDescriptorSet p_0a_set_0;
} GenericModelTopAndTexInMemoryInfo;
#include "../../../../gen/l1/eve/r0/VecGenericModelTopAndTexInMemoryInfo.h"
/* Per-shiny-model GPU state: just the topology buffers (no textures). */
typedef struct {
MargaretBufferInMemoryInfo vbo;
MargaretBufferInMemoryInfo ebo;
} ShinyModelTopInMemoryInfo;
#include "../../../../gen/l1/eve/r0/VecShinyModelTopInMemoryInfo.h"
void reset_and_record_command_buffer_0(
VkCommandBuffer command_buffer, VkRenderPass render_pass_0,
const PipelineHands* pipeline_and_layout_0a, const PipelineHands* pipeline_and_layout_0b,
VkFramebuffer result_framebuffer, VkExtent2D image_extent,
const Scene* scene,
VkDescriptorSet descriptor_set_for_pipeline_0a, VkDescriptorSet descriptor_set_for_pipeline_0b,
const Scene* scene, const VecGenericModelTopAndTexInMemoryInfo* generic_models,
VkDescriptorSet descriptor_set_for_pipeline_0b,
mat4 proj_cam_t, vec3 camera_pos
) {
if (vkResetCommandBuffer(command_buffer, 0) != VK_SUCCESS)
@ -813,6 +874,7 @@ void reset_and_record_command_buffer_0(
};
vkCmdBeginRenderPass(command_buffer, &renderpass_begin, VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_and_layout_0a->pipeline);
// We forgot that viewport is not built into our pipeline
// We forgot that scissors are not built into out pipeline
@ -831,7 +893,7 @@ void reset_and_record_command_buffer_0(
vkCmdBindIndexBuffer(command_buffer, model->model.ebo, 0, VK_INDEX_TYPE_UINT32);
vkCmdBindDescriptorSets(
command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_and_layout_0a->pipeline_layout, 0,
1, &descriptor_set_for_pipeline_0a, 0, NULL);
1, &VecGenericModelTopAndTexInMemoryInfo_at(generic_models, i)->p_0a_set_0, 0, NULL);
vkCmdDrawIndexed(command_buffer, model->model.indexes, model->instances.len, 0, 0, 0);
}
@ -841,6 +903,9 @@ void reset_and_record_command_buffer_0(
0, sizeof(mat4), &proj_cam_t);
vkCmdPushConstants(command_buffer, pipeline_and_layout_0b->pipeline_layout, VK_SHADER_STAGE_FRAGMENT_BIT,
sizeof(mat4), sizeof(vec3), &camera_pos);
vkCmdBindDescriptorSets(
command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_and_layout_0b->pipeline_layout, 0,
1, &descriptor_set_for_pipeline_0b, 0, NULL);
for (size_t i = 0; i < scene->shiny_models.len; i++) {
const UsedShinyModelOnScene* model = VecUsedShinyModelOnScene_at(&scene->shiny_models, i);
VkBuffer attached_buffers[2] = { model->model.vbo, model->model.instance_attr_buf };
@ -849,9 +914,6 @@ void reset_and_record_command_buffer_0(
assert(ARRAY_SIZE(attached_buffers) == 2 && ARRAY_SIZE(offsets_in_buffers) == 2);
vkCmdBindVertexBuffers(command_buffer, 0, 2, attached_buffers, offsets_in_buffers);
vkCmdBindIndexBuffer(command_buffer, model->model.ebo, 0, VK_INDEX_TYPE_UINT32);
vkCmdBindDescriptorSets(
command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_and_layout_0b->pipeline_layout, 0,
1, &descriptor_set_for_pipeline_0b, 0, NULL);
vkCmdDrawIndexed(command_buffer, model->model.indexes, model->instances.len, 0, 0, 0);
}
@ -990,60 +1052,7 @@ void copy_scene_info_to_buffer_and_rerecord_full_copy_command_buffer(
abortf("vkEndCommandBuffer");
}
typedef struct {
VkSemaphore in_frame_transfer_complete;
VkSemaphore image_available_semaphore;
VkSemaphore rendered_to_IT1_semaphore;
VkFence in_flight_fence;
} Jane_r0;
NODISCARD Jane_r0 Jane_r0_create(VkDevice device) {
return (Jane_r0){
.in_frame_transfer_complete = margaret_create_semaphore(device),
.image_available_semaphore = margaret_create_semaphore(device),
.rendered_to_IT1_semaphore = margaret_create_semaphore(device),
.in_flight_fence = margaret_create_fence(device, true)
};
}
void Jane_r0_destroy(VkDevice device, Jane_r0 jane) {
vkDestroyFence(device, jane.in_flight_fence, NULL);
vkDestroySemaphore(device, jane.rendered_to_IT1_semaphore, NULL);
vkDestroySemaphore(device, jane.image_available_semaphore, NULL);
vkDestroySemaphore(device, jane.in_frame_transfer_complete, NULL);
}
// todo: handle case where presentation and graphics are from the same family
typedef struct {
VkQueue graphics_queue;
VkQueue presentation_queue;
} UsedVulkanQueues;
typedef struct {
MargaretBufferInMemoryInfo vbo;
MargaretBufferInMemoryInfo ebo;
/* We store image in yet another meaningless buffer (will change it later) */
TextureDataR8G8B8A8 reading_diffuse;
TextureDataR8G8B8A8 reading_normal;
TextureDataR8 reading_specular;
/* Filled during first (and the only) memory init */
MargaretImageInMemoryInfo diffuse;
MargaretImageInMemoryInfo normal;
MargaretImageInMemoryInfo specular;
/* will be filled in later */
VkImageView diffuse_view;
VkImageView normal_view;
VkImageView specular_view;
} GenericModelTopAndTexInMemoryInfo;
#include "../../../../gen/l1/eve/r0/VecGenericModelTopAndTexInMemoryInfo.h"
typedef struct {
MargaretBufferInMemoryInfo vbo;
MargaretBufferInMemoryInfo ebo;
} ShinyModelTopInMemoryInfo;
#include "../../../../gen/l1/eve/r0/VecShinyModelTopInMemoryInfo.h"
typedef struct {
MargaretInstanceAndItsDebug instance_and_debug;
@ -1086,12 +1095,12 @@ typedef struct {
VkImageView zbuffer_view;
VkImageView IT1_view;
VkFramebuffer IT1_framebuffer;
VkImageView cyl_1_diffuse_texture_view;
VkImageView cyl_1_normal_texture_view;
VkSampler linear_sampler;
VkSampler nearest_sampler;
VkDescriptorPool descriptor_pool;
VkDescriptorSet descriptor_set_for_pipeline_0a;
/* Descriptor sets */
// Descriptor sets for pipeline_0a are stored in device_generic_models_top_and_tex
VkDescriptorSet descriptor_set_for_pipeline_0b;
VkDescriptorSet descriptor_set_for_pipeline_1;
@ -1258,7 +1267,8 @@ void vulkano_frame_drawing(state_r0* state) {
&state->vk_ctx.pipeline_hands_0a, &state->vk_ctx.pipeline_hands_0b,
state->vk_ctx.IT1_framebuffer, state->vk_ctx.swfb.extent,
&state->vk_ctx.scene,
state->vk_ctx.descriptor_set_for_pipeline_0a, state->vk_ctx.descriptor_set_for_pipeline_0b,
&state->vk_ctx.device_generic_models_top_and_tex, /* Needed just to get descriptor sets for generic models */
state->vk_ctx.descriptor_set_for_pipeline_0b,
t_mat, state->vk_ctx.my_cam_control_info.pos);
reset_and_record_command_buffer_1(state->vk_ctx.rendering_command_buffer_1, state->vk_ctx.render_pass_1,
@ -1784,25 +1794,56 @@ int main() {
vk_ctx->device_generic_models_top_and_tex = VecGenericModelTopAndTexInMemoryInfo_new_reserved(vk_ctx->scene_template.generic_models.len);
for (size_t i = 0; i < vk_ctx->scene_template.generic_models.len; i++) {
const GenericMeshInSceneTemplate* M = VecGenericMeshInSceneTemplate_at(&vk_ctx->scene_template.generic_models, i);
TextureDataR8G8B8A8 reading_diffuse = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&M->diffuse_texture_path));
TextureDataR8G8B8A8 reading_normal = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&M->normal_texture_path));
TextureDataR8 reading_specular = TextureDataR8_read_from_png_nofail(VecU8_to_span(&M->diffuse_texture_path));
VecGenericModelTopAndTexInMemoryInfo_append(&vk_ctx->device_generic_models_top_and_tex,
(GenericModelTopAndTexInMemoryInfo){
.vbo = GenericMeshVertex_buffer_crinfo_of_gpu_vbo(M->topology.vertices.len),
.ebo = margaret_prep_buffer_mem_info_of_gpu_ebo(M->topology.indexes.len),
.reading_diffuse = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&M->diffuse_texture_path)),
.reading_normal = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&M->diffuse_texture_path)),
// todo: continue from here. But first - create TextureDataR8 reading function
.reading_specular = TextureDataR8_read_from_png_nofail(VecU8_to_span(&M->diffuse_texture_path)),
.reading_diffuse = reading_diffuse, .reading_normal = reading_normal, .reading_specular = reading_specular,
.diffuse = margaret_prep_image_mem_info_of_gpu_texture_srgba(reading_diffuse.width, reading_diffuse.height),
.normal = margaret_prep_image_mem_info_of_gpu_texture_unorm_32(reading_normal.width, reading_normal.height),
.specular = margaret_prep_image_mem_info_of_gpu_texture_unorm_8(reading_specular.width, reading_specular.height),
/* image views will be created after the images are allocated */
/* descriptor set for each model will be allocated later */
});
}
// todo: clean this up (update: still WiP (update: it has become a growing technical-debt point))
VecU64 offset_of_image_in_host_mem_buff_during_init = VecU64_new_zeroinit(vk_ctx->device_generic_models_top_and_tex.len * 3);
U64 grand_total_texture_size_in_host_mem = 0;
{
U64 offset = 0;
for (size_t i = 0; i < vk_ctx->device_generic_models_top_and_tex.len; i++) {
offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 0] = offset;
offset += TextureDataR8G8B8A8_get_size_in_bytes(&vk_ctx->device_generic_models_top_and_tex.buf[i].reading_diffuse);
offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 1] = offset;
offset += TextureDataR8G8B8A8_get_size_in_bytes(&vk_ctx->device_generic_models_top_and_tex.buf[i].reading_normal);
offset_of_image_in_host_mem_buff_during_init.buf[3 * i + 2] = offset;
offset += TextureDataR8_get_size_in_bytes(&vk_ctx->device_generic_models_top_and_tex.buf[i].reading_specular);
}
grand_total_texture_size_in_host_mem = offset;
}
vk_ctx->device_shiny_models_top = VecShinyModelTopInMemoryInfo_new_reserved(vk_ctx->scene_template.shiny_models.len);
for (size_t i = 0; i < vk_ctx->scene_template.shiny_models.len; i++) {
const ShinyMeshInSceneTemplate* M = VecShinyMeshInSceneTemplate_at(&vk_ctx->scene_template.shiny_models, i);
VecShinyModelTopInMemoryInfo_append(&vk_ctx->device_shiny_models_top,
(ShinyModelTopInMemoryInfo){
.vbo = ShinyMeshVertex_buffer_crinfo_of_gpu_vbo(M->topology.vertices.len),
.ebo = margaret_prep_buffer_mem_info_of_gpu_ebo(M->topology.indexes.len),
});
}
// todo: clean this up (update: still WiP (update: it became a growing technical debt))
// todo: refactor this section
// We have only one staging buffer in host memory (because we don't really need more)
vk_ctx->host_mem_buffer = (MargaretBufferInMemoryInfo){ .sz =
MAX_U64(SceneTemplate_get_space_for_initial_model_topology_transfer(&vk_ctx->scene_template),
MAX_U64(SceneTemplate_get_space_needed_for_widest_state_transfer(&vk_ctx->scene_template),
MAX_U64(TextureDataR8G8B8A8_get_size_in_bytes(&vk_ctx->cyl_1_diffuse_tex),
MAX_U64(TextureDataR8G8B8A8_get_size_in_bytes(&vk_ctx->cyl_1_normal_tex), 0))))
MAX_U64(grand_total_texture_size_in_host_mem, 0)))
, .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT };
PtrMargaretBufferInMemoryInfo host_mem_buffer_SPAN[1] = {&vk_ctx->host_mem_buffer};
vk_ctx->host_mem = margaret_initialize_buffers_and_images(vk_ctx->physical_device, vk_ctx->device,
@ -1810,50 +1851,15 @@ int main() {
(MutSpanPtrMargaretImageInMemoryInfo){ 0 },
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
// todo: split this in two (or maybe even better: merge it all into one/two buffers and use offsets)
vk_ctx->device_ebo_buffers_for_generic_meshes = VecMargaretBufferInMemoryInfo_new();
vk_ctx->device_vbo_buffers_for_generic_meshes = VecMargaretBufferInMemoryInfo_new();
for (size_t mi = 0; mi < vk_ctx->scene_template.generic_models.len; mi++) {
const GenericMeshInSceneTemplate* M = VecGenericMeshInSceneTemplate_at(&vk_ctx->scene_template.generic_models, mi);
VecMargaretBufferInMemoryInfo_append(&vk_ctx->device_vbo_buffers_for_generic_meshes,
GenericMeshVertex_buffer_crinfo_of_gpu_vbo(M->topology.vertices.len));
VecMargaretBufferInMemoryInfo_append(&vk_ctx->device_ebo_buffers_for_generic_meshes,
margaret_prep_buffer_mem_info_of_gpu_ebo(M->topology.indexes.len));
}
vk_ctx->device_ebo_buffers_for_shiny_meshes = VecMargaretBufferInMemoryInfo_new();
vk_ctx->device_vbo_buffers_for_shiny_meshes = VecMargaretBufferInMemoryInfo_new();
for (size_t mi = 0; mi < vk_ctx->scene_template.shiny_models.len; mi++) {
const ShinyMeshInSceneTemplate* M = VecShinyMeshInSceneTemplate_at(&vk_ctx->scene_template.shiny_models, mi);
VecMargaretBufferInMemoryInfo_append(&vk_ctx->device_vbo_buffers_for_shiny_meshes,
ShinyMeshVertex_buffer_crinfo_of_gpu_vbo(M->topology.vertices.len));
VecMargaretBufferInMemoryInfo_append(&vk_ctx->device_ebo_buffers_for_shiny_meshes,
margaret_prep_buffer_mem_info_of_gpu_ebo(M->topology.indexes.len));
}
vk_ctx->device_lighting_ubo = margaret_prep_buffer_mem_info_of_gpu_ubo(sizeof(Pipeline0UBO));
vk_ctx->device_instance_attrs_for_models = (MargaretBufferInMemoryInfo){
.sz = SceneTemplate_get_space_needed_for_all_instance_attributes(&vk_ctx->scene_template),
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT
};
VecPtrMargaretBufferInMemoryInfo device_mem_buffers_SPAN = VecPtrMargaretBufferInMemoryInfo_new();
// todo: add iteration macro generation
for (size_t i = 0; i < vk_ctx->device_ebo_buffers_for_generic_meshes.len; i++) {
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN,
VecMargaretBufferInMemoryInfo_mat(&vk_ctx->device_ebo_buffers_for_generic_meshes, i));
}
for (size_t i = 0; i < vk_ctx->device_vbo_buffers_for_generic_meshes.len; i++) {
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN,
VecMargaretBufferInMemoryInfo_mat(&vk_ctx->device_vbo_buffers_for_generic_meshes, i));
}
for (size_t i = 0; i < vk_ctx->device_ebo_buffers_for_shiny_meshes.len; i++) {
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN,
VecMargaretBufferInMemoryInfo_mat(&vk_ctx->device_ebo_buffers_for_shiny_meshes, i));
}
for (size_t i = 0; i < vk_ctx->device_vbo_buffers_for_shiny_meshes.len; i++) {
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN,
VecMargaretBufferInMemoryInfo_mat(&vk_ctx->device_vbo_buffers_for_shiny_meshes, i));
}
VecPtrMargaretBufferInMemoryInfo device_mem_buffers_SPAN = VecPtrMargaretBufferInMemoryInfo_new_reserved(
vk_ctx->device_generic_models_top_and_tex.len + vk_ctx->device_shiny_models_top.len);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &vk_ctx->device_lighting_ubo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &vk_ctx->device_instance_attrs_for_models);
@ -1861,21 +1867,35 @@ int main() {
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, IT1_format.some);
vk_ctx->device_zbuffer_image = margaret_prep_image_mem_info_of_zbuffer(
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, zbuffer_format.some);
vk_ctx->device_cyl_1_diffuse_texture = margaret_prep_image_mem_info_of_gpu_texture_srgba(
vk_ctx->cyl_1_diffuse_tex.width, &vk_ctx->cyl_1_diffuse_tex.height);
vk_ctx->device_cyl_1_normal_texture = margaret_prep_image_mem_info_of_gpu_texture_unorm_32(
vk_ctx->cyl_1_normal_tex.width, &vk_ctx->cyl_1_normal_tex.height);
PtrMargaretImageInMemoryInfo device_mem_images_SPAN[] = {
&vk_ctx->device_IT1_image, &vk_ctx->device_zbuffer_image, &vk_ctx->device_cyl_1_diffuse_texture,
&vk_ctx->device_cyl_1_normal_texture
};
VecPtrMargaretImageInMemoryInfo device_mem_images_SPAN =
VecPtrMargaretImageInMemoryInfo_new_reserved(2 + 3 * vk_ctx->scene_template.generic_models.len);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &vk_ctx->device_IT1_image);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &vk_ctx->device_zbuffer_image);
for (size_t i = 0; i < vk_ctx->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = &vk_ctx->device_generic_models_top_and_tex.buf[i];
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->vbo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->ebo);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->diffuse);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->normal);
VecPtrMargaretImageInMemoryInfo_append(&device_mem_images_SPAN, &M->specular);
}
for (size_t i = 0; i < vk_ctx->device_shiny_models_top.len; i++) {
ShinyModelTopInMemoryInfo* M = &vk_ctx->device_shiny_models_top.buf[i];
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->vbo);
VecPtrMargaretBufferInMemoryInfo_append(&device_mem_buffers_SPAN, &M->ebo);
}
vk_ctx->device_mem = margaret_initialize_buffers_and_images(vk_ctx->physical_device, vk_ctx->device,
VecPtrMargaretBufferInMemoryInfo_to_mspan(&device_mem_buffers_SPAN),
(MutSpanPtrMargaretImageInMemoryInfo){ .data = device_mem_images_SPAN, .len = ARRAY_SIZE(device_mem_images_SPAN) },
VecPtrMargaretImageInMemoryInfo_to_mspan(&device_mem_images_SPAN),
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
/* device_mem_buffers_SPAN invalidated */
/* device_mem_buffers_SPAN, device_mem_images_SPAN invalidated */
VecPtrMargaretBufferInMemoryInfo_drop(device_mem_buffers_SPAN);
VecPtrMargaretImageInMemoryInfo_drop(device_mem_images_SPAN);
vk_ctx->command_pool = margaret_create_resettable_command_pool(vk_ctx->device, vk_ctx->queue_fam.for_graphics);
vk_ctx->rendering_command_buffer_0 = margaret_allocate_command_buffer(vk_ctx->device, vk_ctx->command_pool);
@ -1890,16 +1910,17 @@ int main() {
size_t offset_in_attr_buffer = 0;
for (size_t mi = 0; mi < vk_ctx->scene_template.generic_models.len; mi++) {
const GenericMeshInSceneTemplate* M = VecGenericMeshInSceneTemplate_at(&vk_ctx->scene_template.generic_models, mi);
const GenericModelTopAndTexInMemoryInfo* MM = VecGenericModelTopAndTexInMemoryInfo_at(&vk_ctx->device_generic_models_top_and_tex, mi);
VecUsedGenericModelOnScene_append(&vk_ctx->scene.generic_models, (UsedGenericModelOnScene){
.model = (ModelOnSceneMem){
.vbo = VecMargaretBufferInMemoryInfo_at(
&vk_ctx->device_vbo_buffers_for_generic_meshes, mi)->buffer,
.ebo = VecMargaretBufferInMemoryInfo_at(
&vk_ctx->device_ebo_buffers_for_generic_meshes, mi)->buffer,
.model = (GenericModelOnSceneMem){
.vbo = MM->vbo.buffer,
.ebo = MM->ebo.buffer,
.indexes = M->topology.indexes.len,
.instance_attr_buf = vk_ctx->device_instance_attrs_for_models.buffer,
.instance_attr_buf_offset = offset_in_attr_buffer,
.limit_max_instance_count = M->max_instance_count
// todo: remove vbo, ebo from here (we don't need them here).
// As you can see, I didn't specify images (that's because I don't need to)
},
.instances = VecGenericMeshInstance_new(),
});
@ -1907,12 +1928,11 @@ int main() {
}
for (size_t mi = 0; mi < vk_ctx->scene_template.shiny_models.len; mi++) {
const ShinyMeshInSceneTemplate* M = VecShinyMeshInSceneTemplate_at(&vk_ctx->scene_template.shiny_models, mi);
const ShinyModelTopInMemoryInfo* MM = VecShinyModelTopInMemoryInfo_at(&vk_ctx->device_shiny_models_top, mi);
VecUsedShinyModelOnScene_append(&vk_ctx->scene.shiny_models, (UsedShinyModelOnScene){
.model = (ModelOnSceneMem){
.vbo = VecMargaretBufferInMemoryInfo_at(
&vk_ctx->device_vbo_buffers_for_shiny_meshes, mi)->buffer,
.ebo = VecMargaretBufferInMemoryInfo_at(
&vk_ctx->device_ebo_buffers_for_shiny_meshes, mi)->buffer,
.model = (ShinyModelOnSceneMem){
.vbo = MM->vbo.buffer,
.ebo = MM->ebo.buffer,
.indexes = M->topology.indexes.len,
.instance_attr_buf = vk_ctx->device_instance_attrs_for_models.buffer,
.instance_attr_buf_offset = offset_in_attr_buffer,
@ -1933,6 +1953,13 @@ int main() {
VecGenericMeshInstance_append(&VecUsedGenericModelOnScene_mat(&vk_ctx->scene.generic_models, 1)->instances,
(GenericMeshInstance){ .model_t = marie_translation_mat4(vk_ctx->Buba_control_info)
});
VecGenericMeshInstance_append(&VecUsedGenericModelOnScene_mat(&vk_ctx->scene.generic_models, 2)->instances,
(GenericMeshInstance){ .model_t = marie_translation_mat4((vec3){5, -7, 6})});
VecGenericMeshInstance_append(&VecUsedGenericModelOnScene_mat(&vk_ctx->scene.generic_models, 3)->instances,
(GenericMeshInstance){ .model_t = marie_translation_mat4((vec3){5, -7, -6})});
for (U64 i = 0; i < 5; i++) {
VecShinyMeshInstance_append(&VecUsedShinyModelOnScene_mat(&vk_ctx->scene.shiny_models, 0)->instances,
(ShinyMeshInstance){ .model_t = marie_translation_mat4((vec3){
@ -1947,34 +1974,53 @@ int main() {
if (vkMapMemory(vk_ctx->device, vk_ctx->host_mem, 0, VK_WHOLE_SIZE, 0, &vk_ctx->host_mem_buffer_mem) != VK_SUCCESS)
abortf("vkMapMemory");
SceneTemplate_copy_initial_model_topology_and_rerecord_transfer_cmd(
{
SceneTemplate_copy_initial_model_topology_and_rerecord_transfer_cmd(
&vk_ctx->scene_template, &vk_ctx->scene, vk_ctx->host_mem_buffer_mem,
vk_ctx->transfer_command_buffer, vk_ctx->host_mem_buffer.buffer);
{
VkCommandBuffer command_buffers[1] = { vk_ctx->transfer_command_buffer };
VkSubmitInfo submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = ARRAY_SIZE(command_buffers),
.pCommandBuffers = command_buffers,
.commandBufferCount = 1, .pCommandBuffers = &vk_ctx->transfer_command_buffer,
};
if (vkQueueSubmit(vk_ctx->queues.graphics_queue, 1, &submit_info, NULL) != VK_SUCCESS)
abortf("vkQueueSubmit\n");
}
vkDeviceWaitIdle(vk_ctx->device);
{
memcpy(vk_ctx->host_mem_buffer_mem, vk_ctx->cyl_1_diffuse_tex.pixels.buf,
TextureDataR8G8B8A8_get_size_in_bytes(&vk_ctx->cyl_1_diffuse_tex));
margaret_copy_buffer_to_texture_for_frag_shader_imm(
vk_ctx->device, vk_ctx->command_pool, vk_ctx->queues.graphics_queue,
&vk_ctx->device_cyl_1_diffuse_texture, vk_ctx->host_mem_buffer.buffer);
}
vkDeviceWaitIdle(vk_ctx->device);
{
memcpy(vk_ctx->host_mem_buffer_mem, vk_ctx->cyl_1_normal_tex.pixels.buf,
TextureDataR8G8B8A8_get_size_in_bytes(&vk_ctx->cyl_1_normal_tex));
margaret_copy_buffer_to_texture_for_frag_shader_imm(
vk_ctx->device, vk_ctx->command_pool, vk_ctx->queues.graphics_queue,
&vk_ctx->device_cyl_1_normal_texture, vk_ctx->host_mem_buffer.buffer);
VecMargaretCommandForImageCopying commands =
VecMargaretCommandForImageCopying_new_reserved(vk_ctx->device_generic_models_top_and_tex.len);
for (size_t i = 0; i < vk_ctx->device_generic_models_top_and_tex.len; i++) {
const GenericModelTopAndTexInMemoryInfo* M =
VecGenericModelTopAndTexInMemoryInfo_at(&vk_ctx->device_generic_models_top_and_tex, i);
U64 diffuse_offset = *VecU64_at(&offset_of_image_in_host_mem_buff_during_init, 3 * i + 0);
U64 normal_offset = *VecU64_at(&offset_of_image_in_host_mem_buff_during_init, 3 * i + 1);
U64 specular_offset = *VecU64_at(&offset_of_image_in_host_mem_buff_during_init, 3 * i + 2);
memcpy(vk_ctx->host_mem_buffer_mem + diffuse_offset,
M->reading_diffuse.pixels.buf, TextureDataR8G8B8A8_get_size_in_bytes(&M->reading_diffuse));
memcpy(vk_ctx->host_mem_buffer_mem + normal_offset,
M->reading_normal.pixels.buf, TextureDataR8G8B8A8_get_size_in_bytes(&M->reading_normal));
memcpy(vk_ctx->host_mem_buffer_mem + specular_offset,
M->reading_specular.pixels.buf, TextureDataR8_get_size_in_bytes(&M->reading_specular));
VecMargaretCommandForImageCopying_append(&commands, (MargaretCommandForImageCopying){
.dst_image = &M->diffuse, .host_mem_buff_offset = diffuse_offset});
VecMargaretCommandForImageCopying_append(&commands, (MargaretCommandForImageCopying){
.dst_image = &M->normal, .host_mem_buff_offset = normal_offset});
VecMargaretCommandForImageCopying_append(&commands, (MargaretCommandForImageCopying){
.dst_image = &M->specular, .host_mem_buff_offset = specular_offset});
}
margaret_rerecord_cmd_buff_for_texture_init(vk_ctx->transfer_command_buffer, vk_ctx->host_mem_buffer.buffer,
VecMargaretCommandForImageCopying_to_span(&commands),
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
VecMargaretCommandForImageCopying_drop(commands);
VkSubmitInfo submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1, .pCommandBuffers = &vk_ctx->transfer_command_buffer,
};
if (vkQueueSubmit(vk_ctx->queues.graphics_queue, 1, &submit_info, NULL) != VK_SUCCESS)
abortf("vkQueueSubmit\n");
}
vkDeviceWaitIdle(vk_ctx->device);
// We sent everything we needed. but host_mem_buffer_mem may be used later
@ -1985,16 +2031,18 @@ int main() {
/* Here we create an image view into a temporary IT1 texture and a framebuffer for scene rendering */
vk_ctx->IT1_view = margaret_create_view_for_image(vk_ctx->device,
&vk_ctx->device_IT1_image, VK_IMAGE_ASPECT_COLOR_BIT);
/* Busy creating views for all my textures */
for (size_t i = 0; i < vk_ctx->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = VecGenericModelTopAndTexInMemoryInfo_mat(&vk_ctx->device_generic_models_top_and_tex, i);
M->diffuse_view = margaret_create_view_for_image(vk_ctx->device, &M->diffuse, VK_IMAGE_ASPECT_COLOR_BIT);
M->normal_view = margaret_create_view_for_image(vk_ctx->device, &M->normal, VK_IMAGE_ASPECT_COLOR_BIT);
M->specular_view = margaret_create_view_for_image(vk_ctx->device, &M->specular, VK_IMAGE_ASPECT_COLOR_BIT);
}
vk_ctx->IT1_framebuffer = create_IT1_framebuffer(vk_ctx->device,
vk_ctx->IT1_view, vk_ctx->zbuffer_view, vk_ctx->render_pass_0, MAX_WIN_WIDTH, MAX_WIN_HEIGHT);
// My cylinder 1 texture needs VkImageView
vk_ctx->cyl_1_diffuse_texture_view = margaret_create_view_for_image(
vk_ctx->device, &vk_ctx->device_cyl_1_diffuse_texture, VK_IMAGE_ASPECT_COLOR_BIT);
// My cylinder 1 normal texture also needs NkImageView
vk_ctx->cyl_1_normal_texture_view = margaret_create_view_for_image(
vk_ctx->device, &vk_ctx->device_cyl_1_normal_texture, VK_IMAGE_ASPECT_COLOR_BIT);
// Right now I only have one light source
VecPipeline0PointLight_append(&vk_ctx->scene.point_lights, (Pipeline0PointLight){.pos = {0}, .color = {100, 100, 100}});
@ -2003,30 +2051,73 @@ int main() {
vk_ctx->linear_sampler = margaret_create_sampler(vk_ctx->physical_device, vk_ctx->device, true);
vk_ctx->nearest_sampler = margaret_create_sampler(vk_ctx->physical_device, vk_ctx->device, false);
vk_ctx->descriptor_pool = margaret_create_descriptor_set_pool(vk_ctx->device, 2, 3, 3);
vk_ctx->descriptor_set_for_pipeline_0a = margaret_allocate_descriptor_set(
vk_ctx->device, vk_ctx->descriptor_pool, vk_ctx->pipeline_hands_0a.descriptor_set_layout);
vk_ctx->descriptor_pool = margaret_create_descriptor_set_pool(vk_ctx->device,
1 + 1 * vk_ctx->device_generic_models_top_and_tex.len,
1 + 3 * vk_ctx->device_generic_models_top_and_tex.len,
2 + 1 * vk_ctx->device_generic_models_top_and_tex.len);
for (size_t i = 0; i < vk_ctx->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = &vk_ctx->device_generic_models_top_and_tex.buf[i];
M->p_0a_set_0 = margaret_allocate_descriptor_set(
vk_ctx->device, vk_ctx->descriptor_pool, vk_ctx->pipeline_hands_0a.descriptor_set_layout);
}
vk_ctx->descriptor_set_for_pipeline_0b = margaret_allocate_descriptor_set(
vk_ctx->device, vk_ctx->descriptor_pool, vk_ctx->pipeline_hands_0b.descriptor_set_layout);
vk_ctx->descriptor_set_for_pipeline_1 = margaret_allocate_descriptor_set(
vk_ctx->device, vk_ctx->descriptor_pool, vk_ctx->pipeline_hands_1.descriptor_set_layout);
// Configuring my descriptor sets, that I just allocated
VkDescriptorBufferInfo buffer_info_for_descriptor_0_in_set_0a = {
.buffer = vk_ctx->device_lighting_ubo.buffer,
.offset = 0,
.range = sizeof(Pipeline0UBO),
};
VkDescriptorImageInfo image_info_for_descriptor_1_in_set_0a = {
.sampler = vk_ctx->linear_sampler,
.imageView = vk_ctx->cyl_1_diffuse_texture_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkDescriptorImageInfo image_info_for_descriptor_2_in_set_0a = {
.sampler = vk_ctx->nearest_sampler,
.imageView = vk_ctx->cyl_1_normal_texture_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
for (size_t i = 0; i < vk_ctx->device_generic_models_top_and_tex.len; i++) {
GenericModelTopAndTexInMemoryInfo* M = &vk_ctx->device_generic_models_top_and_tex.buf[i];
VkDescriptorBufferInfo buffer_info_for_descriptor_0_in_set_0a = {
.buffer = vk_ctx->device_lighting_ubo.buffer,
.offset = 0,
.range = sizeof(Pipeline0UBO),
};
VkDescriptorImageInfo image_info_for_descriptor_1_in_set_0a = {
.sampler = vk_ctx->linear_sampler,
.imageView = M->diffuse_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkDescriptorImageInfo image_info_for_descriptor_2_in_set_0a = {
.sampler = vk_ctx->nearest_sampler,
.imageView = M->normal_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
// todo: add a third binding (for specular shading)
VkWriteDescriptorSet writes_in_descriptor_set[] = {
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = M->p_0a_set_0,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.pBufferInfo = &buffer_info_for_descriptor_0_in_set_0a,
},
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = M->p_0a_set_0,
.dstBinding = 1,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &image_info_for_descriptor_1_in_set_0a,
},
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = M->p_0a_set_0,
.dstBinding = 2,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &image_info_for_descriptor_2_in_set_0a,
},
};
vkUpdateDescriptorSets(vk_ctx->device, ARRAY_SIZE(writes_in_descriptor_set), writes_in_descriptor_set, 0, NULL);
}
VkDescriptorBufferInfo buffer_info_for_descriptor_0_in_set_0b = {
.buffer = vk_ctx->device_lighting_ubo.buffer,
.offset = 0,
@ -2038,34 +2129,6 @@ int main() {
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkWriteDescriptorSet writes_in_descriptor_sets[] = {
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = vk_ctx->descriptor_set_for_pipeline_0a,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.pBufferInfo = &buffer_info_for_descriptor_0_in_set_0a,
},
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = vk_ctx->descriptor_set_for_pipeline_0a,
.dstBinding = 1,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &image_info_for_descriptor_1_in_set_0a,
},
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = vk_ctx->descriptor_set_for_pipeline_0a,
.dstBinding = 2,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &image_info_for_descriptor_2_in_set_0a,
},
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = vk_ctx->descriptor_set_for_pipeline_0b,
@ -2106,8 +2169,8 @@ int main() {
vkDestroySampler(vk_ctx->device, vk_ctx->linear_sampler, NULL);
vkDestroySampler(vk_ctx->device, vk_ctx->nearest_sampler, NULL);
vkDestroyImageView(vk_ctx->device, vk_ctx->cyl_1_normal_texture_view, NULL);
vkDestroyImageView(vk_ctx->device, vk_ctx->cyl_1_diffuse_texture_view, NULL);
// vkDestroyImageView(vk_ctx->device, vk_ctx->cyl_1_normal_texture_view, NULL);
// vkDestroyImageView(vk_ctx->device, vk_ctx->cyl_1_diffuse_texture_view, NULL);
vkDestroyFramebuffer(vk_ctx->device, vk_ctx->IT1_framebuffer, NULL);
vkDestroyImageView(vk_ctx->device, vk_ctx->IT1_view, NULL);
vkDestroyImageView(vk_ctx->device, vk_ctx->zbuffer_view, NULL);
@ -2121,44 +2184,44 @@ int main() {
vkFreeMemory(vk_ctx->device, vk_ctx->device_mem, NULL);
// todo: delete all the crap
vkDestroyImage(vk_ctx->device, vk_ctx->device_cyl_1_diffuse_texture.image, NULL);
vkDestroyImage(vk_ctx->device, vk_ctx->device_cyl_1_normal_texture.image, NULL);
// vkDestroyImage(vk_ctx->device, vk_ctx->device_cyl_1_diffuse_texture.image, NULL);
// vkDestroyImage(vk_ctx->device, vk_ctx->device_cyl_1_normal_texture.image, NULL);
vkDestroyImage(vk_ctx->device, vk_ctx->device_IT1_image.image, NULL);
vkDestroyImage(vk_ctx->device, vk_ctx->device_zbuffer_image.image, NULL);
vkDestroyBuffer(vk_ctx->device, vk_ctx->device_lighting_ubo.buffer, NULL);
vkDestroyBuffer(vk_ctx->device, vk_ctx->device_instance_attrs_for_models.buffer, NULL);
for (size_t i = 0; i < vk_ctx->device_ebo_buffers_for_generic_meshes.len; i++)
vkDestroyBuffer(vk_ctx->device,
VecMargaretBufferInMemoryInfo_at(&vk_ctx->device_ebo_buffers_for_generic_meshes, i)->buffer,
NULL);
VecMargaretBufferInMemoryInfo_drop(vk_ctx->device_ebo_buffers_for_generic_meshes);
for (size_t i = 0; i < vk_ctx->device_vbo_buffers_for_generic_meshes.len; i++)
vkDestroyBuffer(vk_ctx->device,
VecMargaretBufferInMemoryInfo_at(&vk_ctx->device_vbo_buffers_for_generic_meshes, i)->buffer,
NULL);
VecMargaretBufferInMemoryInfo_drop(vk_ctx->device_vbo_buffers_for_generic_meshes);
for (size_t i = 0; i < vk_ctx->device_ebo_buffers_for_shiny_meshes.len; i++)
vkDestroyBuffer(vk_ctx->device,
VecMargaretBufferInMemoryInfo_at(&vk_ctx->device_ebo_buffers_for_shiny_meshes, i)->buffer,
NULL);
VecMargaretBufferInMemoryInfo_drop(vk_ctx->device_ebo_buffers_for_shiny_meshes);
for (size_t i = 0; i < vk_ctx->device_vbo_buffers_for_shiny_meshes.len; i++)
vkDestroyBuffer(vk_ctx->device,
VecMargaretBufferInMemoryInfo_at(&vk_ctx->device_vbo_buffers_for_shiny_meshes, i)->buffer,
NULL);
VecMargaretBufferInMemoryInfo_drop(vk_ctx->device_vbo_buffers_for_shiny_meshes);
// for (size_t i = 0; i < vk_ctx->device_ebo_buffers_for_generic_meshes.len; i++)
// vkDestroyBuffer(vk_ctx->device,
// VecMargaretBufferInMemoryInfo_at(&vk_ctx->device_ebo_buffers_for_generic_meshes, i)->buffer,
// NULL);
// VecMargaretBufferInMemoryInfo_drop(vk_ctx->device_ebo_buffers_for_generic_meshes);
//
// for (size_t i = 0; i < vk_ctx->device_vbo_buffers_for_generic_meshes.len; i++)
// vkDestroyBuffer(vk_ctx->device,
// VecMargaretBufferInMemoryInfo_at(&vk_ctx->device_vbo_buffers_for_generic_meshes, i)->buffer,
// NULL);
// VecMargaretBufferInMemoryInfo_drop(vk_ctx->device_vbo_buffers_for_generic_meshes);
//
// for (size_t i = 0; i < vk_ctx->device_ebo_buffers_for_shiny_meshes.len; i++)
// vkDestroyBuffer(vk_ctx->device,
// VecMargaretBufferInMemoryInfo_at(&vk_ctx->device_ebo_buffers_for_shiny_meshes, i)->buffer,
// NULL);
// VecMargaretBufferInMemoryInfo_drop(vk_ctx->device_ebo_buffers_for_shiny_meshes);
//
// for (size_t i = 0; i < vk_ctx->device_vbo_buffers_for_shiny_meshes.len; i++)
// vkDestroyBuffer(vk_ctx->device,
// VecMargaretBufferInMemoryInfo_at(&vk_ctx->device_vbo_buffers_for_shiny_meshes, i)->buffer,
// NULL);
// VecMargaretBufferInMemoryInfo_drop(vk_ctx->device_vbo_buffers_for_shiny_meshes);
vkDestroyBuffer(vk_ctx->device, vk_ctx->host_mem_buffer.buffer, NULL);
TextureDataR8G8B8A8_drop(vk_ctx->cyl_1_normal_tex);
TextureDataR8G8B8A8_drop(vk_ctx->cyl_1_diffuse_tex);
// TextureDataR8G8B8A8_drop(vk_ctx->cyl_1_normal_tex);
// TextureDataR8G8B8A8_drop(vk_ctx->cyl_1_diffuse_tex);
SceneTemplate_drop(vk_ctx->scene_template);
MargaretSwapchainBundle_drop_with_device(vk_ctx->device, vk_ctx->swfb);

View File

@ -869,9 +869,9 @@ ShinyMeshTopology generate_shiny_rhombicuboctahedron(float r) {
GenericMeshInSceneTemplate GenericMeshInSceneTemplate_for_log(U32 w, U32 r, U32 k, U32 max_instance_count) {
return (GenericMeshInSceneTemplate){.topology = generate_one_fourth_of_a_cylinder((float)w, (float)r, k),
.max_instance_count = max_instance_count,
.diffuse_texture_path = VecU8_format("log_%u_%u_%u_diffuse.png", w, r, k),
.normal_texture_path = VecU8_format("log_%u_%u_%u_NORMAL.png", w, r, k),
.specular_texture_path = VecU8_format("log_%u_%u_%u_specular.png", w, r, k),
.diffuse_texture_path = VecU8_format("textures/log_%u_%u_%u_diffuse.png", w, r, k),
.normal_texture_path = VecU8_format("textures/log_%u_%u_%u_NORMAL.png", w, r, k),
.specular_texture_path = VecU8_format("textures/log_%u_%u_%u_specular.png", w, r, k),
};
}

View File

@ -15,12 +15,12 @@ typedef struct {
VkImage diffuse_texture;
VkImage normal_texture;
VkImage specular_texture;
} ModelOnSceneMem;
} GenericModelOnSceneMem;
/* Contains both data for model instances attributes and buffer (+offset) where it is stored */
/* Also, I made it non-clonable. Thus */
typedef struct {
ModelOnSceneMem model;
GenericModelOnSceneMem model;
VecGenericMeshInstance instances;
} UsedGenericModelOnScene;
@ -31,7 +31,16 @@ void UsedGenericModelOnScene_drop(UsedGenericModelOnScene self) {
#include "../../../../gen/l1/eve/r0/VecUsedGenericModelOnScene.h"
typedef struct {
ModelOnSceneMem model;
VkBuffer vbo;
VkBuffer ebo;
size_t indexes;
VkBuffer instance_attr_buf;
VkDeviceSize instance_attr_buf_offset;
U32 limit_max_instance_count;
} ShinyModelOnSceneMem;
typedef struct {
ShinyModelOnSceneMem model;
VecShinyMeshInstance instances;
} UsedShinyModelOnScene;
@ -137,7 +146,7 @@ void SceneTemplate_copy_initial_model_topology_and_rerecord_transfer_cmd(
// todo: ot use one buffer per all the data
for (size_t mi = 0; mi < scene_template->generic_models.len; mi++) {
const GenericMeshInSceneTemplate* mt = VecGenericMeshInSceneTemplate_at(&scene_template->generic_models, mi);
const ModelOnSceneMem *m_buf = &VecUsedGenericModelOnScene_at(&scene->generic_models, mi)->model;
const GenericModelOnSceneMem *m_buf = &VecUsedGenericModelOnScene_at(&scene->generic_models, mi)->model;
size_t vbo_len = mt->topology.vertices.len * sizeof(GenericMeshVertex);
memcpy(host_mem_buffer_mem + offset, mt->topology.vertices.buf, vbo_len);
VkBufferCopy ra = {.srcOffset = offset, .dstOffset = 0, .size = vbo_len};
@ -151,7 +160,7 @@ void SceneTemplate_copy_initial_model_topology_and_rerecord_transfer_cmd(
}
for (size_t mi = 0; mi < scene_template->shiny_models.len; mi++) {
const ShinyMeshInSceneTemplate* mt = VecShinyMeshInSceneTemplate_at(&scene_template->shiny_models, mi);
const ModelOnSceneMem *m_buf = &VecUsedShinyModelOnScene_at(&scene->shiny_models, mi)->model;
const ShinyModelOnSceneMem *m_buf = &VecUsedShinyModelOnScene_at(&scene->shiny_models, mi)->model;
size_t vbo_len = mt->topology.vertices.len * sizeof(ShinyMeshVertex);
memcpy(host_mem_buffer_mem + offset, mt->topology.vertices.buf, vbo_len);
VkBufferCopy ra = {.srcOffset = offset, .dstOffset = 0, .size = vbo_len};

View File

Before

Width:  |  Height:  |  Size: 106 KiB

After

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB