I FINALLY REWROTE r0 TO USE NORMAL STUFF!! YEEEES. AFTER 100 years. But now session is back on my ass, again

This commit is contained in:
Андреев Григорий 2025-12-14 04:42:35 +03:00
parent aadc346f43
commit 8e3a306459
13 changed files with 674 additions and 545 deletions

View File

@ -14,15 +14,19 @@ void generate_margaret_eve_for_vulkan_utils() {
});
/* For l2/margaret/{ vulkan_img_claire.h , vulkan_buffer_claire.h } */
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretFreeSegment"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretIAFreeSegment"), true, false);
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
.T = cstr("MargaretFreeSegment"), .t_primitive = true});
.T = cstr("MargaretIAFreeSegment"), .t_primitive = true});
// todo: add to BufRBTree instantiator option to create necessary shit by itself
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretBAFreeSegment"), true, false);
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
.T = cstr("MargaretBAFreeSegment"), .t_primitive = true});
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
.T = cstr("BufRBTreeByLenRespAlign_SetMargaretFreeSegment")});
.T = cstr("BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment")});
generate_eve_span_company_for_non_primitive_non_clonable(l, ns, cstr("MargaretImgAllocatorOneBlock"), true, false);
generate_eve_span_company_for_non_primitive_non_clonable(l, ns, cstr("MargaretBufAllocatorOneBlock"), true, false);
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){.T = cstr("MargaretBufAllocatorOneBlock")}, true);
}

View File

@ -23,7 +23,6 @@ void generate_headers_for_r0_r1_r2_r3() {
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyModelOnSceneMem"), true, false);
/* r0 */
generate_eve_span_company_for_primitive(l, ns, cstr("GenericModelTexVulkPointers"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("CommandForImageCopying"), true, true);
}
mkdir_nofail("l1/eve/r2");
{ /* r2 */

View File

@ -91,7 +91,8 @@ NODISCARD VecU8 generate_List_template_instantiation(list_instantiation_op op, b
}
void generate_List_templ_inst_eve_header(SpanU8 layer, SpanU8 bonus_ns, list_instantiation_op op, bool gen_node_declaration) {
void generate_List_templ_inst_eve_header(
SpanU8 layer, SpanU8 bonus_ns, list_instantiation_op op, bool gen_node_declaration) {
generate_SOME_templ_inst_eve_header(layer, bonus_ns,
generate_List_template_instantiation(op, gen_node_declaration), VecU8_fmt("List%s", op.T));
}

View File

@ -53,6 +53,12 @@ NODISCARD VecU8 read_whole_file_or_abort(SpanU8 path) {
return result;
}
NODISCARD VecU8 read_file_by_path(VecU8 path){
VecU8 content = read_whole_file_or_abort(VecU8_to_span(&path));
VecU8_drop(path);
return content;
}
void write_whole_file_or_abort(const char* filename, SpanU8 content) {
FILE* fd = fopen(filename, "wb");
if (!fd) {

View File

@ -11,16 +11,16 @@ void generate_l1_5_template_instantiations_for_margaret(){
/* For l2/margaret/{ vulkan_img_claire.h , vulkan_buffer_claire.h } */
generate_buf_rbtree_Set_templ_inst_eve_header(l, ns, (set_instantiation_op){
.T = cstr("MargaretFreeSegment"), .t_primitive = true,
.T = cstr("MargaretIAFreeSegment"), .t_primitive = true,
/* comparison takes additional U8 parameter */
.alternative_less = cstr("MargaretFreeSegment_less_resp_align"),
.alternative_less = cstr("MargaretIAFreeSegment_less_resp_align"),
.alternative_comp_set_name_embed = cstr("LenRespAlign"),
.guest_data_T = cstr("U8"),
});
generate_buf_rbtree_Set_templ_inst_eve_header(l, ns, (set_instantiation_op){
.T = cstr("MargaretFreeSegment"), .t_primitive = true,
.T = cstr("MargaretBAFreeSegment"), .t_primitive = true,
/* comparison takes additional U8 parameter */
.alternative_less = cstr("MargaretFreeSegment_less_len"),
.alternative_less = cstr("MargaretBAFreeSegment_less_len"),
.alternative_comp_set_name_embed = cstr("Len"),
});
}

View File

@ -7,9 +7,10 @@
#include "../../../gen/l1/VecAndSpan_U32Segment.h"
#include "../../l1_5/core/buff_rb_tree_node.h"
#include "../../l1_5/core/rb_tree_node.h"
typedef struct {
MargaretMAIterator img;
MargaretImg img;
U64 usage;
U64 pos_in_desc_array;
} LucyImage;
@ -41,8 +42,6 @@ typedef struct{
typedef struct {
MargaretEngineReference ve;
VkCommandBuffer transfer_cmd_buffer;
MargaretMAIterator staging_buffer;
ListLucyImage images;
VkDescriptorSetLayout descriptor_set_layout;
VkDescriptorSet descriptor_set;
@ -50,10 +49,9 @@ typedef struct {
// todo: write
LucyGlyphCache LucyGlyphCache_new(MargaretEngineReference ve, VkCommandBuffer transfer_cmd_buffer, VkDescriptorSetLayout descriptor_set_layout){
LucyGlyphCache LucyGlyphCache_new(MargaretEngineReference ve, VkDescriptorSetLayout descriptor_set_layout){
VkDescriptorSet descriptor_set = margaret_allocate_descriptor_set(ve.device, ve.descriptor_pool, descriptor_set_layout);
MargaretMAIterator staging_buffer = MargaretMemAllocatorRequests_alloc_buf(ve.host_visible_mem_requests, 8192, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
return (LucyGlyphCache){.ve = ve, .transfer_cmd_buffer = transfer_cmd_buffer, .staging_buffer = staging_buffer,
return (LucyGlyphCache){.ve = ve,
.images = ListLucyImage_new(), .descriptor_set_layout = descriptor_set_layout, .descriptor_set = descriptor_set};
}

View File

@ -2,6 +2,15 @@
#define prototype1_src_l2_lucy_rendering_h
#include "glyph_cache.h"
#include "../../../gen/l1/pixel_masses.h"
#include "../../../gen/l1/geom.h"
typedef struct{
vec4 color;
vec2 pos;
vec2 tex_cord;
U32 tex_ind;
} LucyVertex;
typedef struct{
LucyGlyphCache cache;
@ -12,8 +21,8 @@ typedef struct{
#define LUCY_MAX_DESCRIPTOR_COUNT 10
LucyGlyphRenderer LucyGlyphRenderer_new(
MargaretEngineReference engine_reference, VkCommandBuffer transfer_command_buffer,
VkRenderPass render_pass, U32 renderpass_subpass, SpanU8 root_dir){
MargaretEngineReference engine_reference, SpanU8 root_dir,
VkRenderPass render_pass, U32 renderpass_subpass){
VkDescriptorSetLayout descriptor_set_layout;
check(vkCreateDescriptorSetLayout(engine_reference.device, &(VkDescriptorSetLayoutCreateInfo){
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
@ -27,7 +36,7 @@ LucyGlyphRenderer LucyGlyphRenderer_new(
}, NULL, &descriptor_set_layout) == VK_SUCCESS);
LucyGlyphCache cache = LucyGlyphCache_new(engine_reference, transfer_command_buffer, descriptor_set_layout);
LucyGlyphCache cache = LucyGlyphCache_new(engine_reference, descriptor_set_layout);
VkPipelineLayout pipeline_layout;
check(vkCreatePipelineLayout(engine_reference.device, &(VkPipelineLayoutCreateInfo){
@ -42,7 +51,25 @@ LucyGlyphRenderer LucyGlyphRenderer_new(
VkPipeline pipeline = margaret_create_triangle_pipeline_one_attachment(engine_reference.device,
render_pass, renderpass_subpass, (MargaretMostImportantPipelineOptions){
.pipeline_layout = pipeline_layout,});
.pipeline_layout = pipeline_layout,
.vertex_shader_code = read_file_by_path(VecU8_fmt("%s/gen/l_adele/lucy/vert.spv", root_dir)),
.fragment_shader_code = read_file_by_path(VecU8_fmt("%s/gen/l_adele/lucy/frag.spv", root_dir)),
.vertexBindingDescriptionCount = 1,
.pVertexBindingDescriptions = (VkVertexInputBindingDescription[]){
{ .binding = 0, .stride = sizeof(LucyVertex), .inputRate = VK_VERTEX_INPUT_RATE_VERTEX } },
.vertexAttributeDescriptionCount = 4,
.pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]){
{.location = 0, .binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT, .offset = offsetof(LucyVertex, color)},
{.location = 1, .binding = 0,
.format = VK_FORMAT_R32G32_SFLOAT, .offset = offsetof(LucyVertex, pos)},
{.location = 2, .binding = 0,
.format = VK_FORMAT_R32G32_SFLOAT, .offset = offsetof(LucyVertex, tex_cord)},
{.location = 3, .binding = 0,
.format = VK_FORMAT_R32_UINT, .offset = offsetof(LucyVertex, tex_ind)},
},
.depthTestEnable = false, .depthWriteEnable = false, .blendEnable = true,
});
return (LucyGlyphRenderer){.cache = cache, .pipeline_layout = pipeline_layout, .pipeline = pipeline};
}

View File

@ -1,47 +1 @@
#ifndef prototype1_src_l2_margaret_allocator_base_h
#define prototype1_src_l2_margaret_allocator_base_h
#include "../../l1/core/uint_segments.h"
#include "../../l1/core/util.h"
#include "../../l1_5/core/buff_rb_tree_node.h"
#include "../../../gen/l1_5/BufRBTree_MapU64ToU64.h"
typedef struct {
U64 block;
U64 start;
U64 len;
} MargaretFreeSegment;
bool MargaretFreeSegment_less_len(const MargaretFreeSegment* A, const MargaretFreeSegment* B){
if (A->len == B->len) {
if (A->block == B->block) {
return A->start < B->start;
}
return A->block < B->block;
}
return A->len < B->len;
}
// todo: substitute U64Segment_get_length_resp_alignment by my own function
bool MargaretFreeSegment_less_resp_align(const MargaretFreeSegment* A, const MargaretFreeSegment* B, U8 alignment_exp){
U64 A_len = U64Segment_get_length_resp_alignment((U64Segment){A->start, A->len}, alignment_exp);
U64 B_len = U64Segment_get_length_resp_alignment((U64Segment){B->start, B->len}, alignment_exp);
if (A_len == B_len) {
if (A->block == B->block) {
return A->start < B->start;
}
return A->block < B->block;
}
return A_len < B_len;
}
#include "../../../gen/l1/eve/margaret/VecMargaretFreeSegment.h"
#include "../../../gen/l1/eve/margaret/OptionMargaretFreeSegment.h"
U64 margaret_bump_buffer_size_to_alignment(U64 A, U8 alignment_exp){
if (A & ((1ull << alignment_exp) - 1))
A = A - (A & ((1ull << alignment_exp) - 1)) + (1ull << alignment_exp);
return A;
}
#endif

View File

@ -1,36 +1,60 @@
#ifndef prototype1_src_l2_margaret_vulkan_buffer_claire_h
#define prototype1_src_l2_margaret_vulkan_buffer_claire_h
// Same dependencies as vulkan memory allocator
#include "../../l1/core/uint_segments.h"
#include "../../l1/core/util.h"
#include "../../l1_5/core/buff_rb_tree_node.h"
#include "../../../gen/l1_5/BufRBTree_MapU64ToU64.h"
#include "allocator_base.h"
typedef struct MargaretBufAllocatorOneBlock MargaretBufAllocatorOneBlock;
typedef struct {
U64 block;
MargaretBufAllocatorOneBlock* block;
U64 start;
U64 len;
} MargaretBufAllocation;
} MargaretBAFreeSegment;
bool MargaretBAFreeSegment_less_len(const MargaretBAFreeSegment* A, const MargaretBAFreeSegment* B){
if (A->len == B->len) {
if (A->block == B->block) {
return A->start < B->start;
}
return (uintptr_t)A->block < (uintptr_t)B->block;
}
return A->len < B->len;
}
U64 margaret_bump_buffer_size_to_alignment(U64 A, U8 alignment_exp){
if (A & ((1ull << alignment_exp) - 1))
A = A - (A & ((1ull << alignment_exp) - 1)) + (1ull << alignment_exp);
return A;
}
typedef struct {
MargaretBufAllocatorOneBlock* block;
U64 start;
U64 len;
} MargaretSubbuf;
struct MargaretBufAllocatorOneBlock{
BufRBTree_MapU64ToU64 occupants;
U64 capacity;
U64 occupation_counter;
VkDeviceMemory mem_hand;
VkBuffer buf_hand;
void* mapped_memory;
} MargaretBufAllocatorOneBlock;
};
void MargaretBufAllocatorOneBlock_drop(MargaretBufAllocatorOneBlock self){
BufRBTree_MapU64ToU64_drop(self.occupants);
}
#include "../../../gen/l1/eve/margaret/ListMargaretBufAllocatorOneBlock.h"
#include "../../../gen/l1/eve/margaret/VecMargaretBufAllocatorOneBlock.h"
#include "../../../gen/l1/VecAndSpan_U8.h"
#include "../../../gen/l1_5/eve/margaret/BufRBTreeByLen_SetMargaretFreeSegment.h"
#include "../../../gen/l1/eve/margaret/OptionMargaretBAFreeSegment.h"
#include "../../../gen/l1/eve/margaret/VecMargaretBAFreeSegment.h"
#include "../../../gen/l1_5/eve/margaret/BufRBTreeByLen_SetMargaretBAFreeSegment.h"
typedef struct{
VecMargaretBufAllocatorOneBlock blocks;
BufRBTreeByLen_SetMargaretFreeSegment mem_free_space;
typedef struct {
ListMargaretBufAllocatorOneBlock blocks;
BufRBTreeByLen_SetMargaretBAFreeSegment mem_free_space;
VkDevice device;
VkPhysicalDevice physical_device;
VkBufferUsageFlags usage;
@ -40,35 +64,35 @@ typedef struct{
} MargaretBufAllocator;
void MargaretBufAllocator__erase_gap(MargaretBufAllocator* self, U64 block_id, U64 start, U64 len){
void MargaretBufAllocator__erase_gap(
MargaretBufAllocator* self, MargaretBufAllocatorOneBlock* block, U64 start, U64 len){
if (len == 0)
return;
bool eret = BufRBTreeByLen_SetMargaretFreeSegment_erase(&self->mem_free_space,
&(MargaretFreeSegment){.block = block_id, .start = start, .len = len});
bool eret = BufRBTreeByLen_SetMargaretBAFreeSegment_erase(&self->mem_free_space,
&(MargaretBAFreeSegment){.block = block, .start = start, .len = len});
assert(eret);
MargaretBufAllocatorOneBlock* BLOCK = VecMargaretBufAllocatorOneBlock_mat(&self->blocks, block_id);
BLOCK->occupation_counter += len;
assert(BLOCK->occupation_counter <= BLOCK->capacity);
block->occupation_counter += len;
assert(block->occupation_counter <= block->capacity);
}
void MargaretBufAllocator__insert_gap(MargaretBufAllocator* self, U64 block_id, U64 start, U64 len){
void MargaretBufAllocator__insert_gap(
MargaretBufAllocator* self, MargaretBufAllocatorOneBlock* block, U64 start, U64 len){
if (len == 0)
return;
bool iret = BufRBTreeByLen_SetMargaretFreeSegment_insert(&self->mem_free_space,
(MargaretFreeSegment){.block = block_id, .start = start, .len = len});
bool iret = BufRBTreeByLen_SetMargaretBAFreeSegment_insert(&self->mem_free_space,
(MargaretBAFreeSegment){.block = block, .start = start, .len = len});
assert(iret);
MargaretBufAllocatorOneBlock* BLOCK = VecMargaretBufAllocatorOneBlock_mat(&self->blocks, block_id);
assert(len <= BLOCK->occupation_counter);
BLOCK->occupation_counter -= len;
assert(len <= block->occupation_counter);
block->occupation_counter -= len;
}
OptionMargaretFreeSegment MargaretBufAllocator__search_gap(MargaretBufAllocator* self, U64 req_size){
OptionMargaretBAFreeSegment MargaretBufAllocator__search_gap(MargaretBufAllocator* self, U64 req_size){
assert(req_size % (1ull << self->alignment_exp) == 0);
U64 sit = BufRBTreeByLen_SetMargaretFreeSegment_find_min_grtr_or_eq(&self->mem_free_space,
&(MargaretFreeSegment){.len = req_size});
U64 sit = BufRBTreeByLen_SetMargaretBAFreeSegment_find_min_grtr_or_eq(&self->mem_free_space,
&(MargaretBAFreeSegment){.len = req_size});
if (sit == 0)
return None_MargaretFreeSegment();
return Some_MargaretFreeSegment(*BufRBTreeByLen_SetMargaretFreeSegment_at_iter(&self->mem_free_space, sit));
return None_MargaretBAFreeSegment();
return Some_MargaretBAFreeSegment(*BufRBTreeByLen_SetMargaretBAFreeSegment_at_iter(&self->mem_free_space, sit));
}
void MargaretBufAllocator__add_block(MargaretBufAllocator* self, U64 capacity){
@ -92,10 +116,10 @@ void MargaretBufAllocator__add_block(MargaretBufAllocator* self, U64 capacity){
if (self->host_visible) {
check(vkMapMemory(self->device, memory, 0, capacity, 0, &mapped_memory) == VK_SUCCESS);
}
VecMargaretBufAllocatorOneBlock_append(&self->blocks, (MargaretBufAllocatorOneBlock){
ListMargaretBufAllocatorOneBlock_insert(&self->blocks, (MargaretBufAllocatorOneBlock){
.occupants = BufRBTree_MapU64ToU64_new_reserved(1),
.capacity = capacity,
.occupation_counter = 0,
.occupation_counter = capacity,
.mem_hand = memory, .buf_hand = buffer, .mapped_memory = mapped_memory
});
}
@ -105,35 +129,34 @@ MargaretBufAllocator MargaretBufAllocator_new(
VkBufferUsageFlags usage, U8 memory_type_id, U8 alignment_exp, bool host_visible, U64 initial_block_size
){
MargaretBufAllocator self = {
.blocks = VecMargaretBufAllocatorOneBlock_new(),
.mem_free_space = BufRBTreeByLen_SetMargaretFreeSegment_new_reserved(1),
.blocks = ListMargaretBufAllocatorOneBlock_new(),
.mem_free_space = BufRBTreeByLen_SetMargaretBAFreeSegment_new_reserved(1),
.device = device, .physical_device = physical_device, .usage = usage, .memory_type_id = memory_type_id,
.alignment_exp = alignment_exp, .host_visible = host_visible
};
MargaretBufAllocator__add_block(&self, initial_block_size);
MargaretBufAllocator__insert_gap(&self, 0, 0, initial_block_size);
MargaretBufAllocator__insert_gap(&self, &self.blocks.first->el, 0, initial_block_size);
return self;
}
void MargaretBufAllocator__put_buf_to_a_gap(MargaretBufAllocator* self, MargaretFreeSegment segment, U64 req_size){
void MargaretBufAllocator__put_buf_to_a_gap(MargaretBufAllocator* self, MargaretBAFreeSegment segment, U64 req_size){
assert(req_size <= segment.len);
MargaretBufAllocator__erase_gap(self, segment.block, segment.start, segment.len);
MargaretBufAllocator__insert_gap(self, segment.block,
segment.start + req_size, segment.start + segment.len - req_size);
BufRBTree_MapU64ToU64* images = &VecMargaretBufAllocatorOneBlock_mat(&self->blocks, segment.block)->occupants;
bool iret = BufRBTree_MapU64ToU64_insert(images, segment.start, req_size);
segment.start + req_size, segment.len - req_size);
BufRBTree_MapU64ToU64* occupants = &segment.block->occupants;
bool iret = BufRBTree_MapU64ToU64_insert(occupants, segment.start, req_size);
assert(iret);
}
U64Segment MargaretBufAllocator__get_left_free_space(
const MargaretBufAllocator* self, const MargaretBufAllocation* allocation){
const MargaretBufAllocatorOneBlock* block = VecMargaretBufAllocatorOneBlock_at(&self->blocks, allocation->block);
const MargaretBufAllocator* self, const MargaretSubbuf* allocation){
U64 occ_start = allocation->start;
U64 prev_occ_it = BufRBTree_MapU64ToU64_find_max_less(&block->occupants, allocation->start);
U64 prev_occ_it = BufRBTree_MapU64ToU64_find_max_less(&allocation->block->occupants, allocation->start);
if (prev_occ_it != 0) {
U64 prev_occ_start;
U64 prev_occ_taken_size;
BufRBTree_MapU64ToU64_at_iter(&block->occupants, prev_occ_it, &prev_occ_start, &prev_occ_taken_size);
BufRBTree_MapU64ToU64_at_iter(&allocation->block->occupants, prev_occ_it, &prev_occ_start, &prev_occ_taken_size);
assert(prev_occ_start + prev_occ_taken_size <= occ_start);
return (U64Segment){
@ -144,33 +167,33 @@ U64Segment MargaretBufAllocator__get_left_free_space(
}
U64Segment MargaretBufAllocator__get_right_free_space(
const MargaretBufAllocator* self, const MargaretBufAllocation* allocation){
const MargaretBufAllocatorOneBlock* block = VecMargaretBufAllocatorOneBlock_at(&self->blocks, allocation->block);
const MargaretBufAllocator* self, const MargaretSubbuf* allocation){
U64 occ_start = allocation->start;
U64 occ_taken_size = allocation->len;
U64 next_occ_it = BufRBTree_MapU64ToU64_find_min_grtr(&block->occupants, allocation->start);
U64 next_occ_it = BufRBTree_MapU64ToU64_find_min_grtr(&allocation->block->occupants, allocation->start);
if (next_occ_it != 0) {
U64 next_occ_start;
U64 next_occ_taken_size;
BufRBTree_MapU64ToU64_at_iter(&block->occupants, next_occ_it, &next_occ_start, &next_occ_taken_size);
BufRBTree_MapU64ToU64_at_iter(&allocation->block->occupants, next_occ_it, &next_occ_start, &next_occ_taken_size);
assert(occ_start + occ_taken_size <= next_occ_start);
return (U64Segment){.start = occ_start + occ_taken_size, .len = next_occ_start - (occ_start + occ_taken_size)};
}
return (U64Segment){.start = occ_start + occ_taken_size, .len = block->capacity - (occ_start + occ_taken_size)};
return (U64Segment){.start = occ_start + occ_taken_size, .len = allocation->block->capacity - (occ_start + occ_taken_size)};
}
void MargaretBufAllocator_drop(MargaretBufAllocator self){
for (size_t bi = 0; bi < self.blocks.len; bi++) {
vkDestroyBuffer(self.device, self.blocks.buf[bi].buf_hand, NULL);
vkFreeMemory(self.device, self.blocks.buf[bi].mem_hand, NULL);
for (ListNodeMargaretBufAllocatorOneBlock* bi = self.blocks.first; bi; bi = bi->next) {
vkDestroyBuffer(self.device, bi->el.buf_hand, NULL);
vkFreeMemory(self.device, bi->el.mem_hand, NULL);
}
VecMargaretBufAllocatorOneBlock_drop(self.blocks);
BufRBTreeByLen_SetMargaretFreeSegment_drop(self.mem_free_space);
ListMargaretBufAllocatorOneBlock_drop(self.blocks);
BufRBTreeByLen_SetMargaretBAFreeSegment_drop(self.mem_free_space);
}
void MargaretBufAllocator_free(MargaretBufAllocator* self, MargaretBufAllocation allocation){
/* Free one subbuffer, not a whole MBA :) */
void MargaretBufAllocator_free(MargaretBufAllocator* self, MargaretSubbuf allocation){
U64Segment left_free_space = MargaretBufAllocator__get_left_free_space(self, &allocation);
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, &allocation);
@ -181,7 +204,41 @@ void MargaretBufAllocator_free(MargaretBufAllocator* self, MargaretBufAllocation
right_free_space.start + right_free_space.len - left_free_space.start);
}
NODISCARD MargaretBufAllocation MargaretBufAllocator_alloc(MargaretBufAllocator* self, U64 req_size){
/* Idk how to hide this monster */
void MargaretBufAllocator_debug(const MargaretBufAllocator* self){
printf(" ======== MargaretBufAllocator state ======== \n");
int n_segments = (int)self->mem_free_space.el.len;
printf("Blocks:\n");
for (ListNodeMargaretBufAllocatorOneBlock* block_it = self->blocks.first; block_it; block_it = block_it->next) {
U64 free_space_acc_segs = 0;
U64 occ_space_acc_occ = 0;
MargaretBufAllocatorOneBlock* block = &block_it->el;
int n_occupants = (int)block->occupants.el.len;
printf("-*- occupied: %lu/%lu, occupants: %d\n", block->occupation_counter, block->capacity, n_occupants);
for (int si = 0; si < n_segments; si++) {
MargaretBAFreeSegment fseg = self->mem_free_space.el.buf[si];
if (fseg.block == block) {
assert(fseg.start + fseg.len <= block->capacity);
free_space_acc_segs += fseg.len;
}
}
for (int oi = 0; oi < n_occupants; oi++) {
KVPU64ToU64 occ = block->occupants.el.buf[oi];
assert(occ.key + occ.value <= block->capacity);
occ_space_acc_occ += occ.value;
for (int sc = 0; sc < n_occupants; sc++) {
KVPU64ToU64 occ2 = block->occupants.el.buf[sc];
if (sc != oi) {
assert(occ.key + occ.value <= occ2.key || occ2.key + occ2.value <= occ.key);
}
}
}
assert(free_space_acc_segs == block->capacity - block->occupation_counter);
assert(occ_space_acc_occ == block->occupation_counter);
}
}
NODISCARD MargaretSubbuf MargaretBufAllocator_alloc(MargaretBufAllocator* self, U64 req_size){
req_size = margaret_bump_buffer_size_to_alignment(req_size, self->alignment_exp);
VkPhysicalDeviceMaintenance3Properties maintenance3_properties = {
@ -194,26 +251,25 @@ NODISCARD MargaretBufAllocation MargaretBufAllocator_alloc(MargaretBufAllocator*
vkGetPhysicalDeviceProperties2(self->physical_device, &properties);
check(req_size <= maintenance3_properties.maxMemoryAllocationSize);
OptionMargaretFreeSegment free_gap = MargaretBufAllocator__search_gap(self, req_size);
OptionMargaretBAFreeSegment free_gap = MargaretBufAllocator__search_gap(self, req_size);
if (free_gap.variant == Option_None) {
assert(self->blocks.len > 0);
U64 pitch = self->blocks.buf[self->blocks.len - 1].capacity;
assert(self->blocks.first != NULL);
U64 pitch = self->blocks.first->el.capacity;
// Old blocks remain intact
U64 new_capacity = MAX_U64(req_size, MIN_U64(2 * pitch, maintenance3_properties.maxMemoryAllocationSize));
MargaretBufAllocator__add_block(self, new_capacity);
U64 bid = self->blocks.len;
MargaretBufAllocator__insert_gap(self, bid, req_size, new_capacity - req_size);
MargaretBufAllocatorOneBlock* block = VecMargaretBufAllocatorOneBlock_mat(&self->blocks, free_gap.some.block);
block->occupation_counter = req_size;
bool iret = BufRBTree_MapU64ToU64_insert(&block->occupants, 0, req_size);
MargaretBufAllocatorOneBlock* new_block = &self->blocks.first->el;
MargaretBufAllocator__insert_gap(self, new_block, req_size, new_capacity - req_size);
new_block->occupation_counter = req_size;
bool iret = BufRBTree_MapU64ToU64_insert(&new_block->occupants, 0, req_size);
assert(iret);
return (MargaretSubbuf){.block = &self->blocks.first->el, 0, req_size};
}
MargaretBufAllocator__put_buf_to_a_gap(self, free_gap.some, req_size);
return (MargaretBufAllocation){.block = free_gap.some.block, .start = free_gap.some.start, req_size};
return (MargaretSubbuf){.block = free_gap.some.block, .start = free_gap.some.start, req_size};
}
void MargaretBufAllocator_shrink(MargaretBufAllocator* self, MargaretBufAllocation* allocation, U64 smaller_size){
void MargaretBufAllocator_shrink(MargaretBufAllocator* self, MargaretSubbuf* allocation, U64 smaller_size){
smaller_size = margaret_bump_buffer_size_to_alignment(smaller_size, self->alignment_exp);
assert(smaller_size > 0);
assert(smaller_size <= allocation->len);
@ -229,10 +285,10 @@ void MargaretBufAllocator_shrink(MargaretBufAllocator* self, MargaretBufAllocati
/* It actually may returns a 'null-MargaretBuf-allocation' : if return value .len field is zero it means
* that expansion in-place was possible and the allocator argument was updated with a new size and nothing was returned.
* But if ret value .len field is non-zero it means a valid MargaretBufAllocation object was returned and the
* But if ret value .len field is non-zero it means a valid MargaretSubbuf object was returned and the
* `allocation` argument was untouched. It remains a valid object, you need to deallocate it yourself
*/
NODISCARD MargaretBufAllocation MargaretBufAllocator_expand(MargaretBufAllocator* self, MargaretBufAllocation* allocation, U64 bigger_size){
NODISCARD MargaretSubbuf MargaretBufAllocator_expand(MargaretBufAllocator* self, MargaretSubbuf* allocation, U64 bigger_size){
bigger_size = margaret_bump_buffer_size_to_alignment(bigger_size, self->alignment_exp);
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, allocation);
@ -245,7 +301,16 @@ NODISCARD MargaretBufAllocation MargaretBufAllocator_expand(MargaretBufAllocator
right_free_space.len + (allocation->len - bigger_size));
allocation->len = bigger_size;
return (MargaretBufAllocation){0};
return (MargaretSubbuf){0};
}
#endif
char* MargaretSubbuf_get_mapped(const MargaretSubbuf* allocation){
assert(allocation->block->mapped_memory);
assert(allocation->start + allocation->len <= allocation->block->capacity);
return (char*)allocation->block->mapped_memory + allocation->start;
}
VkBuffer MargaretSubbuf_get_buffer(const MargaretSubbuf* allocation){
assert(allocation->start + allocation->len <= allocation->block->capacity);
return allocation->block->buf_hand;
}

View File

@ -178,7 +178,30 @@
// todo: fucking rewrite all of this. Yes, I want all of this shit rewritten
#include "allocator_base.h"
#include "../../l1/core/util.h"
#include "../../l1_5/core/buff_rb_tree_node.h"
#include "../../../gen/l1_5/BufRBTree_MapU64ToU64.h"
typedef struct{
U64 block;
U64 start;
U64 len;
} MargaretIAFreeSegment;
#include "../../l1/core/uint_segments.h"
// todo: substitute U64Segment_get_length_resp_alignment by my own function
bool MargaretIAFreeSegment_less_resp_align(const MargaretIAFreeSegment* A, const MargaretIAFreeSegment* B, U8 alignment_exp){
U64 A_len = U64Segment_get_length_resp_alignment((U64Segment){A->start, A->len}, alignment_exp);
U64 B_len = U64Segment_get_length_resp_alignment((U64Segment){B->start, B->len}, alignment_exp);
if (A_len == B_len) {
if (A->block == B->block) {
return A->start < B->start;
}
return A->block < B->block;
}
return A_len < B_len;
}
/* Does not include all parameters needed for relocation. Because relocation is needed only
* during controlled defragmentation */
@ -197,20 +220,23 @@ typedef struct {
void* mapped_memory;
} MargaretImgAllocatorOneBlock;
void MargaretMemAllocatorOneBlock_drop(MargaretImgAllocatorOneBlock self){
void MargaretImgAllocatorOneBlock_drop(MargaretImgAllocatorOneBlock self){
BufRBTree_MapU64ToU64_drop(self.images);
}
#include "../../../gen/l1/eve/margaret/VecMargaretImgAllocatorOneBlock.h"
#include "../../../gen/l1/VecAndSpan_U8.h"
#include "../../../gen/l1_5/eve/margaret/BufRBTreeByLenRespAlign_SetMargaretFreeSegment.h"
#include "../../../gen/l1/eve/margaret/OptionBufRBTreeByLenRespAlign_SetMargaretFreeSegment.h"
#include "../../../gen/l1/eve/margaret/VecMargaretIAFreeSegment.h"
#include "../../../gen/l1/eve/margaret/OptionMargaretIAFreeSegment.h"
#include "../../../gen/l1_5/eve/margaret/BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment.h"
#include "../../../gen/l1/eve/margaret/OptionBufRBTreeByLenRespAlign_SetMargaretIAFreeSegment.h"
#define MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP 28
/* Superstructure for managing free segments of memory of some type in ALL BLOCKS */
typedef struct {
OptionBufRBTreeByLenRespAlign_SetMargaretFreeSegment free_space_in_memory[MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP];
OptionBufRBTreeByLenRespAlign_SetMargaretIAFreeSegment free_space_in_memory[MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP];
VecU8 set_present;
} MargaretMemFreeSpaceManager;
@ -218,15 +244,15 @@ MargaretMemFreeSpaceManager MargaretMemFreeSpaceManager_new(){
MargaretMemFreeSpaceManager res = {.set_present = VecU8_new_zeroinit(1)};
res.set_present.buf[0] = 3;
for (U8 algn = 0; algn < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP; algn++)
res.free_space_in_memory[algn] = None_BufRBTreeByLenRespAlign_SetMargaretFreeSegment();
res.free_space_in_memory[3] = Some_BufRBTreeByLenRespAlign_SetMargaretFreeSegment(
BufRBTreeByLenRespAlign_SetMargaretFreeSegment_new_reserved(3, 1));
res.free_space_in_memory[algn] = None_BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment();
res.free_space_in_memory[3] = Some_BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment(
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_new_reserved(3, 1));
return res;
}
void MargaretMemFreeSpaceManager_drop(MargaretMemFreeSpaceManager self){
for (U8 alignment_exp = 0; alignment_exp < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP; alignment_exp++)
OptionBufRBTreeByLenRespAlign_SetMargaretFreeSegment_drop(self.free_space_in_memory[alignment_exp]);
OptionBufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_drop(self.free_space_in_memory[alignment_exp]);
VecU8_drop(self.set_present);
}
@ -238,9 +264,9 @@ void MargaretMemFreeSpaceManager_erase(MargaretMemFreeSpaceManager* man, U64 blo
U8 alignment = man->set_present.buf[aj];
assert(alignment < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP);
assert(man->free_space_in_memory[alignment].variant == Option_Some);
bool eret = BufRBTreeByLenRespAlign_SetMargaretFreeSegment_erase(&
bool eret = BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_erase(&
man->free_space_in_memory[alignment].some,
&(MargaretFreeSegment){.block = block, .start = start, .len = len});
&(MargaretIAFreeSegment){.block = block, .start = start, .len = len});
assert(eret);
}
}
@ -253,32 +279,32 @@ void MargaretMemFreeSpaceManager_insert(MargaretMemFreeSpaceManager* man, U64 bl
U8 alignment = man->set_present.buf[aj];
assert(alignment < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP);
assert(man->free_space_in_memory[alignment].variant == Option_Some);
bool iret = BufRBTreeByLenRespAlign_SetMargaretFreeSegment_insert(&
man->free_space_in_memory[alignment].some, (MargaretFreeSegment){.block = block, .start = start, .len = len});
bool iret = BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_insert(&
man->free_space_in_memory[alignment].some, (MargaretIAFreeSegment){.block = block, .start = start, .len = len});
assert(iret);
}
}
OptionMargaretFreeSegment MargaretMemFreeSpaceManager_search(
OptionMargaretIAFreeSegment MargaretMemFreeSpaceManager_search(
MargaretMemFreeSpaceManager* man, U8 alignment_exp, U64 req_size) {
check(alignment_exp < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP);
if (man->free_space_in_memory[alignment_exp].variant == Option_None) {
assert(man->set_present.len > 0);
assert(man->free_space_in_memory[man->set_present.buf[0]].variant == Option_Some);
BufRBTreeByLenRespAlign_SetMargaretFreeSegment* have = &man->free_space_in_memory[man->set_present.buf[0]].some;
man->free_space_in_memory[alignment_exp] = Some_BufRBTreeByLenRespAlign_SetMargaretFreeSegment(
BufRBTreeByLenRespAlign_SetMargaretFreeSegment_new_reserved(alignment_exp, have->el.len));
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment* have = &man->free_space_in_memory[man->set_present.buf[0]].some;
man->free_space_in_memory[alignment_exp] = Some_BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment(
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_new_reserved(alignment_exp, have->el.len));
for (size_t i = 0; i < have->el.len; i++) {
BufRBTreeByLenRespAlign_SetMargaretFreeSegment_insert(
&man->free_space_in_memory[alignment_exp].some, *VecMargaretFreeSegment_at(&have->el, i));
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_insert(
&man->free_space_in_memory[alignment_exp].some, *VecMargaretIAFreeSegment_at(&have->el, i));
}
}
assert(man->free_space_in_memory[alignment_exp].variant == Option_Some);
U64 sit = BufRBTreeByLenRespAlign_SetMargaretFreeSegment_find_min_grtr_or_eq(
&man->free_space_in_memory[alignment_exp].some, &(MargaretFreeSegment){.len = req_size,});
U64 sit = BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_find_min_grtr_or_eq(
&man->free_space_in_memory[alignment_exp].some, &(MargaretIAFreeSegment){.len = req_size,});
if (sit == 0)
return None_MargaretFreeSegment();
return Some_MargaretFreeSegment(*BufRBTreeByLenRespAlign_SetMargaretFreeSegment_at_iter(
return None_MargaretIAFreeSegment();
return Some_MargaretIAFreeSegment(*BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_at_iter(
&man->free_space_in_memory[alignment_exp].some, sit));
}
@ -314,7 +340,7 @@ void MargaretImgAllocator__add_block(MargaretImgAllocator* self, U64 capacity){
VecMargaretImgAllocatorOneBlock_append(&self->blocks, (MargaretImgAllocatorOneBlock){
.images = BufRBTree_MapU64ToU64_new_reserved(1),
.capacity = capacity,
.occupation_counter = 0,
.occupation_counter = capacity,
.mem_hand = memory,
.mapped_memory = NULL /* not supported */});
}
@ -340,7 +366,7 @@ U64 margaret_get_alignment_left_padding(U64 unaligned_start, U8 alignment_exp){
}
U64 MargaretImgAllocator__add_img_given_gap(
MargaretImgAllocator* self, MargaretFreeSegment segment, U64 required_size, U8 alignment_exp
MargaretImgAllocator* self, MargaretIAFreeSegment segment, U64 required_size, U8 alignment_exp
){
U64 gap_start = segment.start;
U64 gap_len = segment.len;
@ -418,9 +444,9 @@ void MargaretImgAllocator_free(MargaretImgAllocator* self, MargaretImgAllocation
right_free_space.start + right_free_space.len - left_free_space.start);
}
NODISCARD MargaretImgAllocation MargaretImgAllocator_alloc(
NODISCARD MargaretImgAllocation MargaretImgAllocator__alloc(
MargaretImgAllocator* self, U64 width, U64 height, VkFormat format,
VkImageUsageFlags usage_flags, VkImageLayout current_layout
VkImageUsageFlags usage_flags
){
VkPhysicalDeviceMaintenance3Properties maintenance3_properties = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
@ -444,7 +470,7 @@ NODISCARD MargaretImgAllocation MargaretImgAllocator_alloc(
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = usage_flags,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.initialLayout = current_layout,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
}, NULL, &fresh_img) == VK_SUCCESS);
VkMemoryRequirements mem_requirements;
vkGetImageMemoryRequirements(self->device, fresh_img, &mem_requirements);
@ -452,25 +478,44 @@ NODISCARD MargaretImgAllocation MargaretImgAllocator_alloc(
check(U64_is_2pow(mem_requirements.alignment));
U8 alignment_exp = U64_2pow_log(mem_requirements.alignment);
check(mem_requirements.size <= maintenance3_properties.maxMemoryAllocationSize);
OptionMargaretFreeSegment free_gap =
OptionMargaretIAFreeSegment free_gap =
MargaretMemFreeSpaceManager_search(&self->mem_free_space, alignment_exp, mem_requirements.size);
if (free_gap.variant == Option_None) {
assert(self->blocks.len > 0);
U64 pitch = self->blocks.buf[self->blocks.len - 1].capacity;
// Old blocks remain intact
U64 new_capacity = MAX_U64(mem_requirements.size, MIN_U64(2 * pitch, maintenance3_properties.maxMemoryAllocationSize));
MargaretImgAllocator__add_block(self, new_capacity);
U64 bid = self->blocks.len;
U64 bid = self->blocks.len - 1;
MargaretImgAllocator__insert_gap(self, bid, mem_requirements.size, new_capacity - mem_requirements.size);
MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, free_gap.some.block);
MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, bid);
block->occupation_counter = mem_requirements.size;
bool iret = BufRBTree_MapU64ToU64_insert(&block->images, 0, mem_requirements.size);
assert(iret);
check(vkBindImageMemory(self->device, fresh_img, block->mem_hand, 0) == VK_SUCCESS);
return (MargaretImgAllocation){.block = bid, fresh_img, 0};
}
U64 aligned_pos = MargaretImgAllocator__add_img_given_gap(self, free_gap.some, mem_requirements.size, alignment_exp);
VkDeviceMemory memory = VecMargaretImgAllocatorOneBlock_at(&self->blocks, free_gap.some.block)->mem_hand;
check(vkBindImageMemory(self->device, fresh_img, memory, aligned_pos) == VK_SUCCESS);
return (MargaretImgAllocation){.block = free_gap.some.block, .image = fresh_img, .start = aligned_pos};
}
/* An image allocation bundled with the creation parameters that later code
 * needs again (view creation, whole-image copies, layout transitions). */
typedef struct{
    MargaretImgAllocation a;           /* block id / VkImage / start offset from the allocator */
    U64 width;                         /* extent passed at image-creation time */
    U64 height;
    VkFormat format;
    VkImageUsageFlags usage_flags;
    VkImageLayout current_layout;      /* tracked layout; updated by the copy/transition helpers */
} MargaretImg;
/* Allocates a 2D image via the internal allocator and wraps it together with
 * its creation parameters. Freshly created images always start their life in
 * VK_IMAGE_LAYOUT_UNDEFINED. */
NODISCARD MargaretImg MargaretImgAllocator_alloc(
    MargaretImgAllocator* self, U64 width, U64 height, VkFormat format,
    VkImageUsageFlags usage_flags
){
    MargaretImg img;
    img.a = MargaretImgAllocator__alloc(self, width, height, format, usage_flags);
    img.width = width;
    img.height = height;
    img.format = format;
    img.usage_flags = usage_flags;
    img.current_layout = VK_IMAGE_LAYOUT_UNDEFINED;
    return img;
}

View File

@ -1127,15 +1127,99 @@ VkPipeline margaret_create_triangle_pipeline_one_attachment(
// todo: move image copying function here
// for users of memory that should be aware whether we are using two memory types or one
/* Bundle of engine-level handles handed to users of memory that should be
 * aware whether we are using two memory types or one.
 * NOTE(review): this struct appears mid-refactor — it carries both the older
 * MargaretMemAllocator pair and the newer Img/Buf allocators; confirm which
 * set is the intended final interface. */
typedef struct {
    VkDevice device;
    MargaretMemAllocator* host_visible_mem;
    MargaretMemAllocatorRequests* host_visible_mem_requests;
    MargaretMemAllocator* device_local_mem;
    MargaretMemAllocatorRequests* device_local_mem_requests;
    bool device_local_is_host_visible;  /* true when one memory type serves both roles */
    VkPhysicalDevice physical_device;
    VkCommandBuffer transfer_cmd_buffer;
    MargaretImgAllocator* dev_local_images;
    MargaretBufAllocator* dev_local_buffers;
    MargaretBufAllocator* staging_buffers;
    VkDescriptorPool descriptor_pool;
} MargaretEngineReference;
/* Records a copy of `length` bytes between two sub-buffer allocations.
 * Offsets are relative to each allocation's own start inside its VkBuffer. */
void margaret_rec_cmd_copy_buffer(
    VkCommandBuffer cmd_buf,
    const MargaretSubbuf* src_allocation, U64 src_offset,
    const MargaretSubbuf* dst_allocation, U64 dst_offset, U64 length){
    VkBufferCopy region = {
        .srcOffset = src_allocation->start + src_offset,
        .dstOffset = dst_allocation->start + dst_offset,
        .size = length,
    };
    VkBuffer src_buf = MargaretSubbuf_get_buffer(src_allocation);
    VkBuffer dst_buf = MargaretSubbuf_get_buffer(dst_allocation);
    vkCmdCopyBuffer(cmd_buf, src_buf, dst_buf, 1, &region);
}
/* Records a copy of `length` bytes at the same `offset` inside two
 * equally-sized sub-buffer allocations (mirror copy of one sub-region).
 * Fix: the original bound check `offset + length <= len` could wrap on U64
 * overflow and let an out-of-range region slip past the assert; the check is
 * now written in an overflow-safe form. */
void margaret_rec_cmd_copy_buffer_one_to_one_part(
    VkCommandBuffer cmd_buf,
    const MargaretSubbuf* src_allocation,
    const MargaretSubbuf* dst_allocation, U64 offset, U64 length){
    /* Overflow-safe equivalent of `offset + length <= src_allocation->len`. */
    assert(offset <= src_allocation->len && length <= src_allocation->len - offset);
    assert(src_allocation->len == dst_allocation->len);
    vkCmdCopyBuffer(cmd_buf,
        MargaretSubbuf_get_buffer(src_allocation), MargaretSubbuf_get_buffer(dst_allocation),
        1, &(VkBufferCopy){
            .srcOffset = src_allocation->start + offset, .dstOffset = dst_allocation->start + offset, .size = length});
}
/* Records a full copy of one sub-buffer allocation into another of the exact
 * same length (whole-allocation mirror). */
void margaret_rec_cmd_copy_buffer_one_to_one(
    VkCommandBuffer cmd_buf, const MargaretSubbuf* src_allocation, const MargaretSubbuf* dst_allocation){
    assert(src_allocation->len == dst_allocation->len);
    VkBufferCopy whole_region = {
        .srcOffset = src_allocation->start,
        .dstOffset = dst_allocation->start,
        .size = src_allocation->len,
    };
    vkCmdCopyBuffer(cmd_buf,
        MargaretSubbuf_get_buffer(src_allocation), MargaretSubbuf_get_buffer(dst_allocation),
        1, &whole_region);
}
/* Records three commands into `cmd_buf`:
 *   1. barrier: transition `dst` into TRANSFER_DST_OPTIMAL (previous contents
 *      are discarded via oldLayout = UNDEFINED, since the whole image is
 *      overwritten below; NOTE(review): dst->current_layout is deliberately
 *      not consulted here — confirm that is always safe for callers),
 *   2. copy the tightly-packed pixel data of `src` into the full extent of `dst`,
 *   3. barrier: transition `dst` into `dst_new_layout`, making the transfer
 *      writes visible to `destination_stage_mask`/`destination_access_mask`.
 * Finally updates dst->current_layout bookkeeping.
 * (destination_stage_mask, destination_access_mask) are probably
 * (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT) */
void margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(
    VkCommandBuffer cmd_buf, const MargaretSubbuf* src, MargaretImg* dst,
    VkImageLayout dst_new_layout,
    VkPipelineStageFlags destination_stage_mask, VkAccessFlags destination_access_mask){
    /* 1: UNDEFINED -> TRANSFER_DST_OPTIMAL before the transfer stage writes. */
    vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
        0 /* Flags */, 0, NULL, 0, NULL, 1, &(VkImageMemoryBarrier){
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .srcAccessMask = 0,
            .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
            .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = dst->a.image,
            .subresourceRange = (VkImageSubresourceRange){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0,
                .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
            },
        });
    /* 2: bufferRowLength/bufferImageHeight = 0 means rows are tightly packed. */
    vkCmdCopyBufferToImage(cmd_buf, MargaretSubbuf_get_buffer(src), dst->a.image,
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &(VkBufferImageCopy){
            .bufferOffset = src->start,
            .bufferRowLength = 0,
            .bufferImageHeight = 0,
            .imageSubresource = (VkImageSubresourceLayers){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel = 0, .baseArrayLayer = 0, .layerCount = 1,
            },
            .imageOffset = {0, 0, 0},
            .imageExtent = { .width = dst->width, .height = dst->height, .depth = 1 },
        });
    /* 3: release the transfer writes to the consuming stage in the new layout. */
    vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TRANSFER_BIT, destination_stage_mask,
        0 /* Flags */, 0, NULL, 0, NULL, 1, &(VkImageMemoryBarrier){
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .dstAccessMask = destination_access_mask,
            .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .newLayout = dst_new_layout,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = dst->a.image,
            .subresourceRange = (VkImageSubresourceRange){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0,
                .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
            },
        });
    /* Keep bookkeeping in sync so future recordings know the image's state. */
    dst->current_layout = dst_new_layout;
}
#endif

View File

@ -536,8 +536,6 @@ void reset_and_record_command_buffer_0(
}, VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_and_layout_0a->pipeline);
// We forgot that viewport is not built into our pipeline
// We forgot that scissors are not built into out pipeline
record_cmd_set_viewport_and_scissors(command_buffer, image_extent);
vkCmdPushConstants(command_buffer, pipeline_and_layout_0a->pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT,
0, sizeof(mat4), &proj_cam_t);
@ -546,17 +544,22 @@ void reset_and_record_command_buffer_0(
for (size_t i = 0; i < scene->generic_models.len; i++) {
const GenericModelOnSceneMem *model = VecGenericModelOnSceneMem_at(&scene->generic_models, i);
VkDescriptorSet model_indiv_descriptor_set_0 = VecGenericModelTexVulkPointers_at(generic_models, i)->p_0a_set_0;
// todo: rewrite using compound literal syntax
VkBuffer attached_buffers[2] = { model->vbo->value.me.buf.buffer, model->instance_attr_buf->value.me.buf.buffer};
// We use our whole buffers, no need for offset
VkDeviceSize offsets_in_buffers[2] = {0, 0};
const MargaretSubbuf* dev_local_vbo = &model->vbo;
const MargaretSubbuf* dev_local_inst_attr = &model->instance_attr.device_local;
const MargaretSubbuf* dev_local_ebo = &model->ebo;
// const
vkCmdBindVertexBuffers(command_buffer, 0,
2, attached_buffers, offsets_in_buffers);
vkCmdBindIndexBuffer(command_buffer, model->ebo->value.me.buf.buffer, 0, VK_INDEX_TYPE_UINT32);
2, (VkBuffer[]){
MargaretSubbuf_get_buffer(dev_local_vbo),
MargaretSubbuf_get_buffer(dev_local_inst_attr),
}, (VkDeviceSize[]){ dev_local_vbo->start, dev_local_inst_attr->start });
vkCmdBindIndexBuffer(command_buffer, MargaretSubbuf_get_buffer(dev_local_ebo), dev_local_ebo->start,
VK_INDEX_TYPE_UINT32);
vkCmdBindDescriptorSets(
command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_and_layout_0a->pipeline_layout, 0,
1, &model_indiv_descriptor_set_0, 0, NULL);
vkCmdDrawIndexed(command_buffer, model->indexes, model->instance_vec_len, 0, 0, 0);
vkCmdDrawIndexed(command_buffer, model->indexes, model->instance_attr.count, 0, 0, 0);
}
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_and_layout_0b->pipeline);
@ -570,12 +573,15 @@ void reset_and_record_command_buffer_0(
1, &descriptor_set_for_pipeline_0b, 0, NULL);
for (size_t i = 0; i < scene->shiny_models.len; i++) {
const ShinyModelOnSceneMem* model = VecShinyModelOnSceneMem_at(&scene->shiny_models, i);
VkBuffer attached_buffers[2] = { model->vbo->value.me.buf.buffer, model->instance_attr_buf->value.me.buf.buffer };
// Same. We use our whole buffer, no need for offset
VkDeviceSize offsets_in_buffers[2] = {0, 0};
vkCmdBindVertexBuffers(command_buffer, 0, 2, attached_buffers, offsets_in_buffers);
vkCmdBindIndexBuffer(command_buffer, model->ebo->value.me.buf.buffer, 0, VK_INDEX_TYPE_UINT32);
vkCmdDrawIndexed(command_buffer, model->indexes, model->instance_vec_len, 0, 0, 0);
const MargaretSubbuf* dev_local_vbo = &model->vbo;
const MargaretSubbuf* dev_local_inst_attr = &model->instance_attr.device_local;
const MargaretSubbuf* dev_local_ebo = &model->ebo;
vkCmdBindVertexBuffers(command_buffer, 0, 2, (VkBuffer[]){
MargaretSubbuf_get_buffer(dev_local_vbo), MargaretSubbuf_get_buffer(dev_local_inst_attr)
}, (VkDeviceSize[]){ dev_local_vbo->start, dev_local_inst_attr->start });
vkCmdBindIndexBuffer(command_buffer, MargaretSubbuf_get_buffer(dev_local_ebo), dev_local_ebo->start,
VK_INDEX_TYPE_UINT32);
vkCmdDrawIndexed(command_buffer, model->indexes, model->instance_attr.count, 0, 0, 0);
}
vkCmdEndRenderPass(command_buffer);
@ -632,59 +638,49 @@ void record_copying_entire_scene_from_staging_to_device_local(VkCommandBuffer co
margaret_reset_and_begin_command_buffer(command_buffer);
for (size_t mi = 0; mi < scene->generic_models.len; mi++) {
const GenericModelOnSceneMem* model = VecGenericModelOnSceneMem_at(&scene->generic_models, mi);
assert(model->instance_vec_len <= model->instance_vec_capacity);
assert(model->instance_vec_capacity * sizeof(GenericMeshInstance) == model->instance_attr_buf->value.me.buf.capacity);
if (model->instance_vec_len) {
vkCmdCopyBuffer(command_buffer,
model->staging_instance_attr_buf->value.me.buf.buffer, model->instance_attr_buf->value.me.buf.buffer,
1, &(VkBufferCopy){.srcOffset = 0, .dstOffset = 0,
.size = model->instance_vec_len * sizeof(GenericMeshInstance)});
assert(model->instance_attr.count * sizeof(GenericMeshInstance) <= model->instance_attr.staging_busy.len);
assert(model->instance_attr.count * sizeof(GenericMeshInstance) <= model->instance_attr.device_local.len);
if (model->instance_attr.count) {
margaret_rec_cmd_copy_buffer(command_buffer,
&model->instance_attr.staging_busy, 0, &model->instance_attr.device_local, 0,
model->instance_attr.count * sizeof(GenericMeshInstance));
}
}
for (size_t mi = 0; mi < scene->shiny_models.len; mi++) {
const ShinyModelOnSceneMem* model = VecShinyModelOnSceneMem_at(&scene->shiny_models, mi);
assert(model->instance_vec_len <= model->instance_vec_capacity);
assert(model->instance_vec_capacity * sizeof(ShinyMeshInstance) == model->instance_attr_buf->value.me.buf.capacity);
if (model->instance_vec_len) {
vkCmdCopyBuffer(command_buffer,
model->staging_instance_attr_buf->value.me.buf.buffer, model->instance_attr_buf->value.me.buf.buffer,
1, &(VkBufferCopy){.srcOffset = 0, .dstOffset = 0,
.size = model->instance_vec_len * sizeof(ShinyMeshInstance)});
assert(model->instance_attr.count * sizeof(ShinyMeshInstance) <= model->instance_attr.staging_busy.len);
assert(model->instance_attr.count * sizeof(ShinyMeshInstance) <= model->instance_attr.device_local.len);
if (model->instance_attr.count) {
margaret_rec_cmd_copy_buffer(command_buffer,
&model->instance_attr.staging_busy, 0, &model->instance_attr.device_local, 0,
model->instance_attr.count * sizeof(ShinyMeshInstance));
}
}
Pipeline0UBO* pipeline_0_ubo = (Pipeline0UBO*)MargaretMAIterator_get_mapped(scene->pipeline0_staging_ubo);
assert(pipeline_0_ubo->point_light_count <= pipeline_0_ubo_point_light_max_count);
assert(pipeline_0_ubo->spotlight_count <= pipeline_0_ubo_spotlight_max_count);
{ /* Pipeline0 UBO */
Pipeline0UBO* mem = (Pipeline0UBO*)MargaretSubbuf_get_mapped(&scene->pipeline0_ubo.staging_busy);
assert(mem->point_light_count <= pipeline_0_ubo_point_light_max_count);
assert(mem->spotlight_count <= pipeline_0_ubo_spotlight_max_count);
VkBufferCopy regions_to_copy[4] = {
{
.srcOffset = offsetof(Pipeline0UBO, point_light_count), .dstOffset = offsetof(Pipeline0UBO, point_light_count),
.size = sizeof(int)
},
{
.srcOffset = offsetof(Pipeline0UBO, spotlight_count), .dstOffset = offsetof(Pipeline0UBO, spotlight_count),
.size = sizeof(int)
// todo: I will probably replace pipeline0ubo with ubo for length + two readonly storage buffers for light sources
// all of this is basically useless
const MargaretSubbuf* ubo_staging = &scene->pipeline0_ubo.staging_busy;
const MargaretSubbuf* ubo_device_local = &scene->pipeline0_ubo.device_local;
margaret_rec_cmd_copy_buffer_one_to_one_part(command_buffer, ubo_staging, ubo_device_local,
offsetof(Pipeline0UBO, point_light_count), sizeof(int));
margaret_rec_cmd_copy_buffer_one_to_one_part(command_buffer, ubo_staging, ubo_device_local,
offsetof(Pipeline0UBO, spotlight_count), sizeof(int));
if (mem->point_light_count) {
margaret_rec_cmd_copy_buffer_one_to_one_part(command_buffer, ubo_staging, ubo_device_local,
offsetof(Pipeline0UBO, point_light_arr), sizeof(Pipeline0PointLight) * mem->point_light_count);
}
};
int regions_to_copy_c = 2;
if (pipeline_0_ubo->point_light_count) {
regions_to_copy[regions_to_copy_c] = (VkBufferCopy){
.srcOffset = offsetof(Pipeline0UBO, point_light_arr), .dstOffset = offsetof(Pipeline0UBO, point_light_arr),
.size = sizeof(Pipeline0PointLight) * pipeline_0_ubo->point_light_count,
};
regions_to_copy_c++;
if (mem->spotlight_count) {
margaret_rec_cmd_copy_buffer_one_to_one_part(command_buffer, ubo_staging, ubo_device_local,
offsetof(Pipeline0UBO, spotlight_arr), sizeof(Pipeline0Spotlight) * mem->spotlight_count);
}
margaret_end_command_buffer(command_buffer);
}
if (pipeline_0_ubo->spotlight_count) {
regions_to_copy[regions_to_copy_c] = (VkBufferCopy){
.srcOffset = offsetof(Pipeline0UBO, spotlight_arr), .dstOffset = offsetof(Pipeline0UBO, spotlight_arr),
.size = sizeof(Pipeline0Spotlight) * pipeline_0_ubo->spotlight_count,
};
regions_to_copy_c++;
}
vkCmdCopyBuffer(command_buffer, scene->pipeline0_staging_ubo->value.me.buf.buffer,
scene->pipeline0_ubo->value.me.buf.buffer, regions_to_copy_c, regions_to_copy);
margaret_end_command_buffer(command_buffer);
}
typedef struct {
@ -712,16 +708,19 @@ typedef struct {
// todo: but first, write the damn thing
VkCommandBuffer host_visible_mem_mv_command_buf; /* This is just pure blasphemy: todo: remove it nahyu */
VkDescriptorPool descriptor_pool; // todo: write dynamic allocator wrapper for descriptor pools
MargaretMemAllocator host_visible_coherent_mem;
MargaretMemAllocator device_local_mem;
MargaretImgAllocator dev_local_images;
MargaretBufAllocator dev_local_buffers;
MargaretBufAllocator staging_buffers;
Jane_r0 jane; // todo: figure out my own design
MargaretSwapchainBundle swfb;
SceneTemplate scene_template;
Scene scene;
MargaretMAIterator device_IT1_image;
MargaretMAIterator device_zbuffer_image;
MargaretImg IT1_image;
MargaretImg zbuffer_image;
VecGenericModelTexVulkPointers generic_model_tex_vulk_pointers;
VkImageView zbuffer_view;
@ -731,7 +730,6 @@ typedef struct {
VkDescriptorSet descriptor_set_for_pipeline_1;
// Descriptor sets for pipeline_0a are stored in generic_model_tex_vulk_pointers
bool dt_transfer_required;
} vulkan_ctx_r0;
typedef struct {
@ -812,27 +810,23 @@ void update_state(state_r0* state, uint32_t dur) {
{
GenericModelOnSceneMem* model = VecGenericModelOnSceneMem_mat(&state->vk.scene.generic_models, 0);
assert(model->instance_vec_len >= 1);
GenericMeshInstance* instances = (GenericMeshInstance*)MargaretMAIterator_get_mapped(model->staging_instance_attr_buf);
assert(model->instance_attr.count >= 1);
GenericMeshInstance* instances = (GenericMeshInstance*)MargaretSubbuf_get_mapped(&model->instance_attr.staging_updatable);
if (state->first_0x80_keys[XKB_KEY_j]) {
state->vk.scene.funny_vector.x -= fl;
instances[0].model_t = marie_translation_mat4(state->vk.scene.funny_vector);
state->vk.dt_transfer_required = true;
}
if (state->first_0x80_keys[XKB_KEY_k]) {
state->vk.scene.funny_vector.z -= fl;
instances[0].model_t = marie_translation_mat4(state->vk.scene.funny_vector);
state->vk.dt_transfer_required = true;
}
if (state->first_0x80_keys[XKB_KEY_l]) {
state->vk.scene.funny_vector.z += fl;
instances[0].model_t = marie_translation_mat4(state->vk.scene.funny_vector);
state->vk.dt_transfer_required = true;
}
if (state->first_0x80_keys[XKB_KEY_semicolon]) {
state->vk.scene.funny_vector.x += fl;
instances[0].model_t = marie_translation_mat4(state->vk.scene.funny_vector);
state->vk.dt_transfer_required = true;
}
}
}
@ -840,9 +834,9 @@ void update_state(state_r0* state, uint32_t dur) {
/* It recreates image views, descriptor sets, framebuffers. */
void recreate_vulkan_references_objects(state_r0* state){
vulkan_ctx_r0* vk = &state->vk;
vk->zbuffer_view = margaret_create_view_for_image(vk->device, vk->device_zbuffer_image->value.me.img.image,
vk->zbuffer_view = margaret_create_view_for_image(vk->device, vk->zbuffer_image.a.image,
vk->zbuffer_format, VK_IMAGE_ASPECT_DEPTH_BIT);
vk->IT1_view = margaret_create_view_for_image(vk->device, vk->device_IT1_image->value.me.img.image,
vk->IT1_view = margaret_create_view_for_image(vk->device, vk->IT1_image.a.image,
vk->IT1_format, VK_IMAGE_ASPECT_COLOR_BIT);
vk->IT1_framebuffer = create_IT1_framebuffer(vk->device,
vk->IT1_view, vk->zbuffer_view, vk->render_pass_0,
@ -853,20 +847,21 @@ void recreate_vulkan_references_objects(state_r0* state){
for (size_t i = 0; i < vk->scene.generic_models.len; i++) {
GenericModelOnSceneMem* model = &vk->scene.generic_models.buf[i];
GenericModelTexVulkPointers P = (GenericModelTexVulkPointers){
.diffuse_view = margaret_create_view_for_image(vk->device, model->diffuse_texture->value.me.img.image,
model->diffuse_texture->value.me.img.format, VK_IMAGE_ASPECT_COLOR_BIT),
.normal_view = margaret_create_view_for_image(vk->device, model->normal_texture->value.me.img.image,
model->normal_texture->value.me.img.format, VK_IMAGE_ASPECT_COLOR_BIT),
.specular_view = margaret_create_view_for_image(vk->device, model->specular_texture->value.me.img.image,
model->specular_texture->value.me.img.format, VK_IMAGE_ASPECT_COLOR_BIT),
.diffuse_view = margaret_create_view_for_image(vk->device, model->diffuse_texture.a.image,
VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_ASPECT_COLOR_BIT),
.normal_view = margaret_create_view_for_image(vk->device, model->normal_texture.a.image,
VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_ASPECT_COLOR_BIT),
.specular_view = margaret_create_view_for_image(vk->device, model->specular_texture.a.image,
VK_FORMAT_R8_UNORM, VK_IMAGE_ASPECT_COLOR_BIT),
.p_0a_set_0 = margaret_allocate_descriptor_set( vk->device,
vk->descriptor_pool, vk->pipeline_hands_0a.descriptor_set_layout),
};
VecGenericModelTexVulkPointers_append(&vk->generic_model_tex_vulk_pointers, P);
// Configuring my descriptor sets, that I just allocated
// todo: create a separate function for that shit
VkDescriptorBufferInfo buffer_info_for_descriptor_0_in_set_0a = {
.buffer = vk->scene.pipeline0_ubo->value.me.buf.buffer,
.offset = 0, .range = sizeof(Pipeline0UBO),
.buffer = MargaretSubbuf_get_buffer(&vk->scene.pipeline0_ubo.device_local),
.offset = vk->scene.pipeline0_ubo.device_local.start, .range = sizeof(Pipeline0UBO),
};
VkDescriptorImageInfo image_info_for_descriptor_1_in_set_0a = {
.sampler = vk->linear_sampler, .imageView = P.diffuse_view,
@ -928,10 +923,10 @@ void recreate_vulkan_references_objects(state_r0* state){
vk->device, vk->descriptor_pool, vk->pipeline_hands_1.descriptor_set_layout);
// todo: update the others + ACTUALLY CARRY OUT DEVIEL LOCAL ALLOCATION MARAGERReeuqs request
// todo: separate set0 and set 1
VkDescriptorBufferInfo buffer_info_for_descriptor_0_in_set_0b = {
.buffer = vk->scene.pipeline0_ubo->value.me.buf.buffer,
.offset = 0, .range = sizeof(Pipeline0UBO),
.buffer = MargaretSubbuf_get_buffer(&vk->scene.pipeline0_ubo.device_local),
.offset = vk->scene.pipeline0_ubo.device_local.start, .range = sizeof(Pipeline0UBO),
};
VkDescriptorImageInfo image_info_for_descriptor_0_in_set_1 = {
.sampler = vk->nearest_sampler, .imageView = vk->IT1_view,
@ -961,12 +956,52 @@ void recreate_vulkan_references_objects(state_r0* state){
vkUpdateDescriptorSets(vk->device, ARRAY_SIZE(writes_in_descriptor_sets), writes_in_descriptor_sets, 0, NULL);
}
/* Needed for defragmentation of memory */
/* Stub: intended to destroy the recreatable reference objects (image views,
 * framebuffers, descriptor sets) so memory can be compacted and everything
 * rebuilt by recreate_vulkan_references_objects(). Not implemented yet. */
void destroy_vulkan_reference_objects(state_r0* state){
    vulkan_ctx_r0* vk = &state->vk;  /* unused until the todos below are done */
    // todo: vkdestro all the views all the framebuffers
    // todo: drop everything,
}
/* Called once per frame, after the previous transfer has completed: the "busy"
 * staging buffer is free again, but stale. Bring it up to date by copying the
 * current contents of the "updatable" staging buffer into it, then exchange
 * the two roles. This happens for generic model instances, shiny model
 * instances and the pipeline0 UBO. */
void Scene_swap_talking_buffers(Scene* scene){
    /* Generic model instance attributes: sync busy <- updatable, then swap. */
    for (size_t gi = 0; gi < scene->generic_models.len; gi++) {
        PatriciaBuf* attrs = &scene->generic_models.buf[gi].instance_attr;
        assert(attrs->count <= attrs->cap);
        assert(attrs->cap * sizeof(GenericMeshInstance) <= attrs->staging_updatable.len);
        assert(attrs->staging_updatable.len == attrs->staging_busy.len);
        memcpy(MargaretSubbuf_get_mapped(&attrs->staging_busy),
            MargaretSubbuf_get_mapped(&attrs->staging_updatable),
            attrs->count * sizeof(GenericMeshInstance));
        PatriciaBuf_swap_staging(attrs);
    }
    /* Same dance for shiny model instance attributes. */
    for (size_t si = 0; si < scene->shiny_models.len; si++) {
        PatriciaBuf* attrs = &scene->shiny_models.buf[si].instance_attr;
        assert(attrs->count <= attrs->cap);
        assert(attrs->cap * sizeof(ShinyMeshInstance) <= attrs->staging_updatable.len);
        assert(attrs->staging_updatable.len == attrs->staging_busy.len);
        memcpy(MargaretSubbuf_get_mapped(&attrs->staging_busy),
            MargaretSubbuf_get_mapped(&attrs->staging_updatable),
            attrs->count * sizeof(ShinyMeshInstance));
        PatriciaBuf_swap_staging(attrs);
    }
    /* Pipeline0 UBO: copy only the occupied prefix of each light array. */
    {
        Pipeline0Transfer* transfer = &scene->pipeline0_ubo;
        const Pipeline0UBO* fresh = (const Pipeline0UBO*)MargaretSubbuf_get_mapped(&transfer->staging_updatable);
        Pipeline0UBO* stale = (Pipeline0UBO*)MargaretSubbuf_get_mapped(&transfer->staging_busy);
        stale->point_light_count = fresh->point_light_count;
        stale->spotlight_count = fresh->spotlight_count;
        memcpy(stale->point_light_arr, fresh->point_light_arr,
            sizeof(Pipeline0PointLight) * fresh->point_light_count);
        memcpy(stale->spotlight_arr, fresh->spotlight_arr,
            sizeof(Pipeline0Spotlight) * fresh->spotlight_count);
        MargaretSubbuf swap_tmp = transfer->staging_updatable;
        transfer->staging_updatable = transfer->staging_busy;
        transfer->staging_busy = swap_tmp;
    }
}
void vulkano_frame_drawing(state_r0* state) {
check(vkWaitForFences(state->vk.device, 1, &state->vk.jane.in_flight_fence, VK_TRUE, UINT64_MAX) == VK_SUCCESS);
check(vkResetFences(state->vk.device, 1, &state->vk.jane.in_flight_fence) == VK_SUCCESS);
@ -985,6 +1020,8 @@ void vulkano_frame_drawing(state_r0* state) {
abortf("vkAcquireNextImageKHR");
}
Scene_swap_talking_buffers(&state->vk.scene);
state->vk.scene.color = (VkClearColorValue){{0, 0.5f, 0.9f, 1}};
mat4 projection_matrix = marie_perspective_projection_fov_mat4(
(float)state->width_confirmed, (float)state->height_confirmed,
@ -993,18 +1030,14 @@ void vulkano_frame_drawing(state_r0* state) {
mat4 camera_translation_matrix = marie_translation_mat4(vec3_minus(state->vk.scene.cam.pos));
mat4 t_mat = mat4_mul_mat4(projection_matrix, mat4_mul_mat4(camera_rotation_matrix, camera_translation_matrix));
if (state->vk.dt_transfer_required){
record_copying_entire_scene_from_staging_to_device_local(state->vk.transfer_command_buf, &state->vk.scene);
check(vkQueueSubmit(state->vk.queues.graphics_queue, 1, &(VkSubmitInfo){
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1,
.pCommandBuffers = (VkCommandBuffer[]){ state->vk.transfer_command_buf },
.signalSemaphoreCount = 1,
.pSignalSemaphores = (VkSemaphore[]){ state->vk.jane.in_frame_transfer_complete },
// todo: add waiting for device_local_movement command buffer
// todo: but first: write a use case for it
}, NULL) == VK_SUCCESS);
}
record_copying_entire_scene_from_staging_to_device_local(state->vk.transfer_command_buf, &state->vk.scene);
check(vkQueueSubmit(state->vk.queues.graphics_queue, 1, &(VkSubmitInfo){
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1,
.pCommandBuffers = (VkCommandBuffer[]){ state->vk.transfer_command_buf },
.signalSemaphoreCount = 1,
.pSignalSemaphores = (VkSemaphore[]){ state->vk.jane.in_frame_transfer_complete },
}, NULL) == VK_SUCCESS);
reset_and_record_command_buffer_0(
state->vk.rendering_command_buf_0, state->vk.render_pass_0,
@ -1021,23 +1054,14 @@ void vulkano_frame_drawing(state_r0* state) {
state->vk.swfb.extent,
state->sane_image_extent_limit, &state->vk.scene, state->vk.descriptor_set_for_pipeline_1);
VkSemaphore waiting_for_semaphores_if_dt_transfer_required[1] = {
state->vk.jane.in_frame_transfer_complete
};
VkPipelineStageFlags waiting_stages_if_dt_transfer_required[1] = {
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
};
check(vkQueueSubmit(state->vk.queues.graphics_queue, 1, &(VkSubmitInfo){
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
// We wait for `waiting_for_semaphores` before THESE stages
// waitSemaphoreCount specifies size for both pWaitSemaphores and pWaitDstStageMask
.waitSemaphoreCount = state->vk.dt_transfer_required ?
ARRAY_SIZE(waiting_for_semaphores_if_dt_transfer_required) : 0,
.pWaitSemaphores = state->vk.dt_transfer_required ?
waiting_for_semaphores_if_dt_transfer_required : NULL,
.pWaitDstStageMask = state->vk.dt_transfer_required ?
waiting_stages_if_dt_transfer_required : NULL,
.waitSemaphoreCount = 1,
.pWaitSemaphores = (VkSemaphore[]){state->vk.jane.in_frame_transfer_complete},
.pWaitDstStageMask = (VkPipelineStageFlags[]){
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
},
.commandBufferCount = 1,
.pCommandBuffers = (VkCommandBuffer[]){ state->vk.rendering_command_buf_0 },
@ -1081,7 +1105,6 @@ void vulkano_frame_drawing(state_r0* state) {
abortf("vkQueuePresentKHR");
}
state->vk.dt_transfer_required = false;
margaret_ns_time frame_B0 = margaret_clock_gettime_monotonic_raw();
state->frame_count_since_key++;
if (margaret_ns_time_sec_diff(state->prev_key_frame_time, frame_B0) > 1.0) {
@ -1092,93 +1115,13 @@ void vulkano_frame_drawing(state_r0* state) {
}
}
/* One buffer->image upload job for copying_buffer_to_image_color_aspect_record_cmd_buf:
 * the staging buffer holding the pixel data and its destination image. */
typedef struct {
    // todo: this iterator better be MargaretBA instead of MargaretMA
    MargaretMAIterator staging_buffer;  /* must occupy a Buffer (asserted by the consumer) */
    MargaretMAIterator image;           /* must occupy an Image (asserted by the consumer) */
} CommandForImageCopying;
#include "../../../../gen/l1/eve/r0/VecAndSpan_CommandForImageCopying.h"
/* Records, into `command_buffer`, a batched upload of several staging buffers
 * into their images (color aspect, mip 0, layer 0):
 *   1. one pipeline barrier transitioning every destination image
 *      UNDEFINED -> TRANSFER_DST_OPTIMAL,
 *   2. one vkCmdCopyBufferToImage per pair (whole image extent, tightly packed),
 *   3. one pipeline barrier transitioning every image
 *      TRANSFER_DST_OPTIMAL -> READ_ONLY_OPTIMAL for the consuming stage.
 * NOTE(review): the final layout is hardcoded to VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL
 * while the stage/access masks are parameters — confirm that is intended.
 * (destination_stage_mask, destination_access_mask) are probably
 * (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT) */
void copying_buffer_to_image_color_aspect_record_cmd_buf (
    VkCommandBuffer command_buffer, SpanCommandForImageCopying commands,
    VkPipelineStageFlags destination_stage_mask, VkAccessFlags destination_access_mask
){
    VecVkImageMemoryBarrier barriers = VecVkImageMemoryBarrier_new_reserved(commands.len);
    /* Pass 1: collect the "prepare for transfer" barrier for every image. */
    for (size_t i = 0; i < commands.len; i++) {
        CommandForImageCopying pair = commands.data[i];
        assert(pair.image->value.me.variant == MargaretMemoryOccupation_Image);
        assert(pair.staging_buffer->value.me.variant == MargaretMemoryOccupation_Buffer);
        VecVkImageMemoryBarrier_append(&barriers, (VkImageMemoryBarrier){
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .srcAccessMask = 0,
            .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,   /* previous contents discarded */
            .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = pair.image->value.me.img.image,
            .subresourceRange = (VkImageSubresourceRange){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0,
                .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
            },
        });
    }
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
        0 /* Flags */, 0, NULL, 0, NULL, barriers.len, barriers.buf);
    /* presumably truncates the Vec to length 0 while keeping its capacity, so
     * it can be refilled below without reallocating — TODO confirm _sink. */
    VecVkImageMemoryBarrier_sink(&barriers, 0);
    /* Pass 2: record the actual copies (bufferRowLength/ImageHeight = 0 means
     * the staging data is tightly packed). */
    for (size_t i = 0; i < commands.len; i++) {
        CommandForImageCopying pair = commands.data[i];
        VkBufferImageCopy region = {
            .bufferOffset = 0,
            .bufferRowLength = 0,
            .bufferImageHeight = 0,
            .imageSubresource = (VkImageSubresourceLayers){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel = 0, .baseArrayLayer = 0, .layerCount = 1,
            },
            .imageOffset = {0, 0, 0},
            .imageExtent = {
                .width = pair.image->value.me.img.width, .height = pair.image->value.me.img.height, .depth = 1
            },
        };
        vkCmdCopyBufferToImage(command_buffer, pair.staging_buffer->value.me.buf.buffer,
            pair.image->value.me.img.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
    }
    /* filling buffers Vec again */
    /* Pass 3: collect the "release to consumer" barrier for every image. */
    for (size_t i = 0; i < commands.len; i++) {
        CommandForImageCopying pair = commands.data[i];
        VecVkImageMemoryBarrier_append(&barriers, (VkImageMemoryBarrier){
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .dstAccessMask = destination_access_mask,
            .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .newLayout = VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = pair.image->value.me.img.image,
            .subresourceRange = (VkImageSubresourceRange){
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0,
                .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
            },
        });
    }
    vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, destination_stage_mask,
        0 /* Flags */, 0, NULL, 0, NULL, barriers.len, barriers.buf );
    VecVkImageMemoryBarrier_drop(barriers);
}
/* Wayland listener for the xdg_surface "configure" event.
 * Commits the most recently advertised window size, acknowledges the
 * configure, then immediately rebuilds the swapchain and renders a frame. */
static void main_h_xdg_surface_configure(void *data, struct xdg_surface *xdg_surface, uint32_t serial){
    state_r0 *st = data;
    printf("XDG surface configured! (%d %d)\n", st->width_heard, st->height_heard);
    st->width_confirmed = st->width_heard;
    st->height_confirmed = st->height_heard;
    xdg_surface_ack_configure(xdg_surface, serial);
    /* todo: recreating the swapchain on every single configure event is a
     * known hack and needs a proper fix */
    recreate_swapchain(st);
    vulkano_frame_drawing(st);
}
@ -1281,16 +1224,15 @@ static void main_h_wl_keyboard_key(
vec3 p = state->vk.scene.cam.pos;
p.y += 1.5f;
ShinyModelOnSceneMem* model = VecShinyModelOnSceneMem_mat(&state->vk.scene.shiny_models, 0);
assert(model->instance_vec_len >= 1);
ShinyMeshInstance* instances = (ShinyMeshInstance*)MargaretMAIterator_get_mapped(model->staging_instance_attr_buf);
assert(model->instance_attr.count >= 1);
ShinyMeshInstance* instances = (ShinyMeshInstance*)MargaretSubbuf_get_mapped(&model->instance_attr.staging_updatable);
instances[0].model_t = marie_translation_mat4(p);
Pipeline0UBO* ubo = (Pipeline0UBO*)MargaretMAIterator_get_mapped(state->vk.scene.pipeline0_staging_ubo);
Pipeline0UBO* ubo = (Pipeline0UBO*)MargaretSubbuf_get_mapped(&state->vk.scene.pipeline0_ubo.staging_updatable);
assert(ubo->point_light_count >= 1);
ubo->point_light_arr[0].pos = p;
printf("Point light source pos set to %f %f %f\n", p.x, p.y, p.z);
state->vk.dt_transfer_required = true;
} else if (keysym == XKB_KEY_2) {
state->vk.scene.hdr_factor /= 1.05f;
printf("hdr factor decreased to %f\n", state->vk.scene.hdr_factor);
@ -1602,12 +1544,20 @@ int main() {
abortf("Can't find device local memory\n");
}
// todo: replace staging MMA with MBA
vk->host_visible_coherent_mem = MargaretMemAllocator_new(vk->device, vk->physical_device,
vk->host_visible_mem_mv_command_buf, host_visible_coherent, mem_type_id_host_visible_coherent);
vk->staging_buffers = MargaretBufAllocator_new(vk->device, vk->physical_device,
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
mem_type_id_host_visible_coherent, 3, true, 16);
// todo: inquire about the uniform buffer alignment and storage buffer alignment
vk->dev_local_buffers = MargaretBufAllocator_new(vk->device, vk->physical_device,
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT
/* | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT */,
mem_type_id_device_local, 3, false, 16);
vk->dev_local_images = MargaretImgAllocator_new(vk->device, vk->physical_device,
mem_type_id_device_local, 16);
vk->device_local_mem = MargaretMemAllocator_new(vk->device, vk->physical_device,
vk->device_local_mem_mv_command_buf, device_local, mem_type_id_device_local);
vk->jane = Jane_r0_create(vk->device);
/* Luckily, swapchain image allocation is not managed by me */
@ -1622,74 +1572,62 @@ int main() {
VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
GenericMeshInSceneTemplate_for_log(10, 2, 6));
// VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
// GenericMeshInSceneTemplate_for_log(5, 5, 10));
// VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
// GenericMeshInSceneTemplate_for_log(1, 10, 4));
// VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
// GenericMeshInSceneTemplate_for_log(2, 1, 6));
VecShinyMeshTopology_append(&vk->scene_template.shiny_models, generate_shiny_rhombicuboctahedron(0.5f));
// VecShinyMeshInSceneTemplate_append(&vk->scene_template.shiny_models, (ShinyMeshInSceneTemplate){
// .topology = generate_shiny_cube(0.5f), .max_instance_count = 5
// });
MargaretMemAllocatorRequests initial_req_for_staging = MargaretMemAllocatorRequests_new();
MargaretMemAllocatorRequests initial_req_for_device_local = MargaretMemAllocatorRequests_new();
VecGenericMeshInSceneTemplate_append(&vk->scene_template.generic_models,
GenericMeshInSceneTemplate_for_log(5, 5, 10));
VecShinyMeshTopology_append(&vk->scene_template.shiny_models, generate_shiny_rhombicuboctahedron(0.3f));
VecShinyMeshTopology_append(&vk->scene_template.shiny_models, generate_shiny_cube(0.2f));
VecGenericModelOnSceneMem generic_model_mem = VecGenericModelOnSceneMem_new();
VecShinyModelOnSceneMem shiny_model_mem = VecShinyModelOnSceneMem_new();
for (size_t i = 0; i < vk->scene_template.generic_models.len; i++) {
const GenericMeshInSceneTemplate* template = &vk->scene_template.generic_models.buf[i];
// TextureDataR8G8B8A8 pixe
GenericModelOnSceneMem mem;
mem.indexes = template->topology.indexes.len;
mem.instance_vec_capacity = 100;
mem.instance_vec_len = 0;
mem.instance_attr.cap = 100;
// todo: patricia method should manage this, not me
mem.instance_attr.staging_busy = MargaretBufAllocator_alloc(&vk->staging_buffers,
mem.instance_attr.cap * sizeof(GenericMeshInstance));
mem.instance_attr.staging_updatable = MargaretBufAllocator_alloc(&vk->staging_buffers,
mem.instance_attr.cap * sizeof(GenericMeshInstance));
mem.instance_attr.device_local = MargaretBufAllocator_alloc(&vk->dev_local_buffers,
mem.instance_attr.cap * sizeof(GenericMeshInstance));
mem.instance_attr.count = 0;
mem.staging_vbo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
template->topology.vertices.len * sizeof(GenericMeshVertex), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_ebo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
template->topology.indexes.len * sizeof(U32), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_instance_attr_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
mem.instance_vec_capacity * sizeof(GenericMeshInstance), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true);
mem.staging_vbo = MargaretBufAllocator_alloc(&vk->staging_buffers,
template->topology.vertices.len * sizeof(GenericMeshVertex));
mem.staging_ebo = MargaretBufAllocator_alloc(&vk->staging_buffers,
template->topology.indexes.len * sizeof(U32));
mem.pixels_diffuse = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&template->diffuse_texture_path)),
mem.pixels_normal = TextureDataR8G8B8A8_read_from_png_nofail(VecU8_to_span(&template->normal_texture_path)),
mem.pixels_specular = TextureDataR8_read_from_png_nofail(VecU8_to_span(&template->specular_texture_path)),
mem.staging_diffuse_tex_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
mem.pixels_diffuse.pixels.len * sizeof(cvec4), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_normal_tex_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
mem.pixels_normal.pixels.len * sizeof(cvec4), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_specular_tex_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
mem.pixels_specular.pixels.len * sizeof(U8), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_diffuse_tex_buf = MargaretBufAllocator_alloc(&vk->staging_buffers,
mem.pixels_diffuse.pixels.len * sizeof(cvec4));
mem.staging_normal_tex_buf = MargaretBufAllocator_alloc(&vk->staging_buffers,
mem.pixels_normal.pixels.len * sizeof(cvec4));
mem.staging_specular_tex_buf = MargaretBufAllocator_alloc(&vk->staging_buffers,
mem.pixels_specular.pixels.len * sizeof(U8));
mem.vbo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
template->topology.vertices.len * sizeof(GenericMeshVertex),
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.ebo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
template->topology.indexes.len * sizeof(U32),
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.instance_attr_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
mem.instance_vec_capacity * sizeof(GenericMeshInstance),
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.diffuse_texture = MargaretMemAllocatorRequests_alloc_image(&initial_req_for_device_local,
mem.vbo = MargaretBufAllocator_alloc(&vk->dev_local_buffers,
template->topology.vertices.len * sizeof(GenericMeshVertex));
mem.ebo = MargaretBufAllocator_alloc(&vk->dev_local_buffers,
template->topology.indexes.len * sizeof(U32));
mem.diffuse_texture = MargaretImgAllocator_alloc(&vk->dev_local_images,
mem.pixels_diffuse.width, mem.pixels_diffuse.height,
VK_FORMAT_R8G8B8A8_SRGB,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT, true);
mem.normal_texture = MargaretMemAllocatorRequests_alloc_image(&initial_req_for_device_local,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
mem.normal_texture = MargaretImgAllocator_alloc(&vk->dev_local_images,
mem.pixels_normal.width, mem.pixels_normal.height,
VK_FORMAT_R8G8B8A8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT, true);
mem.specular_texture = MargaretMemAllocatorRequests_alloc_image(&initial_req_for_device_local,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
mem.specular_texture = MargaretImgAllocator_alloc(&vk->dev_local_images,
mem.pixels_specular.width, mem.pixels_specular.height,
VK_FORMAT_R8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT, true);
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
VecGenericModelOnSceneMem_append(&generic_model_mem, mem);
}
@ -1697,75 +1635,90 @@ int main() {
const ShinyMeshTopology* temp_topology = &vk->scene_template.shiny_models.buf[i];
ShinyModelOnSceneMem mem;
mem.indexes = temp_topology->indexes.len;
mem.instance_vec_capacity = 100;
mem.instance_vec_len = 0;
mem.staging_vbo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
temp_topology->vertices.len * sizeof(ShinyMeshVertex), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_ebo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
temp_topology->indexes.len * sizeof(U32), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
mem.staging_instance_attr_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
mem.instance_vec_capacity * sizeof(ShinyMeshInstance), VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true);
mem.instance_attr.cap = 100;
// todo: patricia method should manage this, not me
mem.instance_attr.staging_busy = MargaretBufAllocator_alloc(&vk->staging_buffers,
mem.instance_attr.cap * sizeof(ShinyMeshInstance));
mem.instance_attr.staging_updatable = MargaretBufAllocator_alloc(&vk->staging_buffers,
mem.instance_attr.cap * sizeof(ShinyMeshInstance));
mem.instance_attr.device_local = MargaretBufAllocator_alloc(&vk->dev_local_buffers,
mem.instance_attr.cap * sizeof(ShinyMeshInstance));
mem.instance_attr.count = 0;
mem.vbo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
temp_topology->vertices.len * sizeof(ShinyMeshVertex),
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.ebo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
temp_topology->indexes.len * sizeof(U32),
VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.instance_attr_buf = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
mem.instance_vec_capacity * sizeof(ShinyMeshInstance),
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
mem.staging_vbo = MargaretBufAllocator_alloc(&vk->staging_buffers,
temp_topology->vertices.len * sizeof(ShinyMeshVertex));
mem.staging_ebo = MargaretBufAllocator_alloc(&vk->staging_buffers,
temp_topology->indexes.len * sizeof(U32));
mem.vbo = MargaretBufAllocator_alloc(&vk->dev_local_buffers,
temp_topology->vertices.len * sizeof(ShinyMeshVertex));
mem.ebo = MargaretBufAllocator_alloc(&vk->dev_local_buffers,
temp_topology->indexes.len * sizeof(U32));
VecShinyModelOnSceneMem_append(&shiny_model_mem, mem);
}
MargaretMAIterator pipeline0_staging_ubo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_staging,
sizeof(Pipeline0UBO),
VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true);
MargaretMAIterator pipeline0_ubo = MargaretMemAllocatorRequests_alloc_buf(&initial_req_for_device_local,
sizeof(Pipeline0UBO),
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, true);
Pipeline0Transfer pipeline0_ubo = (Pipeline0Transfer){
.staging_busy = MargaretBufAllocator_alloc(&vk->staging_buffers, sizeof(Pipeline0UBO)),
.staging_updatable = MargaretBufAllocator_alloc(&vk->staging_buffers, sizeof(Pipeline0UBO)),
.device_local = MargaretBufAllocator_alloc(&vk->dev_local_buffers, sizeof(Pipeline0UBO)),
};
vk->scene = Scene_new(generic_model_mem, shiny_model_mem, pipeline0_staging_ubo, pipeline0_ubo);
vk->device_IT1_image = MargaretMemAllocatorRequests_alloc_image(&initial_req_for_device_local,
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, vk->IT1_format, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
/* We do layout transitions in renderpass (the easy way) + we don't copy this image */
VK_IMAGE_LAYOUT_UNDEFINED, 0, 0, false);
vk->device_zbuffer_image = MargaretMemAllocatorRequests_alloc_image(&initial_req_for_device_local,
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, vk->zbuffer_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
/* We do layout transitions in renderpass (the easy way) + we don't copy this image */
VK_IMAGE_LAYOUT_UNDEFINED, 0, 0, false);
MargaretMemAllocatorDemands tunturun = MargaretMemAllocator_carry_out_request(&vk->host_visible_coherent_mem, &initial_req_for_staging);
check(tunturun == MARGARET_MA_DEMANDS_DEFRAGMENTATION_BIT);
tunturun = MargaretMemAllocator_carry_out_request(&vk->device_local_mem, &initial_req_for_device_local);
check(tunturun == MARGARET_MA_DEMANDS_DEFRAGMENTATION_BIT);
vk->scene = Scene_new(generic_model_mem, shiny_model_mem, pipeline0_ubo);
vk->IT1_image = MargaretImgAllocator_alloc(&vk->dev_local_images,
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, vk->IT1_format, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
vk->zbuffer_image = MargaretImgAllocator_alloc(&vk->dev_local_images,
MAX_WIN_WIDTH, MAX_WIN_HEIGHT, vk->zbuffer_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
{
GenericModelOnSceneMem *model_g = VecGenericModelOnSceneMem_mat(&vk->scene.generic_models, 0);
GenericMeshInstance* g_instances = (GenericMeshInstance*)MargaretMAIterator_get_mapped(model_g->staging_instance_attr_buf);
assert(model_g->instance_vec_capacity == 100);
GenericMeshInstance* g_instances = (GenericMeshInstance*)MargaretSubbuf_get_mapped(&model_g->instance_attr.staging_updatable);
assert(model_g->instance_attr.cap >= 100);
for (int X = 0; X < 10; X++) {
for (int Z = 0; Z < 10; Z++) {
g_instances[X * 10 + Z] = (GenericMeshInstance){
.model_t = marie_translation_mat4((vec3){11.f * (float)X, -6, 4.f * (float)Z}) };
}
}
model_g->instance_vec_len = 100;
model_g->instance_attr.count = 100;
GenericModelOnSceneMem *model_g2 = VecGenericModelOnSceneMem_mat(&vk->scene.generic_models, 1);
GenericMeshInstance* g2_instances = (GenericMeshInstance*)MargaretSubbuf_get_mapped(&model_g2->instance_attr.staging_updatable);
assert(model_g2->instance_attr.cap >= 25);
for (int X = 0; X < 5; X++) {
for (int Z = 0; Z < 5; Z++) {
g2_instances[X * 5 + Z] = (GenericMeshInstance){
.model_t = marie_translation_mat4((vec3){6.f * (float)X, -12, 6.f * (float)Z}) };
}
}
model_g2->instance_attr.count = 25;
ShinyModelOnSceneMem* model_sh = VecShinyModelOnSceneMem_mat(&vk->scene.shiny_models, 0);
ShinyMeshInstance* sh_instances = (ShinyMeshInstance*)MargaretMAIterator_get_mapped(model_sh->staging_instance_attr_buf);
assert(model_sh->instance_vec_capacity == 100);
ShinyMeshInstance* sh_instances = (ShinyMeshInstance*)MargaretSubbuf_get_mapped(&model_sh->instance_attr.staging_updatable);
assert(model_sh->instance_attr.cap >= 100);
for (int X = 0; X < 10; X++) {
for (int Z = 0; Z < 10; Z++) {
sh_instances[X * 10 + Z] = (ShinyMeshInstance){
.model_t = marie_translation_mat4((vec3){11.f * (float)X, 10, 4.f * (float)Z}),
.model_t = marie_translation_mat4((vec3){11.f * (float)X - 20, 10, 4.f * (float)Z - 10}),
.color_on = {0, 1, 0}, .color_off = {1, 0.4f, 0.5f} };
}
}
model_sh->instance_vec_len = 100;
model_sh->instance_attr.count = 100;
Pipeline0UBO* ubo = (Pipeline0UBO*)MargaretMAIterator_get_mapped(vk->scene.pipeline0_staging_ubo);
ShinyModelOnSceneMem* model_sh2 = VecShinyModelOnSceneMem_mat(&vk->scene.shiny_models, 1);
ShinyMeshInstance* sh2_instances = (ShinyMeshInstance*)MargaretSubbuf_get_mapped(&model_sh2->instance_attr.staging_updatable);
assert(model_sh2->instance_attr.cap >= 25);
for (int X = 0; X < 25; X++) {
for (int Z = 0; Z < 25; Z++) {
sh2_instances[X * 5 + Z] = (ShinyMeshInstance){
.model_t = marie_translation_mat4((vec3){3.f * (float)X - 20, 12, 3.f * (float)Z - 14}),
.color_on = {0.1f, 0.1f, 1}, .color_off = {0.3f, 0.4f, 1.f} };
}
}
model_sh2->instance_attr.count = 25;
Pipeline0UBO* ubo = (Pipeline0UBO*)MargaretSubbuf_get_mapped(&vk->scene.pipeline0_ubo.staging_updatable);
assert(pipeline_0_ubo_point_light_max_count >= 100);
ubo->point_light_count = 100;
ubo->spotlight_count = 0;
@ -1778,7 +1731,6 @@ int main() {
}
}
ubo->point_light_arr[0].color = (vec3){100, 100, 100};
// todo: synchronize them with my cool light sources)
}
/* Here we both copy from topology + textures to staging buffers and record commands that will copy staging data
@ -1786,27 +1738,24 @@ int main() {
margaret_reset_and_begin_command_buffer(vk->transfer_command_buf);
SceneTemplate_copy_initial_model_topology_cmd_buf_recording(
&vk->scene_template, &vk->scene, vk->transfer_command_buf);
{
VecCommandForImageCopying init = VecCommandForImageCopying_new_reserved(3 * vk->scene.generic_models.len);
for (U64 i = 0; i < vk->scene.generic_models.len; i++) {
GenericModelOnSceneMem* model = &vk->scene.generic_models.buf[i];
memcpy(MargaretMAIterator_get_mapped(model->staging_diffuse_tex_buf), model->pixels_diffuse.pixels.buf,
TextureDataR8G8B8A8_get_size_in_bytes(&model->pixels_diffuse));
memcpy(MargaretMAIterator_get_mapped(model->staging_normal_tex_buf), model->pixels_normal.pixels.buf,
TextureDataR8G8B8A8_get_size_in_bytes(&model->pixels_normal));
memcpy(MargaretMAIterator_get_mapped(model->staging_specular_tex_buf), model->pixels_specular.pixels.buf,
TextureDataR8_get_size_in_bytes(&model->pixels_specular));
VecCommandForImageCopying_append(&init, (CommandForImageCopying){
.staging_buffer = model->staging_diffuse_tex_buf, .image = model->diffuse_texture });
VecCommandForImageCopying_append(&init, (CommandForImageCopying){
.staging_buffer = model->staging_normal_tex_buf, .image = model->normal_texture });
VecCommandForImageCopying_append(&init, (CommandForImageCopying){
.staging_buffer = model->staging_specular_tex_buf, .image = model->specular_texture });
}
for (U64 i = 0; i < vk->scene.generic_models.len; i++) {
GenericModelOnSceneMem* model = &vk->scene.generic_models.buf[i];
memcpy(MargaretSubbuf_get_mapped(&model->staging_diffuse_tex_buf), model->pixels_diffuse.pixels.buf,
TextureDataR8G8B8A8_get_size_in_bytes(&model->pixels_diffuse));
memcpy(MargaretSubbuf_get_mapped(&model->staging_normal_tex_buf), model->pixels_normal.pixels.buf,
TextureDataR8G8B8A8_get_size_in_bytes(&model->pixels_normal));
memcpy(MargaretSubbuf_get_mapped(&model->staging_specular_tex_buf), model->pixels_specular.pixels.buf,
TextureDataR8_get_size_in_bytes(&model->pixels_specular));
copying_buffer_to_image_color_aspect_record_cmd_buf(vk->transfer_command_buf,
VecCommandForImageCopying_to_span(&init), VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
VecCommandForImageCopying_drop(init);
margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(vk->transfer_command_buf,
&model->staging_diffuse_tex_buf, &model->diffuse_texture, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(vk->transfer_command_buf,
&model->staging_normal_tex_buf, &model->normal_texture, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(vk->transfer_command_buf,
&model->staging_specular_tex_buf, &model->specular_texture, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
}
margaret_end_command_buffer(vk->transfer_command_buf);
check(vkQueueSubmit(vk->queues.graphics_queue, 1, &(VkSubmitInfo){
@ -1819,7 +1768,6 @@ int main() {
state.prev_key_frame_time = margaret_clock_gettime_monotonic_raw();
state.frame_count_since_key = 0;
/* Will happen mid-frame */
vk->dt_transfer_required = true;
printf("ENTERING WAYLAND MAINLOOP\n");

View File

@ -5,46 +5,56 @@
#include "../../margaret/vulkan_utils.h"
/* An element array mirrored across three GPU sub-buffers.
 * The CPU writes new data into `staging_updatable` (the mapped, host-visible
 * copy); `staging_busy` is presumably the staging copy currently consumed by
 * an in-flight transfer — TODO confirm against the transfer code; the swap of
 * roles is done by PatriciaBuf_swap_staging(). `device_local` is the
 * device-side destination buffer the staging data is copied into. */
typedef struct {
U64 count;
MargaretSubbuf staging_busy;
MargaretSubbuf staging_updatable;
MargaretSubbuf device_local;
/* Capacity in elements, not bytes (callers multiply by sizeof(element)
 * when allocating each of the three sub-buffers). */
U64 cap; /* All 3 buffers are synced to the same capacity */
} PatriciaBuf;
/* Exchanges the roles of the two staging sub-buffers: the previously busy
 * copy becomes the one the host may update, and vice versa. */
void PatriciaBuf_swap_staging(PatriciaBuf* self){
    MargaretSubbuf previously_busy = self->staging_busy;
    self->staging_busy = self->staging_updatable;
    self->staging_updatable = previously_busy;
}
typedef struct {
size_t indexes;
U64 instance_vec_len;
U64 instance_vec_capacity;
MargaretSubbuf staging_vbo;
MargaretSubbuf staging_ebo;
// todo: replace TextureDataXXX with MargaretPngPromises
MargaretMAIterator staging_vbo;
MargaretMAIterator staging_ebo;
MargaretMAIterator staging_instance_attr_buf;
TextureDataR8G8B8A8 pixels_diffuse;
TextureDataR8G8B8A8 pixels_normal;
TextureDataR8 pixels_specular;
MargaretMAIterator staging_diffuse_tex_buf;
MargaretMAIterator staging_normal_tex_buf;
MargaretMAIterator staging_specular_tex_buf;
MargaretSubbuf staging_diffuse_tex_buf;
MargaretSubbuf staging_normal_tex_buf;
MargaretSubbuf staging_specular_tex_buf;
MargaretMAIterator vbo;
MargaretMAIterator ebo;
MargaretMAIterator instance_attr_buf;
MargaretMAIterator diffuse_texture;
MargaretMAIterator normal_texture;
MargaretMAIterator specular_texture;
MargaretSubbuf vbo;
MargaretSubbuf ebo;
PatriciaBuf instance_attr;
// todo: store dimensions of these images
MargaretImg diffuse_texture;
MargaretImg normal_texture;
MargaretImg specular_texture;
} GenericModelOnSceneMem;
#include "../../../../gen/l1/eve/r0/VecGenericModelOnSceneMem.h"
typedef struct {
size_t indexes;
U64 instance_vec_capacity;
U64 instance_vec_len;
MargaretMAIterator staging_vbo;
MargaretMAIterator staging_ebo;
MargaretMAIterator staging_instance_attr_buf;
MargaretSubbuf staging_vbo;
MargaretSubbuf staging_ebo;
MargaretMAIterator vbo;
MargaretMAIterator ebo;
MargaretMAIterator instance_attr_buf;
MargaretSubbuf vbo;
MargaretSubbuf ebo;
PatriciaBuf instance_attr;
} ShinyModelOnSceneMem;
#include "../../../../gen/l1/eve/r0/VecShinyModelOnSceneMem.h"
@ -99,30 +109,36 @@ void CamControlInfo_update_direction(CamControlInfo* self, int win_width, int wi
self->cam_basis = marie_simple_camera_rot_m_basis_in_cols(yaw, pitch, 0);
}
/* Triple sub-buffer set for streaming the pipeline-0 UBO to the GPU.
 * `staging_updatable` is the host-writable copy (it is the one mapped via
 * MargaretSubbuf_get_mapped elsewhere in this file); `staging_busy` is
 * presumably the staging copy owned by an in-flight transfer — TODO confirm;
 * `device_local` is the device-side uniform buffer that is ultimately read
 * by the shaders. Same layout/roles as the staging trio in PatriciaBuf. */
typedef struct {
MargaretSubbuf staging_busy;
MargaretSubbuf staging_updatable;
MargaretSubbuf device_local;
} Pipeline0Transfer;
/* Non copyable */
typedef struct {
VecGenericModelOnSceneMem generic_models;
VecShinyModelOnSceneMem shiny_models;
VkClearColorValue color;
float gamma_correction_factor;
float hdr_factor;
float lsd_factor;
float anim_time; // A timer, passed to functions that push push constants
MargaretMAIterator pipeline0_staging_ubo;
MargaretMAIterator pipeline0_ubo;
/* point_light_vec_len and spotlight_vec_len are stored in staging (and also device local) buffers */
Pipeline0Transfer pipeline0_ubo;
CamControlInfo cam;
vec3 funny_vector;
} Scene;
Scene Scene_new(VecGenericModelOnSceneMem generic_models, VecShinyModelOnSceneMem shiny_models,
MargaretMAIterator pipeline0_staging_ubo, MargaretMAIterator pipeline0_ubo) {
Pipeline0Transfer pipeline0_ubo) {
return (Scene){.generic_models = generic_models, .shiny_models = shiny_models,
.color = {.float32 = {0, 0, 0, 1}},
.gamma_correction_factor = 2.2f, .hdr_factor = 1, .lsd_factor = 0, .anim_time = 0,
.pipeline0_staging_ubo = pipeline0_staging_ubo, .pipeline0_ubo = pipeline0_ubo,
.cam = CamControlInfo_new(), .funny_vector = {0, 0, 0}
.pipeline0_ubo = pipeline0_ubo, .cam = CamControlInfo_new(), .funny_vector = {0, 0, 0}
};
}
@ -143,17 +159,17 @@ void SceneTemplate_copy_initial_model_topology_cmd_buf_recording(
const GenericModelOnSceneMem *mm = VecGenericModelOnSceneMem_at(&scene->generic_models, mi);
size_t vbo_len = mt->topology.vertices.len * sizeof(GenericMeshVertex);
GenericMeshVertex* staging_vbo = (GenericMeshVertex*)MargaretMAIterator_get_mapped(mm->staging_vbo);
assert(mm->vbo.len >= vbo_len);
GenericMeshVertex* staging_vbo = (GenericMeshVertex*)MargaretSubbuf_get_mapped(&mm->staging_vbo);
memcpy(staging_vbo, mt->topology.vertices.buf, vbo_len);
vkCmdCopyBuffer(command_buffer, mm->staging_vbo->value.me.buf.buffer, mm->vbo->value.me.buf.buffer,
1, &(VkBufferCopy){ .srcOffset = 0, .dstOffset = 0, .size = vbo_len});
margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_vbo, &mm->vbo);
assert(mt->topology.indexes.len == mm->indexes);
size_t ebo_len = mt->topology.indexes.len * sizeof(U32);
U32* staging_ebo = (U32*)MargaretMAIterator_get_mapped(mm->staging_ebo);
assert(mm->ebo.len >= ebo_len);
U32* staging_ebo = (U32*)MargaretSubbuf_get_mapped(&mm->staging_ebo);
memcpy(staging_ebo, mt->topology.indexes.buf, ebo_len);
vkCmdCopyBuffer(command_buffer, mm->staging_ebo->value.me.buf.buffer, mm->ebo->value.me.buf.buffer,
1, &(VkBufferCopy){.srcOffset = 0, .dstOffset = 0, .size = ebo_len});
margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_ebo, &mm->ebo);
}
for (size_t mi = 0; mi < scene_template->shiny_models.len; mi++) {
@ -161,35 +177,17 @@ void SceneTemplate_copy_initial_model_topology_cmd_buf_recording(
const ShinyModelOnSceneMem *mm = VecShinyModelOnSceneMem_at(&scene->shiny_models, mi);
size_t vbo_len = mt->vertices.len * sizeof(ShinyMeshVertex);
ShinyMeshVertex* staging_vbo = (ShinyMeshVertex*)MargaretMAIterator_get_mapped(mm->staging_vbo);
assert(mm->vbo.len >= vbo_len);
ShinyMeshVertex* staging_vbo = (ShinyMeshVertex*)MargaretSubbuf_get_mapped(&mm->staging_vbo);
memcpy(staging_vbo, mt->vertices.buf, vbo_len);
vkCmdCopyBuffer(command_buffer, mm->staging_vbo->value.me.buf.buffer, mm->vbo->value.me.buf.buffer,
1, &(VkBufferCopy){ .srcOffset = 0, .dstOffset = 0, .size = vbo_len});
margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_vbo, &mm->vbo);
assert(mt->indexes.len == mm->indexes);
size_t ebo_len = mt->indexes.len * sizeof(U32);
U32* staging_ebo = (U32*)MargaretMAIterator_get_mapped(mm->staging_ebo);
assert(mm->ebo.len >= ebo_len);
U32* staging_ebo = (U32*)MargaretSubbuf_get_mapped(&mm->staging_ebo);
memcpy(staging_ebo, mt->indexes.buf, ebo_len);
vkCmdCopyBuffer(command_buffer, mm->staging_ebo->value.me.buf.buffer, mm->ebo->value.me.buf.buffer,
1, &(VkBufferCopy){.srcOffset = 0, .dstOffset = 0, .size = ebo_len});
}
for (size_t mi = 0; mi < scene_template->generic_models.len; mi++) {
const GenericMeshInSceneTemplate* mt = VecGenericMeshInSceneTemplate_at(&scene_template->generic_models, mi);
const GenericModelOnSceneMem *mm = VecGenericModelOnSceneMem_at(&scene->generic_models, mi);
size_t vbo_len = mt->topology.vertices.len * sizeof(GenericMeshVertex);
GenericMeshVertex* staging_vbo = (GenericMeshVertex*)MargaretMAIterator_get_mapped(mm->staging_vbo);
memcpy(staging_vbo, mt->topology.vertices.buf, vbo_len);
vkCmdCopyBuffer(command_buffer, mm->staging_vbo->value.me.buf.buffer, mm->vbo->value.me.buf.buffer,
1, &(VkBufferCopy){ .srcOffset = 0, .dstOffset = 0, .size = vbo_len});
assert(mt->topology.indexes.len == mm->indexes);
size_t ebo_len = mt->topology.indexes.len * sizeof(U32);
U32* staging_ebo = (U32*)MargaretMAIterator_get_mapped(mm->staging_ebo);
memcpy(staging_ebo, mt->topology.indexes.buf, ebo_len);
vkCmdCopyBuffer(command_buffer, mm->staging_ebo->value.me.buf.buffer, mm->ebo->value.me.buf.buffer,
1, &(VkBufferCopy){.srcOffset = 0, .dstOffset = 0, .size = ebo_len});
margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_ebo, &mm->ebo);
}
}