Compare commits

...

16 Commits

Author SHA1 Message Date
0187ed9442 I think I know what I am doing. Wait... No, I am not 2026-02-16 00:14:52 +03:00
d364b88db4 ... 2026-02-16 00:13:40 +03:00
360b8410a6 Fixed bug in physics simulator 2026-02-16 00:13:11 +03:00
3f52c14011 Idk what i am doing 2026-02-15 18:27:55 +03:00
a8ad12a9e7 Compute queue added 2026-02-15 11:21:00 +03:00
c17fe88e5d Organized vulkan queue creation. Added compute queue 2026-02-15 11:19:48 +03:00
15089cf7b5 I did the thing, I moved all textures into one big descriptor array. Now each frame looks up a texture in an array. But this has absolutely destroyed performance 2026-02-14 22:35:47 +03:00
2367ce1e9d MargaretImgAllocation now stores a direct pointer to MargaretImgAllocatorOneBlock. And both margaretImgAllocator and margaretBufAllocator blocks store pointers to allocators that created them. Memory allocators (buffer, images) are now stored on heap. Now I can do a simplification of allocations management: I don't need to pass allocator to allocation method. Though I hadn't refactored that yet... 2026-02-14 03:00:03 +03:00
248b81f2ec Omg, I just deleted all the crap I wrote 12.02. I made static resource transfer SO MUCH EASIER. But in exchange for simplicity, the rules for when you can create and delete meshes and font faces are ultra-convoluted. Because I removed all the deletions&copying queues. Everything is super-low-level now 2026-02-14 02:02:04 +03:00
b05c64a131 Clion keeps newlinging my frigging IFS 2026-02-13 18:58:35 +03:00
bca52758cc Split 0a pipeline set into light config set and texture set. Replaced light array ubo with light array storage buffer. But while writing this I realized that my current approach to staging buffer handling is trash, so I will need to rewrite everything I wrote yesterday :( 2026-02-13 18:51:23 +03:00
8cc0b43a5d Wow! I removed std140 padding of matrices in geom.h and nothing broke 2026-02-12 21:05:22 +03:00
5b8ecd8020 Little refactoring of alice: Added Abigail utility (transfer_in_mainloop.h). Buffers/textures that get copied to device local memory and then never modified occurred many times in alice and lucy and were generally worth moving into a separate class. 2026-02-12 20:58:23 +03:00
71f73964cf Stashing progress... Don't take this commit seriously 2026-02-12 17:54:11 +03:00
4f06ecb7ac Fixed a dum l_adele Makefile bug 2026-02-11 19:34:27 +03:00
8c928841ad Fixed dumb color blending bug in margaret. Discovered it when set non-black wallpaper 2026-02-11 19:12:49 +03:00
19 changed files with 1023 additions and 947 deletions

53
.clang-format Normal file
View File

@ -0,0 +1,53 @@
---
# clang-format configuration: LLVM base style, 4-space indent, 120-column limit,
# attached braces except before `catch`/`else`/`while` (see BraceWrapping below).
Language: Cpp
BasedOnStyle: LLVM
# Outdent access labels (`public:` etc.) back toward the class brace.
AccessModifierOffset: -4
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignOperands: false
AlignTrailingComments: false
AlwaysBreakTemplateDeclarations: Yes
# Effective only because BreakBeforeBraces is set to Custom below.
BraceWrapping:
AfterCaseLabel: false
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: true
BeforeElse: true
BeforeLambdaBody: false
BeforeWhile: true
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakBeforeBraces: Custom
BreakConstructorInitializers: AfterColon
BreakConstructorInitializersBeforeComma: false
ColumnLimit: 120
ConstructorInitializerAllOnOneLineOrOnePerLine: false
# Include ordering: system includes first, then quoted project includes, then the rest.
IncludeCategories:
- Regex: '^<.*'
Priority: 1
- Regex: '^".*'
Priority: 2
- Regex: '.*'
Priority: 3
IncludeIsMainRegex: '([-_](test|unittest))?$'
IndentWidth: 4
InsertNewlineAtEOF: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 2
NamespaceIndentation: All
PointerAlignment: Left
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
TabWidth: 4
...

View File

@ -9,7 +9,7 @@ HEADERS_src_l1_5 = $(HEADERS_gen_l1) $(call find_headers,l1_5)
#HEADERS_gen_l1_5 := $(HEADERS_src_l1_5) gen/l1_5/dorothy.txt
HEADERS_gen_l1_5 := gen/l1_5/dorothy.txt
ASSETS_src_l_adele = $($call find_assets,l_adele)
ASSETS_src_l_adele = $(call find_assets,l_adele)
ASSETS_gen_l_adele = gen/l_adele/dorothy.txt
HEADERS_src_l2 := $(HEADERS_gen_l1_5) $(call find_headers,l2)

View File

@ -10,8 +10,12 @@ void generate_code_for_alice_on_l1(){
generate_eve_span_company_for_primitive(l, ns, cstr("GenericMeshVertexInc"), true, true);
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyMeshVertexInc"), true, true);
/* Helpful utils for sticking your butt into rendering mainloop (transferring stuff to device local
* memory at the right time) */
generate_eve_span_company_for_primitive(l, ns, cstr("RefAliceBufferUplOnce"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("RefAliceTextureUplOnce"), true, false);
/* Engine stuff */
// todo: yes, maybe right now it is not primitive but I surely will make it primitive someday. Right now I don't care
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){
.T = cstr("AliceGenericMeshHand"), .t_primitive = true}, true);
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){
@ -23,4 +27,6 @@ void generate_code_for_alice_on_l1(){
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
.T = cstr("GenericMeshTopology")
});
generate_eve_span_company_for_primitive(l, ns, cstr("AliceTextureSlot"), true, false);
}

View File

@ -152,21 +152,13 @@ NODISCARD VecU8 codegen_name_xmatnm(SpanU8 xmat, int cols, int rows) {
return VecU8_fmt("%s%cx%c", xmat, '0' + cols, '0' + rows);
}
void codegen_append_xmatnm_struct_and_methods(VecU8* res,
SpanU8 xmat, SpanU8 xvec, SpanU8 memb, int cols, int rows, int sizeof_member
) {
void codegen_append_xmatnm_struct_and_methods(VecU8* res, SpanU8 xmat, SpanU8 xvec, SpanU8 memb, int cols, int rows) {
VecU8 xmatnm = codegen_name_xmatnm(xmat, cols, rows);
VecU8 xvecm = codegen_name_xvecn(xvec, rows);
/* Structure xmatnm. todo: NO, std140 is NOT OUR EVERYTHING. TODO: get rid of padding
* With columns padded to 16 bytes (for std140, std140 is our everything) */
int sv = (rows * sizeof_member) % 16;
VecU8_append_cstr(res, "typedef struct {\n");
for (int x = 0; x < cols; x++) {
VecU8_append_fmt(res, SPACE "%r %s;\n", xvecm, vec_field_name(x));
if (sv) {
VecU8_append_fmt(res, SPACE "char _padding_%u[%u];\n", (U64)x, (U64)(16 - sv));
}
}
VecU8_append_fmt(res, "} %r;\n\n", xmatnm);
/* xmatnm_new method */
@ -503,12 +495,10 @@ void codegen_append_xvec234_structs_and_cool_methods(VecU8* res, SpanU8 xvec, Sp
codegen_append_xvec3_method_cross(res, xvec);
}
void codegen_append_xmat234x234_structs_and_base_methods(VecU8* res,
SpanU8 xmat, SpanU8 xvec, SpanU8 memb, int sizeof_member
){
void codegen_append_xmat234x234_structs_and_base_methods(VecU8* res,SpanU8 xmat, SpanU8 xvec, SpanU8 memb){
for (int cols = 2; cols <= 4; cols++) {
for (int rows = 2; rows <= 4; rows++) {
codegen_append_xmatnm_struct_and_methods(res, xmat, xvec, memb, cols, rows, sizeof_member);
codegen_append_xmatnm_struct_and_methods(res, xmat, xvec, memb, cols, rows);
}
}
for (int cols = 2; cols <= 4; cols++) {
@ -529,10 +519,8 @@ void codegen_append_xmat234x234_structs_and_base_methods(VecU8* res,
codegen_append_xmat234_det_method(res, xmat, xvec, memb);
}
void codegen_append_xmat234x234_structs_and_cool_methods(VecU8* res,
SpanU8 xmat, SpanU8 xvec, SpanU8 memb, int sizeof_member
){
codegen_append_xmat234x234_structs_and_base_methods(res, xmat, xvec, memb, sizeof_member);
void codegen_append_xmat234x234_structs_and_cool_methods(VecU8* res, SpanU8 xmat, SpanU8 xvec, SpanU8 memb){
codegen_append_xmat234x234_structs_and_base_methods(res, xmat, xvec, memb);
codegen_append_xmat_inverse_methods(res, xmat, xvec, memb);
}
@ -552,9 +540,8 @@ void generate_geom_header() {
codegen_append_xvec234_structs_and_cool_methods(&res.result, cstr("vec"), cstr("float"), cstr("sqrtf"));
codegen_append_xvec234_structs_and_cool_methods(&res.result, cstr("dvec"), cstr("double"), cstr("sqrt"));
// todo: remove padding from matrix structure. VERY IMPORTANT!!! Add padding on the fly when transferring to vulkan
codegen_append_xmat234x234_structs_and_cool_methods(&res.result, cstr("mat"), cstr("vec"), cstr("float"), sizeof(float));
codegen_append_xmat234x234_structs_and_base_methods(&res.result, cstr("s64mat"), cstr("s64vec"), cstr("S64"), sizeof(S64));
codegen_append_xmat234x234_structs_and_cool_methods(&res.result, cstr("mat"), cstr("vec"), cstr("float"));
codegen_append_xmat234x234_structs_and_base_methods(&res.result, cstr("s64mat"), cstr("s64vec"), cstr("S64"));
finish_header(res);
}

View File

@ -24,6 +24,10 @@ void generate_margaret_eve_for_vulkan_utils() {
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
.T = cstr("BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment")});
generate_eve_span_company_for_non_primitive_non_clonable(l, ns, cstr("MargaretImgAllocatorOneBlock"), true, false);
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){.T = cstr("MargaretBufAllocatorOneBlock")}, true);
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){.T = cstr("MargaretImgAllocatorOneBlock")}, true);
/* Used in utilities such as Abigail */
generate_guarded_span_company_for_primitive(l, ns, cstr("MargaretSubbuf"),
cstr("#include \"../../../src/l2/margaret/vulkan_utils.h\"\n"), true, false);
}

View File

@ -30,7 +30,7 @@ NODISCARD VecU8 generate_List_template_instantiation(list_instantiation_op op, b
"} List%s;\n\n", /* op.T */
op.T, op.T);
VecU8_append_fmt(&res,
"#define List%s_new() {0}\n\n" /* op.T */
"#define List%s_new() ((List%s){0})\n\n" /* op.T, op.T */
"void List%s_drop(List%s self) {\n" /* op.T, op.T */
SPACE "ListNode%s* cur = self.first;\n" /* op.T */
SPACE "while (cur){\n"
@ -40,7 +40,7 @@ NODISCARD VecU8 generate_List_template_instantiation(list_instantiation_op op, b
SPACE SPACE "cur = next;\n"
SPACE "}\n"
"}\n\n",
op.T, op.T, op.T, op.T, op.T,
op.T, op.T, op.T, op.T, op.T, op.T,
op.t_primitive ? vcstr("") : VecU8_fmt(SPACE SPACE "%s_drop(cur->el);\n", op.T));
VecU8_append_fmt(&res,
"ListNode%s* List%s_insert(List%s* self, %s el) {\n" /* op.T, op.T, op.T, op.T */

View File

@ -34,7 +34,6 @@ GenericMeshTopology GenericMeshTopology_clone(const GenericMeshTopology* self) {
/* non-primitive */
typedef struct {
VecU8 diffuse_texture_path;
VecU8 normal_texture_path;
VecU8 specular_texture_path;
} AliceGenericMeshTexturePaths;
@ -124,10 +123,8 @@ typedef struct {
#define pipeline_0_ubo_point_light_max_count 120
#define pipeline_0_ubo_spotlight_max_count 20
/* This structure will be coupled with point_light and spotlight arrays */
typedef struct {
int point_light_count;
int spotlight_count;
char _padding_1[8];
Pipeline0PointLight point_light_arr[pipeline_0_ubo_point_light_max_count];
Pipeline0Spotlight spotlight_arr[pipeline_0_ubo_spotlight_max_count];
S32 point_light_count;
S32 spotlight_count;
} Pipeline0UBO;

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,73 @@
#pragma once
/* I sometimes call this sub-namespace of Alice namespace Abigail. Though I hadn't renamed it to Abigail yet.
* Abigail does not depend on Alice Engine. It only depends on margaret. So systems like Lucy can use it
* without creating any sus circular dependencies.
*/
#include "../margaret/vulkan_utils.h"
#include "../../../gen/l1/margaret/VecMargaretSubbuf.h"
/* A one-shot upload pair: a host-visible staging buffer plus the device-local
 * buffer it gets copied into. NOTE(review): PatriciaBuf is not referenced by any
 * code visible in this file — presumably consumed by callers elsewhere; confirm. */
typedef struct {
U64 count;
MargaretSubbuf staging;
MargaretSubbuf device_local;
} PatriciaBuf;
/* Abigail tracks staging buffers whose copy commands have been recorded but not
 * yet executed on the GPU; they are released later via Abigail_wipe_old_staging. */
typedef struct {
VecMargaretSubbuf to_del;
} Abigail;
/* Allocates a staging buffer and a device-local buffer of `len` bytes, records a
 * staging -> device-local copy into `transfer_cmd_buffer`, and returns the
 * device-local buffer. The mapped staging memory (exactly `len` bytes) is returned
 * through `ret_mapped_staging`; the caller fills it before the command buffer runs.
 * Constraints:
 *  - may only be called while no frame is in flight;
 *  - the returned device-local buffer cannot be deleted in the same `_another_frame`
 *    callback in which this function was called;
 *  - `ret_mapped_staging` is only valid until the current init/another_frame phase ends.
 * The staging buffer is queued on self->to_del for later release by
 * Abigail_wipe_old_staging. */
MargaretSubbuf /* ret_dev_local_buf */ Abigail_register_new_buffer(Abigail* self, U64 len,
VkCommandBuffer transfer_cmd_buffer, MargaretBufAllocator* staging_buffers, MargaretBufAllocator* dev_local_buffers,
void** ret_mapped_staging
) {
MargaretSubbuf staging = MargaretBufAllocator_alloc(staging_buffers, len);
MargaretSubbuf dev_local = MargaretBufAllocator_alloc(dev_local_buffers, len);
/* Copy executes when transfer_cmd_buffer is submitted, not here */
margaret_rec_cmd_copy_buffer_one_to_one(transfer_cmd_buffer, &staging, &dev_local);
*ret_mapped_staging = MargaretSubbuf_get_mapped(&staging);
/* Staging must outlive the recorded copy; defer freeing to Abigail_wipe_old_staging */
VecMargaretSubbuf_append(&self->to_del, staging);
return dev_local;
}
/* Same contract as Abigail_register_new_buffer, but for textures — "texture" because
 * the returned object also carries a VkImageView created for the image.
 * The returned device-local image cannot be deleted in the same 'no-frame-in-flight'
 * stage: we must wait for the recorded copy to execute; only on a later frame may it
 * be deleted. `ret_mapped_staging` points into the staging buffer, a region of
 * width * height * pixel_sz bytes, where pixel_sz is the size of one pixel of
 * `format`. That memory may only be written during the same `on-another-frame`
 * phase in which it was handed out — not later. */
MargaretTexture /* ret_dev_local_texture */ Abigail_register_new_texture(Abigail* self, U64 width, U64 height, U64 pixel_sz,
VkFormat format, VkImageUsageFlags usage,
VkCommandBuffer transfer_cmd_buffer, MargaretBufAllocator* staging_buffers, MargaretImgAllocator* dev_local_images,
void** ret_mapped_staging
) {
MargaretSubbuf staging = MargaretBufAllocator_alloc(staging_buffers, width * height * pixel_sz);
MargaretImg dev_local = MargaretImgAllocator_alloc(dev_local_images, width, height, format, usage);
/* Record the buffer->image copy plus a transition to SHADER_READ_ONLY_OPTIMAL,
 * made visible to fragment-shader reads */
margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(transfer_cmd_buffer,
&staging, &dev_local, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
*ret_mapped_staging = MargaretSubbuf_get_mapped(&staging);
/* Staging is released later, after the copy has executed (Abigail_wipe_old_staging) */
VecMargaretSubbuf_append(&self->to_del, staging);
return (MargaretTexture){.img = dev_local, .view = margaret_create_view_for_image(
dev_local_images->device, dev_local.a.image, format, VK_IMAGE_ASPECT_COLOR_BIT)};
}
/* Frees every deferred staging buffer back to `staging_buffers` and empties the
 * deferred list. Call only once the recorded copies are known to have executed.
 * NOTE(review): `device` is currently unused here — kept for interface stability. */
void Abigail_wipe_old_staging(Abigail* self, VkDevice device, MargaretBufAllocator* staging_buffers) {
    (void)device;
    U64 pending = self->to_del.len;
    for (U64 idx = 0; idx < pending; idx++)
        MargaretBufAllocator_free(staging_buffers, self->to_del.buf[idx]);
    self->to_del.len = 0;
}
/* Destroys the Abigail tracker. All deferred staging buffers must already have been
 * released via Abigail_wipe_old_staging (asserted) — there is no allocator available
 * here to free stragglers with. */
void Abigail_drop(Abigail self) {
assert(self.to_del.len == 0);
VecMargaretSubbuf_drop(self.to_del);
}

View File

@ -1,30 +1,23 @@
#pragma once
#include "../margaret/vulkan_utils.h"
#include <ft2build.h>
#include FT_FREETYPE_H
#include "../../../gen/l1/VecAndSpan_U32Segment.h"
#include "../../../gen/l1/vulkan/VecVkDescriptorImageInfo.h"
#include "../../../gen/l1/pixel_masses.h"
#include "../../../gen/l1/VecAndSpan_U32.h"
#include "../../l1_5/core/buff_rb_tree_node.h"
#include "../../l1_5/core/rb_tree_node.h"
#include "../alice/transfer_in_mainloop.h"
#include <ft2build.h>
#include FT_FREETYPE_H
#define LUCY_MAX_DESCRIPTOR_COUNT 100
typedef U32 lucy_image_index_t;
typedef struct {
/* This value is actually Option<MargaretSubbuf>. If staging_buffer is already deleted (after it is no longer used),
* staging_buffer.len will be 0 */
MargaretSubbuf staging_buffer;
MargaretImg img;
VkImageView img_view;
MargaretTexture tex;
U64 usage;
/* 0 if this image isn't scheduled for deletion on th next cycle.
* 1 if it is */
int scheduled_for_deletion;
/* I just could not avoid storing this here. But yeah, after the copying is done, this field stops being used */
void* staging;
} LucyImage;
#include "../../../gen/l1/eve/lucy/OptionLucyImage.h"
@ -71,19 +64,19 @@ struct LucyFace {
struct LucyGlyphCache {
MargaretEngineReference ve;
Abigail* abigail;
VecOptionLucyImage image_slots;
VkDescriptorSetLayout descriptor_set_layout;
VkDescriptorSet descriptor_set;
/* to_be_freed_of_old_staging_next_cycle never intersect with to_be_copied_to_device_next_cycle */
VecU32 to_be_freed_of_old_staging_next_cycle;
VecU32 to_be_copied_to_device_next_cycle;
/* deletion will be performed last */
/* We can delete images and link images to descriptor set only when frame isn't in flight */
VecU32 to_be_written_to_descriptor_set;
VecU32 to_be_deleted;
};
LucyGlyphCache LucyGlyphCache_new(MargaretEngineReference ve){
LucyGlyphCache LucyGlyphCache_new(MargaretEngineReference ve, Abigail* abigail){
VkDescriptorSetLayout my_desc_set_layout;
VkDescriptorSetLayoutBindingFlagsCreateInfo set_layout_crinfo_flags = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
@ -108,10 +101,9 @@ LucyGlyphCache LucyGlyphCache_new(MargaretEngineReference ve){
image_slots.buf[i].variant = Option_None;
}
return (LucyGlyphCache){
.ve = ve, .image_slots = image_slots,
.ve = ve, .abigail = abigail, .image_slots = image_slots,
.descriptor_set_layout = my_desc_set_layout, .descriptor_set = descriptor_set,
.to_be_freed_of_old_staging_next_cycle = VecU32_new(),
.to_be_copied_to_device_next_cycle = VecU32_new(),
.to_be_written_to_descriptor_set = VecU32_new(),
.to_be_deleted = VecU32_new()};
}
@ -125,9 +117,8 @@ void LucyFaceFixedSize_get_rid_of_myself(LucyFaceFixedSize* self){
LucyImage* img = &img_slot->some;
assert(img->usage > 0);
if (--img->usage) {
assert(!img->scheduled_for_deletion);
img->scheduled_for_deletion = 1;
VecU32_append(&cache->to_be_deleted, slot_id);
/* Nothing is written to descriptor set. And luckily, we don't have to */
img_slot->variant = Option_None;
}
}
BufRBTree_MapU32ToLucyStoredGlyph_sink(glyphs);
@ -172,7 +163,6 @@ U32 LucyGlyphCache_add_glyphs__find_image_slot(LucyGlyphCache* cache){
OptionLucyImage* slot = &cache->image_slots.buf[i];
if (slot->variant == Option_None) {
slot->variant = Option_Some;
slot->some.scheduled_for_deletion = 0;
slot->some.usage = 0;
return i;
}
@ -189,15 +179,27 @@ void LucyGlyphCache_add_glyphs__close_img(
assert(img_slot->variant == Option_Some);
LucyImage* img = &img_slot->some;
assert(img->usage > 0);
assert(!img->scheduled_for_deletion);
img_width = MAX_U32(img_width, 10); // Just a precaution. empty buffers aren't supported by Margaret
img_height = MAX_U32(img_height, 10);
VecU32_append(&cache->to_be_copied_to_device_next_cycle, img_slot_id);
img->staging_buffer = MargaretBufAllocator_alloc(cache->ve.staging_buffers, img_width * img_height * 1);
img->img = MargaretImgAllocator_alloc(cache->ve.dev_local_images, img_width, img_height, VK_FORMAT_R8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
img->img_view = margaret_create_view_for_image(cache->ve.device, img->img.a.image,
VK_FORMAT_R8_UNORM, VK_IMAGE_ASPECT_COLOR_BIT);
img->tex = Abigail_register_new_texture(cache->abigail,
img_width, img_height, sizeof(U8), VK_FORMAT_R8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, cache->ve.transfer_cmd_buffer,
cache->ve.staging_buffers, cache->ve.dev_local_images, &(img->staging));
assert(img->staging);
/* We are writing to descriptor set RIGHT NOW. That is why this function, and as such
* LucyGlyphCache_add_glyphs too, must be called when no frame is in flight */
vkUpdateDescriptorSets(cache->ve.device, 1, &(VkWriteDescriptorSet){
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = cache->descriptor_set, .dstBinding = 0, .dstArrayElement = img_slot_id,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &(VkDescriptorImageInfo){
.sampler = cache->ve.nearest_sampler, .imageView = img->tex.view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
}
}, 0, NULL);
}
void LucyGlyphCache_add_glyphs(VecLucyGlyphCachingRequest requests_for_faces){
@ -269,7 +271,6 @@ void LucyGlyphCache_add_glyphs(VecLucyGlyphCachingRequest requests_for_faces){
LucyPositionedStagingGlyph* p_glyph;
one_more_chance:
{}
int s = 23123;
p_glyph = &ready.buf[j];
LucyImage* img = &VecOptionLucyImage_mat(&cache->image_slots, img_slot_id)->some;
U64 new_width_required = p_glyph->bitmap.width + starting_x;
@ -319,8 +320,8 @@ void LucyGlyphCache_add_glyphs(VecLucyGlyphCachingRequest requests_for_faces){
for (size_t j = 0; j < ready.len; j++) {
LucyPositionedStagingGlyph* p_glyph = &ready.buf[j];
LucyImage* image = &VecOptionLucyImage_mat(&cache->image_slots, p_glyph->img_slot_id)->some;
U64 staging_width = image->img.width;
U8* staging = (U8*)MargaretSubbuf_get_mapped(&image->staging_buffer);
U64 staging_width = image->tex.img.width;
U8* staging = (U8*)image->staging;
for (U64 y = 0; y < p_glyph->bitmap.height; y++) {
U64 Y = y + p_glyph->pos.y;
for (U64 x = 0; x < p_glyph->bitmap.width; x++) {
@ -339,62 +340,10 @@ void LucyGlyphCache_drop(LucyGlyphCache self){
for (size_t i = 0; i < self.image_slots.len; i++) {
assert(self.image_slots.buf[i].variant == Option_None);
}
VecU32_drop(self.to_be_freed_of_old_staging_next_cycle);
VecU32_drop(self.to_be_copied_to_device_next_cycle);
VecU32_drop(self.to_be_written_to_descriptor_set);
VecU32_drop(self.to_be_deleted);
}
void LucyGlyphCache_another_frame(LucyGlyphCache* self){
for (size_t i = 0; i < self->to_be_freed_of_old_staging_next_cycle.len; i++) {
U32 slot_id = self->to_be_freed_of_old_staging_next_cycle.buf[i];
LucyImage* img = &self->image_slots.buf[slot_id].some;
assert(img->staging_buffer.len != 0);
MargaretBufAllocator_free(self->ve.staging_buffers, img->staging_buffer);
img->staging_buffer.len = 0;
}
for (size_t i = 0; i < self->to_be_copied_to_device_next_cycle.len; i++) {
U32 slot_id = self->to_be_copied_to_device_next_cycle.buf[i];
OptionLucyImage* img_slot = &self->image_slots.buf[slot_id];
assert(img_slot->variant == Option_Some);
LucyImage* img = &img_slot->some;
assert(img->staging_buffer.len != 0);
if (img->scheduled_for_deletion)
continue;
margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(self->ve.transfer_cmd_buffer,
&img->staging_buffer, &img->img, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
VecU32_append(&self->to_be_freed_of_old_staging_next_cycle, slot_id);
vkUpdateDescriptorSets(self->ve.device, 1, &(VkWriteDescriptorSet){
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = self->descriptor_set, .dstBinding = 0, .dstArrayElement = slot_id,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &(VkDescriptorImageInfo){
.sampler = self->ve.nearest_sampler, .imageView = img->img_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
}
}, 0, NULL);
}
/* We technically could carry out each deletion request in O(1) and each img creation request in O(1),
* but who cares, it's no problem going over the entire descriptor set when something get's added or deleted */
for (size_t i = 0; i < self->to_be_deleted.len; i++) {
U32 slot_id = self->to_be_copied_to_device_next_cycle.buf[i];
OptionLucyImage* img_slot = &self->image_slots.buf[slot_id];
assert(img_slot->variant == Option_Some);
LucyImage* img = &img_slot->some;
assert(img->scheduled_for_deletion);
assert(img->usage == 0);
if (img->staging_buffer.len != 0)
MargaretBufAllocator_free(self->ve.staging_buffers, img->staging_buffer);
MargaretImgAllocator_free(self->ve.dev_local_images, img->img.a);
img_slot->variant = Option_None;
}
self->to_be_freed_of_old_staging_next_cycle.len = 0;
self->to_be_copied_to_device_next_cycle.len = 0;
self->to_be_deleted.len = 0;
}
/* This function does not check font file for correctness, use only with trusted fonts */
LucyFace* LucyFace_new(FT_Library lib, LucyGlyphCache* cache, VecU8 path){
VecU8_append(&path, 0); // Making it null-terminated

View File

@ -97,14 +97,14 @@ void LucyRenderer_draw_char_glyph(LucyRenderer* self, vec4 color, ivec2 pos, Luc
OptionLucyImage* img_slot = VecOptionLucyImage_mat(&self->cache->image_slots, glyph->img_slot_id);
assert(img_slot->variant == Option_Some);
LucyImage* img = &img_slot->some;
float atlas_w = (float)img->img.width;
float atlas_h = (float)img->img.height;
float atlas_w = (float)img->tex.img.width;
float atlas_h = (float)img->tex.img.height;
ivec2 positioned = ivec2_add_ivec2(pos, glyph->bearing);
U64 needed_vbo_length = (self->glyphs_count + 1) * sizeof(LucyRenderInstance);
if (self->staging_vbo.len < needed_vbo_length) {
printf("LucyRenderer Staging Buffer: Gotta replace %lu with %lu\n",
self->staging_vbo.len, needed_vbo_length);
//printf("LucyRenderer Staging Buffer: Gotta replace %lu with %lu\n",
// self->staging_vbo.len, needed_vbo_length);
MargaretBufAllocator_expand_or_move_old_host_visible(
self->ve.staging_buffers, &self->staging_vbo, needed_vbo_length);
}

View File

@ -34,13 +34,16 @@ typedef struct {
U64 len;
} MargaretSubbuf;
struct MargaretBufAllocatorOneBlock{
typedef struct MargaretBufAllocator MargaretBufAllocator;
struct MargaretBufAllocatorOneBlock {
BufRBTree_MapU64ToU64 occupants;
U64 capacity;
U64 occupation_counter;
VkDeviceMemory mem_hand;
VkBuffer buf_hand;
void* mapped_memory;
MargaretBufAllocator* p;
};
void MargaretBufAllocatorOneBlock_drop(MargaretBufAllocatorOneBlock self){
@ -52,7 +55,7 @@ void MargaretBufAllocatorOneBlock_drop(MargaretBufAllocatorOneBlock self){
#include "../../../gen/l1/eve/margaret/VecMargaretBAFreeSegment.h"
#include "../../../gen/l1_5/eve/margaret/BufRBTreeByLen_SetMargaretBAFreeSegment.h"
typedef struct {
struct MargaretBufAllocator {
ListMargaretBufAllocatorOneBlock blocks;
BufRBTreeByLen_SetMargaretBAFreeSegment mem_free_space;
VkDevice device;
@ -61,7 +64,8 @@ typedef struct {
U8 memory_type_id;
U8 alignment_exp;
bool host_visible;
} MargaretBufAllocator;
bool ban_non_envisaged_blocks;
};
void MargaretBufAllocator__erase_gap(
@ -120,22 +124,26 @@ void MargaretBufAllocator__add_block(MargaretBufAllocator* self, U64 capacity){
.occupants = BufRBTree_MapU64ToU64_new_reserved(1),
.capacity = capacity,
.occupation_counter = capacity,
.mem_hand = memory, .buf_hand = buffer, .mapped_memory = mapped_memory
});
.mem_hand = memory, .buf_hand = buffer, .mapped_memory = mapped_memory,
.p = self });
}
MargaretBufAllocator MargaretBufAllocator_new(
MargaretBufAllocator* MargaretBufAllocator_new(
VkDevice device, VkPhysicalDevice physical_device,
VkBufferUsageFlags usage, U8 memory_type_id, U8 alignment_exp, bool host_visible, U64 initial_block_size
VkBufferUsageFlags usage,
U8 memory_type_id, U8 alignment_exp, bool host_visible,
U64 initial_block_size, bool ban_non_envisaged_blocks
){
MargaretBufAllocator self = {
MargaretBufAllocator* self = (MargaretBufAllocator*)safe_malloc(sizeof(MargaretBufAllocator));
*self = (MargaretBufAllocator){
.blocks = ListMargaretBufAllocatorOneBlock_new(),
.mem_free_space = BufRBTreeByLen_SetMargaretBAFreeSegment_new_reserved(1),
.device = device, .physical_device = physical_device, .usage = usage, .memory_type_id = memory_type_id,
.alignment_exp = alignment_exp, .host_visible = host_visible
.alignment_exp = alignment_exp, .host_visible = host_visible,
.ban_non_envisaged_blocks = ban_non_envisaged_blocks,
};
MargaretBufAllocator__add_block(&self, initial_block_size);
MargaretBufAllocator__insert_gap(&self, &self.blocks.first->el, 0, initial_block_size);
MargaretBufAllocator__add_block(self, initial_block_size);
MargaretBufAllocator__insert_gap(self, &self->blocks.first->el, 0, initial_block_size);
return self;
}
@ -230,6 +238,7 @@ void MargaretBufAllocator_debug(const MargaretBufAllocator* self){
/* Free one subbuffer, not a whole MBA :) */
void MargaretBufAllocator_free(MargaretBufAllocator* self, MargaretSubbuf allocation){
assert(allocation.block->p == self); // Vibe check
U64Segment left_free_space = MargaretBufAllocator__get_left_free_space(self, &allocation);
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, &allocation);
@ -245,7 +254,6 @@ void MargaretBufAllocator_free(MargaretBufAllocator* self, MargaretSubbuf alloca
}
NODISCARD MargaretSubbuf MargaretBufAllocator_alloc(MargaretBufAllocator* self, U64 req_size){
// MargaretBufAllocator_debug(self);
req_size = margaret_bump_buffer_size_to_alignment(req_size, self->alignment_exp);
VkPhysicalDeviceMaintenance3Properties maintenance3_properties = {
@ -261,6 +269,8 @@ NODISCARD MargaretSubbuf MargaretBufAllocator_alloc(MargaretBufAllocator* self,
OptionMargaretBAFreeSegment free_gap = MargaretBufAllocator__search_gap(self, req_size);
if (free_gap.variant == Option_None) {
assert(self->blocks.first != NULL);
if (self->ban_non_envisaged_blocks)
abortf("Exceeded the size of the initial memory block in a buffer allocator that bans additional blocks\n");
U64 pitch = self->blocks.first->el.capacity;
// Old blocks remain intact
U64 new_capacity = MAX_U64(req_size, MIN_U64(2 * pitch, maintenance3_properties.maxMemoryAllocationSize));
@ -274,7 +284,6 @@ NODISCARD MargaretSubbuf MargaretBufAllocator_alloc(MargaretBufAllocator* self,
return (MargaretSubbuf){.block = &self->blocks.first->el, 0, req_size};
}
MargaretBufAllocator__put_buf_to_a_gap(self, free_gap.some, req_size);
// MargaretBufAllocator_debug(self);
return (MargaretSubbuf){.block = free_gap.some.block, .start = free_gap.some.start, req_size};
}
@ -298,14 +307,15 @@ void MargaretBufAllocator_shrink(MargaretBufAllocator* self, MargaretSubbuf* all
* `allocation` argument was untouched. It remains a valid object, you need to deallocate it yourself
*/
NODISCARD MargaretSubbuf MargaretBufAllocator_expand(
MargaretBufAllocator* self, MargaretSubbuf* allocation, U64 bigger_size){
MargaretBufAllocator* self, MargaretSubbuf* allocation, U64 bigger_size
){
assert(allocation->block->p == self); // Vibe check
bigger_size = margaret_bump_buffer_size_to_alignment(bigger_size, self->alignment_exp);
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, allocation);
if (allocation->start + bigger_size > right_free_space.start + right_free_space.len){
return MargaretBufAllocator_alloc(self, bigger_size);
}
// MargaretBufAllocator_debug(self);
MargaretBufAllocator__erase_gap(self, allocation->block, right_free_space.start, right_free_space.len);
MargaretBufAllocator__insert_gap(self, allocation->block,
allocation->start + bigger_size,
@ -358,4 +368,4 @@ void MargaretBufAllocator_expand_or_free_old(
MargaretBufAllocator_free(self, *allocation);
*allocation = maybe_bigger;
}
}
}

View File

@ -176,8 +176,10 @@
#include "../../l1_5/core/buff_rb_tree_node.h"
#include "../../../gen/l1_5/BufRBTree_MapU64ToU64.h"
typedef struct{
U64 block;
typedef struct MargaretImgAllocatorOneBlock MargaretImgAllocatorOneBlock;
typedef struct {
MargaretImgAllocatorOneBlock* block;
U64 start;
U64 len;
} MargaretIAFreeSegment;
@ -199,7 +201,7 @@ bool MargaretIAFreeSegment_less_resp_align(const MargaretIAFreeSegment* A, const
if (A->block == B->block) {
return A->start < B->start;
}
return A->block < B->block;
return (uintptr_t)A->block < (uintptr_t)B->block;
}
return A_len < B_len;
}
@ -208,25 +210,28 @@ bool MargaretIAFreeSegment_less_resp_align(const MargaretIAFreeSegment* A, const
/* Does not include all parameters needed for relocation. Because relocation is needed only
* during controlled defragmentation */
typedef struct {
U64 block;
MargaretImgAllocatorOneBlock* block;
VkImage image;
U64 start;
} MargaretImgAllocation;
typedef struct MargaretImgAllocator MargaretImgAllocator;
/* Not primitive */
typedef struct {
struct MargaretImgAllocatorOneBlock {
BufRBTree_MapU64ToU64 images;
U64 capacity;
U64 occupation_counter;
VkDeviceMemory mem_hand;
void* mapped_memory;
} MargaretImgAllocatorOneBlock;
MargaretImgAllocator* p;
};
void MargaretImgAllocatorOneBlock_drop(MargaretImgAllocatorOneBlock self){
BufRBTree_MapU64ToU64_drop(self.images);
}
#include "../../../gen/l1/eve/margaret/VecMargaretImgAllocatorOneBlock.h"
#include "../../../gen/l1/eve/margaret/ListMargaretImgAllocatorOneBlock.h"
#include "../../../gen/l1/VecAndSpan_U8.h"
#include "../../../gen/l1/eve/margaret/VecMargaretIAFreeSegment.h"
@ -258,7 +263,9 @@ void MargaretMemFreeSpaceManager_drop(MargaretMemFreeSpaceManager self){
VecU8_drop(self.set_present);
}
void MargaretMemFreeSpaceManager_erase(MargaretMemFreeSpaceManager* man, U64 block, U64 start, U64 len){
void MargaretMemFreeSpaceManager_erase(MargaretMemFreeSpaceManager* man,
MargaretImgAllocatorOneBlock* block, U64 start, U64 len
){
if (len == 0)
return;
assert(man->set_present.len > 0);
@ -273,7 +280,9 @@ void MargaretMemFreeSpaceManager_erase(MargaretMemFreeSpaceManager* man, U64 blo
}
}
void MargaretMemFreeSpaceManager_insert(MargaretMemFreeSpaceManager* man, U64 block, U64 start, U64 len){
void MargaretMemFreeSpaceManager_insert(MargaretMemFreeSpaceManager* man,
MargaretImgAllocatorOneBlock* block, U64 start, U64 len
){
if (len == 0)
return;
assert(man->set_present.len > 0); /* MargaretMemFreeSpaceManager will do that for us with 2^3 */
@ -313,49 +322,51 @@ OptionMargaretIAFreeSegment MargaretMemFreeSpaceManager_search(
}
/* VkDevice and VkPhysicalDevice stay remembered here. Don't forget that, please */
typedef struct {
VecMargaretImgAllocatorOneBlock blocks;
struct MargaretImgAllocator {
ListMargaretImgAllocatorOneBlock blocks;
MargaretMemFreeSpaceManager mem_free_space;
VkDevice device;
VkPhysicalDevice physical_device;
U8 memory_type_id;
} MargaretImgAllocator;
};
void MargaretImgAllocator__erase_gap(MargaretImgAllocator* self, U64 block_id, U64 start, U64 len){
MargaretMemFreeSpaceManager_erase(&self->mem_free_space, block_id, start, len);
MargaretImgAllocatorOneBlock* BLOCK = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, block_id);
BLOCK->occupation_counter += len;
assert(BLOCK->occupation_counter <= BLOCK->capacity);
void MargaretImgAllocator__erase_gap(MargaretImgAllocator* self,
MargaretImgAllocatorOneBlock* block, U64 start, U64 len
){
MargaretMemFreeSpaceManager_erase(&self->mem_free_space, block, start, len);
block->occupation_counter += len;
assert(block->occupation_counter <= block->capacity);
}
void MargaretImgAllocator__insert_gap(MargaretImgAllocator* self, U64 block_id, U64 start, U64 len){
MargaretMemFreeSpaceManager_insert(&self->mem_free_space, block_id, start, len);
MargaretImgAllocatorOneBlock* BLOCK = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, block_id);
assert(len <= BLOCK->occupation_counter);
BLOCK->occupation_counter -= len;
void MargaretImgAllocator__insert_gap(MargaretImgAllocator* self,
MargaretImgAllocatorOneBlock* block, U64 start, U64 len
){
MargaretMemFreeSpaceManager_insert(&self->mem_free_space, block, start, len);
assert(len <= block->occupation_counter);
block->occupation_counter -= len;
}
void MargaretImgAllocator__add_block(MargaretImgAllocator* self, U64 capacity){
VkDeviceMemory memory;
printf("DEBUG MargaretImgAllocator: allocating block of size %lu\n", capacity);
// printf("DEBUG MargaretImgAllocator: allocating block of size %lu\n", capacity);
check(vkAllocateMemory(self->device, &(VkMemoryAllocateInfo){
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = capacity, .memoryTypeIndex = self->memory_type_id
}, NULL, &memory) == VK_SUCCESS);
VecMargaretImgAllocatorOneBlock_append(&self->blocks, (MargaretImgAllocatorOneBlock){
ListMargaretImgAllocatorOneBlock_insert(&self->blocks, (MargaretImgAllocatorOneBlock){
.images = BufRBTree_MapU64ToU64_new_reserved(1),
.capacity = capacity,
.occupation_counter = capacity, // sounds sus
.mem_hand = memory,
.mapped_memory = NULL /* not supported */});
.mapped_memory = NULL /* not supported */, .p = self });
}
/* Idk where to put it */
void MargaretImgAllocator__debug(const MargaretImgAllocator* self){
printf("=============================== MargaretImgAllocator ============\n"
"All blocks: { ");
for (size_t i = 0; i < self->blocks.len; i++) {
printf(" %lu/%lu ", self->blocks.buf[i].occupation_counter, self->blocks.buf[i].capacity);
for (ListNodeMargaretImgAllocatorOneBlock* i = 0; i; i = i->next) {
printf(" %lu/%lu ", i->el.occupation_counter, i->el.capacity);
}
printf("}\n");
for (size_t ai = 0; ai < self->mem_free_space.set_present.len; ai++) {
@ -367,24 +378,25 @@ void MargaretImgAllocator__debug(const MargaretImgAllocator* self){
assert(set->guest == alignment_exp);
for (size_t i = 0; i < set->el.len; i++) {
const MargaretIAFreeSegment *free_seg = &set->el.buf[i];
printf(" Block %lu, start %lu, len %lu\n", free_seg->block, free_seg->start, free_seg->len);
printf(" Block %p, start %lu, len %lu\n", (void*)free_seg->block, free_seg->start, free_seg->len);
}
}
}
MargaretImgAllocator MargaretImgAllocator_new(
MargaretImgAllocator* MargaretImgAllocator_new(
VkDevice device, VkPhysicalDevice physical_device, U8 memory_type_id, U64 initial_block_size
){
MargaretImgAllocator self = {
.blocks = VecMargaretImgAllocatorOneBlock_new(),
MargaretImgAllocator* self = (MargaretImgAllocator*)safe_malloc(sizeof(MargaretImgAllocator));
*self = (MargaretImgAllocator){
.blocks = ListMargaretImgAllocatorOneBlock_new(),
.mem_free_space = MargaretMemFreeSpaceManager_new(),
.device = device,
.physical_device = physical_device,
.memory_type_id = memory_type_id,
};
MargaretImgAllocator__add_block(&self, initial_block_size);
MargaretImgAllocator__insert_gap(&self, 0, 0, initial_block_size);
// MargaretImgAllocator__debug(&self);
MargaretImgAllocator__add_block(self, initial_block_size);
assert(self->blocks.first != NULL);
MargaretImgAllocator__insert_gap(self, &self->blocks.first->el, 0, initial_block_size);
return self;
}
@ -407,7 +419,7 @@ U64 MargaretImgAllocator__add_img_given_gap(
MargaretImgAllocator__insert_gap(self, segment.block, aligned_start + required_size,
gap_start + gap_len - (aligned_start + required_size));
BufRBTree_MapU64ToU64* images = &VecMargaretImgAllocatorOneBlock_mat(&self->blocks, segment.block)->images;
BufRBTree_MapU64ToU64* images = &segment.block->images;
bool iret = BufRBTree_MapU64ToU64_insert(images, aligned_start, required_size);
assert(iret);
return aligned_start;
@ -415,7 +427,7 @@ U64 MargaretImgAllocator__add_img_given_gap(
U64Segment MargaretImgAllocator__get_left_free_space(
const MargaretImgAllocator* self, MargaretImgAllocation allocation){
const MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_at(&self->blocks, allocation.block);
const MargaretImgAllocatorOneBlock* block = allocation.block;
U64 occ_start = allocation.start;
U64 prev_occ_it = BufRBTree_MapU64ToU64_find_max_less(&block->images, allocation.start);
@ -434,7 +446,7 @@ U64Segment MargaretImgAllocator__get_left_free_space(
U64Segment MargaretImgAllocator__get_right_free_space(
const MargaretImgAllocator* self, MargaretImgAllocation allocation){
const MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_at(&self->blocks, allocation.block);
const MargaretImgAllocatorOneBlock* block = allocation.block;
U64 occ_start = allocation.start;
VkMemoryRequirements occ_memory_requirements;
vkGetImageMemoryRequirements(self->device, allocation.image, &occ_memory_requirements);
@ -451,27 +463,29 @@ U64Segment MargaretImgAllocator__get_right_free_space(
return (U64Segment){.start = occ_start + occ_taken_size, .len = block->capacity - (occ_start + occ_taken_size)};
}
/* Also frees blocks */
void MargaretImgAllocator_drop(MargaretImgAllocator self){
for (size_t bi = 0; bi < self.blocks.len; bi++) {
vkFreeMemory(self.device, self.blocks.buf[bi].mem_hand, NULL);
for (ListNodeMargaretImgAllocatorOneBlock* bi = self.blocks.first; bi; bi = bi->next) {
vkFreeMemory(self.device, bi->el.mem_hand, NULL);
}
VecMargaretImgAllocatorOneBlock_drop(self.blocks);
ListMargaretImgAllocatorOneBlock_drop(self.blocks);
MargaretMemFreeSpaceManager_drop(self.mem_free_space);
}
void MargaretImgAllocator_free(MargaretImgAllocator* self, MargaretImgAllocation allocation){
assert(allocation.block->p == self); // Vibe check
U64Segment left_free_space = MargaretImgAllocator__get_left_free_space(self, allocation);
U64Segment right_free_space = MargaretImgAllocator__get_right_free_space(self, allocation);
vkDestroyImage(self->device, allocation.image, NULL);
MargaretImgAllocator__erase_gap(self, allocation.block, left_free_space.start, left_free_space.len);
MargaretImgAllocator__erase_gap(self, allocation.block, right_free_space.start, right_free_space.len);
MargaretImgAllocator__insert_gap(self, allocation.block,
MargaretImgAllocatorOneBlock* block = allocation.block;
MargaretImgAllocator__erase_gap(self, block, left_free_space.start, left_free_space.len);
MargaretImgAllocator__erase_gap(self, block, right_free_space.start, right_free_space.len);
MargaretImgAllocator__insert_gap(self, block,
left_free_space.start,
right_free_space.start + right_free_space.len - left_free_space.start);
MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, allocation.block);
bool eret = BufRBTree_MapU64ToU64_erase(&block->images, allocation.start);
assert(eret);
}
@ -514,24 +528,23 @@ NODISCARD MargaretImgAllocation MargaretImgAllocator__alloc(
MargaretMemFreeSpaceManager_search(&self->mem_free_space, alignment_exp, mem_requirements.size);
if (free_gap.variant == Option_None) {
assert(self->blocks.len > 0);
U64 pitch = self->blocks.buf[self->blocks.len - 1].capacity;
assert(self->blocks.first != NULL);
U64 pitch = self->blocks.first->el.capacity;
// Old blocks remain intact
U64 new_capacity = MAX_U64(mem_requirements.size, MIN_U64(2 * pitch, maintenance3_properties.maxMemoryAllocationSize));
// U64 new_capacity = MAX_U64(mem_requirements.size, MIN_U64(pitch, maintenance3_properties.maxMemoryAllocationSize));
MargaretImgAllocator__add_block(self, new_capacity);
U64 bid = self->blocks.len - 1;
MargaretImgAllocator__insert_gap(self, bid, mem_requirements.size, new_capacity - mem_requirements.size);
MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, bid);
MargaretImgAllocatorOneBlock* block = &self->blocks.first->el;
MargaretImgAllocator__insert_gap(self, block, mem_requirements.size, new_capacity - mem_requirements.size);
block->occupation_counter = mem_requirements.size;
bool iret = BufRBTree_MapU64ToU64_insert(&block->images, 0, mem_requirements.size);
assert(iret);
check(vkBindImageMemory(self->device, fresh_img, block->mem_hand, 0) == VK_SUCCESS);
// MargaretImgAllocator__debug(self);
return (MargaretImgAllocation){.block = bid, fresh_img, 0};
return (MargaretImgAllocation){.block = block, fresh_img, 0};
}
U64 aligned_pos = MargaretImgAllocator__add_img_given_gap(self, free_gap.some, mem_requirements.size, alignment_exp);
VkDeviceMemory memory = VecMargaretImgAllocatorOneBlock_at(&self->blocks, free_gap.some.block)->mem_hand;
VkDeviceMemory memory = free_gap.some.block->mem_hand;
check(vkBindImageMemory(self->device, fresh_img, memory, aligned_pos) == VK_SUCCESS);
// MargaretImgAllocator__debug(self);
return (MargaretImgAllocation){.block = free_gap.some.block, .image = fresh_img, .start = aligned_pos};

View File

@ -145,6 +145,7 @@ void MargaretInstanceAndItsDebug_drop(MargaretInstanceAndItsDebug instance) {
/* Queue family indices selected for the device. As currently chosen by
 * margaret_choose_good_queue_families, for_graphics and for_compute come from
 * the same family (one family must support both); for_presentation may differ. */
typedef struct {
    U32 for_graphics;
    U32 for_presentation;
    U32 for_compute;
} MargaretChosenQueueFamilies;
/* MargaretChosenQueueFamilies or a static string, describing which part could not be found
@ -163,13 +164,13 @@ ResultMargaretChosenQueueFamiliesOrSpanU8 margaret_choose_good_queue_families(Vk
VecVkQueueFamilyProperties queue_families = VecVkQueueFamilyProperties_new_zeroinit(queue_family_count);
vkGetPhysicalDeviceQueueFamilyProperties(dev, &queue_family_count, queue_families.buf);
OptionU32 index_for_graphics = None_U32();
OptionU32 index_for_graph_and_comp = None_U32();
OptionU32 index_for_presentation = None_U32();
for (uint32_t i = 0; i < queue_family_count; i++) {
const VkQueueFamilyProperties* props = VecVkQueueFamilyProperties_at(&queue_families, i);
if (props->queueFlags & VK_QUEUE_GRAPHICS_BIT && props->queueCount >= 1) {
index_for_graphics = Some_U32(i);
if (props->queueFlags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT) && props->queueCount >= 1) {
index_for_graph_and_comp = Some_U32(i);
}
VkBool32 isPres = false;
if (vkGetPhysicalDeviceSurfaceSupportKHR(dev, i, surface, &isPres) != VK_SUCCESS)
@ -178,15 +179,45 @@ ResultMargaretChosenQueueFamiliesOrSpanU8 margaret_choose_good_queue_families(Vk
index_for_presentation = Some_U32(i);
}
VecVkQueueFamilyProperties_drop(queue_families);
if (index_for_graphics.variant == Option_None)
return (ResultMargaretChosenQueueFamiliesOrSpanU8){ .variant = Result_Err, .err = cstr("No graphics queue family") };
if (index_for_graph_and_comp.variant == Option_None)
return (ResultMargaretChosenQueueFamiliesOrSpanU8){ .variant = Result_Err,
.err = cstr("No queue family for both graphics and compute") };
if (index_for_presentation.variant == Option_None)
return (ResultMargaretChosenQueueFamiliesOrSpanU8){ .variant = Result_Err, .err = cstr("No presentation queue family") };
return (ResultMargaretChosenQueueFamiliesOrSpanU8){ .variant = Result_Ok, .ok = (MargaretChosenQueueFamilies){
.for_graphics = index_for_graphics.some, .for_presentation = index_for_presentation.some
.for_graphics = index_for_graph_and_comp.some,
.for_compute = index_for_graph_and_comp.some,
.for_presentation = index_for_presentation.some
} };
}
/* Queue handles fetched from the logical device. Members alias the SAME VkQueue
 * when their families coincide (see margaret_get_device_queues). */
typedef struct {
    VkQueue graphics;
    VkQueue presentation;
    VkQueue compute;
} MargaretUsedQueues;
/* Fetches queue index 0 of each chosen family from `device`. A family is only
 * queried once: if presentation or compute shares a family with an already
 * fetched queue, the existing handle is reused instead of calling
 * vkGetDeviceQueue again (matches the single-queue-per-family device creation). */
MargaretUsedQueues margaret_get_device_queues(VkDevice device, MargaretChosenQueueFamilies family) {
    MargaretUsedQueues out;
    vkGetDeviceQueue(device, family.for_graphics, 0, &out.graphics);
    /* Presentation: reuse the graphics queue when the families coincide. */
    out.presentation = out.graphics;
    if (family.for_presentation != family.for_graphics)
        vkGetDeviceQueue(device, family.for_presentation, 0, &out.presentation);
    /* Compute: reuse whichever already-fetched queue shares its family. */
    if (family.for_compute == family.for_graphics)
        out.compute = out.graphics;
    else if (family.for_compute == family.for_presentation)
        out.compute = out.presentation;
    else
        vkGetDeviceQueue(device, family.for_compute, 0, &out.compute);
    return out;
}
// These are not the same as instance extensions
VecVecU8 margaret_get_extensions_of_physical_device(VkPhysicalDevice physical_device) {
uint32_t extensions_count = 0;
@ -311,14 +342,20 @@ VkDevice margaret_create_logical_device(VkPhysicalDevice physical_device, Margar
float qfam_queue_priorities[1] = {1.f};
VkDeviceQueueCreateInfo queue_crinfo[2] = { 0 };
int queue_c = 0;
if (queue_fam.for_graphics == queue_fam.for_presentation) {
queue_c = 1;
queue_crinfo[0].queueFamilyIndex = queue_fam.for_graphics;
} else {
queue_c = 2;
queue_crinfo[0].queueFamilyIndex = queue_fam.for_graphics;
queue_crinfo[1].queueFamilyIndex = queue_fam.for_presentation;
queue_crinfo[queue_c].queueFamilyIndex = queue_fam.for_graphics;
queue_c++;
if (queue_fam.for_graphics != queue_fam.for_presentation) {
queue_crinfo[queue_c].queueFamilyIndex = queue_fam.for_presentation;
queue_c++;
}
if (queue_fam.for_compute != queue_fam.for_presentation && queue_fam.for_compute != queue_fam.for_graphics) {
queue_crinfo[queue_c].queueFamilyIndex = queue_fam.for_compute;
queue_c++;
}
for (int i = 0; i < queue_c; i++) {
queue_crinfo[i].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_crinfo[i].queueCount = 1;
@ -839,22 +876,6 @@ void MargaretSwapchainBundle_drop_with_device(VkDevice device, MargaretSwapchain
// Now swapchain bundle is 100% dropped
}
/* Wraps SPIR-V bytecode `code` into a VkShaderModule. Consumes `code`
 * (drops it before returning).
 * NOTE(review): only `len < 4` is rejected; the Vulkan spec also requires
 * codeSize to be a multiple of 4 — consider checking `code.len % 4` too. */
VkShaderModule margaret_VkShaderModule_new(VkDevice device, VecU8 code) {
    if (code.len < 4)
        abortf("Kill yourself, please\n");
    VkShaderModule shad_module;
    check(vkCreateShaderModule(device, &(VkShaderModuleCreateInfo){
        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
        .codeSize = code.len,
        // Now this is funny, we can't put arbitrary byte-string here, it should be 4-byte aligned
        // Thanks goodness all the strings in VecU8 are allocated with calloc, which gives high alignment to
        // virtually everything
        .pCode = (const uint32_t*)code.buf
    }, NULL, &shad_module) == VK_SUCCESS);
    VecU8_drop(code);
    return shad_module;
}
VkCommandPool margaret_create_resettable_command_pool(VkDevice device, uint32_t wanted_queue_family) {
VkCommandPoolCreateInfo crinfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
@ -965,9 +986,11 @@ VkSampler margaret_create_sampler(VkPhysicalDevice physical_device, VkDevice dev
}
VkDescriptorPool margaret_create_descriptor_set_pool(
VkDevice device, uint32_t ubo_descriptor_count, uint32_t image_sampler_descriptor_count, uint32_t max_sets
VkDevice device,
uint32_t ubo_descriptor_count, uint32_t image_sampler_descriptor_count, uint32_t storage_buffer_descriptor_count,
uint32_t max_sets
) {
VkDescriptorPoolSize sizes[2];
VkDescriptorPoolSize sizes[3];
int sizes_c = 0;
if (ubo_descriptor_count > 0) {
sizes[sizes_c] = (VkDescriptorPoolSize){
@ -983,6 +1006,13 @@ VkDescriptorPool margaret_create_descriptor_set_pool(
};
sizes_c++;
}
if (storage_buffer_descriptor_count > 0) {
sizes[sizes_c] = (VkDescriptorPoolSize){
.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
.descriptorCount = storage_buffer_descriptor_count
};
sizes_c++;
}
VkDescriptorPool descriptor_pool;
check(vkCreateDescriptorPool(device, &(VkDescriptorPoolCreateInfo){
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
@ -1018,6 +1048,31 @@ void margaret_end_command_buffer(VkCommandBuffer command_buffer){
check(vkEndCommandBuffer(command_buffer) == VK_SUCCESS);
}
/* Wraps SPIR-V bytecode `code` into a VkShaderModule. Consumes `code`
 * (drops it before returning). Aborts on malformed input. */
VkShaderModule margaret_VkShaderModule_new(VkDevice device, VecU8 code) {
    /* Vulkan requires codeSize to be a positive multiple of 4 (SPIR-V words);
     * the old check only rejected len < 4 and aborted with a useless message. */
    if (code.len < 4 || code.len % 4 != 0)
        abortf("margaret_VkShaderModule_new: SPIR-V size %lu is not a positive multiple of 4\n",
            (unsigned long)code.len);
    VkShaderModule shad_module;
    check(vkCreateShaderModule(device, &(VkShaderModuleCreateInfo){
        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
        .codeSize = code.len,
        // Now this is funny, we can't put arbitrary byte-string here, it should be 4-byte aligned
        // Thanks goodness all the strings in VecU8 are allocated with calloc, which gives high alignment to
        // virtually everything
        .pCode = (const uint32_t*)code.buf
    }, NULL, &shad_module) == VK_SUCCESS);
    VecU8_drop(code);
    return shad_module;
}
/* Fills a shader-stage create info for `module` at `stage`.
 * The entry point is always "main"; all other fields stay zero. */
VkPipelineShaderStageCreateInfo margaret_VkPipelineShaderStageCreateInfo_init(
    VkShaderStageFlags stage, VkShaderModule module
) {
    VkPipelineShaderStageCreateInfo info = {0};
    info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    info.stage = stage;
    info.module = module;
    info.pName = "main";
    return info;
}
typedef struct {
VkPipelineLayout pipeline_layout;
VecU8 vertex_shader_code;
@ -1037,24 +1092,15 @@ VkPipeline margaret_create_triangle_pipeline_one_attachment(
MargaretMostImportantPipelineOptions op
){
VkPipelineShaderStageCreateInfo shader_modules[3] = {
(VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.module = margaret_VkShaderModule_new(device, op.vertex_shader_code),
.stage = VK_SHADER_STAGE_VERTEX_BIT, .pName = "main",
},
(VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.module = margaret_VkShaderModule_new(device, op.fragment_shader_code),
.stage = VK_SHADER_STAGE_FRAGMENT_BIT, .pName = "main",
},
margaret_VkPipelineShaderStageCreateInfo_init(VK_SHADER_STAGE_VERTEX_BIT,
margaret_VkShaderModule_new(device, op.vertex_shader_code)),
margaret_VkPipelineShaderStageCreateInfo_init(VK_SHADER_STAGE_FRAGMENT_BIT,
margaret_VkShaderModule_new(device, op.fragment_shader_code)),
};
U32 shader_modules_c = 2;
if (op.geometry_shader_code.len > 0) {
shader_modules[shader_modules_c] = (VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.module = margaret_VkShaderModule_new(device, op.geometry_shader_code),
.stage = VK_SHADER_STAGE_GEOMETRY_BIT, .pName = "main",
};
shader_modules[shader_modules_c] = margaret_VkPipelineShaderStageCreateInfo_init(
VK_SHADER_STAGE_GEOMETRY_BIT, margaret_VkShaderModule_new(device, op.geometry_shader_code)),
shader_modules_c++;
}
@ -1120,7 +1166,7 @@ VkPipeline margaret_create_triangle_pipeline_one_attachment(
.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
.colorBlendOp = VK_BLEND_OP_ADD,
.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE,
.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO,
.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE,
.alphaBlendOp = VK_BLEND_OP_ADD,
.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT
@ -1146,9 +1192,21 @@ VkPipeline margaret_create_triangle_pipeline_one_attachment(
return pipeline;
}
#include "vulkan_memory.h"
/* Builds a compute pipeline with layout `layout` from SPIR-V bytecode `code`.
 * Consumes `code` (margaret_VkShaderModule_new drops it); the temporary shader
 * module is destroyed before returning. */
VkPipeline margaret_create_compute_pipeline(VkDevice device, VkPipelineLayout layout, VecU8 code) {
    VkPipeline pipeline;
    /* margaret_VkShaderModule_new consumes (VecU8_drop's) `code`, so it must be
     * called exactly once. */
    VkShaderModule comp_module = margaret_VkShaderModule_new(device, code);
    /* BUG FIX: the stage previously called margaret_VkShaderModule_new(device, code)
     * AGAIN — a use-after-free of the already-dropped `code`, plus the second
     * module leaked while `comp_module` went unused. Reuse comp_module instead.
     * Also check the result, matching the file-wide check(...) convention. */
    check(vkCreateComputePipelines(device, VK_NULL_HANDLE, 1, &(VkComputePipelineCreateInfo){
        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
        .flags = 0,
        .layout = layout,
        .stage = margaret_VkPipelineShaderStageCreateInfo_init(VK_SHADER_STAGE_COMPUTE_BIT, comp_module),
    }, NULL, &pipeline) == VK_SUCCESS);
    vkDestroyShaderModule(device, comp_module, NULL);
    return pipeline;
}
// todo: move image copying function here
#include "vulkan_memory.h"
typedef struct {
VkDevice device;
@ -1246,3 +1304,40 @@ void margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(
});
dst->current_layout = dst_new_layout;
}
/* Returns the size in bytes of one texel for the supported single-plane formats.
 * Aborts on any format not listed here. */
U64 margaret_singleplane_format_to_sizeof_type(VkFormat type){
    switch (type) {
        case VK_FORMAT_R32G32B32A32_SFLOAT: return 16;
        case VK_FORMAT_R32G32B32_SFLOAT: return 12;
        case VK_FORMAT_R32G32_SFLOAT: return 8;
        case VK_FORMAT_R32_SFLOAT: return 4;
        case VK_FORMAT_R16_SFLOAT: return 2;
        case VK_FORMAT_R16G16_SFLOAT: return 4;
        case VK_FORMAT_R16G16B16_SFLOAT: return 6;
        case VK_FORMAT_R16G16B16A16_SFLOAT: return 8;
        case VK_FORMAT_R16_UNORM: return 2;
        case VK_FORMAT_R16G16_UNORM: return 4;
        case VK_FORMAT_R16G16B16_UNORM: return 6;
        case VK_FORMAT_R16G16B16A16_UNORM: return 8;
        case VK_FORMAT_R8G8B8A8_UNORM: return 4;
        case VK_FORMAT_R8G8B8_UNORM: return 3;
        case VK_FORMAT_R8G8_UNORM: return 2;
        case VK_FORMAT_R8_UNORM: return 1;
        case VK_FORMAT_R32_UINT: return 4;
        case VK_FORMAT_R32G32_UINT: return 8;
        case VK_FORMAT_R32G32B32_UINT: return 12;
        case VK_FORMAT_R32G32B32A32_UINT: return 16;
        case VK_FORMAT_R8G8B8A8_SRGB: return 4;
        default:
            /* Fix: report WHICH format was rejected instead of a joke message. */
            abortf("margaret_singleplane_format_to_sizeof_type: unsupported VkFormat %d\n", (int)type);
    }
}
/* An image paired with a view over it — presumably the unit that gets bound
 * into the texture descriptor array. NOTE(review): ownership/destruction of
 * `view` is not visible here — confirm who destroys it. */
typedef struct {
    MargaretImg img;
    VkImageView view;
} MargaretTexture;

View File

@ -85,6 +85,10 @@ typedef struct{
RBTreeNodeLucyFaceFixedSize* font_face_of_size_40;
vec3 hero_pos;
U32 ROA_diffuse_tex_slot;
U32 ROA_normal_tex_slot;
U32 ROA_specular_tex_slot;
ListNodeAliceGenericMeshHand* ROA_mesh;
GenericMeshTopology ROA_topology;
RigidBodyState ROA_state;
@ -96,6 +100,8 @@ typedef struct{
U64 misses_count;
U64 hits_count;
Vecvec3 bullets_stuck_on_ROA;
MargaretSubbuf sb;
} R4BetaState;
/* We are surrounded by a giant cubic mesh of light sources */
@ -123,16 +129,8 @@ void physics_update(R4BetaState* st, float t){
* m2 is the mass of bullet. `v` is the speed of bullet */
void RigidBodyState_when_shot(RigidBodyState* self, vec3 imp, float m2, vec3 v){
vec3 IO = vec3_minus(imp);
float IO_norm_sq = vec3_dot(IO, IO);
vec3 linear_speed_gain;
if (IO_norm_sq < 0.00001f) {
linear_speed_gain = v;
} else {
vec3 v_projected = vec3_mul_scal(IO, vec3_dot(IO, v) / IO_norm_sq);
linear_speed_gain = vec3_mul_scal(v_projected, m2 / self->p.mass);
}
self->speed = vec3_add_vec3(self->speed, linear_speed_gain);
self->speed = vec3_add_vec3(self->speed, vec3_mul_scal(v, m2 / self->p.mass));
vec3 www = vec3_mul_scal(vec3_cross(v, IO), m2);
@ -254,8 +252,15 @@ void main_h_on_another_frame(void* data, float fl){
.model_t = RigidBodyState_get_tran_mat_of_mesh(&st->ROA_state),
});
Pipeline0UBO* ubo = (Pipeline0UBO*)MargaretSubbuf_get_mapped(&st->alice->pipeline0_ubo.staging);
assert(pipeline_0_ubo_point_light_max_count >= st->LS_state.len);
// Pipeline0UBO* ubo = (Pipeline0UBO*)MargaretSubbuf_get_mapped(&st->alice->pipeline0_ubo.staging);
// assert(pipeline_0_ubo_point_light_max_count >= st->LS_state.len);
if (st->LS_state.len > alice->pipeline0_light_conf.point_lights.count) {
Alice_set_point_light_count(alice, st->LS_state.len);
}
if (st->LS_state.len + st->bullets_stuck_on_ROA.len > st->LS_mesh->el.instance_attr.count) {
AliceShinyMeshHand_resize_instance_arr(alice, &st->LS_mesh->el, st->LS_state.len + st->bullets_stuck_on_ROA.len);
}
for (size_t i = 0; i < st->LS_state.len; i++) {
LightSourceState* ls = &st->LS_state.buf[i];
@ -263,11 +268,7 @@ void main_h_on_another_frame(void* data, float fl){
.color_on = ls->color,
.model_t = marie_translation_mat4(ls->pos),
});
ubo->point_light_arr[i] = (Pipeline0PointLight){
.pos = ls->pos, .color = vec3_mul_scal(ls->color, 21)};
}
if (st->LS_state.len + st->bullets_stuck_on_ROA.len > st->LS_mesh->el.instance_attr.count) {
AliceShinyMeshHand_resize_instance_arr(alice, &st->LS_mesh->el, st->LS_state.len + st->bullets_stuck_on_ROA.len);
Alice_set_point_light(alice, i, (Pipeline0PointLight){.pos = ls->pos, .color = vec3_mul_scal(ls->color, 21)});
}
for (size_t i = 0; i < st->bullets_stuck_on_ROA.len; i++) {
@ -330,12 +331,13 @@ void run_app(){
VecU8 ROA_mesh_path = vcstr("./gen/l2/models/log_10_2_6.AliceGenericMesh");
st.ROA_topology = alice_expect_read_generic_mesh_from_file(ROA_mesh_path);
st.ROA_diffuse_tex_slot = Alice_load_r8g8b8a8_texture(alice, vcstr("./src/l3/textures/log_10_2_6_diffuse.png"));
st.ROA_normal_tex_slot = Alice_load_r8g8b8a8_texture(alice, vcstr("./gen/l2/textures/log_10_2_6_NORMAL.png"));
st.ROA_specular_tex_slot = Alice_load_r8_texture(alice, vcstr("./src/l3/textures/log_10_2_6_specular.png"));
st.ROA_mesh = Alice_add_generic_mesh(st.alice, &st.ROA_topology,
(AliceGenericMeshTexturePaths){
.diffuse_texture_path = vcstr("./src/l3/textures/log_10_2_6_diffuse.png"),
.normal_texture_path = vcstr("./gen/l2/textures/log_10_2_6_NORMAL.png"),
.specular_texture_path = vcstr("./src/l3/textures/log_10_2_6_specular.png")
});
st.ROA_diffuse_tex_slot, st.ROA_normal_tex_slot, st.ROA_specular_tex_slot);
AliceGenericMeshHand_resize_instance_arr(st.alice, &st.ROA_mesh->el, 1);
const float gamma_l_c = 4.f / 3 / M_PIf;
st.ROA_state = (RigidBodyState){
@ -385,10 +387,6 @@ void run_app(){
}
AliceShinyMeshHand_resize_instance_arr(st.alice, &st.LS_mesh->el, st.LS_state.len);
Pipeline0UBO* ubo = (Pipeline0UBO*)MargaretSubbuf_get_mapped(&st.alice->pipeline0_ubo.staging);
assert(pipeline_0_ubo_point_light_max_count >= st.LS_state.len);
ubo->point_light_count = (int)st.LS_state.len;
ubo->spotlight_count = 0;
st.bullet_config = (BulletConfig){.mass = 0.01f, .velocity = 1000};
st.misses_count = 0;

View File

@ -1,4 +1,6 @@
#version 450
#version 460
#extension GL_EXT_nonuniform_qualifier : require
layout(location = 0) in vec3 tang_norm;
layout(location = 1) in vec3 tang_U;
@ -6,34 +8,36 @@ layout(location = 2) in vec3 tang_V;
layout(location = 3) in vec2 tex;
layout(location = 4) in vec3 pos;
/* Right now all in set 0 */
layout(location = 0) out vec4 fin_color;
/* Yes, even these guys */
layout(binding = 1) uniform sampler2D color_tex;
layout(binding = 2) uniform sampler2D normal_map;
layout(binding = 3) uniform sampler2D specular_map;
layout(set = 1, binding = 0) uniform sampler2D textures[];
layout(push_constant, std430) uniform pc {
layout(offset = 64) vec3 camera_pos;
uint diffuse_tex_slot;
uint normal_tex_slot;
uint specular_tex_slot;
};
struct Pipeline0PointLight {
struct PointLight {
vec3 pos;
vec3 color;
};
struct Pipeline0Spotlight {
struct Spotlight {
vec3 pos;
vec3 dir;
vec3 color;
float range;
};
layout(std140, binding = 0) uniform Pipeline0UBO {
layout (std140, set = 0, binding = 0) uniform NumUBO {
int point_light_count;
int spotlight_count;
Pipeline0PointLight point_light_arr[120];
Pipeline0Spotlight spotlight_arr [20];
};
layout (std430, set = 0, binding = 1) readonly buffer PointLightsArray {
PointLight point_light_arr[];
};
float get_intensity(float dist){
@ -41,13 +45,16 @@ float get_intensity(float dist){
}
void main(){
vec3 compressed_normal = texture(normal_map, tex).xyz;
vec3 compressed_normal = texture(textures[nonuniformEXT(normal_tex_slot)], tex).xyz;
vec3 natural_color = texture(textures[nonuniformEXT(diffuse_tex_slot)], tex).xyz;
float specular_c = texture(textures[nonuniformEXT(specular_tex_slot)], tex).x;
vec3 correct_norm_on_tang = compressed_normal * 2 - 1;
vec3 norm = normalize(mat3(tang_U, tang_norm, tang_V) * correct_norm_on_tang);
vec3 diffuse_illumination = vec3(0);
vec3 specular_illumination = vec3(0);
for (int i = 0; i < point_light_count; i++) {
Pipeline0PointLight lamp = point_light_arr[i];
PointLight lamp = point_light_arr[i];
vec3 to_light = -pos + lamp.pos;
float dist = length(to_light);
vec3 U = to_light / dist;
@ -58,11 +65,10 @@ void main(){
vec3 B = to_cam / dist_to_cam;
specular_illumination += get_intensity(dist) * pow(max(0, dot(A, B)), 32) * lamp.color;
}
for (int i = 0; i < spotlight_count; i++) {
Pipeline0Spotlight lamp = spotlight_arr[i];
}
vec3 natural_color = texture(color_tex, tex).xyz;
float specular_c = texture(specular_map, tex).x;
//for (int i = 0; i < spotlight_count; i++) {
// Pipeline0Spotlight lamp = spotlight_arr[i];
//}
vec3 color = natural_color * diffuse_illumination + specular_c * specular_illumination;
fin_color = vec4(color, 1);
}

View File

@ -11,23 +11,25 @@ layout(push_constant, std430) uniform pc {
layout(offset = 64) vec3 camera_pos;
};
struct Pipeline0PointLight {
struct PointLight {
vec3 pos;
vec3 color;
};
struct Pipeline0Spotlight {
struct Spotlight {
vec3 pos;
vec3 dir;
vec3 color;
float range;
};
layout(std140, binding = 0) uniform Pipeline0UBO {
layout (std140, set = 0, binding = 0) uniform NumUBO {
int point_light_count;
int spotlight_count;
Pipeline0PointLight point_light_arr[120];
Pipeline0Spotlight spotlight_arr [20];
};
layout (std430, set = 0, binding = 1) readonly buffer PointLightsArray {
PointLight point_light_arr[];
};
float get_intensity(float dist){
@ -38,7 +40,7 @@ void main(){
vec3 diffuse_illumination = vec3(0);
vec3 specular_illumination = vec3(0);
for (int i = 0; i < point_light_count; i++) {
Pipeline0PointLight lamp = point_light_arr[i];
PointLight lamp = point_light_arr[i];
vec3 to_light = -pos + lamp.pos;
float dist = length(to_light);
vec3 U = to_light / dist;
@ -47,9 +49,9 @@ void main(){
vec3 B = normalize(-pos+camera_pos);
// specular_illumination += get_intensity(dist) * pow(max(0, dot(A, B)), 256) * lamp.color;
}
for (int i = 0; i < spotlight_count; i++) {
Pipeline0Spotlight lamp = spotlight_arr[i];
}
// for (int i = 0; i < spotlight_count; i++) {
// Spotlight lamp = spotlight_arr[i];
// }
vec3 color = color_off * diffuse_illumination + (0.05 + 0.45 * length(color_off)) * specular_illumination + color_on;
fin_color = vec4(color, 1);
}

View File

@ -13,5 +13,4 @@ layout (binding=0) uniform sampler2D images[];
void main(){
    /* Sample the red channel of the texture selected by tex_ind (non-uniform
     * index into the descriptor array) and use it to scale the quad color's
     * alpha; RGB passes through unchanged. */
    float I = texture(images[nonuniformEXT(tex_ind)], tex_cord).r;
    fin_color = vec4(color.rgb, color.a * I);
    //fin_color = vec4(0, 0, 0, 1);
}

View File

@ -38,4 +38,5 @@ void main(){
vsout_tex_ind = tex_ind;
vec2 pos = all_v_pos[gl_VertexIndex % 6];
gl_Position = vec4(deng(width, pos.x), deng(height, pos.y), 0, 1);
}