Compare commits

...

18 Commits

Author SHA1 Message Date
6a0681b42e Removed more unrelated stuff that I don't want to touch anymore 2025-12-23 23:02:09 +03:00
1328d194be Removed unrelated old stuff that is completely unrelated now 2025-12-23 22:51:36 +03:00
19f92d9207 I wrote Lucy. It works. I can render text. Example is written in r0. Too bad I am running out of time. Exam is the day after tomorrow :( 2025-12-23 22:47:15 +03:00
516fab6ff6 MMA is good enough 2025-12-19 18:13:40 +03:00
d4f9ed214f Added cool shit. Now r0 is awesome 2025-12-19 18:06:39 +03:00
0f59830bdf 50% through making Daria family-friendly 2025-12-19 03:44:49 +03:00
5615594762 Added this stupid normal vector inference bullshit from learnopeng.com. I checked, it's garbage. Will probably change it. But now I have to write GenericModel normal vector inference. And it's, like, 200x harder. And normal texture generation is the type of hell you don't just drop on somebody. I dropped it on myself anyway... 2025-12-19 01:21:01 +03:00
8e3a306459 I FINALLY REWROTE r0 TO USE NORMAL STUFF!! YEEEES. AFTER 100 years. But now session is back on my ass, again 2025-12-14 04:42:35 +03:00
aadc346f43 What I have been doing for two months could actually have been done in one day effortlessly 2025-12-11 02:12:21 +03:00
6f418827dc I just realized I wasted 100 days of my life, of the most important period of my life, on useless shit that served no purpose. I just deleted MargaretMemAllocator 2025-12-10 21:32:16 +03:00
202e11ab56 Saving progress. I just realized that my current MargaretMemAllocator is still very very bloated. I am about to change that. Yep, another rewrite 2025-12-08 23:31:05 +03:00
f80dc0ded0 Removed useless crap that I spent a lot of time on, but didn't actually plan to use. src/l_15/anne/marie/clipping.h says: Please, forget that I had ever lived 2025-12-08 19:59:54 +03:00
d6edf7566b Creating VkPipelines is not much easier 2025-12-08 19:57:46 +03:00
9be2b24c9d AAA, I hate this AAAAAAAA 2025-12-05 03:20:31 +03:00
cadde8714b Fixed r0.c problems. Finally, now r0.c runs on MargaretMemAllocator!!!!! And everything is working!!! 2025-12-02 20:09:29 +03:00
fac2fde22b Finished rewriting r0 to MargaretMemAllocator. Changed MMA interface during refactoring. It compiles. Finally. It took only 5 weeks to write a compiling version. Unfortunately, it crashed before even starting. Today will be a long day 2025-12-02 04:52:06 +03:00
dc67475e7a Saving progress. I am getting insane just by looking at this crap. November is over. Engine is still not done. I can't take this anymore 2025-12-01 01:48:46 +03:00
438015b842 Saving progress. Rewrote r0_scene.h, r0_assets.h, vk_ctx struct according to the new design. But then I got distracted from making progress by babysitting duties. Anyway. I need to test something really quick. Going back to master 2025-11-30 03:54:02 +03:00
53 changed files with 3596 additions and 5120 deletions

View File

@ -27,22 +27,24 @@ add_compile_definitions(_POSIX_C_SOURCE=200112L)
add_compile_definitions(_GNU_SOURCE)
add_compile_options(-fno-trapping-math)
#add_executable(codegen_l1 src/l1/anne/codegen.c)
#target_compile_definitions(codegen_l1
# PRIVATE PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8)
add_executable(codegen_l1 src/l1/anne/codegen.c)
target_compile_definitions(codegen_l1
PRIVATE PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8)
#add_executable(0_test src/l1_4/tests/t0.c)
#add_executable(1_test src/l1_4/tests/t1.c)
add_executable(3_test src/l1_4/tests/t3.c)
target_link_libraries(3_test -lm)
#
#add_executable(l1_4_t2 src/l1_4/tests/t2.c)
add_executable(codegen_l1_5 src/l1_5/anne/codegen.c)
#add_executable(0_render_test src/l2/tests/r0/r0.c gen/l_wl_protocols/xdg-shell-private.c)
#target_link_libraries(0_render_test -lvulkan -lwayland-client -lm -lxkbcommon -lpng)
add_executable(0_render_test src/l2/tests/r0/r0.c gen/l_wl_protocols/xdg-shell-private.c)
target_link_libraries(0_render_test -lvulkan -lwayland-client -lm -lxkbcommon -lpng -lfreetype)
#add_executable(0r_tex_init_prep src/l2/tests/r0/r0_tex_init_prep.c)
#target_link_libraries(0r_tex_init_prep -lm -lpng)
add_executable(0r_tex_init_prep src/l2/tests/r0/r0_tex_init_prep.c)
target_link_libraries(0r_tex_init_prep -lm -lpng)
#add_executable(1_render_test src/l2/tests/r1/r1.c gen/l_wl_protocols/xdg-shell-private.c)
#target_link_libraries(1_render_test -lwayland-client -lrt -lm -lxkbcommon)
@ -56,7 +58,7 @@ add_executable(codegen_l1_5 src/l1_5/anne/codegen.c)
#add_executable(l2t0_2 src/l2/tests/data_structures/t0_2.c) // todo: I will get back
add_executable(l2t0 src/l2/tests/data_structures/t0.c)
add_executable(l2t0_3 src/l2/tests/data_structures/t0_3.c)
add_executable(l2t2 src/l2/tests/data_structures/t2.c)
#add_executable(l2t2 src/l2/tests/data_structures/t2.c)
#add_executable(l2t0 src/l2/tests/data_structures/t0.c)
#add_executable(l2t1 src/l2/tests/data_structures/t1.c)

View File

@ -1,13 +1,20 @@
find_headers = $(shell find src/$(1) -type f -name '*.h')
find_headers = $(shell find src/$(1) -type f -name '*.h' )
find_assets = $(shell find src/$(1) -type f \( -name "*.vert" -o -name "*.frag" -o -name "*.geom" -o -name "*.comp" \) )
HEADERS_src_l1 := $(call find_headers,l1)
HEADERS_gen_l1 := $(HEADERS_src_l1) gen/l1/dorothy.txt
#HEADERS_gen_l1 := $(HEADERS_src_l1) gen/l1/dorothy.txt
HEADERS_gen_l1 := gen/l1/dorothy.txt
HEADERS_src_l1_5 = $(HEADERS_gen_l1) $(call find_headers,l1_5)
HEADERS_gen_l1_5 := $(HEADERS_src_l1_5) gen/l1_5/dorothy.txt
#HEADERS_gen_l1_5 := $(HEADERS_src_l1_5) gen/l1_5/dorothy.txt
HEADERS_gen_l1_5 := gen/l1_5/dorothy.txt
ASSETS_src_l_adele = $(call find_assets,l_adele)
ASSETS_gen_l_adele = gen/l_adele/dorothy.txt
HEADERS_src_l2 := $(HEADERS_gen_l1_5) $(call find_headers,l2)
HEADERS_gen_l2 := $(HEADERS_src_l2) gen/l2/dorothy.txt
#HEADERS_gen_l2 := $(HEADERS_src_l2) gen/l2/dorothy.txt
HEADERS_gen_l2 := gen/l2/dorothy.txt
cflags := -Wall -Wextra -Werror=implicit-function-declaration -Werror=return-type -Wno-unused-parameter \
--std=c99 -g -ggdb -O0 \
@ -17,6 +24,9 @@ cc := gcc
wl_protocols := $(shell pkg-config --variable=pkgdatadir wayland-protocols)
libpipewire_flags := $(shell pkg-config --cflags --libs libpipewire-0.3)
xdg_shell_private := gen/l_wl_protocols/xdg-shell-private.c
l_wl_protocols := gen/l_wl_protocols/xdg-shell-client.h $(xdg_shell_private)
out/l1/codegen: src/l1/anne/codegen.c $(HEADERS_src_l1)
mkdir -p out/l1
$(cc) $(cflags) -D PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8 -o $@ $<
@ -50,10 +60,23 @@ gen/l_wl_protocols/xdg-shell-private.c: $(wl_protocols)/stable/xdg-shell/xdg-she
mkdir -p gen/l_wl_protocols
wayland-scanner private-code $< $@
xdg_shell_private := gen/l_wl_protocols/xdg-shell-private.c
l_wl_protocols := gen/l_wl_protocols/xdg-shell-client.h $(xdg_shell_private)
.PHONY: gen/l_wl_protocols
gen/l_wl_protocols : $(l_wl_protocols)
compile_vert_shader = glslc -o gen/l_adele/$(1)/vert.spv src/l_adele/$(1)/$(1).vert
compile_frag_shader = glslc -o gen/l_adele/$(1)/frag.spv src/l_adele/$(1)/$(1).frag
define compile_shader
mkdir -p gen/l_adele/$(1)
$(call compile_vert_shader,$(1))
$(call compile_frag_shader,$(1))
endef
gen/l_adele/dorothy.txt: $(ASSETS_src_l_adele)
$(call compile_shader,lucy)
touch gen/l_adele/dorothy.txt
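# [Illustrative expansion, not part of the diff] In the dorothy.txt recipe above, $(call compile_shader,lucy)
# expands to:
#   mkdir -p gen/l_adele/lucy
#   glslc -o gen/l_adele/lucy/vert.spv src/l_adele/lucy/lucy.vert
#   glslc -o gen/l_adele/lucy/frag.spv src/l_adele/lucy/lucy.frag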
out/l2/t0: src/l2/tests/data_structures/t0.c $(HEADERS_gen_l1_5)
mkdir -p out/l2
$(cc) $(cflags) -o $@ $<
@ -105,15 +128,6 @@ out/l2/r3: src/l2/tests/r3/r3.c $(HEADERS_src_l2) $(l_wl_protocols)
run_r3: out/l2/r3
./out/l2/r3
out/SICK_JOKE_H.c: src/l2/tests/r_alg/H.c $(HEADERS_gen_l1_5)
python src/l1/sobiralka.py $< $@
out/SICK_JOKE_I.c: src/l2/tests/r_alg/I.c $(HEADERS_gen_l1_5)
python src/l1/sobiralka.py $< $@
out/SICK_JOKE_J.c: src/l2/tests/r_alg/J.c $(HEADERS_gen_l1_5)
python src/l1/sobiralka.py $< $@
.PHONY: clean
clean:

View File

@ -12,6 +12,7 @@
#include "liza.h"
#include "embassy_l1_5.h"
#include "margaret/png_pixel_masses.h"
#include "lucy.h"
int main() {
mkdir_nofail("l1");
@ -28,6 +29,7 @@ int main() {
mkdir_nofail("l1/margaret");
generate_margaret_eve_for_vulkan_utils(); /* margaret misc */
generate_margaret_png_pixel_masses_header();
generate_l1_lucy_headers();
finish_layer(cstr("l1"));
return 0;
}

View File

@ -337,6 +337,81 @@ NODISCARD VecU8 generate_square_xmatn_methods(SpanU8 xmat, SpanU8 xvec, SpanU8 m
return res;
}
NODISCARD VecU8 generate_xmat_inverse_methods(SpanU8 xmat, SpanU8 xvec, SpanU8 memb){
VecU8 res = VecU8_fmt("%s4 %s4_inverse(%s4 A) {\n", xmat, xmat, xmat);
VecU8_append_vec(&res, VecU8_fmt(SPACE "%s m2[6][6] = {\n", memb));
SpanU8 first_of_pair[6] = {cstr("x"), cstr("x"), cstr("x"), cstr("y"), cstr("y"), cstr("z")};
SpanU8 second_of_pair[6] = {cstr("y"), cstr("z"), cstr("w"), cstr("z"), cstr("w"), cstr("w")};
for (int w_col = 0; w_col < 6; w_col++) {
VecU8_append_span(&res, cstr(SPACE SPACE "{ "));
for (int w_row = 0; w_row < 6; w_row++) {
if (w_row)
VecU8_append_span(&res, cstr(", "));
/* first first = A second first = B
* first second = C second second = D
* A * D - B * C */
VecU8_append_vec(&res, VecU8_fmt("A.%s.%s * A.%s.%s - A.%s.%s * A.%s.%s",
first_of_pair[w_col], first_of_pair[w_row], second_of_pair[w_col], second_of_pair[w_row],
second_of_pair[w_col], first_of_pair[w_row], first_of_pair[w_col], second_of_pair[w_row]
));
}
VecU8_append_span(&res, cstr(" },\n"));
}
VecU8_append_span(&res, cstr(SPACE "};\n"));
U64 a0_contr[4] = {5, 5, 4, 3};
U64 a1_contr[4] = {4, 2, 2, 1};
U64 a2_contr[4] = {3, 1, 0, 0};
SpanU8 a0[4] = {cstr("y"), cstr("x"), cstr("x"), cstr("x")};
SpanU8 a1[4] = {cstr("z"), cstr("z"), cstr("y"), cstr("y")};
SpanU8 a2[4] = {cstr("w"), cstr("w"), cstr("w"), cstr("z")};
VecU8_append_vec(&res, VecU8_fmt(SPACE "%s m3[4][4] = {\n", memb));
for (int no_col = 0; no_col < 4; no_col++) {
SpanU8 walking_column = a0[no_col];
U64 minor_col_pair = a0_contr[no_col];
VecU8_append_span(&res, cstr(SPACE SPACE "{ "));
for (int no_row = 0; no_row < 4; no_row++) {
if (no_row)
VecU8_append_span(&res, cstr(", \n" SPACE SPACE));
VecU8_append_vec(&res, VecU8_fmt(
"A.%s.%s * m2[%u][%u] - A.%s.%s * m2[%u][%u] + A.%s.%s * m2[%u][%u]",
walking_column, a0[no_row], minor_col_pair, a0_contr[no_row],
walking_column, a1[no_row], minor_col_pair, a1_contr[no_row],
walking_column, a2[no_row], minor_col_pair, a2_contr[no_row]));
}
VecU8_append_span(&res, cstr(" },\n"));
}
VecU8_append_span(&res, cstr(SPACE "};\n"));
VecU8_append_vec(&res, VecU8_fmt(
SPACE "%s d = 1 / (A.x.x * m3[0][0] - A.x.y * m3[0][1] + A.x.z * m3[0][2] - A.x.w * m3[0][3]);\n"
SPACE "return (mat4){ "
, memb));
for (U64 i = 0; i < 4; i++) {
if (i)
VecU8_append_span(&res, cstr(",\n" SPACE SPACE ));
VecU8_append_vec(&res, VecU8_fmt(".%s={ ", vec_field_name((int)i)));
for (U64 j = 0; j < 4; j++) {
if (j)
VecU8_append_span(&res, cstr(", "));
VecU8_append_vec(&res, VecU8_fmt("%sm3[%u][%u] * d",
(i + j) % 2 ? cstr("-") : cstr(""), j, i));
}
VecU8_append_span(&res, cstr(" }"));
}
VecU8_append_span(&res, cstr(" };\n}\n\n"));
VecU8_append_vec(&res, VecU8_fmt(
"%s2 %s2_inverse(%s2 A) {\n" /* xmat, xmat, xmat */
SPACE "%s d = 1 / (A.x.x * A.y.y - A.y.x * A.x.y);\n" /* memb */
SPACE "return (%s2){ .x = { A.y.y * d, -A.x.y * d}, .y = {-A.y.x * d, A.x.x * d}};\n" /* xmat */
"}\n\n", xmat, xmat, xmat, memb, xmat));
// VecU8_append_vec(&res, VecU8_fmt( "%s3 %s3_inverse(%s3 A) {\n", xmat, xmat, xmat));
// VecU8_append_vec(&res, VecU8_fmt(SPACE "%s d = 1 / ("));
// VecU8_append_span(&res, cstr("}\n"));
return res;
}
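/* [Illustrative sketch, not part of the diff] With xmat = "mat" and memb = "float", the 2x2 branch above
 * expands to:
 *
 *   mat2 mat2_inverse(mat2 A) {
 *       float d = 1 / (A.x.x * A.y.y - A.y.x * A.x.y);
 *       return (mat2){ .x = { A.y.y * d, -A.x.y * d}, .y = {-A.y.x * d, A.x.x * d}};
 *   }
 *
 * The 4x4 branch follows the classical adjugate route: m2[][] caches the 2x2 minors over row/column pairs,
 * m3[][] the 3x3 cofactors, d is 1/det(A) expanded along the first column, and the returned matrix is the
 * transposed cofactor matrix scaled by d (hence the swapped [j][i] indexing and the (i + j) sign flip in
 * the final loop). */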
NODISCARD VecU8 generate_xmatnm_method_mul_xmatkn(SpanU8 xmat, int n, int m, int k) {
VecU8 g_xmatkm = codegen_name_xmatnm(xmat, k, m);
VecU8 g_xmatnm = codegen_name_xmatnm(xmat, n, m);
@ -414,6 +489,7 @@ NODISCARD VecU8 generate_xmat234x234_structs_methods(SpanU8 xmat, SpanU8 xvec, S
}
}
}
VecU8_append_vec(&res, generate_xmat_inverse_methods(xmat, xvec, memb));
return res;
}
@ -428,7 +504,7 @@ void generate_geom_header() {
VecU8_append_vec(&res.result, generate_xvec234_structs_and_cool_methods(cstr("vec"), cstr("float"), cstr("sqrtf")));
VecU8_append_vec(&res.result, generate_xvec234_structs_and_cool_methods(cstr("dvec"), cstr("double"), cstr("sqrt")));
VecU8_append_vec(&res.result, generate_xmat234x234_structs_methods(cstr("mat"), cstr("vec"), cstr("float"), sizeof(float)));
VecU8_append_vec(&res.result, generate_xmat234x234_structs_methods(cstr("dmat"), cstr("dvec"), cstr("double"), sizeof(double)));
/* VecU8_append_vec(&res.result, generate_xmat234x234_structs_methods(cstr("dmat"), cstr("dvec"), cstr("double"), sizeof(double))); */
finish_header(res);
}

24
src/l1/anne/lucy.h Normal file
View File

@ -0,0 +1,24 @@
#ifndef prototype1_src_l1_anne_lucy_h
#define prototype1_src_l1_anne_lucy_h
#include "../codegen/util_template_inst.h"
#include "../codegen/list_template_inst.h"
void generate_l1_lucy_headers(){
SpanU8 l = cstr("l1"), ns = cstr("lucy");
mkdir_nofail("l1/eve/lucy");
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){
.T = cstr("LucyImage"), .t_primitive = true}, true);
generate_eve_span_company_for_primitive(l, ns, cstr("KVPU32ToLucyStoredGlyph"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("KVPU32ToLucyFaceFixedSize"), true, false);
generate_eve_span_company_for_non_primitive_non_clonable(l, ns, cstr("LucyGlyphCachingRequest"), true, true);
/* Vector of iterators */
generate_eve_span_company_for_primitive(l, ns, cstr("RefListNodeLucyImage"), true, false);
generate_util_templ_inst_eve_header(l, ns, (util_templates_instantiation_options){
.T = cstr("LucyPositionedStagingGlyph"), .vec = true, .sort = true,
});
}
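/* [Added note] The instantiations above produce the gen/l1/eve/lucy/ headers that src/l2/lucy/glyph_cache.h
 * includes later in this diff (ListLucyImage.h, VecRefListNodeLucyImage.h, VecKVPU32ToLucyStoredGlyph.h,
 * VecAndSpan_LucyGlyphCachingRequest.h, VecLucyPositionedStagingGlyph.h). */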
#endif

View File

@ -9,43 +9,23 @@ void generate_margaret_eve_for_vulkan_utils() {
SpanU8 ns = cstr("margaret");
mkdir_nofail("l1/eve/margaret");
generate_util_templ_inst_eve_header(l, ns, (util_templates_instantiation_options){
.T = cstr("MargaretScoredPhysicalDevice"), .t_primitive = true, .vec = true, .span = true,
.mut_span = true, .collab_vec_span = true, .span_sort = true
.T = cstr("MargaretScoredPhysicalDevice"), .t_primitive = true, .vec = true, .sort = true
});
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretCommandForImageCopying"), true, true);
/* Slated for removal */
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretBufferInMemoryInfo"), true, false);
generate_util_templ_inst_eve_header(l, ns, (util_templates_instantiation_options){
.T = cstr("PtrMargaretBufferInMemoryInfo"), .t_primitive = true, .vec = true, .span = true, .mut_span = true,
.collab_vec_span = true
});
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretImageInMemoryInfo"), true, false);
generate_util_templ_inst_eve_header(l, ns, (util_templates_instantiation_options){
.T = cstr("PtrMargaretImageInMemoryInfo"), .t_primitive = true, .vec = true, .span = true, .mut_span = true,
.collab_vec_span = true
});
/* For l2/margaret/vulkan_memory_claire.h */
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){.T = cstr("MargaretMemAllocatorOneBlock")}, false);
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMemAllocatorRequestFreeOccupant"), true, false);
generate_util_templ_inst_eve_header(l, ns, (util_templates_instantiation_options){
.T = cstr("MargaretMemAllocatorRequestResizeBuffer"), .t_primitive = true,
.vec_extended = true /* We need unordered_pop to do some tomfoolery */});
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMemAllocatorRequestAllocBuffer"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMemAllocatorRequestAllocImage"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretFreeMemSegment"), true, false);
/* For l2/margaret/{ vulkan_img_claire.h , vulkan_buffer_claire.h } */
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretIAFreeSegment"), true, false);
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
.T = cstr("MargaretFreeMemSegment"), .t_primitive = true});
.T = cstr("MargaretIAFreeSegment"), .t_primitive = true});
// todo: add to BufRBTree instantiator option to create necessary shit by itself
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretBAFreeSegment"), true, false);
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
.T = cstr("MargaretBAFreeSegment"), .t_primitive = true});
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
.T = cstr("BufRBTreeByLenRespAlign_SetMargaretFreeMemSegment")});
.T = cstr("BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment")});
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMABufferExpansionRecord"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMANewMovedBufRecord"), true, false);
generate_eve_span_company_for_non_primitive_non_clonable(l, ns, cstr("MargaretImgAllocatorOneBlock"), true, false);
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){.T = cstr("MargaretBufAllocatorOneBlock")}, true);
}

View File

@ -100,7 +100,7 @@ NODISCARD VecU8 generate_texture_data_struct_and_necc_methods(SpanU8 tex, SpanU8
"}\n\n", resoftex, tex, resoftex, resoftex, memb, resoftex, tex, tex, resoftex));
/* Method _read_from_file */
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_read_from_file(const char* path) {\n"
"%s %s_read_from_file(SpanU8 path) {\n"
SPACE "VecU8 data = read_whole_file_or_abort(path);\n"
SPACE "%s res = %s_from_bitmap_text(VecU8_to_span(&data));\n"
SPACE "if (res.variant != Result_Ok) {\n"

View File

@ -8,22 +8,19 @@ void generate_headers_for_r0_r1_r2_r3() {
mkdir_nofail("l1/eve/r0");
{ /* Needed in r0_assets.h */
SpanU8 ns = cstr("r0");
generate_eve_span_company_for_primitive(l, ns, cstr("GenericMeshVertex"), true, true);
generate_eve_span_company_for_primitive(l, ns, cstr("GenericMeshVertexInc"), true, true);
generate_eve_span_company_for_non_primitive_clonable(l, ns, cstr("GenericMeshInSceneTemplate"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("GenericMeshInstance"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyMeshVertex"), true, true);
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyMeshInstance"), true, false);
generate_eve_span_company_for_non_primitive_clonable(l, ns, cstr("ShinyMeshInSceneTemplate"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("Pipeline0Spotlight"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("Pipeline0PointLight"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyMeshVertexInc"), true, true);
generate_eve_span_company_for_non_primitive_clonable(l, ns, cstr("ShinyMeshTopology"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("Wimbzle"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("Nibzle"), true, false);
/* r0_scene.h */
generate_eve_span_company_for_non_primitive_non_clonable(l, ns, cstr("UsedGenericModelOnScene"), true, false);
generate_eve_span_company_for_non_primitive_non_clonable(l, ns, cstr("UsedShinyModelOnScene"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("GenericModelOnSceneMem"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyModelOnSceneMem"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("ObjectInfo"), true, false);
/* r0 */
generate_eve_span_company_for_primitive(l, ns, cstr("GenericModelTopAndTexInMemoryInfo"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyModelTopInMemoryInfo"), true, false);
generate_eve_span_company_for_primitive(l, ns, cstr("GenericModelTexVulkPointers"), true, false);
}
mkdir_nofail("l1/eve/r2");
{ /* r2 */

View File

@ -48,13 +48,25 @@ void generate_util_temp_very_base_headers() {
VecU8_drop(SpanT);
VecU8_drop(dependency);
}
generate_guarded_span_company_for_primitive(cstr("l1"), cstr(""),
cstr("CSTR"), cstr(""), true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("CSTR"), cstr(""), true, false);
generate_ResultType_templ_inst_guarded_header(cstr("l1"), cstr(""),
generate_ResultType_templ_inst_guarded_header(l, ns,
cstr(""), cstr("VecU8"), cstr("#include \"VecAndSpan_U8.h\""), true, false);
generate_ResultType_templ_inst_guarded_header(cstr("l1"), cstr(""),
generate_ResultType_templ_inst_guarded_header(l, ns,
cstr(""), cstr("SpanU8"), cstr("#include \"VecAndSpan_U8.h\""), true, true);
generate_guarded_span_company_for_primitive(l, ns, cstr("U32Segment"),
cstr("#include \"../../src/l1/core/uint_segments.h\""), true, true);
/* Not very basic but definitely very common */
generate_guarded_span_company_for_non_primitive_clonable(l, ns, cstr("TextureDataR8G8B8A8"),
cstr("#include \"../../gen/l1/pixel_masses.h\"\n"), true, false);
generate_guarded_span_company_for_non_primitive_clonable(l, ns, cstr("TextureDataR8G8B8"),
cstr("#include \"../../gen/l1/pixel_masses.h\"\n"), true, false);
generate_guarded_span_company_for_non_primitive_clonable(l, ns, cstr("TextureDataR8"),
cstr("#include \"../../gen/l1/pixel_masses.h\"\n"), true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("KVPU64ToU64"), cstr(""), true, false);
}
#endif

View File

@ -39,9 +39,9 @@ void generate_util_templ_inst_for_vulkan_headers() {
generate_guarded_span_company_for_primitive(l, ns, cstr("VkImageView"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkFramebuffer"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkSemaphore"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkDescriptorPoolSize"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkBufferCopy"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkImageMemoryBarrier"), vulkan_dep, true, false);
generate_guarded_span_company_for_primitive(l, ns, cstr("VkDescriptorImageInfo"), vulkan_dep, true, false);
}
#endif

View File

@ -44,7 +44,7 @@ NODISCARD VecU8 generate_List_template_instantiation(list_instantiation_op op, b
op.T, op.T, op.T, op.T, op.T,
op.t_primitive ? vcstr("") : VecU8_fmt(SPACE SPACE "%s_drop(cur->el);\n", op.T)));
VecU8_append_vec(&res, VecU8_fmt(
"void List%s_insert(List%s* self, %s el) {\n" /* op.T, op.T, op.T */
"ListNode%s* List%s_insert(List%s* self, %s el) {\n" /* op.T, op.T, op.T, op.T */
SPACE "ListNode%s* new_node = safe_malloc(sizeof(ListNode%s));\n" /* op.T, op.T */
SPACE "new_node->prev = NULL;\n"
SPACE "new_node->next = self->first;\n"
@ -52,7 +52,8 @@ NODISCARD VecU8 generate_List_template_instantiation(list_instantiation_op op, b
SPACE "if (self->first)\n"
SPACE SPACE "self->first->prev = new_node;\n"
SPACE "self->first = new_node;\n"
"}\n\n", op.T, op.T, op.T, op.T, op.T));
SPACE "return new_node;\n"
"}\n\n", op.T, op.T, op.T, op.T, op.T, op.T));
VecU8_append_vec(&res, VecU8_fmt(
"void List%s_insert_node(List%s* self, ListNode%s* new_node) {\n" /* op.T, op.T, op.T */
SPACE "new_node->prev = NULL;\n"
@ -91,7 +92,8 @@ NODISCARD VecU8 generate_List_template_instantiation(list_instantiation_op op, b
}
void generate_List_templ_inst_eve_header(SpanU8 layer, SpanU8 bonus_ns, list_instantiation_op op, bool gen_node_declaration) {
void generate_List_templ_inst_eve_header(
SpanU8 layer, SpanU8 bonus_ns, list_instantiation_op op, bool gen_node_declaration) {
generate_SOME_templ_inst_eve_header(layer, bonus_ns,
generate_List_template_instantiation(op, gen_node_declaration), VecU8_fmt("List%s", op.T));
}

View File

@ -260,9 +260,9 @@ void codegen_append_some_span_span_method(VecU8* res, SpanU8 SpanT) {
/* T must be sized. Option `add_sort` requires option `add_mutable` and method T_less
* add_mutable option generates MutSpanT.
* add_equal option generates an equal method. add_extended option generates extended methods
* add_sort option generates T_qcompare and MutSpanT_sort methods */
*/
NODISCARD VecU8 generate_SpanT_struct_and_methods(
SpanU8 T, bool integer, bool add_mutable, bool add_equal, bool add_extended, bool add_sort
SpanU8 T, bool integer, bool add_mutable, bool add_equal, bool add_extended
) {
VecU8 g_SpanT = VecU8_fmt("Span%s", T);
VecU8 g_MutSpanT = VecU8_fmt("MutSpan%s", T);
@ -293,31 +293,38 @@ NODISCARD VecU8 generate_SpanT_struct_and_methods(
codegen_append_some_span_span_method(&res, MutSpanT);
}
if (add_sort) {
assert(add_mutable);
VecU8_append_vec(&res, VecU8_fmt(
"int %s_qcompare(const void* a, const void* b) {\n"
SPACE "const %s* A = a;\n"
SPACE "const %s* B = b;\n", T, T, T));
if (integer) {
VecU8_append_span(&res, cstr(SPACE "return (int)(B < A) - (int)(A < B);\n"));
} else {
VecU8_append_vec(&res, VecU8_fmt(
SPACE "return (int)%s_less_%s(B, A) - (int)%s_less_%s(A, B);\n", T, T, T, T));
}
VecU8_append_vec(&res, VecU8_fmt(
"}\n\n"
"void %s_sort(%s self) {\n"
SPACE "qsort(self.data, self.len, sizeof(%s), %s_qcompare);\n"
"}\n\n", MutSpanT, MutSpanT, T, T));
}
VecU8_drop(g_MutSpanT);
VecU8_drop(g_SpanT);
return res;
}
// void codegen_append_vec_some_span_method(VecU8* res, SpanU8 mod, SpanU8 )
NODISCARD VecU8 generate_span_company_sort_methods(SpanU8 T, bool t_integer, bool mut_span, bool vec){
VecU8 res = VecU8_new();
VecU8_append_vec(&res, VecU8_fmt(
"int %s_qcompare(const void* a, const void* b) {\n" /* T */
SPACE "const %s* A = a;\n" /* T */
SPACE "const %s* B = b;\n" /* T */
SPACE "return %v;\n" /* we return stuff */
"}\n\n",
T, T, T, t_integer ? vcstr("(int)(*B < *A) - (int)(*A < *B)") :
VecU8_fmt("(int)%s_less_%s(B, A) - (int)%s_less_%s(A, B)", T, T, T, T)));
if (mut_span) {
VecU8_append_vec(&res, VecU8_fmt(
"void MutSpan%s_sort(MutSpan%s self) {\n"
SPACE "qsort(self.data, self.len, sizeof(%s), %s_qcompare);\n"
"}\n\n", T, T, T, T));
}
if (vec) {
VecU8_append_vec(&res, VecU8_fmt(
"void Vec%s_sort(Vec%s* self) {\n"
SPACE "qsort(self->buf, self->len, sizeof(%s), %s_qcompare);\n"
"}\n\n", T, T, T, T));
}
return res;
}
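/* [Illustrative sketch, not part of the diff] For an integer T such as U64 with mut_span and vec enabled,
 * the generator above emits roughly:
 *
 *   int U64_qcompare(const void* a, const void* b) {
 *       const U64* A = a;
 *       const U64* B = b;
 *       return (int)(*B < *A) - (int)(*A < *B);
 *   }
 *   void MutSpanU64_sort(MutSpanU64 self) {
 *       qsort(self.data, self.len, sizeof(U64), U64_qcompare);
 *   }
 *   void VecU64_sort(VecU64* self) {
 *       qsort(self->buf, self->len, sizeof(U64), U64_qcompare);
 *   }
 */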
/* T must be trivially movable. If !primitive, requires methods T_drop (implicitly) and, if clonable, requires T_clone */
NODISCARD VecU8 generate_SpanT_VecT_trivmove_collab(SpanU8 T, bool primitive, bool clonable, bool add_mutable, bool add_extended) {
@ -416,7 +423,7 @@ typedef struct {
bool span;
bool mut_span;
bool span_extended;
bool span_sort;
bool sort;
bool collab_vec_span;
bool collab_vec_span_extended;
} util_templates_instantiation_options;
@ -433,8 +440,6 @@ void util_templates_instantiation_options_fix(util_templates_instantiation_optio
op->vec = true;
if (op->span_extended)
op->span = true;
if (op->span_sort)
op->span = true;
if (op->mut_span)
op->span = true;
if (op->collab_vec_span_extended)
@ -467,7 +472,10 @@ NODISCARD VecU8 generate_util_templates_instantiation(util_templates_instantiati
VecU8_append_vec(&res, generate_VecT_new_of_size_method(op.T));
}
if (op.span) {
VecU8_append_vec(&res, generate_SpanT_struct_and_methods(op.T, op.t_integer, op.mut_span, false, op.span_extended, op.span_sort));
VecU8_append_vec(&res, generate_SpanT_struct_and_methods(op.T, op.t_integer, op.mut_span, false, op.span_extended));
}
if (op.sort) {
VecU8_append_vec(&res, generate_span_company_sort_methods(op.T, op.t_integer, op.mut_span, op.vec));
}
if (op.collab_vec_span) {
assert(op.vec && op.span);

View File

@ -135,7 +135,12 @@ void U64_stringification_into_buf(U64 x, VecU8* targ){
}
}
// todo: add %d (when I figure out how to do it)
/* %s - SpanU8
* %v - VecU8
* %u - U64
* %c - int (one byte character)
* %i - S64
*/
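/* [Illustrative usage, not part of the diff]
 *   SpanU8 ns = cstr("lucy");
 *   VecU8 line = VecU8_fmt("generated %s header no. %u%c", ns, (U64)3, '\n');
 *   ... use line, then VecU8_drop(line);
 */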
NODISCARD VecU8 VecU8_fmt(const char* fmt, ...) {
assert(fmt);
size_t k = 0;
@ -214,5 +219,37 @@ bool strings_in_spans_equal(SpanU8 a, SpanU8 b) {
return true;
}
/* 0 means error */
U32 SpanU8_decode_as_utf8(SpanU8* rem){
assert(rem->len > 0);
U8 first = rem->data[0];
rem->data++;
rem->len--;
if (!(first & 0b10000000))
return first;
uint8_t a = 0b11000000;
uint8_t b = 0b00100000;
for (int sz = 1; sz <= 3; sz++){
if ((first & (a | b)) == a) {
/* sz is the number of continuation bytes that follow the lead byte */
if (rem->len < (size_t)sz)
return 0;
U32 res = first & (b - 1);
for (int i = 0; i < sz; i++) {
U8 th = rem->data[0];
if ((th & 0b11000000) != 0b10000000)
return 0;
res <<= 6;
res |= (th & 0b00111111);
rem->data++;
}
rem->len -= sz;
return res;
}
a |= b;
b >>= 1;
}
return 0;
}
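/* [Illustrative usage, not part of the diff] The decoder consumes bytes from the span in place:
 *   U8 bytes[3] = { 0xC3, 0xA9, 'x' };    // UTF-8 for U+00E9, followed by ASCII 'x'
 *   SpanU8 rem = { .data = bytes, .len = 3 };
 *   U32 cp = SpanU8_decode_as_utf8(&rem); // cp == 0xE9; rem now points at 'x' and rem.len == 1
 * A return value of 0 indicates a malformed or truncated sequence. */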
#endif

View File

@ -8,6 +8,11 @@ typedef struct {
U64 len;
} U64Segment;
typedef struct{
U32 start;
U32 len;
} U32Segment;
U64 U64Segment_get_length_resp_alignment(U64Segment self, U8 alignment_exp) {
if (self.start & ((1ull << alignment_exp) - 1)) {
U64 pad_left = (1ull << alignment_exp) - (self.start & ((1ull << alignment_exp) - 1));

View File

@ -87,6 +87,11 @@ typedef struct {
U32 height;
} SizeOfRectangleU32;
typedef struct{
U64 key;
U64 value;
} KVPU64ToU64;
#define check(expr) if (!(expr)) { abortf("Assertion failed at %s : %d : " #expr "\n", __FILE__, __LINE__); }
#endif

View File

@ -28,11 +28,12 @@ typedef struct {
void Result_ok_or_int_drop(Result_ok_or_int obj) {}
NODISCARD VecU8 read_whole_file_or_abort(const char* filename) {
FILE* fp = fopen(filename, "rb");
if (!fp) {
abortf("Can't open file %s: %s\n", filename, strerror(errno));
}
NODISCARD VecU8 read_whole_file_or_abort(SpanU8 path) {
VecU8 filename = VecU8_fmt("%s%c", path, 0);
FILE* fp = fopen((const char*)filename.buf, "rb");
if (!fp)
abortf("Can't open file %s: %s\n", (const char*)filename.buf, strerror(errno));
VecU8_drop(filename);
if (fseek(fp, 0, SEEK_END) != 0) {
abortf("fseek: %s\n", strerror(errno));
}
@ -52,6 +53,12 @@ NODISCARD VecU8 read_whole_file_or_abort(const char* filename) {
return result;
}
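/* [Added note] read_file_by_path below takes ownership of `path`: the vector is dropped once its contents
 * have been read. */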
NODISCARD VecU8 read_file_by_path(VecU8 path){
VecU8 content = read_whole_file_or_abort(VecU8_to_span(&path));
VecU8_drop(path);
return content;
}
void write_whole_file_or_abort(const char* filename, SpanU8 content) {
FILE* fd = fopen(filename, "wb");
if (!fd) {

75
src/l1_4/tests/t3.c Normal file
View File

@ -0,0 +1,75 @@
#include "../../../gen/l1/geom.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
float random_float(float a, float b){
int r = rand();
return a + (b - a) * ((float)r / (float)RAND_MAX);
}
float random_float100(){
return random_float(-100, 100);
}
mat4 random_big_matrix(){
return mat4_new(random_float100(), random_float100(), random_float100(), random_float100(),
random_float100(), random_float100(), random_float100(), random_float100(),
random_float100(), random_float100(), random_float100(), random_float100(),
random_float100(), random_float100(), random_float100(), random_float100());
}
mat2 random_smol_matrix(){
return mat2_new(random_float100(), random_float100(), random_float100(), random_float100());
}
#define flPr "%02.05f"
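/* [Added note] The tests below multiply a random matrix by its computed inverse and print the product;
 * each printed matrix should be numerically close to the identity. */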
void test_mat4(mat4 A){
mat4 iA = mat4_inverse(A);
mat4 product = mat4_mul_mat4(iA, A);
printf(flPr " " flPr " " flPr " " flPr "\n"
flPr " " flPr " " flPr " " flPr "\n"
flPr " " flPr " " flPr " " flPr "\n"
flPr " " flPr " " flPr " " flPr "\n",
product.x.x, product.y.x, product.z.x, product.w.x,
product.x.y, product.y.y, product.z.y, product.w.y,
product.x.z, product.y.z, product.z.z, product.w.z,
product.x.w, product.y.w, product.z.w, product.w.w);
}
void test_mat2(mat2 A){
mat2 iA = mat2_inverse(A);
mat2 product = mat2_mul_mat2(iA, A);
printf(flPr " " flPr "\n"
flPr " " flPr "\n",
product.x.x, product.y.x,
product.x.y, product.y.y);
}
void test(){
mat2x3 A = (mat2x3){.x = {1, 2, 3}, .y = {4, 5, 6}};
mat3x2 At = mat2x3_transpose(A);
assert(At.x.x == 1);
assert(At.x.y == 4);
assert(At.y.x == 2);
assert(At.y.y == 5);
assert(At.z.x == 3);
assert(At.z.y == 6);
}
int main() {
test();
test_mat4(random_big_matrix());
test_mat4(random_big_matrix());
test_mat4(random_big_matrix());
test_mat4(random_big_matrix());
test_mat4(random_big_matrix());
test_mat4(random_big_matrix());
test_mat4(random_big_matrix());
test_mat2(random_smol_matrix());
test_mat2(random_smol_matrix());
test_mat2(random_smol_matrix());
test_mat2(random_smol_matrix());
}

View File

@ -1,17 +1,16 @@
#include "../../l1/system/fsmanip.h"
#include "marie/clipping.h"
#include "liza.h"
#include "l1_5_templ_very_base.h"
#include "margaret.h"
#include "lucy.h"
int main() {
mkdir_nofail("l1_5");
mkdir_nofail("l1_5/marie");
generate_marie_clipping_header();
generate_l1_5_liza_headers();
generate_l1_5_template_instantiation_for_base_types();
generate_l1_5_template_instantiations_for_margaret();
generate_l1_5_lucy_headers();
finish_layer(cstr("l1_5"));
return 0;
}

View File

@ -11,11 +11,15 @@ void generate_l1_5_template_instantiation_for_base_types(){
generate_buf_rbtree_Set_templ_inst_guarded_header(l, ns, cstr("#include \"../l1/VecAndSpan_S64.h\""),
(set_instantiation_op){.T = cstr("S64"), .t_integer = true});
// l1/core/int_primitives is included in l1_5/core/rb_tree_node.h, hence no additional dependencies needed
/* l1/core/utils.h is included in l1_5/core/rb_tree_node.h, hence no additional dependencies needed */
generate_rbtree_Set_templ_inst_guarded_header(l, ns, cstr(""), (set_instantiation_op){
.T = cstr("U64"), .t_integer = true }, true);
generate_rbtree_Set_templ_inst_guarded_header(l, ns, cstr(""), (set_instantiation_op){
.T = cstr("S64"), .t_integer = true }, true);
// todo: move vector declaration HERE
generate_buf_rbtree_Map_templ_inst_guarded_header(l, ns, cstr("#include \"../../gen/l1/VecKVPU64ToU64.h\"\n"),
(map_instantiation_op){.K = cstr("U64"), .k_integer = true, .V = cstr("U64"), .v_integer = true,});
}
#endif

16
src/l1_5/anne/lucy.h Normal file
View File

@ -0,0 +1,16 @@
#ifndef prototype1_src_l1_5_anne_lucy_h
#define prototype1_src_l1_5_anne_lucy_h
#include "../codegen/buff_rbtree_set_map_template_inst.h"
void generate_l1_5_lucy_headers(){
SpanU8 l = cstr("l1_5"), ns = cstr("lucy");
mkdir_nofail("l1_5/eve/lucy");
generate_buf_rbtree_Map_templ_inst_eve_header(l, ns, (map_instantiation_op){
.K = cstr("U32"), .k_integer = true, .V = cstr("LucyStoredGlyph"), .v_primitive = true});
generate_rbtree_Map_templ_inst_eve_header(l, ns, (map_instantiation_op){
.K = cstr("U32"), .k_integer = true, .V = cstr("LucyFaceFixedSize")}, true);
}
#endif

View File

@ -9,17 +9,20 @@ void generate_l1_5_template_instantiations_for_margaret(){
mkdir_nofail("l1_5/eve");
mkdir_nofail("l1_5/eve/margaret");
/* For MargaretMemAllocator */
/* For l2/margaret/{ vulkan_img_claire.h , vulkan_buffer_claire.h } */
generate_buf_rbtree_Set_templ_inst_eve_header(l, ns, (set_instantiation_op){
.T = cstr("MargaretFreeMemSegment"), .t_primitive = true,
.T = cstr("MargaretIAFreeSegment"), .t_primitive = true,
/* comparison takes additional U8 parameter */
.alternative_less = cstr("MargaretFreeMemSegment_less_resp_align"),
.alternative_less = cstr("MargaretIAFreeSegment_less_resp_align"),
.alternative_comp_set_name_embed = cstr("LenRespAlign"),
.guest_data_T = cstr("U8"),
});
generate_rbtree_Map_templ_inst_eve_header(l, ns, (map_instantiation_op){
.K = cstr("U64"), .k_integer = true, .V = cstr("MargaretMAOccupation"), .v_primitive = true,
}, true /* We want RBTreeNode_KVPU64ToMargaretMemoryOccupation to be generated here for us */ );
generate_buf_rbtree_Set_templ_inst_eve_header(l, ns, (set_instantiation_op){
.T = cstr("MargaretBAFreeSegment"), .t_primitive = true,
/* comparison takes additional U8 parameter */
.alternative_less = cstr("MargaretBAFreeSegment_less_len"),
.alternative_comp_set_name_embed = cstr("Len"),
});
}
#endif

View File

@ -1,419 +0,0 @@
#ifndef PROTOTYPE1_SRC_L1_CODEGEN_CLIPPING_H
#define PROTOTYPE1_SRC_L1_CODEGEN_CLIPPING_H
#include "../../../l1/codegen/codegen.h"
#include "../../../../gen/l1/VecAndSpan_SpanU8.h"
// todo: move all of this to marie namespace
// todo: instead of returning triangles, return points of convex polygon
// todo: I would say that I need to rewrite all with VecU8_fmt, but I am not sure I even need this code
// todo: rewrite if I decide not to delete
typedef struct {
int order;
bool negate;
} PossiblyNegatedTriangle;
int comparison_triang_groups[18][3] = {
{10, 11, 20}, {10, 11, 21}, {10, 11, 22},
{11, 12, 20}, {11, 12, 21}, {11, 12, 22},
{12, 10, 20}, {12, 10, 21}, {12, 10, 22},
{20, 21, 10}, {20, 21, 11}, {20, 21, 12},
{21, 22, 10}, {21, 22, 11}, {21, 22, 12},
{22, 20, 10}, {22, 20, 11}, {22, 20, 12},
};
int permutations_of_sigma3(const int ns[static 3]) {
return (ns[0] > ns[1]) + (ns[1] > ns[2]) + (ns[0] > ns[2]);
}
PossiblyNegatedTriangle get_order_var_of_triangle_merged_ns(int arg[static 3]) {
for (int ord = 0; ord < 18; ord++) {
for (int x = 0; x < 3; x++) {
for (int y = 0; y < 3; y++) {
if (comparison_triang_groups[ord][y] == arg[x])
goto found_this_one;
}
goto nah_not_this_one;
found_this_one:
}
return (PossiblyNegatedTriangle){ .order = ord,
.negate = ((permutations_of_sigma3(comparison_triang_groups[ord]) +
permutations_of_sigma3(arg)) % 2 == 1) };
nah_not_this_one: /* We continue our search */
}
abortf("Impossible");
}
PossiblyNegatedTriangle get_order_var_of_triangle(char tri, int idi, char trj, int idj, char tru, int idu) {
assert(tri == 'C' || tri == 'T');
assert(trj == 'C' || trj == 'T');
assert(tru == 'C' || tru == 'T');
assert(0 <= idi && idi < 3);
assert(0 <= idj && idj < 3);
assert(0 <= idu && idu < 3);
assert(!(tri == trj && trj == tru));
assert(!(tri == trj && idi == idj));
assert(!(trj == tru && idj == idu));
assert(!(tri == tru && idi == idu));
int arg[3] = { (tri == 'C' ? 10 : 20) + idi, (trj == 'C' ? 10 : 20) + idj, (tru == 'C' ? 10 : 20) + idu };
return get_order_var_of_triangle_merged_ns(arg);
}
/* Appends code to string with code.
* Triangle is either 'T' or 'C'
* Vertexes in triangle are numbered 0,1,2
* vertex vi: (index idi of triangle tri)
* vertex vj: (index idj of triangle trj)
* vertex u: (index idu of triangle tru)
* We walk along the vi -> vj ray. If u is on the left, this statement will be true
*/
void append_on_the_left_stmt(VecU8* str, char tri, int idi, char trj, int idj, char tru, int idu) {
PossiblyNegatedTriangle measure = get_order_var_of_triangle(tri, idi, trj, idj, tru, idu);
VecU8_append_vec(str, VecU8_format("(M%d %s 0)", measure.order, measure.negate ? "<=" : ">="));
}
void append_on_the_right_stmt(VecU8* str, char tri, int idi, char trj, int idj, char tru, int idu) {
PossiblyNegatedTriangle measure = get_order_var_of_triangle(tri, idi, trj, idj, tru, idu);
VecU8_append_vec(str, VecU8_format("(M%d %s 0)", measure.order, measure.negate ? ">=" : "<="));
}
/* Generates statement that intersects two segments from 2 different triangles:
* First segment: (tr1::A1) to (tr1::B1)
* Second segment: (tr2::A2) to (tr2::B2)
* */
void append_intersection_eol_stmt(VecU8* str, char tr1, int A1, int B1, char tr2, int A2, int B2) {
assert((tr1 == 'C' && tr2 == 'T') || (tr1 == 'T' && tr2 == 'C'));
assert(0 <= A1 && A1 < 3);
assert(0 <= B1 && B1 < 3);
assert(0 <= A2 && A2 < 3);
assert(0 <= B2 && B2 < 3);
assert(A1 != B1 && A2 != B2);
VecU8_append_vec(str, VecU8_format("marie_intersect_lines(%c.v%d, %c.v%d, %c.v%d, %c.v%d);\n",
tr1, A1, tr1, B1, tr2, A2, tr2, B2));
}
SpanU8 marie_names_of_two_clipping_triangles[6] = {
cstr("C.v0"), cstr("C.v1"), cstr("C.v2"),
cstr("T.v0"), cstr("T.v1"), cstr("T.v2"),
};
NODISCARD SpanU8 get_firstborn_vertex_stmt(char tr, int id) {
assert(0 <= id && id < 3);
if (tr == 'C')
return marie_names_of_two_clipping_triangles[id];
if (tr == 'T')
return marie_names_of_two_clipping_triangles[3 + id];
abortf("Wrong triangle");
}
void append_triangle_registration_stmt(VecU8* str, SpanU8 P0, SpanU8 P1, SpanU8 P2) {
VecU8_append_span(str, cstr("VecMarieTriangle_append(pile, (MarieTriangle){"));
VecU8_append_span(str, P0);
VecU8_append_span(str, cstr(", "));
VecU8_append_span(str, P1);
VecU8_append_span(str, cstr(", "));
VecU8_append_span(str, P2);
VecU8_append_span(str, cstr("});\n"));
}
void append_answering_stmt(VecU8* res, SpanSpanU8 vertices, int tabulation_lvl) {
size_t n = vertices.len;
assert(n >= 3);
for (size_t i = 0; i < n - 2; i++) {
for (int sp = 0; sp < tabulation_lvl; sp++)
VecU8_append(res, ' ');
append_triangle_registration_stmt(res, *SpanSpanU8_at(vertices, i),
*SpanSpanU8_at(vertices, i + 1), *SpanSpanU8_at(vertices, n - 1));
}
for (int sp = 0; sp < tabulation_lvl; sp++)
VecU8_append(res, ' ');
VecU8_append_span(res, cstr("return;\n"));
}
int mod3_inc(int x) {
return x == 2 ? 0 : (x + 1);
}
int mod3_dec(int x) {
return x ? (x - 1) : 2;
}
void generate_func_clip_triang_on_triang_case_where_some_vertex_stuck(VecU8* res, char tC, char tT, bool tables_turned) {
/* Case where all 3 vertices of tT are inside tC */
VecU8_append_span(res, cstr(SPACE "if ("));
for (int cs = 0; cs < 3; cs++) {
for (int tv = 0; tv < 3; tv++) {
if (cs != 0 || tv != 0)
VecU8_append_span(res, cstr(" && "));
append_on_the_left_stmt(res, tC, cs, tC, (cs + 1) % 3, tT, tv);
}
}
VecU8_append_span(res, cstr(") {\n" SPACE8));
append_triangle_registration_stmt(res,
get_firstborn_vertex_stmt(tT, 0), get_firstborn_vertex_stmt(tT, 1), get_firstborn_vertex_stmt(tT, 2));
VecU8_append_span(res, cstr(SPACE8 "return;\n" SPACE "}\n\n"));
/* Cases where two vertices of tT are inside tC, but one is outside */
for (int ti = 0; ti < 3; ti++) {
VecU8_append_span(res, cstr(SPACE "if ("));
int TA = mod3_inc(ti);
int TB = mod3_inc(TA);
for (int j = 1; j <= 2; j++) {
for (int cs = 0; cs < 3; cs++) {
if (cs != 0 || j != 1)
VecU8_append_span(res, cstr(" && "));
append_on_the_left_stmt(res, tC, cs, tC, mod3_inc(cs), tT, (ti + j) % 3);
}
}
VecU8_append_span(res, cstr(") {\n"));
for (int sc = 0; sc < 3; sc++) {
VecU8_append_span(res, cstr(SPACE8 "if ("));
append_on_the_right_stmt(res, tC, sc, tC, mod3_inc(sc), tT, ti);
VecU8_append_span(res, cstr(") {\n"));
{
/* 'Result hits one edge' case */
VecU8_append_span(res, cstr(SPACE12 "if ("));
append_on_the_left_stmt(res, tT, TA, tC, sc, tT, ti);
VecU8_append_span(res, cstr(" && "));
append_on_the_left_stmt(res, tT, TB, tC, sc, tT, ti);
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, tT, TA, tC, mod3_inc(sc), tT, ti);
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, tT, TB, tC, mod3_inc(sc), tT, ti);
VecU8_append_span(res, cstr(") {\n"));
{
VecU8_append_span(res, cstr(SPACE16 "vec2 PB = "));
append_intersection_eol_stmt(res, tC, sc, mod3_inc(sc), tT, ti, TB);
VecU8_append_span(res, cstr(SPACE16 "vec2 PA = "));
append_intersection_eol_stmt(res, tC, sc, mod3_inc(sc), tT, ti, TA);
SpanU8 quad[4] = {
get_firstborn_vertex_stmt(tT, TB), cstr("PB"), cstr("PA"), get_firstborn_vertex_stmt(tT, TA) };
append_answering_stmt(res, (SpanSpanU8){.data = quad, .len = ARRAY_SIZE(quad)}, 16);
}
VecU8_append_span(res, cstr(SPACE12 "}\n"));
if (!tables_turned) {
/* 'Result hits the angle and two edges' case */
VecU8_append_span(res, cstr(SPACE12 "if ("));
append_on_the_left_stmt(res, tT, TA, tC, sc, tT, ti);
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, tT, TB, tC, sc, tT, ti);
VecU8_append_span(res, cstr(") {\n"));
{
VecU8_append_span(res, cstr(SPACE16 "vec2 PB = "));
append_intersection_eol_stmt(res, tC, sc, mod3_dec(sc), tT, ti, TB);
VecU8_append_span(res, cstr(SPACE16 "vec2 PA = "));
append_intersection_eol_stmt(res, tC, sc, mod3_inc(sc), tT, ti, TA);
SpanU8 pentagon[5] = { get_firstborn_vertex_stmt(tT, TB), cstr("PB"),
get_firstborn_vertex_stmt(tC, sc), cstr("PA"), get_firstborn_vertex_stmt(tT, TA)};
append_answering_stmt(res, (SpanSpanU8){.data = pentagon, .len = ARRAY_SIZE(pentagon)}, 16);
}
VecU8_append_span(res, cstr(SPACE12 "}\n"));
}
}
VecU8_append_span(res, cstr(SPACE8 "}\n"));
}
VecU8_append_span(res, cstr(SPACE "}\n\n"));
}
/* Case where one vertex of tT is inside tC, but the other two are outside tC */
for (int pl = 0; pl < 3; pl++) {
int TA = mod3_inc(pl);
int TB = mod3_inc(TA);
VecU8_append_span(res, cstr(SPACE "if ("));
for (int cb = 0; cb < 3; cb++) {
if (cb)
VecU8_append_span(res, cstr(" && "));
append_on_the_left_stmt(res, tC, cb, tC, mod3_inc(cb), tT, pl);
}
VecU8_append_span(res, cstr(") {\n"));
for (int cr = 0; cr < 3; cr++) {
/* Cases where one vertex (pl) of tT is inside tC, but the other two (TA and TB) are in
* the same 'third of a surface' */
VecU8_append_span(res, cstr(SPACE8 "if ("));
append_on_the_left_stmt(res, tT, pl, tC, cr, tT, TA);
VecU8_append_span(res, cstr(" && "));
append_on_the_left_stmt(res, tT, pl, tC, cr, tT, TB);
VecU8_append_span(res, cstr(" && "));
append_on_the_left_stmt(res, tT, pl, tC, mod3_inc(cr), tT, TA);
VecU8_append_span(res, cstr(" && "));
append_on_the_left_stmt(res, tT, pl, tC, mod3_inc(cr), tT, TB);
VecU8_append_span(res, cstr(") {\n"));
{
VecU8_append_span(res, cstr(SPACE12 "vec2 PA = "));
append_intersection_eol_stmt(res, tT, pl, TA, tC, cr, mod3_inc(cr));
VecU8_append_span(res, cstr(SPACE12 "vec2 PB = "));
append_intersection_eol_stmt(res, tT, pl, TB, tC, cr, mod3_inc(cr));
SpanU8 trig[3] = {get_firstborn_vertex_stmt(tT, pl), cstr("PA"), cstr("PB")};
append_answering_stmt(res, (SpanSpanU8){.data = trig, .len = ARRAY_SIZE(trig)}, 12);
}
VecU8_append_span(res, cstr(SPACE8 "}\n"));
}
for (int rc = 0; rc < 3; rc++) {
VecU8_append_span(res, cstr(SPACE8 "if ("));
append_on_the_left_stmt(res, tT, pl, tC, rc, tT, TA);
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, tT, pl, tC, mod3_inc(rc), tT, TA);
VecU8_append_span(res, cstr(" && "));
append_on_the_left_stmt(res, tT, pl, tC, mod3_inc(rc), tT, TB);
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, tT, pl, tC, mod3_dec(rc), tT, TB);
VecU8_append_span(res, cstr(") {\n"));
{
/* Case where TA and TB are in different 'thirds of surface' and the vertex of tC that defines
* border is outside tT. Result is a pentagon */
VecU8_append_span(res, cstr(SPACE12 "if ("));
append_on_the_right_stmt(res, tT, TA, tT, TB, tC, mod3_inc(rc));
VecU8_append_span(res, cstr(") {\n"));
{
VecU8_append_span(res, cstr(SPACE16 "vec2 PA = "));
append_intersection_eol_stmt(res, tT, pl, TA, tC, rc, mod3_inc(rc));
VecU8_append_span(res, cstr(SPACE16 "vec2 QA = "));
append_intersection_eol_stmt(res, tT, TA, TB, tC, rc, mod3_inc(rc));
VecU8_append_span(res, cstr(SPACE16 "vec2 QB = "));
append_intersection_eol_stmt(res, tT, TA, TB, tC, mod3_inc(rc), mod3_dec(rc));
VecU8_append_span(res, cstr(SPACE16 "vec2 PB = "));
append_intersection_eol_stmt(res, tT, pl, TB, tC, mod3_inc(rc), mod3_dec(rc));
SpanU8 pent[5] = {get_firstborn_vertex_stmt(tT, pl), cstr("PA"), cstr("QA"), cstr("QB"), cstr("PB")};
append_answering_stmt(res, (SpanSpanU8){.data = pent, .len = ARRAY_SIZE(pent)}, 16);
}
VecU8_append_span(res, cstr(SPACE12 "}"));
if (!tables_turned) {
/* Case where TA and TB are in different sectors and rc++ is inside tT
* Result is a quadrangle */
VecU8_append_span(res, cstr(" else {\n"));
VecU8_append_span(res, cstr(SPACE16 "vec2 PA = "));
append_intersection_eol_stmt(res, tT, pl, TA, tC, rc, mod3_inc(rc));
VecU8_append_span(res, cstr(SPACE16 "vec2 PB = "));
append_intersection_eol_stmt(res, tT, pl, TB, tC, mod3_inc(rc), mod3_dec(rc));
SpanU8 quad[4] = {get_firstborn_vertex_stmt(tT, pl), cstr("PA"),
get_firstborn_vertex_stmt(tC, mod3_inc(rc)), cstr("PB")};
append_answering_stmt(res, (SpanSpanU8){.data = quad, .len = ARRAY_SIZE(quad)}, 16);
VecU8_append_span(res, cstr(SPACE12 "}"));
}
VecU8_append_span(res, cstr("\n"));
}
VecU8_append_span(res, cstr(SPACE8 "}\n"));
}
VecU8_append_span(res, cstr(SPACE "}\n\n"));
}
}
/* It is assumed that it goes after two passes of generate_func_clip_triang_on_triang_case_where_some_vertex_stuck */
void generate_func_clip_triang_on_triang_case_boring(VecU8* res) {
/* Star of David case */
for (int cb = 0; cb < 3; cb++) {
VecU8_append_span(res, cstr(SPACE "if ("));
for (int i = 0; i < 3; i++) {
if (i)
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, 'C', (i + cb) % 3, 'C', (i + cb + 1) % 3, 'T', i);
}
VecU8_append_span(res, cstr(") {\n"));
{
VecU8_append_span(res, cstr(SPACE8 "vec2 hex[6] = {\n"));
for (int ti = 0; ti < 3; ti++) {
for (int cj = 0; cj < 2; cj++) {
VecU8_append_vec(res, VecU8_format(SPACE12 "marie_intersect_lines(T.v%d, T.v%d, C.v%d, C.v%d),\n",
ti, (ti + 1) % 3, (ti + cb + cj) % 3, (ti + cb + cj + 1) % 3));
}
}
VecU8_append_span(res, cstr(SPACE8 "};\n"));
VecU8_append_span(res, cstr(SPACE8 "for (int i = 0; i < 4; i++)\n"
SPACE12 "VecMarieTriangle_append(pile, (MarieTriangle){hex[i], hex[i + 1], hex[5]});\n"));
}
VecU8_append_span(res, cstr(SPACE "}\n"));
}
/* Wedge cases */
for (int cf = 0; cf < 3; cf++) {
for (int ti = 0; ti < 3; ti++){
VecU8_append_span(res, cstr(SPACE "if ("));
append_on_the_left_stmt(res, 'T', ti, 'T', mod3_dec(ti), 'C', cf);
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, 'T', mod3_inc(ti), 'T', mod3_dec(ti), 'C', (cf + 2) % 3);
VecU8_append_span(res, cstr(" && "));
append_on_the_left_stmt(res, 'C', cf, 'C', (cf + 2) % 3, 'T', (ti + 2) % 3);
VecU8_append_span(res, cstr(") {\n"));
{
SpanU8 quad[4] = {cstr("PA"), cstr("PB"), cstr("PC"), cstr("PD")};
/* case A */
VecU8_append_span(res, cstr(SPACE8 "if ("));
append_on_the_left_stmt(res, 'T', ti, 'T', mod3_dec(ti), 'C', mod3_inc(cf));
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, 'C', mod3_inc(cf), 'C', mod3_dec(cf), 'T', ti);
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, 'C', mod3_inc(cf), 'C', mod3_dec(cf), 'T', mod3_inc(ti));
VecU8_append_span(res, cstr(") {\n"));
{
VecU8_append_span(res, cstr(SPACE12 "vec2 PA = "));
append_intersection_eol_stmt(res, 'T', mod3_dec(ti), ti, 'C', mod3_inc(cf), mod3_dec(cf));
VecU8_append_span(res, cstr(SPACE12 "vec2 PB = "));
append_intersection_eol_stmt(res, 'T', mod3_inc(ti), mod3_dec(ti), 'C', mod3_inc(cf), mod3_dec(cf));
VecU8_append_span(res, cstr(SPACE12 "vec2 PC = "));
append_intersection_eol_stmt(res, 'T', mod3_inc(ti), mod3_dec(ti), 'C', mod3_dec(cf), cf);
VecU8_append_span(res, cstr(SPACE12 "vec2 PD = "));
append_intersection_eol_stmt(res, 'T', mod3_dec(ti), ti, 'C', mod3_dec(cf), cf);
append_answering_stmt(res, (SpanSpanU8){.data = quad, ARRAY_SIZE(quad)}, 12);
}
VecU8_append_span(res, cstr(SPACE8 "}\n"));
/* case B */
VecU8_append_span(res, cstr(SPACE8 "if ("));
append_on_the_right_stmt(res, 'T', mod3_inc(ti), 'T', mod3_dec(ti), 'C', mod3_inc(cf));
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, 'C', cf, 'C', mod3_inc(cf), 'T', ti);
VecU8_append_span(res, cstr(" && "));
append_on_the_right_stmt(res, 'C', cf, 'C', mod3_inc(cf), 'T', mod3_inc(ti));
VecU8_append_span(res, cstr(") {\n"));
{
VecU8_append_span(res, cstr(SPACE12 "vec2 PA = "));
append_intersection_eol_stmt(res, 'T', mod3_dec(ti), ti, 'C', cf, mod3_inc(cf));
VecU8_append_span(res, cstr(SPACE12 "vec2 PB = "));
append_intersection_eol_stmt(res, 'T', mod3_inc(ti), mod3_dec(ti), 'C', cf, mod3_inc(cf));
VecU8_append_span(res, cstr(SPACE12 "vec2 PC = "));
append_intersection_eol_stmt(res, 'T', mod3_inc(ti), mod3_dec(ti), 'C', mod3_dec(cf), cf);
VecU8_append_span(res, cstr(SPACE12 "vec2 PD = "));
append_intersection_eol_stmt(res, 'T', mod3_dec(ti), ti, 'C', mod3_dec(cf), cf);
append_answering_stmt(res, (SpanSpanU8){.data = quad, ARRAY_SIZE(quad)}, 12);
}
VecU8_append_span(res, cstr(SPACE8 "}\n"));
}
VecU8_append_span(res, cstr(SPACE "}\n"));
}
}
}
NODISCARD VecU8 generate_func_clip_ccw_triang_with_ccw_triang_append_to_Vec() {
VecU8 res = VecU8_from_cstr(
"void marie_clip_ccw_triang_with_ccw_triang_append_to_Vec(MarieTriangle C, MarieTriangle T, VecMarieTriangle* pile) {\n");
for (int ord = 0; ord < 18; ord++) {
VecU8_append_vec(&res, VecU8_format(SPACE "float M%d = marie_surface(", ord));
for (int a = 0; a < 3; a++) {
if (a)
VecU8_append_span(&res, cstr(", "));
int vsh = comparison_triang_groups[ord][a];
VecU8_append(&res, (vsh / 10) == 1 ? 'C' : 'T');
VecU8_append_span(&res, cstr(".v"));
VecU8_append(&res, '0' + vsh % 10);
}
VecU8_append_span(&res, cstr(");\n"));
}
generate_func_clip_triang_on_triang_case_where_some_vertex_stuck(&res, 'C', 'T', false);
generate_func_clip_triang_on_triang_case_where_some_vertex_stuck(&res, 'T', 'C', true);
generate_func_clip_triang_on_triang_case_boring(&res);
VecU8_append_span(&res, cstr("}\n\n"));
return res;
}
void generate_marie_clipping_header() {
GeneratedHeader res = begin_header(cstr("l1_5/marie/clipping.h"));
VecU8_append_span(&res.result, cstr("#include \"../../l1/geom.h\"\n"
"#include \"../../../src/l1/marie/geom_alg_utils.h\"\n\n"));
VecU8_append_vec(&res.result, generate_func_clip_ccw_triang_with_ccw_triang_append_to_Vec());
finish_header(res);
}
#endif

View File

@ -471,7 +471,7 @@ void codegen_append_buff_rbtree_map__method_at_iter(VecU8* res, map_instantiatio
op.k_integer ? VecU8_from_span(op.K) : VecU8_fmt("const %s*", op.K),
mut ? VecU8_fmt("%s*", op.V) : (op.v_integer ? VecU8_from_span(op.V) : VecU8_fmt("const %s*", op.V)),
op.k_integer ? cstr("") : cstr("&"), op.v_integer ? cstr("") : cstr("&")));
op.k_integer ? cstr("") : cstr("&"), (op.v_integer && !mut) ? cstr("") : cstr("&")));
}
NODISCARD VecU8 get_name_of_buf_rbtree_map_structure(map_instantiation_op op){

384
src/l2/lucy/glyph_cache.h Normal file
View File

@ -0,0 +1,384 @@
#ifndef prototype1_src_l2_lucy_glyph_cache_h
#define prototype1_src_l2_lucy_glyph_cache_h
#include "../margaret/vulkan_utils.h"
#include <ft2build.h>
#include FT_FREETYPE_H
#include "../../../gen/l1/VecAndSpan_U32Segment.h"
#include "../../../gen/l1/vulkan/VecVkDescriptorImageInfo.h"
#include "../../l1_5/core/buff_rb_tree_node.h"
#include "../../l1_5/core/rb_tree_node.h"
#define LUCY_MAX_DESCRIPTOR_COUNT 100
typedef struct {
/* This value is actually Option<MargaretSubbuf>. If staging_buffer is already deleted (after it is no longer used),
* staging_buffer.len will be 0 */
MargaretSubbuf staging_buffer;
MargaretImg img;
VkImageView img_view;
U64 usage;
U64 pos_in_desc_array;
/* 0 if this image isn't scheduled for deletion on the next cycle.
* 1 if it is */
int scheduled_for_deletion;
} LucyImage;
#include "../../../gen/l1/eve/lucy/ListLucyImage.h"
typedef ListNodeLucyImage* RefListNodeLucyImage;
#include "../../../gen/l1/eve/lucy/VecRefListNodeLucyImage.h"
typedef struct {
ListNodeLucyImage* img;
U32 w, h;
U32 advance_x;
ivec2 bearing;
uvec2 pos_on_atlas;
} LucyStoredGlyph;
typedef struct {
U32 key;
LucyStoredGlyph value;
} KVPU32ToLucyStoredGlyph;
#include "../../../gen/l1/eve/lucy/VecKVPU32ToLucyStoredGlyph.h"
#include "../../../gen/l1_5/eve/lucy/BufRBTree_MapU32ToLucyStoredGlyph.h"
typedef struct LucyFace LucyFace;
typedef struct{
LucyFace* p;
BufRBTree_MapU32ToLucyStoredGlyph glyphs;
} LucyFaceFixedSize;
void LucyFaceFixedSize_drop(LucyFaceFixedSize self){
BufRBTree_MapU32ToLucyStoredGlyph_drop(self.glyphs);
}
#include "../../../gen/l1_5/eve/lucy/RBTree_MapU32ToLucyFaceFixedSize.h"
// This is a very useful alias
typedef RBTreeNode_KVPU32ToLucyFaceFixedSize RBTreeNodeLucyFaceFixedSize;
typedef struct LucyGlyphCache LucyGlyphCache;
struct LucyFace {
LucyGlyphCache* p;
FT_Face ft_face;
RBTree_MapU32ToLucyFaceFixedSize sizes;
};
struct LucyGlyphCache {
MargaretEngineReference ve;
ListLucyImage images;
VkDescriptorSetLayout descriptor_set_layout;
VkDescriptorSet descriptor_set;
/* to_be_freed_of_old_staging_next_cycle never intersects with to_be_copied_to_device_next_cycle */
VecRefListNodeLucyImage to_be_freed_of_old_staging_next_cycle;
VecRefListNodeLucyImage to_be_copied_to_device_next_cycle;
/* deletion will be performed last */
VecRefListNodeLucyImage to_be_deleted;
};
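/* [Added summary, inferred from the code below] LucyGlyphCache owns the ListLucyImage atlas images; each
 * LucyStoredGlyph keeps a ListNodeLucyImage* and appears to account for +1 of that image's `usage`.
 * Dropping a LucyFaceFixedSize decrements `usage` for every glyph it stored, and an image whose usage
 * reaches 0 is flagged and queued in to_be_deleted. Freshly filled images get a staging buffer and are
 * queued in to_be_copied_to_device_next_cycle; their staging memory is then released via
 * to_be_freed_of_old_staging_next_cycle on the following cycle. */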
LucyGlyphCache LucyGlyphCache_new(MargaretEngineReference ve){
VkDescriptorSetLayout my_desc_set_layout;
check(vkCreateDescriptorSetLayout(ve.device, &(VkDescriptorSetLayoutCreateInfo){
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.bindingCount = 1,
.pBindings = (VkDescriptorSetLayoutBinding[]){{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = LUCY_MAX_DESCRIPTOR_COUNT,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
}},
}, NULL, &my_desc_set_layout) == VK_SUCCESS);
VkDescriptorSet descriptor_set = margaret_allocate_descriptor_set(ve.device, ve.descriptor_pool,
my_desc_set_layout);
return (LucyGlyphCache){.ve = ve, .images = ListLucyImage_new(),
.descriptor_set_layout = my_desc_set_layout, .descriptor_set = descriptor_set};
}
void LucyFaceFixedSize_get_rid_of_myself(LucyFaceFixedSize* self){
LucyGlyphCache* cache = self->p->p;
BufRBTree_MapU32ToLucyStoredGlyph* glyphs = &self->glyphs;
for (size_t gid = 0; gid < glyphs->el.len; gid++) {
ListNodeLucyImage* img = glyphs->el.buf[gid].value.img;
assert(img->el.usage > 0);
if (--img->el.usage == 0) {
assert(!img->el.scheduled_for_deletion);
img->el.scheduled_for_deletion = 1;
VecRefListNodeLucyImage_append(&cache->to_be_deleted, img);
}
}
BufRBTree_MapU32ToLucyStoredGlyph_sink(glyphs);
}
typedef struct {
RBTreeNodeLucyFaceFixedSize* sized_face;
VecU32Segment codepoint_ranges;
} LucyGlyphCachingRequest;
void LucyGlyphCachingRequest_drop(LucyGlyphCachingRequest self){
VecU32Segment_drop(self.codepoint_ranges);
}
#include "../../../gen/l1/eve/lucy/VecAndSpan_LucyGlyphCachingRequest.h"
/* Helper structure */
typedef struct {
LucyFaceFixedSize* sized_face;
U32 codepoint;
TextureDataR8 bitmap;
/* Will be determined in the next phase */
uvec2 pos;
ListNodeLucyImage* img;
} LucyPositionedStagingGlyph;
bool LucyPositionedStagingGlyph_less_LucyPositionedStagingGlyph(
const LucyPositionedStagingGlyph* A, const LucyPositionedStagingGlyph* B){
return A->bitmap.height < B->bitmap.height;
}
void LucyPositionedStagingGlyph_drop(LucyPositionedStagingGlyph self){
TextureDataR8_drop(self.bitmap);
}
/* Instantiation for helper type */
#include "../../../gen/l1/eve/lucy/VecLucyPositionedStagingGlyph.h"
/* Helper function */
void LucyGlyphCache_add_glyphs__close_img(
LucyGlyphCache* cache, ListNodeLucyImage* img, U32 img_width, U32 img_height
){
assert(img->el.usage > 0);
img_width = MAX_U32(img_width, 10); // Just a precaution: empty buffers aren't supported by Margaret
img_height = MAX_U32(img_height, 10);
VecRefListNodeLucyImage_append(&cache->to_be_copied_to_device_next_cycle, img);
img->el.staging_buffer = MargaretBufAllocator_alloc(cache->ve.staging_buffers, img_width * img_height * 1);
img->el.img = MargaretImgAllocator_alloc(cache->ve.dev_local_images, img_width, img_height, VK_FORMAT_R8_UNORM,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
img->el.img_view = margaret_create_view_for_image(cache->ve.device, img->el.img.a.image,
VK_FORMAT_R8_UNORM, VK_IMAGE_ASPECT_COLOR_BIT);
}
void LucyGlyphCache_add_glyphs(VecLucyGlyphCachingRequest requests_for_faces){
if (requests_for_faces.len == 0)
return;
LucyGlyphCache* cache = requests_for_faces.buf[0].sized_face->value.p->p;
for (size_t fi = 0; fi < requests_for_faces.len; fi++) {
assert(cache == requests_for_faces.buf[fi].sized_face->value.p->p);
}
VkPhysicalDeviceProperties properties;
vkGetPhysicalDeviceProperties(cache->ve.physical_device, &properties);
U32 max_dim = properties.limits.maxImageDimension2D;
check(max_dim >= 10);
VecLucyPositionedStagingGlyph ready = VecLucyPositionedStagingGlyph_new();
for (size_t fi = 0; fi < requests_for_faces.len; fi++) {
LucyGlyphCachingRequest req = requests_for_faces.buf[fi];
FT_Face ft_face = req.sized_face->value.p->ft_face;
U32 font_height = req.sized_face->key;
check(FT_Set_Pixel_Sizes(ft_face, 0, font_height) == 0);
BufRBTree_MapU32ToLucyStoredGlyph* glyph_set = &req.sized_face->value.glyphs;
/* Phase 1, where we add some elements to glyph_set, but we don't yet
* know how many images we will have or where the new glyphs will be placed */
for (size_t ri = 0; ri < req.codepoint_ranges.len; ri++) {
U32 range_start = req.codepoint_ranges.buf[ri].start;
U32 range_end = range_start + req.codepoint_ranges.buf[ri].len;
for (U32 codepoint = range_start; codepoint < range_end; codepoint++) {
if (BufRBTree_MapU32ToLucyStoredGlyph_find(glyph_set, codepoint) != 0)
continue;
FT_UInt glyph_index = FT_Get_Char_Index(ft_face, (FT_ULong)codepoint);
check(FT_Load_Glyph(ft_face, glyph_index, 0) == 0);
FT_GlyphSlot slot = ft_face->glyph;
/* Render first: the bitmap fields are only valid once the glyph is in bitmap format */
if (slot->format != FT_GLYPH_FORMAT_BITMAP) {
check(FT_Render_Glyph(slot, FT_RENDER_MODE_NORMAL) == 0);
}
FT_Bitmap* bitmap = &slot->bitmap;
check(bitmap->pixel_mode == FT_PIXEL_MODE_GRAY);
TextureDataR8 my_bitmap = TextureDataR8_new(bitmap->width, bitmap->rows);
/* Here we dismiss very big glyphs. This guarantees that each glyph on its own fits into a VkImage */
check(bitmap->width <= max_dim && bitmap->rows <= max_dim);
assert(bitmap->rows == 0 || bitmap->width != 0 || bitmap->buffer != NULL);
for (S64 y = 0; y < bitmap->rows; y++) {
for (S64 x = 0; x < bitmap->width; x++) {
*TextureDataR8_mat(&my_bitmap, x, y) = *(bitmap->buffer + y * bitmap->pitch + x * sizeof(U8));
}
}
BufRBTree_MapU32ToLucyStoredGlyph_insert(glyph_set, codepoint, (LucyStoredGlyph){
.w = bitmap->width, .h = bitmap->rows,
.advance_x = slot->advance.x >> 6,
.bearing = (ivec2){ slot->bitmap_left, -slot->bitmap_top },
/* pos_on_atlas and img will be set later, once the `ready` vector has been packed */
});
VecLucyPositionedStagingGlyph_append(&ready, (LucyPositionedStagingGlyph){
.sized_face = &req.sized_face->value, .codepoint = codepoint, .bitmap = my_bitmap,
/* pos and img will be filled later by packing algorithm */
});
}
}
}
/* Phase 2. Here we determine (in the `ready` vector) where each new glyph sits (atlas image + position).
* We don't copy any TextureDataR8 into the staging buffers until everything is known */
VecLucyPositionedStagingGlyph_sort(&ready);
/* Variables, that have to be reset after each image overflow */
U32 starting_x = 0;
VecU32 landscape = VecU32_new_reserved(200);
U32 img_width = 0, img_height = 0;
ListNodeLucyImage* img = ListLucyImage_insert(&cache->images, (LucyImage){0});
for (size_t j = 0; j < ready.len; j++) {
LucyPositionedStagingGlyph* p_glyph;
one_more_chance:
p_glyph = &ready.buf[j];
U64 new_width_required = p_glyph->bitmap.width + starting_x;
if (new_width_required > max_dim) {
/* Resetting row */
starting_x = 0;
goto one_more_chance;
}
for (U32 h = img_width; h < new_width_required; h++) {
VecU32_append(&landscape, 0);
}
img_width = MAX_U64(img_width, new_width_required);
U32 height_here = 0;
for (size_t x = 0; x < p_glyph->bitmap.width; x++) {
height_here = MAX_U32(height_here, *VecU32_at(&landscape, starting_x + x));
}
U64 new_height_required = height_here + p_glyph->bitmap.height;
if (new_height_required > max_dim) {
/* Resetting image */
LucyGlyphCache_add_glyphs__close_img(cache, img, img_width, img_height);
starting_x = 0;
landscape.len = 0;
img_width = 0;
img_height = 0;
img = ListLucyImage_insert(&cache->images, (LucyImage){0});
goto one_more_chance;
}
/* Success */
for (size_t x = 0; x < p_glyph->bitmap.width; x++) {
*VecU32_mat(&landscape, starting_x + x) = new_height_required;
}
img_height = MAX_U64(img_height, new_height_required);
p_glyph->img = img;
p_glyph->pos = (uvec2){starting_x, height_here};
img->el.usage++; /* p_glyph uses it, that's a rock fact */
BufRBTree_MapU32ToLucyStoredGlyph *glyphs = &p_glyph->sized_face->glyphs;
U64 map_it = BufRBTree_MapU32ToLucyStoredGlyph_find(glyphs, p_glyph->codepoint);
assert(map_it > 0 && map_it < glyphs->tree.len);
LucyStoredGlyph* actual_glyph = &glyphs->el.buf[map_it - 1].value;
actual_glyph->pos_on_atlas = (uvec2){starting_x, height_here};
actual_glyph->img = img;
starting_x += p_glyph->bitmap.width;
}
LucyGlyphCache_add_glyphs__close_img(cache, img, img_width, img_height);
VecU32_drop(landscape);
/* Phase 3. We have all the data. Now what?
* Now we fill the staging buffers with the glyph bitmaps from the `ready` vector */
for (size_t j = 0; j < ready.len; j++) {
LucyPositionedStagingGlyph* p_glyph = &ready.buf[j];
U64 staging_width = p_glyph->img->el.img.width;
U8* staging = (U8*)MargaretSubbuf_get_mapped(&p_glyph->img->el.staging_buffer);
for (U64 y = 0; y < p_glyph->bitmap.height; y++) {
U64 Y = y + p_glyph->pos.y;
for (U64 x = 0; x < p_glyph->bitmap.width; x++) {
U64 X = x + p_glyph->pos.x;
staging[Y * staging_width + X] = *TextureDataR8_at(&p_glyph->bitmap, x, y);
}
}
}
VecLucyPositionedStagingGlyph_drop(ready);
VecLucyGlyphCachingRequest_drop(requests_for_faces);
}
/* This must not happen before all the LucyFaceFixedSizes are destroyed */
void LucyGlyphCache_drop(LucyGlyphCache self){
assert(self.images.first == NULL);
VecRefListNodeLucyImage_drop(self.to_be_freed_of_old_staging_next_cycle);
VecRefListNodeLucyImage_drop(self.to_be_copied_to_device_next_cycle);
VecRefListNodeLucyImage_drop(self.to_be_deleted);
}
void LucyGlyphCache_another_frame(LucyGlyphCache* self){
for (size_t i = 0; i < self->to_be_freed_of_old_staging_next_cycle.len; i++) {
LucyImage* img = &self->to_be_freed_of_old_staging_next_cycle.buf[i]->el;
assert(img->staging_buffer.len != 0);
MargaretBufAllocator_free(self->ve.staging_buffers, img->staging_buffer);
img->staging_buffer.len = 0;
}
for (size_t i = 0; i < self->to_be_copied_to_device_next_cycle.len; i++) {
ListNodeLucyImage* img_node = self->to_be_copied_to_device_next_cycle.buf[i];
LucyImage* img = &img_node->el;
assert(img->staging_buffer.len != 0);
if (img->scheduled_for_deletion)
continue;
margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(self->ve.transfer_cmd_buffer,
&img->staging_buffer, &img->img, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
VecRefListNodeLucyImage_append(&self->to_be_freed_of_old_staging_next_cycle, img_node);
}
/* We technically could carry out each deletion request in O(1) and each img creation request in O(1),
* but who cares, it's no problem going over the entire descriptor set when something gets added or deleted */
for (size_t i = 0; i < self->to_be_deleted.len; i++) {
ListNodeLucyImage* img_node = self->to_be_deleted.buf[i];
LucyImage* img = &img_node->el;
assert(img->scheduled_for_deletion);
assert(img->usage == 0);
if (img->staging_buffer.len != 0)
MargaretBufAllocator_free(self->ve.staging_buffers, img->staging_buffer);
MargaretImgAllocator_free(self->ve.dev_local_images, img->img.a);
ListLucyImage_erase_by_it(&self->images, img_node);
}
if ((self->to_be_copied_to_device_next_cycle.len > 0) || (self->to_be_deleted.len > 0)) {
U32 descriptor_i = 0;
VecVkDescriptorImageInfo desc_elements = VecVkDescriptorImageInfo_new();
for (ListNodeLucyImage* list_node = self->images.first; list_node; list_node = list_node->next) {
if (descriptor_i == LUCY_MAX_DESCRIPTOR_COUNT) {
abortf("Today you are out of luck\n");
}
LucyImage* img = &list_node->el;
img->pos_in_desc_array = descriptor_i;
VecVkDescriptorImageInfo_append(&desc_elements, (VkDescriptorImageInfo){
.sampler = self->ve.nearest_sampler, .imageView = img->img_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
});
descriptor_i++;
}
vkUpdateDescriptorSets(self->ve.device, 1, &(VkWriteDescriptorSet){
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = self->descriptor_set, .dstBinding = 0, .dstArrayElement = 0,
.descriptorCount = desc_elements.len,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = desc_elements.buf
}, 0, NULL);
VecVkDescriptorImageInfo_drop(desc_elements);
}
self->to_be_freed_of_old_staging_next_cycle.len = 0;
self->to_be_copied_to_device_next_cycle.len = 0;
self->to_be_deleted.len = 0;
}
/* This function does not check font file for correctness, use only with trusted fonts */
LucyFace LucyFace_new(FT_Library lib, LucyGlyphCache* cache, VecU8 path){
VecU8_append(&path, 0); // Making it null-terminated
FT_Face face;
FT_Error ret = FT_New_Face(lib, (const char*)path.buf, 0, &face);
check(ret == 0);
VecU8_drop(path);
return (LucyFace){.p = cache, .ft_face = face, .sizes = RBTree_MapU32ToLucyFaceFixedSize_new()};
}
RBTreeNodeLucyFaceFixedSize* LucyFace_of_size(LucyFace* self, U32 size){
RBTreeNodeLucyFaceFixedSize* nahodka = RBTree_MapU32ToLucyFaceFixedSize_find(&self->sizes, size);
if (nahodka)
return nahodka;
RBTree_MapU32ToLucyFaceFixedSize_insert(&self->sizes, size, (LucyFaceFixedSize){
.p = self, .glyphs = BufRBTree_MapU32ToLucyStoredGlyph_new()
});
// todo: add a method to RBTree for proper node insertion. This is just pure crap
return RBTree_MapU32ToLucyFaceFixedSize_find(&self->sizes, size);
}
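/* Illustrative usage sketch (not taken from this repository; `ve`, `ft_lib`, the U32Segment element type
 * and the VecU32Segment/VecLucyGlyphCachingRequest helpers are assumed from the generated-container
 * conventions used elsewhere in this file):
 *
 *   LucyGlyphCache cache = LucyGlyphCache_new(ve);
 *   LucyFace face = LucyFace_new(ft_lib, &cache, VecU8_fmt("assets/some_font.ttf"));
 *   RBTreeNodeLucyFaceFixedSize* sized = LucyFace_of_size(&face, 24);
 *
 *   VecU32Segment ranges = VecU32Segment_new();
 *   VecU32Segment_append(&ranges, (U32Segment){.start = 0x20, .len = 0x5F}); // printable ASCII
 *   VecLucyGlyphCachingRequest reqs = VecLucyGlyphCachingRequest_new();
 *   VecLucyGlyphCachingRequest_append(&reqs, (LucyGlyphCachingRequest){
 *       .sized_face = sized, .codepoint_ranges = ranges});
 *   LucyGlyphCache_add_glyphs(reqs); // takes ownership of `reqs`
 *
 *   // once per frame, while transfer commands are being recorded:
 *   LucyGlyphCache_another_frame(&cache);
 */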
#endif

src/l2/lucy/glyph_render.h Normal file

@ -0,0 +1,195 @@
#ifndef prototype1_src_l2_lucy_rendering_h
#define prototype1_src_l2_lucy_rendering_h
#include "glyph_cache.h"
#include "../../../gen/l1/pixel_masses.h"
#include "../../../gen/l1/geom.h"
// todo: rewrite this shit crap using instances
typedef struct{
vec4 color;
vec2 pos;
vec2 tex_cord;
U32 tex_ind;
} LucyVertex;
typedef struct {
MargaretEngineReference ve;
LucyGlyphCache* cache;
VkPipelineLayout pipeline_layout;
VkPipeline pipeline;
U64 vertex_count;
MargaretSubbuf staging_vbo;
MargaretSubbuf vbo;
bool need_to_transfer;
} LucyRenderer;
typedef struct {
float width, height;
} LucyRendererPushConstants;
LucyRenderer LucyRenderer_new(
MargaretEngineReference ve, LucyGlyphCache* cache, SpanU8 root_dir,
VkRenderPass render_pass, U32 renderpass_subpass){
VkPipelineLayout pipeline_layout;
check(vkCreatePipelineLayout(ve.device, &(VkPipelineLayoutCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.setLayoutCount = 1,
.pSetLayouts = &cache->descriptor_set_layout,
.pushConstantRangeCount = 1,
.pPushConstantRanges = (VkPushConstantRange[]){{
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT, .offset = 0, .size = sizeof(LucyRendererPushConstants)
}},
}, NULL, &pipeline_layout) == VK_SUCCESS);
VkPipeline pipeline = margaret_create_triangle_pipeline_one_attachment(ve.device,
render_pass, renderpass_subpass, (MargaretMostImportantPipelineOptions){
.pipeline_layout = pipeline_layout,
.vertex_shader_code = read_file_by_path(VecU8_fmt("%s/gen/l_adele/lucy/vert.spv", root_dir)),
.fragment_shader_code = read_file_by_path(VecU8_fmt("%s/gen/l_adele/lucy/frag.spv", root_dir)),
.vertexBindingDescriptionCount = 1,
.pVertexBindingDescriptions = (VkVertexInputBindingDescription[]){
{ .binding = 0, .stride = sizeof(LucyVertex), .inputRate = VK_VERTEX_INPUT_RATE_VERTEX } },
.vertexAttributeDescriptionCount = 4,
.pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]){
{.location = 0, .binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT, .offset = offsetof(LucyVertex, color)},
{.location = 1, .binding = 0,
.format = VK_FORMAT_R32G32_SFLOAT, .offset = offsetof(LucyVertex, pos)},
{.location = 2, .binding = 0,
.format = VK_FORMAT_R32G32_SFLOAT, .offset = offsetof(LucyVertex, tex_cord)},
{.location = 3, .binding = 0,
.format = VK_FORMAT_R32_UINT, .offset = offsetof(LucyVertex, tex_ind)},
},
.depthTestEnable = false, .depthWriteEnable = false, .blendEnable = true,
});
return (LucyRenderer){.ve = ve, .cache = cache, .pipeline_layout = pipeline_layout, .pipeline = pipeline,
.vertex_count = 0, .staging_vbo = MargaretBufAllocator_alloc(ve.staging_buffers, 67),
.vbo = MargaretBufAllocator_alloc(ve.dev_local_buffers, 67)
};
}
/* Safe to call at any point after a new frame has started, but it must be called
* before LucyRenderer_another_frame
*/
void LucyRenderer_clear(LucyRenderer* self){
self->vertex_count = 0;
}
void LucyRenderer__append_vertex_to_vao(LucyRenderer* self, LucyVertex vert_data){
self->vertex_count++;
assert(self->vertex_count * sizeof(LucyVertex) <= self->staging_vbo.len);
LucyVertex* staging = (LucyVertex*)MargaretSubbuf_get_mapped(&self->staging_vbo);
staging[self->vertex_count - 1] = vert_data;
}
/* Safe to call at any point after a new frame has started, but it must be called
* before LucyRenderer_another_frame, because _another_frame records the transfer of the vertex data
*/
void LucyRenderer_add_text(
LucyRenderer* self, RBTreeNodeLucyFaceFixedSize* ffs, vec4 color,
U32 additional_y_advance, SpanU8 text, ivec2 start_pos
){
self->need_to_transfer = true;
U32 font_height = ffs->key;
ivec2 pos = (ivec2){start_pos.x, start_pos.y + (S32)font_height};
/* `text` variable will be modified during decoding */
while (text.len > 0) {
U32 codepoint = SpanU8_decode_as_utf8(&text);
if (codepoint == (U32)'\n') {
pos = (ivec2){start_pos.x, pos.y + (S32)font_height + (S32)additional_y_advance};
continue;
}
BufRBTree_MapU32ToLucyStoredGlyph *glyphs = &ffs->value.glyphs;
U64 map_it = BufRBTree_MapU32ToLucyStoredGlyph_find(glyphs, codepoint);
if (map_it == 0) {
/* We probably should have requested LucyCache to load more glyphs or draw 'unknown character'
* character, but we just skip things. We will force someone else to do that job */
continue;
}
assert(map_it > 0 && map_it < glyphs->tree.len);
LucyStoredGlyph* glyph = &glyphs->el.buf[map_it - 1].value;
if (glyph->w > 0 && glyph->h > 0) {
float atlas_w = (float)glyph->img->el.img.width;
float atlas_h = (float)glyph->img->el.img.height;
U32 desc_elem_id = glyph->img->el.pos_in_desc_array;
ivec2 positioned = ivec2_add_ivec2(pos, glyph->bearing);
LucyVertex v0 = {
.color = color, .pos = (vec2){(float)positioned.x, (float)positioned.y},
.tex_cord = (vec2){
(float)(glyph->pos_on_atlas.x) / atlas_w,
(float)(glyph->pos_on_atlas.y) / atlas_h
}, .tex_ind = desc_elem_id
};
LucyVertex v1 = {
.color = color, .pos = (vec2){(float)(positioned.x + glyph->w), (float)positioned.y},
.tex_cord = (vec2){
(float)(glyph->pos_on_atlas.x + glyph->w) / atlas_w,
(float)(glyph->pos_on_atlas.y) / atlas_h
}, .tex_ind = desc_elem_id
};
LucyVertex v2 = {
.color = color, .pos = (vec2){(float)positioned.x, (float)(positioned.y + glyph->h)},
.tex_cord = (vec2){
(float)(glyph->pos_on_atlas.x) / atlas_w,
(float)(glyph->pos_on_atlas.y + glyph->h) / atlas_h
}, .tex_ind = desc_elem_id
};
LucyVertex v3 = {
.color = color, .pos = (vec2){(float)(positioned.x + glyph->w), (float)(positioned.y + glyph->h)},
.tex_cord = (vec2){
(float)(glyph->pos_on_atlas.x + glyph->w) / atlas_w,
(float)(glyph->pos_on_atlas.y + glyph->h) / atlas_h
}, .tex_ind = desc_elem_id
};
/* What if we run out of space? */
U64 needed_vbo_length = (self->vertex_count + 6) * sizeof(LucyVertex);
if (self->staging_vbo.len < needed_vbo_length) {
printf("LucyRenderer Staging Buffer: Gotta replace %lu with %lu\n",
self->staging_vbo.len, needed_vbo_length);
MargaretBufAllocator_expand_or_move_old_host_visible(
self->ve.staging_buffers, &self->staging_vbo, needed_vbo_length);
}
LucyRenderer__append_vertex_to_vao(self, v1);
LucyRenderer__append_vertex_to_vao(self, v0);
LucyRenderer__append_vertex_to_vao(self, v2);
LucyRenderer__append_vertex_to_vao(self, v1);
LucyRenderer__append_vertex_to_vao(self, v2);
LucyRenderer__append_vertex_to_vao(self, v3);
}
pos.x += (S32)glyph->advance_x;
}
}
/* It only records transfer commands (transfer command buffer is passed in MargaretEngineReference object) */
void LucyRenderer_another_frame(LucyRenderer* self){
if (self->vbo.len < self->vertex_count * sizeof(LucyVertex)) {
MargaretBufAllocator_expand_or_free_old(self->ve.dev_local_buffers, &self->vbo,
self->vertex_count * sizeof(LucyVertex));
}
if (self->need_to_transfer && self->vertex_count > 0) {
self->need_to_transfer = false;
margaret_rec_cmd_copy_buffer_one_to_one_part(self->ve.transfer_cmd_buffer,
&self->staging_vbo, &self->vbo, 0, self->vertex_count * sizeof(LucyVertex));
}
}
// todo: merge with the function above. In the future all the shit will be recorded simultaneously
void LucyRenderer_another_frame_rec_drawing(
LucyRenderer* self, VkCommandBuffer drawing_cmd_buf, VkExtent2D image_extent){
vkCmdBindPipeline(drawing_cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, self->pipeline);
vkCmdPushConstants(drawing_cmd_buf, self->pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT,
0, sizeof(LucyRendererPushConstants),
&(LucyRendererPushConstants){ .width = (float)image_extent.width, .height = (float)image_extent.height });
vkCmdBindVertexBuffers(drawing_cmd_buf, 0, 1,
(VkBuffer[]){MargaretSubbuf_get_buffer(&self->vbo)}, (VkDeviceSize[]){self->vbo.start});
vkCmdBindDescriptorSets(drawing_cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, self->pipeline_layout, 0,
1, (VkDescriptorSet[]){self->cache->descriptor_set}, 0, NULL);
vkCmdDraw(drawing_cmd_buf, self->vertex_count, 1, 0, 0);
}
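/* Illustrative per-frame call order (a sketch, not from this repository; `renderer`, `ffs`, `text`,
 * `drawing_cmd_buf` and `image_extent` are assumed to be set up elsewhere):
 *
 *   LucyRenderer_clear(&renderer);
 *   LucyRenderer_add_text(&renderer, ffs, (vec4){1, 1, 1, 1}, 2, text, (ivec2){16, 16});
 *   LucyRenderer_another_frame(&renderer); // records the staging->vbo transfer
 *   // ... begin the render pass ...
 *   LucyRenderer_another_frame_rec_drawing(&renderer, drawing_cmd_buf, image_extent);
 */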
#endif

@ -0,0 +1,345 @@
// Same dependencies as vulkan memory allocator
#include "../../l1/core/uint_segments.h"
#include "../../l1/core/util.h"
#include "../../l1_5/core/buff_rb_tree_node.h"
#include "../../../gen/l1_5/BufRBTree_MapU64ToU64.h"
typedef struct MargaretBufAllocatorOneBlock MargaretBufAllocatorOneBlock;
typedef struct {
MargaretBufAllocatorOneBlock* block;
U64 start;
U64 len;
} MargaretBAFreeSegment;
bool MargaretBAFreeSegment_less_len(const MargaretBAFreeSegment* A, const MargaretBAFreeSegment* B){
if (A->len == B->len) {
if (A->block == B->block) {
return A->start < B->start;
}
return (uintptr_t)A->block < (uintptr_t)B->block;
}
return A->len < B->len;
}
U64 margaret_bump_buffer_size_to_alignment(U64 A, U8 alignment_exp){
if (A & ((1ull << alignment_exp) - 1))
A = A - (A & ((1ull << alignment_exp) - 1)) + (1ull << alignment_exp);
return A;
}
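/* Example: with alignment_exp = 8 (256-byte alignment), 300 -> 512, 256 -> 256, 1 -> 256 */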
typedef struct {
MargaretBufAllocatorOneBlock* block;
U64 start;
U64 len;
} MargaretSubbuf;
struct MargaretBufAllocatorOneBlock{
BufRBTree_MapU64ToU64 occupants;
U64 capacity;
U64 occupation_counter;
VkDeviceMemory mem_hand;
VkBuffer buf_hand;
void* mapped_memory;
};
void MargaretBufAllocatorOneBlock_drop(MargaretBufAllocatorOneBlock self){
BufRBTree_MapU64ToU64_drop(self.occupants);
}
#include "../../../gen/l1/eve/margaret/ListMargaretBufAllocatorOneBlock.h"
#include "../../../gen/l1/eve/margaret/OptionMargaretBAFreeSegment.h"
#include "../../../gen/l1/eve/margaret/VecMargaretBAFreeSegment.h"
#include "../../../gen/l1_5/eve/margaret/BufRBTreeByLen_SetMargaretBAFreeSegment.h"
typedef struct {
ListMargaretBufAllocatorOneBlock blocks;
BufRBTreeByLen_SetMargaretBAFreeSegment mem_free_space;
VkDevice device;
VkPhysicalDevice physical_device;
VkBufferUsageFlags usage;
U8 memory_type_id;
U8 alignment_exp;
bool host_visible;
} MargaretBufAllocator;
void MargaretBufAllocator__erase_gap(
MargaretBufAllocator* self, MargaretBufAllocatorOneBlock* block, U64 start, U64 len){
if (len == 0)
return;
bool eret = BufRBTreeByLen_SetMargaretBAFreeSegment_erase(&self->mem_free_space,
&(MargaretBAFreeSegment){.block = block, .start = start, .len = len});
assert(eret);
block->occupation_counter += len;
assert(block->occupation_counter <= block->capacity);
}
void MargaretBufAllocator__insert_gap(
MargaretBufAllocator* self, MargaretBufAllocatorOneBlock* block, U64 start, U64 len){
if (len == 0)
return;
bool iret = BufRBTreeByLen_SetMargaretBAFreeSegment_insert(&self->mem_free_space,
(MargaretBAFreeSegment){.block = block, .start = start, .len = len});
assert(iret);
assert(len <= block->occupation_counter);
block->occupation_counter -= len;
}
OptionMargaretBAFreeSegment MargaretBufAllocator__search_gap(MargaretBufAllocator* self, U64 req_size){
assert(req_size % (1ull << self->alignment_exp) == 0);
U64 sit = BufRBTreeByLen_SetMargaretBAFreeSegment_find_min_grtr_or_eq(&self->mem_free_space,
&(MargaretBAFreeSegment){.len = req_size});
if (sit == 0)
return None_MargaretBAFreeSegment();
return Some_MargaretBAFreeSegment(*BufRBTreeByLen_SetMargaretBAFreeSegment_at_iter(&self->mem_free_space, sit));
}
void MargaretBufAllocator__add_block(MargaretBufAllocator* self, U64 capacity){
VkBuffer buffer;
check(vkCreateBuffer(self->device, &(VkBufferCreateInfo){
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.size = capacity,
.usage = self->usage,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE
}, NULL, &buffer) == VK_SUCCESS);
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(self->device, buffer, &memory_requirements);
VkDeviceMemory memory;
check(vkAllocateMemory(self->device, &(VkMemoryAllocateInfo){
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = memory_requirements.size,
.memoryTypeIndex = self->memory_type_id
}, NULL, &memory) == VK_SUCCESS);
check(vkBindBufferMemory(self->device, buffer, memory, 0) == VK_SUCCESS);
void* mapped_memory = NULL;
if (self->host_visible) {
check(vkMapMemory(self->device, memory, 0, capacity, 0, &mapped_memory) == VK_SUCCESS);
}
ListMargaretBufAllocatorOneBlock_insert(&self->blocks, (MargaretBufAllocatorOneBlock){
.occupants = BufRBTree_MapU64ToU64_new_reserved(1),
.capacity = capacity,
.occupation_counter = capacity,
.mem_hand = memory, .buf_hand = buffer, .mapped_memory = mapped_memory
});
}
MargaretBufAllocator MargaretBufAllocator_new(
VkDevice device, VkPhysicalDevice physical_device,
VkBufferUsageFlags usage, U8 memory_type_id, U8 alignment_exp, bool host_visible, U64 initial_block_size
){
MargaretBufAllocator self = {
.blocks = ListMargaretBufAllocatorOneBlock_new(),
.mem_free_space = BufRBTreeByLen_SetMargaretBAFreeSegment_new_reserved(1),
.device = device, .physical_device = physical_device, .usage = usage, .memory_type_id = memory_type_id,
.alignment_exp = alignment_exp, .host_visible = host_visible
};
MargaretBufAllocator__add_block(&self, initial_block_size);
MargaretBufAllocator__insert_gap(&self, &self.blocks.first->el, 0, initial_block_size);
return self;
}
void MargaretBufAllocator__put_buf_to_a_gap(MargaretBufAllocator* self, MargaretBAFreeSegment segment, U64 req_size){
assert(req_size <= segment.len);
MargaretBufAllocator__erase_gap(self, segment.block, segment.start, segment.len);
MargaretBufAllocator__insert_gap(self, segment.block,
segment.start + req_size, segment.len - req_size);
BufRBTree_MapU64ToU64* occupants = &segment.block->occupants;
bool iret = BufRBTree_MapU64ToU64_insert(occupants, segment.start, req_size);
assert(iret);
}
U64Segment MargaretBufAllocator__get_left_free_space(
const MargaretBufAllocator* self, const MargaretSubbuf* allocation){
U64 occ_start = allocation->start;
U64 prev_occ_it = BufRBTree_MapU64ToU64_find_max_less(&allocation->block->occupants, allocation->start);
if (prev_occ_it != 0) {
U64 prev_occ_start;
U64 prev_occ_taken_size;
BufRBTree_MapU64ToU64_at_iter(&allocation->block->occupants, prev_occ_it, &prev_occ_start, &prev_occ_taken_size);
assert(prev_occ_start + prev_occ_taken_size <= occ_start);
return (U64Segment){
.start = prev_occ_start + prev_occ_taken_size,
.len = occ_start - (prev_occ_start + prev_occ_taken_size)};
}
return (U64Segment){.start = 0, .len = occ_start};
}
U64Segment MargaretBufAllocator__get_right_free_space(
const MargaretBufAllocator* self, const MargaretSubbuf* allocation){
U64 occ_start = allocation->start;
U64 occ_taken_size = allocation->len;
U64 next_occ_it = BufRBTree_MapU64ToU64_find_min_grtr(&allocation->block->occupants, allocation->start);
if (next_occ_it != 0) {
U64 next_occ_start;
U64 next_occ_taken_size;
BufRBTree_MapU64ToU64_at_iter(&allocation->block->occupants, next_occ_it, &next_occ_start, &next_occ_taken_size);
assert(occ_start + occ_taken_size <= next_occ_start);
return (U64Segment){.start = occ_start + occ_taken_size, .len = next_occ_start - (occ_start + occ_taken_size)};
}
return (U64Segment){.start = occ_start + occ_taken_size, .len = allocation->block->capacity - (occ_start + occ_taken_size)};
}
void MargaretBufAllocator_drop(MargaretBufAllocator self){
for (ListNodeMargaretBufAllocatorOneBlock* bi = self.blocks.first; bi; bi = bi->next) {
vkDestroyBuffer(self.device, bi->el.buf_hand, NULL);
vkFreeMemory(self.device, bi->el.mem_hand, NULL);
}
ListMargaretBufAllocatorOneBlock_drop(self.blocks);
BufRBTreeByLen_SetMargaretBAFreeSegment_drop(self.mem_free_space);
}
/* Free one subbuffer, not a whole MBA :) */
void MargaretBufAllocator_free(MargaretBufAllocator* self, MargaretSubbuf allocation){
U64Segment left_free_space = MargaretBufAllocator__get_left_free_space(self, &allocation);
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, &allocation);
MargaretBufAllocator__erase_gap(self, allocation.block, left_free_space.start, left_free_space.len);
MargaretBufAllocator__erase_gap(self, allocation.block, right_free_space.start, right_free_space.len);
MargaretBufAllocator__insert_gap(self, allocation.block,
left_free_space.start,
right_free_space.start + right_free_space.len - left_free_space.start);
}
/* Idk how to hide this monster */
void MargaretBufAllocator_debug(const MargaretBufAllocator* self){
printf(" ======== MargaretBufAllocator state ======== \n");
int n_segments = (int)self->mem_free_space.el.len;
printf("Blocks:\n");
for (ListNodeMargaretBufAllocatorOneBlock* block_it = self->blocks.first; block_it; block_it = block_it->next) {
U64 free_space_acc_segs = 0;
U64 occ_space_acc_occ = 0;
MargaretBufAllocatorOneBlock* block = &block_it->el;
int n_occupants = (int)block->occupants.el.len;
printf("-*- occupied: %lu/%lu, occupants: %d\n", block->occupation_counter, block->capacity, n_occupants);
for (int si = 0; si < n_segments; si++) {
MargaretBAFreeSegment fseg = self->mem_free_space.el.buf[si];
if (fseg.block == block) {
assert(fseg.start + fseg.len <= block->capacity);
free_space_acc_segs += fseg.len;
}
}
for (int oi = 0; oi < n_occupants; oi++) {
KVPU64ToU64 occ = block->occupants.el.buf[oi];
assert(occ.key + occ.value <= block->capacity);
occ_space_acc_occ += occ.value;
for (int sc = 0; sc < n_occupants; sc++) {
KVPU64ToU64 occ2 = block->occupants.el.buf[sc];
if (sc != oi) {
assert(occ.key + occ.value <= occ2.key || occ2.key + occ2.value <= occ.key);
}
}
}
assert(free_space_acc_segs == block->capacity - block->occupation_counter);
assert(occ_space_acc_occ == block->occupation_counter);
}
}
NODISCARD MargaretSubbuf MargaretBufAllocator_alloc(MargaretBufAllocator* self, U64 req_size){
req_size = margaret_bump_buffer_size_to_alignment(req_size, self->alignment_exp);
VkPhysicalDeviceMaintenance3Properties maintenance3_properties = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
};
VkPhysicalDeviceProperties2 properties = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
.pNext = &maintenance3_properties,
};
vkGetPhysicalDeviceProperties2(self->physical_device, &properties);
check(req_size <= maintenance3_properties.maxMemoryAllocationSize);
OptionMargaretBAFreeSegment free_gap = MargaretBufAllocator__search_gap(self, req_size);
if (free_gap.variant == Option_None) {
assert(self->blocks.first != NULL);
U64 pitch = self->blocks.first->el.capacity;
// Old blocks remain intact
U64 new_capacity = MAX_U64(req_size, MIN_U64(2 * pitch, maintenance3_properties.maxMemoryAllocationSize));
MargaretBufAllocator__add_block(self, new_capacity);
MargaretBufAllocatorOneBlock* new_block = &self->blocks.first->el;
MargaretBufAllocator__insert_gap(self, new_block, req_size, new_capacity - req_size);
new_block->occupation_counter = req_size;
bool iret = BufRBTree_MapU64ToU64_insert(&new_block->occupants, 0, req_size);
assert(iret);
return (MargaretSubbuf){.block = &self->blocks.first->el, .start = 0, .len = req_size};
}
MargaretBufAllocator__put_buf_to_a_gap(self, free_gap.some, req_size);
return (MargaretSubbuf){.block = free_gap.some.block, .start = free_gap.some.start, .len = req_size};
}
void MargaretBufAllocator_shrink(MargaretBufAllocator* self, MargaretSubbuf* allocation, U64 smaller_size){
smaller_size = margaret_bump_buffer_size_to_alignment(smaller_size, self->alignment_exp);
assert(smaller_size > 0);
assert(smaller_size <= allocation->len);
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, allocation);
MargaretBufAllocator__erase_gap(self, allocation->block, right_free_space.start, right_free_space.len);
MargaretBufAllocator__insert_gap(self, allocation->block,
allocation->start + smaller_size,
right_free_space.len + (allocation->len - smaller_size));
allocation->len = smaller_size;
}
/* It may actually return a 'null' MargaretSubbuf: if the returned .len field is zero, the expansion
* succeeded in place, the `allocation` argument was updated with the new size and nothing new was allocated.
* If the returned .len field is non-zero, a valid new MargaretSubbuf was returned and the `allocation`
* argument was left untouched. It remains a valid object; you need to deallocate it yourself
*/
NODISCARD MargaretSubbuf MargaretBufAllocator_expand(
MargaretBufAllocator* self, MargaretSubbuf* allocation, U64 bigger_size){
bigger_size = margaret_bump_buffer_size_to_alignment(bigger_size, self->alignment_exp);
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, allocation);
if (allocation->start + bigger_size > right_free_space.start + right_free_space.len){
return MargaretBufAllocator_alloc(self, bigger_size);
}
MargaretBufAllocator__erase_gap(self, allocation->block, right_free_space.start, right_free_space.len);
MargaretBufAllocator__insert_gap(self, allocation->block,
allocation->start + bigger_size,
right_free_space.len + (allocation->len - bigger_size));
allocation->len = bigger_size;
return (MargaretSubbuf){0};
}
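/* Typical caller pattern for the contract above (a sketch; `allocator` and `alloc` are hypothetical names):
 *
 *   MargaretSubbuf maybe_new = MargaretBufAllocator_expand(allocator, &alloc, new_size);
 *   if (maybe_new.len > 0) {
 *       // in-place expansion failed: migrate the data if needed, then retire the old subbuffer
 *       MargaretBufAllocator_free(allocator, alloc);
 *       alloc = maybe_new;
 *   }
 *   // else: `alloc` was grown in place and nothing else has to be done
 */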
char* MargaretSubbuf_get_mapped(const MargaretSubbuf* allocation){
assert(allocation->block->mapped_memory);
assert(allocation->start + allocation->len <= allocation->block->capacity);
return (char*)allocation->block->mapped_memory + allocation->start;
}
VkBuffer MargaretSubbuf_get_buffer(const MargaretSubbuf* allocation){
assert(allocation->start + allocation->len <= allocation->block->capacity);
return allocation->block->buf_hand;
}
/* It tries to expand the buffer in place; if that fails, it allocates a brand-new buffer, copies all
* the data from the old buffer to the new one, frees the old buffer and replaces the
* info in the `allocation` variable with info about the new allocation.
*/
void MargaretBufAllocator_expand_or_move_old_host_visible(
MargaretBufAllocator* self, MargaretSubbuf* allocation, U64 bigger_size){
assert(self->host_visible);
MargaretSubbuf maybe_bigger = MargaretBufAllocator_expand(self, allocation, bigger_size);
if (maybe_bigger.len > 0) {
memcpy(MargaretSubbuf_get_mapped(&maybe_bigger), MargaretSubbuf_get_mapped(allocation), allocation->len);
MargaretBufAllocator_free(self, *allocation);
*allocation = maybe_bigger;
}
}
/* It tries to expand the buffer in place; if that fails, it allocates a brand-new buffer, frees
* the old buffer and replaces the
* info in the `allocation` variable with info about the new allocation. The old data is lost
*/
void MargaretBufAllocator_expand_or_free_old(
MargaretBufAllocator* self, MargaretSubbuf* allocation, U64 bigger_size){
MargaretSubbuf maybe_bigger = MargaretBufAllocator_expand(self, allocation, bigger_size);
if (maybe_bigger.len > 0) {
MargaretBufAllocator_free(self, *allocation);
*allocation = maybe_bigger;
}
}


@ -0,0 +1,521 @@
/* This is a Claire header. Do not include it in more than one place.
* This Claire requires the following Vulkan API:
*
*
* typedef integer VkResult
*
* const VkResult VK_SUCCESS
*
* typedef integer VkStructureType
*
* const VkStructureType VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
* const VkStructureType VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
* const VkStructureType VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO
*
* typedef integer VkBufferCreateFlags
* typedef integer VkDeviceSize
* typedef integer VkBufferUsageFlags
* typedef integer VkSharingMode
*
* const VkSharingMode VK_SHARING_MODE_EXCLUSIVE
*
*
* typedef handle VkPhysicalDevice
* typedef handle VkDevice
* typedef handle VkBuffer
* typedef handle VkImage
* typedef handle VkDeviceMemory
* typedef handle VkCommandBuffer
*
* typedef struct {
* VkStructureType sType;
* const void* pNext;
* VkBufferCreateFlags flags;
* VkDeviceSize size;
* VkBufferUsageFlags usage;
* VkSharingMode sharingMode;
* uint32_t queueFamilyIndexCount;
* const uint32_t* pQueueFamilyIndices;
* } VkBufferCreateInfo
*
* typedef integer VkMemoryPropertyFlags
*
* const VkMemoryPropertyFlags VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
*
* typedef struct {
* VkMemoryPropertyFlags propertyFlags;
* ...
* } VkMemoryType
*
* #define VK_MAX_MEMORY_TYPES 32
*
* typedef struct {
* uint32_t memoryTypeCount;
* VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES];
* ...
* } VkPhysicalDeviceMemoryProperties
*
* void vkGetPhysicalDeviceMemoryProperties(
* VkPhysicalDevice physicalDevice,
* VkPhysicalDeviceMemoryProperties* pMemoryProperties)
*
* typedef void VkAllocationCallbacks
* (seriously, this type is only used as a pointer in an exposed api function, and it is always passed as NULL)
*
* VkResult vkCreateBuffer(
* VkDevice device,
* const VkBufferCreateInfo* pCreateInfo,
* const VkAllocationCallbacks* pAllocator,
* VkBuffer* pBuffer)
*
* typedef struct {
* VkDeviceSize size;
* VkDeviceSize alignment;
* uint32_t memoryTypeBits;
* } VkMemoryRequirements
*
* void vkGetBufferMemoryRequirements(
* VkDevice device,
* VkBuffer buffer,
* VkMemoryRequirements* pMemoryRequirements)
*
* typedef integer VkImageCreateFlags
* typedef integer VkImageType
*
* const VkImageType VK_IMAGE_TYPE_2D
*
* typedef integer VkFormat
*
* typedef struct {
* uint32_t width;
* uint32_t height;
* uint32_t depth;
* } VkExtent3D
*
* typedef integer VkSampleCountFlagBits
*
* const VkSampleCountFlagBits VK_SAMPLE_COUNT_1_BIT
*
* typedef integer VkImageTiling
*
* const VkImageTiling VK_IMAGE_TILING_LINEAR
*
* const VkImageTiling VK_IMAGE_TILING_OPTIMAL
*
* typedef integer VkImageUsageFlags
* typedef integer VkImageLayout
*
* const VkImageLayout VK_IMAGE_LAYOUT_UNDEFINED
*
* typedef struct {
* VkStructureType sType;
* const void* pNext;
* VkImageCreateFlags flags;
* VkImageType imageType;
* VkFormat format;
* VkExtent3D extent;
* uint32_t mipLevels;
* uint32_t arrayLayers;
* VkSampleCountFlagBits samples;
* VkImageTiling tiling;
* VkImageUsageFlags usage;
* VkSharingMode sharingMode;
* uint32_t queueFamilyIndexCount;
* const uint32_t* pQueueFamilyIndices;
* VkImageLayout initialLayout;
* } VkImageCreateInfo
*
* VkResult vkCreateImage(
* VkDevice device,
* const VkImageCreateInfo* pCreateInfo,
* const VkAllocationCallbacks* pAllocator,
* VkImage* pImage)
*
* void vkGetImageMemoryRequirements(
* VkDevice device,
* VkImage image,
* VkMemoryRequirements* pMemoryRequirements)
*
* typedef struct {
* VkStructureType sType;
* const void* pNext;
* VkDeviceSize allocationSize;
* uint32_t memoryTypeIndex;
* } VkMemoryAllocateInfo
*
* VkResult vkAllocateMemory(
* VkDevice device,
* const VkMemoryAllocateInfo* pAllocateInfo,
* const VkAllocationCallbacks* pAllocator,
* VkDeviceMemory* pMemory)
*
* VkResult vkBindBufferMemory(
* VkDevice device,
* VkBuffer buffer,
* VkDeviceMemory memory,
* VkDeviceSize memoryOffset)
*
* VkResult vkBindImageMemory(
* VkDevice device,
* VkImage image,
* VkDeviceMemory memory,
* VkDeviceSize memoryOffset)
*
* void vkDestroyBuffer(
* VkDevice device,
* VkBuffer buffer,
* const VkAllocationCallbacks* pAllocator)
*
* void vkDestroyImage(
* VkDevice device,
* VkImage image,
* const VkAllocationCallbacks* pAllocator)
*/
// todo: get rid of this whole VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT crap. MargaretMA is for non-host-visible
// todo: for staging buffers you better use MargaretBufferAllocator. Ou, yeah, I have yet to write them
// todo: fucking rewrite all of this. Yes, I want all of this shit rewritten
#include "../../l1/core/util.h"
#include "../../l1_5/core/buff_rb_tree_node.h"
#include "../../../gen/l1_5/BufRBTree_MapU64ToU64.h"
typedef struct{
U64 block;
U64 start;
U64 len;
} MargaretIAFreeSegment;
#include "../../l1/core/uint_segments.h"
// todo: substitute U64Segment_get_length_resp_alignment by my own function
bool MargaretIAFreeSegment_less_resp_align(const MargaretIAFreeSegment* A, const MargaretIAFreeSegment* B, U8 alignment_exp){
U64 A_len = U64Segment_get_length_resp_alignment((U64Segment){A->start, A->len}, alignment_exp);
U64 B_len = U64Segment_get_length_resp_alignment((U64Segment){B->start, B->len}, alignment_exp);
if (A_len == B_len) {
if (A->block == B->block) {
return A->start < B->start;
}
return A->block < B->block;
}
return A_len < B_len;
}
/* Does not include all the parameters needed for relocation, because relocation is only needed
* during controlled defragmentation */
typedef struct {
U64 block;
VkImage image;
U64 start;
} MargaretImgAllocation;
/* Not primitive */
typedef struct {
BufRBTree_MapU64ToU64 images;
U64 capacity;
U64 occupation_counter;
VkDeviceMemory mem_hand;
void* mapped_memory;
} MargaretImgAllocatorOneBlock;
void MargaretImgAllocatorOneBlock_drop(MargaretImgAllocatorOneBlock self){
BufRBTree_MapU64ToU64_drop(self.images);
}
#include "../../../gen/l1/eve/margaret/VecMargaretImgAllocatorOneBlock.h"
#include "../../../gen/l1/VecAndSpan_U8.h"
#include "../../../gen/l1/eve/margaret/VecMargaretIAFreeSegment.h"
#include "../../../gen/l1/eve/margaret/OptionMargaretIAFreeSegment.h"
#include "../../../gen/l1_5/eve/margaret/BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment.h"
#include "../../../gen/l1/eve/margaret/OptionBufRBTreeByLenRespAlign_SetMargaretIAFreeSegment.h"
#define MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP 28
/* Superstructure for managing free segments of memory of some type in ALL BLOCKS */
typedef struct {
OptionBufRBTreeByLenRespAlign_SetMargaretIAFreeSegment free_space_in_memory[MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP];
VecU8 set_present;
} MargaretMemFreeSpaceManager;
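/* Every present entry of free_space_in_memory holds the same set of free segments, just ordered by how
 * much usable space each segment offers at that alignment. Sets are created lazily on the first search
 * for a given alignment_exp, and set_present lists which alignments currently have a set so that
 * insert()/erase() can keep all of them in sync. */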
MargaretMemFreeSpaceManager MargaretMemFreeSpaceManager_new(){
MargaretMemFreeSpaceManager res = {.set_present = VecU8_new_zeroinit(1)};
res.set_present.buf[0] = 3;
for (U8 algn = 0; algn < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP; algn++)
res.free_space_in_memory[algn] = None_BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment();
res.free_space_in_memory[3] = Some_BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment(
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_new_reserved(3, 1));
return res;
}
void MargaretMemFreeSpaceManager_drop(MargaretMemFreeSpaceManager self){
for (U8 alignment_exp = 0; alignment_exp < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP; alignment_exp++)
OptionBufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_drop(self.free_space_in_memory[alignment_exp]);
VecU8_drop(self.set_present);
}
void MargaretMemFreeSpaceManager_erase(MargaretMemFreeSpaceManager* man, U64 block, U64 start, U64 len){
if (len == 0)
return;
assert(man->set_present.len > 0);
for (size_t aj = 0; aj < man->set_present.len; aj++) {
U8 alignment = man->set_present.buf[aj];
assert(alignment < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP);
assert(man->free_space_in_memory[alignment].variant == Option_Some);
bool eret = BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_erase(&
man->free_space_in_memory[alignment].some,
&(MargaretIAFreeSegment){.block = block, .start = start, .len = len});
assert(eret);
}
}
void MargaretMemFreeSpaceManager_insert(MargaretMemFreeSpaceManager* man, U64 block, U64 start, U64 len){
if (len == 0)
return;
assert(man->set_present.len > 0); /* the constructor always registers at least the 2^3-aligned set */
for (size_t aj = 0; aj < man->set_present.len; aj++) {
U8 alignment = man->set_present.buf[aj];
assert(alignment < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP);
assert(man->free_space_in_memory[alignment].variant == Option_Some);
bool iret = BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_insert(&
man->free_space_in_memory[alignment].some, (MargaretIAFreeSegment){.block = block, .start = start, .len = len});
assert(iret);
}
}
OptionMargaretIAFreeSegment MargaretMemFreeSpaceManager_search(
MargaretMemFreeSpaceManager* man, U8 alignment_exp, U64 req_size) {
check(alignment_exp < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP);
if (man->free_space_in_memory[alignment_exp].variant == Option_None) {
assert(man->set_present.len > 0);
assert(man->free_space_in_memory[man->set_present.buf[0]].variant == Option_Some);
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment* have = &man->free_space_in_memory[man->set_present.buf[0]].some;
man->free_space_in_memory[alignment_exp] = Some_BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment(
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_new_reserved(alignment_exp, have->el.len));
for (size_t i = 0; i < have->el.len; i++) {
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_insert(
&man->free_space_in_memory[alignment_exp].some, *VecMargaretIAFreeSegment_at(&have->el, i));
}
VecU8_append(&man->set_present, alignment_exp); /* keep the new set in sync with future insert/erase calls */
}
assert(man->free_space_in_memory[alignment_exp].variant == Option_Some);
U64 sit = BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_find_min_grtr_or_eq(
&man->free_space_in_memory[alignment_exp].some, &(MargaretIAFreeSegment){.len = req_size,});
if (sit == 0)
return None_MargaretIAFreeSegment();
return Some_MargaretIAFreeSegment(*BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_at_iter(
&man->free_space_in_memory[alignment_exp].some, sit));
}
/* VkDevice and VkPhysicalDevice stay remembered here. Don't forget that, please */
typedef struct {
VecMargaretImgAllocatorOneBlock blocks;
MargaretMemFreeSpaceManager mem_free_space;
VkDevice device;
VkPhysicalDevice physical_device;
U8 memory_type_id;
} MargaretImgAllocator;
void MargaretImgAllocator__erase_gap(MargaretImgAllocator* self, U64 block_id, U64 start, U64 len){
MargaretMemFreeSpaceManager_erase(&self->mem_free_space, block_id, start, len);
MargaretImgAllocatorOneBlock* BLOCK = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, block_id);
BLOCK->occupation_counter += len;
assert(BLOCK->occupation_counter <= BLOCK->capacity);
}
void MargaretImgAllocator__insert_gap(MargaretImgAllocator* self, U64 block_id, U64 start, U64 len){
MargaretMemFreeSpaceManager_insert(&self->mem_free_space, block_id, start, len);
MargaretImgAllocatorOneBlock* BLOCK = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, block_id);
assert(len <= BLOCK->occupation_counter);
BLOCK->occupation_counter -= len;
}
void MargaretImgAllocator__add_block(MargaretImgAllocator* self, U64 capacity){
VkDeviceMemory memory;
check(vkAllocateMemory(self->device, &(VkMemoryAllocateInfo){
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = capacity, .memoryTypeIndex = self->memory_type_id
}, NULL, &memory) == VK_SUCCESS);
VecMargaretImgAllocatorOneBlock_append(&self->blocks, (MargaretImgAllocatorOneBlock){
.images = BufRBTree_MapU64ToU64_new_reserved(1),
.capacity = capacity,
.occupation_counter = capacity,
.mem_hand = memory,
.mapped_memory = NULL /* not supported */});
}
MargaretImgAllocator MargaretImgAllocator_new(
VkDevice device, VkPhysicalDevice physical_device, U8 memory_type_id, U64 initial_block_size
){
MargaretImgAllocator self = {
.blocks = VecMargaretImgAllocatorOneBlock_new(),
.mem_free_space = MargaretMemFreeSpaceManager_new(),
.device = device,
.physical_device = physical_device,
.memory_type_id = memory_type_id,
};
MargaretImgAllocator__add_block(&self, initial_block_size);
MargaretImgAllocator__insert_gap(&self, 0, 0, initial_block_size);
return self;
}
U64 margaret_get_alignment_left_padding(U64 unaligned_start, U8 alignment_exp){
U64 hit = unaligned_start & ((1ull << alignment_exp) - 1);
return (hit ? (1ull << alignment_exp) - hit : 0);
}
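/* Example: with alignment_exp = 12 (4096-byte alignment), start 4096 -> padding 0,
 * start 5000 -> padding 3192 (5000 + 3192 == 8192, the next aligned offset) */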
U64 MargaretImgAllocator__add_img_given_gap(
MargaretImgAllocator* self, MargaretIAFreeSegment segment, U64 required_size, U8 alignment_exp
){
U64 gap_start = segment.start;
U64 gap_len = segment.len;
U64 af = margaret_get_alignment_left_padding(gap_start, alignment_exp);
U64 aligned_start = gap_start + af;
assert(aligned_start + required_size <= gap_start + gap_len);
MargaretImgAllocator__erase_gap(self, segment.block, gap_start, gap_len);
MargaretImgAllocator__insert_gap(self, segment.block, gap_start, af);
MargaretImgAllocator__insert_gap(self, segment.block, aligned_start + required_size,
gap_start + gap_len - (aligned_start + required_size));
BufRBTree_MapU64ToU64* images = &VecMargaretImgAllocatorOneBlock_mat(&self->blocks, segment.block)->images;
bool iret = BufRBTree_MapU64ToU64_insert(images, aligned_start, required_size);
assert(iret);
return aligned_start;
}
U64Segment MargaretImgAllocator__get_left_free_space(
const MargaretImgAllocator* self, MargaretImgAllocation allocation){
const MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_at(&self->blocks, allocation.block);
U64 occ_start = allocation.start;
U64 prev_occ_it = BufRBTree_MapU64ToU64_find_max_less(&block->images, allocation.start);
if (prev_occ_it != 0) {
U64 prev_occ_start;
U64 prev_occ_taken_size;
BufRBTree_MapU64ToU64_at_iter(&block->images, prev_occ_it, &prev_occ_start, &prev_occ_taken_size);
assert(prev_occ_start + prev_occ_taken_size <= occ_start);
return (U64Segment){
.start = prev_occ_start + prev_occ_taken_size,
.len = occ_start - (prev_occ_start + prev_occ_taken_size)};
}
return (U64Segment){.start = 0, .len = occ_start};
}
U64Segment MargaretImgAllocator__get_right_free_space(
const MargaretImgAllocator* self, MargaretImgAllocation allocation){
const MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_at(&self->blocks, allocation.block);
U64 occ_start = allocation.start;
VkMemoryRequirements occ_memory_requirements;
vkGetImageMemoryRequirements(self->device, allocation.image, &occ_memory_requirements);
U64 occ_taken_size = occ_memory_requirements.size;
U64 next_occ_it = BufRBTree_MapU64ToU64_find_min_grtr(&block->images, allocation.start);
if (next_occ_it != 0) {
U64 next_occ_start;
U64 next_occ_taken_size;
BufRBTree_MapU64ToU64_at_iter(&block->images, next_occ_it, &next_occ_start, &next_occ_taken_size);
assert(occ_start + occ_taken_size <= next_occ_start);
return (U64Segment){.start = occ_start + occ_taken_size, .len = next_occ_start - (occ_start + occ_taken_size)};
}
return (U64Segment){.start = occ_start + occ_taken_size, .len = block->capacity - (occ_start + occ_taken_size)};
}
void MargaretImgAllocator_drop(MargaretImgAllocator self){
for (size_t bi = 0; bi < self.blocks.len; bi++) {
vkFreeMemory(self.device, self.blocks.buf[bi].mem_hand, NULL);
}
VecMargaretImgAllocatorOneBlock_drop(self.blocks);
MargaretMemFreeSpaceManager_drop(self.mem_free_space);
}
void MargaretImgAllocator_free(MargaretImgAllocator* self, MargaretImgAllocation allocation){
U64Segment left_free_space = MargaretImgAllocator__get_left_free_space(self, allocation);
U64Segment right_free_space = MargaretImgAllocator__get_right_free_space(self, allocation);
vkDestroyImage(self->device, allocation.image, NULL);
MargaretImgAllocator__erase_gap(self, allocation.block, left_free_space.start, left_free_space.len);
MargaretImgAllocator__erase_gap(self, allocation.block, right_free_space.start, right_free_space.len);
MargaretImgAllocator__insert_gap(self, allocation.block,
left_free_space.start,
right_free_space.start + right_free_space.len - left_free_space.start);
}
NODISCARD MargaretImgAllocation MargaretImgAllocator__alloc(
MargaretImgAllocator* self, U64 width, U64 height, VkFormat format,
VkImageUsageFlags usage_flags
){
VkPhysicalDeviceMaintenance3Properties maintenance3_properties = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
};
VkPhysicalDeviceProperties2 properties = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
.pNext = &maintenance3_properties,
};
vkGetPhysicalDeviceProperties2(self->physical_device, &properties);
VkImage fresh_img;
check(vkCreateImage(self->device, &(VkImageCreateInfo){
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.imageType = VK_IMAGE_TYPE_2D,
.format = format,
.extent = (VkExtent3D){.width = width, .height = height,.depth = 1,},
.mipLevels = 1,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = usage_flags,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
}, NULL, &fresh_img) == VK_SUCCESS);
VkMemoryRequirements mem_requirements;
vkGetImageMemoryRequirements(self->device, fresh_img, &mem_requirements);
check(U64_is_2pow(mem_requirements.alignment));
U8 alignment_exp = U64_2pow_log(mem_requirements.alignment);
check(mem_requirements.size <= maintenance3_properties.maxMemoryAllocationSize);
OptionMargaretIAFreeSegment free_gap =
MargaretMemFreeSpaceManager_search(&self->mem_free_space, alignment_exp, mem_requirements.size);
if (free_gap.variant == Option_None) {
assert(self->blocks.len > 0);
U64 pitch = self->blocks.buf[self->blocks.len - 1].capacity;
// Old blocks remain intact
U64 new_capacity = MAX_U64(mem_requirements.size, MIN_U64(2 * pitch, maintenance3_properties.maxMemoryAllocationSize));
MargaretImgAllocator__add_block(self, new_capacity);
U64 bid = self->blocks.len - 1;
MargaretImgAllocator__insert_gap(self, bid, mem_requirements.size, new_capacity - mem_requirements.size);
MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, bid);
block->occupation_counter = mem_requirements.size;
bool iret = BufRBTree_MapU64ToU64_insert(&block->images, 0, mem_requirements.size);
assert(iret);
check(vkBindImageMemory(self->device, fresh_img, block->mem_hand, 0) == VK_SUCCESS);
return (MargaretImgAllocation){.block = bid, .image = fresh_img, .start = 0};
}
U64 aligned_pos = MargaretImgAllocator__add_img_given_gap(self, free_gap.some, mem_requirements.size, alignment_exp);
VkDeviceMemory memory = VecMargaretImgAllocatorOneBlock_at(&self->blocks, free_gap.some.block)->mem_hand;
check(vkBindImageMemory(self->device, fresh_img, memory, aligned_pos) == VK_SUCCESS);
return (MargaretImgAllocation){.block = free_gap.some.block, .image = fresh_img, .start = aligned_pos};
}
typedef struct{
MargaretImgAllocation a;
U64 width;
U64 height;
VkFormat format;
VkImageUsageFlags usage_flags;
VkImageLayout current_layout;
} MargaretImg;
NODISCARD MargaretImg MargaretImgAllocator_alloc(
MargaretImgAllocator* self, U64 width, U64 height, VkFormat format,
VkImageUsageFlags usage_flags
){
return (MargaretImg){.a = MargaretImgAllocator__alloc(self, width, height, format, usage_flags),
.width = width, .height = height, .format = format, .usage_flags = usage_flags,
.current_layout = VK_IMAGE_LAYOUT_UNDEFINED};
}


@ -0,0 +1,8 @@
#ifndef prototype1_src_l2_margaret_vulkan_memory_h
#define prototype1_src_l2_margaret_vulkan_memory_h
#include <vulkan/vulkan.h>
#include "vulkan_images_claire.h"
#include "vulkan_buffer_claire.h"
#endif

File diff suppressed because it is too large


@ -18,7 +18,6 @@
#include "../../../gen/l1/vulkan/VecVkPhysicalDevice.h"
#include "../../../gen/l1/vulkan/SpanVkFormat.h"
#include "../../../gen/l1/vulkan/OptionVkFormat.h"
#include "../../../gen/l1/vulkan/VecVkDescriptorPoolSize.h"
#include "../../../gen/l1/vulkan/VecVkQueueFamilyProperties.h"
#include "../../../gen/l1/vulkan/OptionVkCompositeAlphaFlagBitsKHR.h"
#include "../../../gen/l1/vulkan/VecVkPresentModeKHR.h"
@ -28,6 +27,7 @@
#include "../../../gen/l1/vulkan/VecVkSurfaceFormatKHR.h"
#include "../../../gen/l1/vulkan/OptionVkSurfaceFormatKHR.h"
#include <vulkan/vulkan_wayland.h>
#include "../../../gen/l1/vulkan/VecVkImageMemoryBarrier.h"
void margaret_create_debug_utils_messenger_EXT(
VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
@ -302,7 +302,7 @@ NODISCARD VecU8 margaret_stringify_device_memory_properties_2(VkPhysicalDevice p
return VecU8_fmt(
"maxMemoryAllocationsCount: %u\n"
"maxMemoryAllocationSize: %u\n"
"maxBufferSize: %u!!!!!!!!!\n",
"maxBufferSize: %u\n",
maxMemoryAllocationCount, maxMemoryAllocationSize, maxBufferSize);
}
@ -320,15 +320,22 @@ VkDevice margaret_create_logical_device(VkPhysicalDevice physical_device, Margar
logical_device_queue_crinfo[0].queueFamilyIndex = queue_fam.for_graphics;
logical_device_queue_crinfo[1].queueFamilyIndex = queue_fam.for_presentation;
VkPhysicalDeviceVulkan12Features used_vk12_features = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
.runtimeDescriptorArray = true,
.shaderSampledImageArrayNonUniformIndexing = true,
};
// We DEMAND synchronization2
VkPhysicalDeviceSynchronization2Features used_synchronization2_features = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES,
.pNext = &used_vk12_features,
.synchronization2 = VK_TRUE,
};
VkPhysicalDeviceFeatures2 used_features2 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
.pNext = (void*)&used_synchronization2_features,
.pNext = &used_synchronization2_features,
.features = (VkPhysicalDeviceFeatures) {
.geometryShader = true,
.samplerAnisotropy = physical_features.samplerAnisotropy,
},
};
@ -399,8 +406,8 @@ OptionVkExtent2D margaret_choose_image_extent(const VkSurfaceCapabilitiesKHR* ca
if (capabilities->minImageExtent.width > sane_limits.width ||
capabilities->minImageExtent.height > sane_limits.height)
return None_VkExtent2D();
return Some_VkExtent2D((VkExtent2D) { MIN_U32(sane_limits.width, sane_limits.width),
MIN_U32(sane_limits.height, sane_limits.height) });
return Some_VkExtent2D((VkExtent2D) { MIN_U32(sane_limits.width, capabilities->maxImageExtent.width),
MIN_U32(sane_limits.height, capabilities->maxImageExtent.height) });
}
/* May be bigger, than a sane limit */
return Some_VkExtent2D(capabilities->currentExtent);
@ -546,12 +553,16 @@ MargaretScoredPhysicalDevice margaret_score_physical_device(
SpanU8_print(VecU8_to_span(&txt));
VecU8_drop(txt);
}
VkPhysicalDeviceVulkan12Features vk12_features = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES
};
VkPhysicalDeviceSynchronization2Features synchronization2_features = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES,
.pNext = &vk12_features
};
VkPhysicalDeviceFeatures2 features2 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
.pNext = (void*)&synchronization2_features,
.pNext = &synchronization2_features,
};
vkGetPhysicalDeviceFeatures2(dev, &features2);
// printf("Device %s\nmaxBoundDescriptorSets: %" PRIu32 " \nmaxPerStageDescriptorUniformBuffers: %" PRIu32 "\n"
@ -565,11 +576,13 @@ MargaretScoredPhysicalDevice margaret_score_physical_device(
else if (properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU)
score += 100;
if (!features2.features.geometryShader)
return (MargaretScoredPhysicalDevice){dev, -1, cstr("No geometry shader")};
return (MargaretScoredPhysicalDevice){dev, -1, cstr("No geometry shaders")};
if (!synchronization2_features.synchronization2)
return (MargaretScoredPhysicalDevice){dev, -1, cstr("No synchronization2")};
if (features2.features.samplerAnisotropy)
score += 2;
if (!vk12_features.shaderSampledImageArrayNonUniformIndexing)
return (MargaretScoredPhysicalDevice){dev, -1, cstr("No shaderSampledImageArrayNonUniformIndexing")};
if (!vk12_features.runtimeDescriptorArray)
return (MargaretScoredPhysicalDevice){dev, -1, cstr("No runtimeDescriptorArray")};
ResultMargaretChosenQueueFamiliesOrSpanU8 queue_families = margaret_choose_good_queue_families(dev, surface);
if (queue_families.variant == Result_Err)
return (MargaretScoredPhysicalDevice){dev, -1, queue_families.err};
@ -601,7 +614,7 @@ MargaretScoredPhysicalDevice margaret_score_physical_device(
#define MargaretScoredPhysicalDevice_less_MargaretScoredPhysicalDevice(cap, cbp) ((cap)->score < (cbp)->score)
#include "../../../gen/l1/eve/margaret/VecAndSpan_MargaretScoredPhysicalDevice.h"
#include "../../../gen/l1/eve/margaret/VecMargaretScoredPhysicalDevice.h"
VecMargaretScoredPhysicalDevice margaret_get_physical_devices_scored(
VkInstance instance, VkSurfaceKHR surface,
@ -620,7 +633,7 @@ VecMargaretScoredPhysicalDevice margaret_get_physical_devices_scored(
favourite_word, forbidden_word, sane_image_extent_limit
);
}
MutSpanMargaretScoredPhysicalDevice_sort(VecMargaretScoredPhysicalDevice_to_mspan(&scored_devices));
VecMargaretScoredPhysicalDevice_sort(&scored_devices);
VecVkPhysicalDevice_drop(physical_devices);
return scored_devices;
}
@ -735,6 +748,7 @@ VecVkFramebuffer margaret_create_swapchain_framebuffers(
VecVkFramebuffer swapchain_framebuffers = VecVkFramebuffer_new_zeroinit(swapchain_image_views->len);
for (uint32_t i = 0; i < swapchain_image_views->len; i++) {
VkImageView attachments[1] = {*VecVkImageView_at(swapchain_image_views, i)};
printf("CREATING FRAMEBUFFER: %u %u\n", image_extent.width, image_extent.height);
VkFramebufferCreateInfo framebuffer_crinfo = {
.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
.renderPass = render_pass,
@ -820,34 +834,19 @@ void MargaretSwapchainBundle_drop_with_device(VkDevice device, MargaretSwapchain
VkShaderModule margaret_VkShaderModule_new(VkDevice device, VecU8 code) {
if (code.len < 4)
abortf("Kill yourself, please\n");
VkShaderModuleCreateInfo shad_mod_crinfo = {
VkShaderModule shad_module;
check(vkCreateShaderModule(device, &(VkShaderModuleCreateInfo){
.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
.codeSize = code.len,
// Now this is funny: we can't put an arbitrary byte string here, it has to be 4-byte aligned.
// Thank goodness all the strings in VecU8 are allocated with calloc, which gives high alignment to
// virtually everything
.pCode = (const uint32_t*)code.buf
};
VkShaderModule shad_module;
if (vkCreateShaderModule(device, &shad_mod_crinfo, NULL, &shad_module) != VK_SUCCESS)
abortf("vkCreateShaderModule\n");
}, NULL, &shad_module) == VK_SUCCESS);
VecU8_drop(code);
return shad_module;
}
VkPipelineShaderStageCreateInfo margaret_shader_stage_vertex_crinfo(VkShaderModule module) {
return (VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .module = module,
.stage = VK_SHADER_STAGE_VERTEX_BIT, .pName = "main",
};
}
VkPipelineShaderStageCreateInfo margaret_shader_stage_fragment_crinfo(VkShaderModule module) {
return (VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .module = module,
.stage = VK_SHADER_STAGE_FRAGMENT_BIT, .pName = "main",
};
}
VkCommandPool margaret_create_resettable_command_pool(VkDevice device, uint32_t wanted_queue_family) {
VkCommandPoolCreateInfo crinfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
@ -909,334 +908,21 @@ VkDeviceSize margaret_align_start_of_buffer(VkDeviceSize was, VkDeviceSize align
return was % alignment ? (was + alignment - was % alignment) : was;
}
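/* Worked examples of the rounding above (illustration only):
 *   margaret_align_start_of_buffer(0, 16)  == 0
 *   margaret_align_start_of_buffer(5, 16)  == 16   // 5 % 16 = 5, so 5 + 16 - 5 = 16
 *   margaret_align_start_of_buffer(32, 16) == 32   // already aligned, returned as-is
 */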
// We first specify the necessary fields `sz` and `usage`, and then the Snow White creation function fills `offset` and `buffer`
// Used in autogenerated code
typedef struct {
// necessary
VkDeviceSize sz;
VkBufferUsageFlags usage;
// filled
VkDeviceSize offset;
VkBuffer buffer;
} MargaretBufferInMemoryInfo;
typedef MargaretBufferInMemoryInfo* PtrMargaretBufferInMemoryInfo;
// Used in autogenerated code
typedef struct {
// necessary
uint32_t width;
uint32_t height;
VkFormat format;
VkImageUsageFlags usage;
// filled
VkDeviceSize offset;
VkImage image;
} MargaretImageInMemoryInfo;
typedef MargaretImageInMemoryInfo* PtrMargaretImageInMemoryInfo;
#include "../../../gen/l1/eve/margaret/VecMargaretBufferInMemoryInfo.h"
#include "../../../gen/l1/eve/margaret/VecAndSpan_PtrMargaretBufferInMemoryInfo.h"
#include "../../../gen/l1/eve/margaret/VecMargaretImageInMemoryInfo.h"
#include "../../../gen/l1/eve/margaret/VecAndSpan_PtrMargaretImageInMemoryInfo.h"
// A handy function to initialize buffers and images (attaching them to allocated memory)
VkDeviceMemory margaret_initialize_buffers_and_images(
VkPhysicalDevice physical_device, VkDevice device,
MutSpanPtrMargaretBufferInMemoryInfo buffer_hands, MutSpanPtrMargaretImageInMemoryInfo image_hands,
VkMemoryPropertyFlags properties
) {
uint32_t memory_types_allowed = -1;
VkDeviceSize offset = 0;
for (size_t i = 0; i < buffer_hands.len; i++) {
MargaretBufferInMemoryInfo* buf_hand = *MutSpanPtrMargaretBufferInMemoryInfo_at(buffer_hands, i);
VkBufferCreateInfo create_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.size = buf_hand->sz,
.usage = buf_hand->usage,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
};
if (vkCreateBuffer(device, &create_info, NULL, &buf_hand->buffer) != VK_SUCCESS) {
abortf("vkCreateBuffer");
}
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(device, buf_hand->buffer, &memory_requirements);
memory_types_allowed &= memory_requirements.memoryTypeBits;
offset = margaret_align_start_of_buffer(offset, memory_requirements.alignment);
buf_hand->offset = offset;
offset = offset + memory_requirements.size;
}
for (size_t i = 0; i < image_hands.len; i++) {
MargaretImageInMemoryInfo* img_hand = *MutSpanPtrMargaretImageInMemoryInfo_at(image_hands, i);
VkImageCreateInfo crinfo = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.imageType = VK_IMAGE_TYPE_2D,
.format = img_hand->format,
.extent = (VkExtent3D){
.width = img_hand->width,
.height = img_hand->height,
.depth = 1,
},
.mipLevels = 1,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = img_hand->usage,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
};
if (vkCreateImage(device, &crinfo, NULL, &img_hand->image) != VK_SUCCESS)
abortf("vkCreateImage");
VkMemoryRequirements memory_requirements;
vkGetImageMemoryRequirements(device, img_hand->image, &memory_requirements);
memory_types_allowed &= memory_requirements.memoryTypeBits;
offset = margaret_align_start_of_buffer(offset, memory_requirements.alignment);
img_hand->offset = offset;
offset = offset + memory_requirements.size;
}
VkMemoryAllocateInfo alloc_info = {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = offset,
.memoryTypeIndex = margaret_find_memory_type (physical_device, memory_types_allowed, properties),
};
VkDeviceMemory memory;
if (vkAllocateMemory(device, &alloc_info, NULL, &memory) != VK_SUCCESS) {
abortf("Having trouble allocating %lu bytes with memory type %u\n", alloc_info.allocationSize, alloc_info.memoryTypeIndex);
}
for (size_t i = 0; i < buffer_hands.len; i++) {
MargaretBufferInMemoryInfo* buf_hand = *MutSpanPtrMargaretBufferInMemoryInfo_at(buffer_hands, i);
if (vkBindBufferMemory(device, buf_hand->buffer, memory, buf_hand->offset) != VK_SUCCESS)
abortf("vkBindBufferMemory");
}
for (size_t i = 0; i < image_hands.len; i++) {
MargaretImageInMemoryInfo* img_hand = *MutSpanPtrMargaretImageInMemoryInfo_at(image_hands, i);
if (vkBindImageMemory(device, img_hand->image, memory, img_hand->offset) != VK_SUCCESS)
abortf("vkBindImageMemory");
}
return memory;
}
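/* A minimal usage sketch of the "fill the necessary fields, let the helper fill the rest" pattern above.
 * Illustrative only: `my_vbo`, `my_tex` and the literal sizes are placeholders, and the MutSpan
 * initializers assume the usual { .data, .len } layout of the generated span types.
 *
 *   MargaretBufferInMemoryInfo my_vbo = { .sz = 1024, .usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT };
 *   MargaretImageInMemoryInfo  my_tex = { .width = 64, .height = 64,
 *       .format = VK_FORMAT_R8G8B8A8_SRGB, .usage = VK_IMAGE_USAGE_SAMPLED_BIT };
 *   PtrMargaretBufferInMemoryInfo buf_ptrs[] = { &my_vbo };
 *   PtrMargaretImageInMemoryInfo  img_ptrs[] = { &my_tex };
 *   VkDeviceMemory mem = margaret_initialize_buffers_and_images(physical_device, device,
 *       (MutSpanPtrMargaretBufferInMemoryInfo){ .data = buf_ptrs, .len = ARRAY_SIZE(buf_ptrs) },
 *       (MutSpanPtrMargaretImageInMemoryInfo){ .data = img_ptrs, .len = ARRAY_SIZE(img_ptrs) },
 *       VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
 *   // afterwards my_vbo.buffer / my_tex.image exist and are bound to `mem` at my_vbo.offset / my_tex.offset
 */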
#define margaret_prep_buffer_mem_info_of_gpu_vbo_Definition(TV) \
MargaretBufferInMemoryInfo TV##_buffer_crinfo_of_gpu_vbo(size_t n) { \
return (MargaretBufferInMemoryInfo){ \
.sz = sizeof(TV) * n, \
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT \
}; \
}
MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_gpu_ebo(size_t n) {
return (MargaretBufferInMemoryInfo){ .sz = sizeof(uint32_t) * n,
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT };
}
// Not very useful (but I used it anyway)
MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_small_local_ubo(size_t struct_sz) {
return (MargaretBufferInMemoryInfo){ .sz = struct_sz, .usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT };
}
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_srgba(uint32_t w, uint32_t h) {
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8G8B8A8_SRGB,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
}
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_unorm_8(uint32_t w, uint32_t h){
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8_UNORM,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
}
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_gpu_texture_unorm_32(uint32_t w, uint32_t h) {
return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8G8B8A8_UNORM,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT };
}
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_zbuffer(uint32_t max_width, uint32_t max_height, VkFormat zbuf_format) {
return (MargaretImageInMemoryInfo){ .width = max_width, .height = max_height, .format = zbuf_format,
.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT };
}
/* Used both for sampling and as a color attachment */
MargaretImageInMemoryInfo margaret_prep_image_mem_info_of_colorbuffer(U32 width, U32 height, VkFormat format) {
return (MargaretImageInMemoryInfo){.width = width, .height = height, .format = format,
.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT};
}
MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_gpu_ubo(size_t struct_sz) {
return (MargaretBufferInMemoryInfo){ .sz = struct_sz,
.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT };
}
// Crutch for Vulkan
VkCommandBuffer margaret_alloc_and_begin_single_use_command_buffer(VkDevice device, VkCommandPool command_pool) {
VkCommandBuffer command_buffers[1];
VkCommandBufferAllocateInfo alloc_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
.commandPool = command_pool,
.commandBufferCount = ARRAY_SIZE(command_buffers),
};
if (vkAllocateCommandBuffers(device, &alloc_info, command_buffers) != VK_SUCCESS)
abortf("vkAllocateCommandBuffers");
VkCommandBuffer copying_command_buffer = command_buffers[0];
VkCommandBufferBeginInfo beginfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
};
if (vkBeginCommandBuffer(copying_command_buffer, &beginfo) != VK_SUCCESS)
abortf("vkBeginCommandBuffer");
return command_buffers[0];
}
void margaret_end_and_submit_and_free_command_buffer(
VkDevice device, VkCommandPool command_pool, VkQueue graphics_queue,
VkCommandBuffer cmd_buffer
) {
if (vkEndCommandBuffer(cmd_buffer) != VK_SUCCESS)
abortf("vkEndCommandBuffer");
VkSubmitInfo submits_info[1] = {(VkSubmitInfo){
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1,
.pCommandBuffers = &cmd_buffer,
}};
if (vkQueueSubmit(graphics_queue, ARRAY_SIZE(submits_info), submits_info, VK_NULL_HANDLE) != VK_SUCCESS)
abortf("vkQueueSubmit");
if (vkQueueWaitIdle(graphics_queue) != VK_SUCCESS)
abortf("vkQueueWaitIdle");
vkFreeCommandBuffers(device, command_pool, 1, &cmd_buffer);
}
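/* Sketch of the intended pairing (illustrative; `cmd_pool` and `gfx_queue` are placeholders):
 *   VkCommandBuffer cb = margaret_alloc_and_begin_single_use_command_buffer(device, cmd_pool);
 *   // ... record vkCmdCopyBuffer / vkCmdPipelineBarrier / etc. into cb ...
 *   margaret_end_and_submit_and_free_command_buffer(device, cmd_pool, gfx_queue, cb);
 * Note that the second helper blocks on vkQueueWaitIdle, so this pair is only meant for one-off transfers. */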
typedef struct {
size_t host_mem_buff_offset;
const MargaretImageInMemoryInfo* dst_image;
} MargaretCommandForImageCopying;
#include "../../../gen/l1/eve/margaret/VecAndSpan_MargaretCommandForImageCopying.h"
#include "../../../gen/l1/vulkan/VecVkImageMemoryBarrier.h"
/* (destination_stage_mask, destination_access_mask) are probably
* (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT) */
void margaret_rerecord_cmd_buff_for_texture_init (
VkCommandBuffer command_buffer, VkBuffer host_mem_buffer,
SpanMargaretCommandForImageCopying commands,
VkPipelineStageFlags destination_stage_mask, VkAccessFlags destination_access_mask
){
if (vkResetCommandBuffer(command_buffer, 0) != VK_SUCCESS)
abortf("vkResetCommandBuffer\n");
VkCommandBufferBeginInfo begin_info = {.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,};
if (vkBeginCommandBuffer(command_buffer, &begin_info) != VK_SUCCESS)
abortf("vkBeginCommandBuffer\n");
VecVkImageMemoryBarrier barriers = VecVkImageMemoryBarrier_new_reserved(commands.len);
for (size_t i = 0; i < commands.len; i++) {
MargaretCommandForImageCopying img = commands.data[i];
VecVkImageMemoryBarrier_append(&barriers, (VkImageMemoryBarrier){
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = 0,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = img.dst_image->image,
.subresourceRange = (VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
});
}
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
0, /* Flags */
0, NULL, 0, NULL,
barriers.len, barriers.buf);
barriers.len = 0; /* It's ok, VkImageMemoryBarrier is primitive */
for (size_t i = 0; i < commands.len; i++) {
MargaretCommandForImageCopying img = commands.data[i];
VkBufferImageCopy region = {
.bufferOffset = img.host_mem_buff_offset,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = (VkImageSubresourceLayers){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {0, 0, 0},
.imageExtent = {
.width = img.dst_image->width,
.height = img.dst_image->height,
.depth = 1
},
};
vkCmdCopyBufferToImage(command_buffer, host_mem_buffer, img.dst_image->image,
// We assume the image was already transitioned to TRANSFER_DST_OPTIMAL by the barrier above
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
}
/* refill the barriers Vec for the second layout transition */
for (size_t i = 0; i < commands.len; i++) {
MargaretCommandForImageCopying img = commands.data[i];
VecVkImageMemoryBarrier_append(&barriers, (VkImageMemoryBarrier){
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = destination_access_mask,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = img.dst_image->image,
.subresourceRange = (VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
});
}
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, destination_stage_mask,
0, /* Flags */
0, NULL, 0, NULL,
barriers.len, barriers.buf
);
VecVkImageMemoryBarrier_drop(barriers);
if (vkEndCommandBuffer(command_buffer) != VK_SUCCESS)
abortf("vkEndCommandBuffer");
}
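/* Sketch of how this re-recording helper is meant to be fed (illustrative; the offsets and the
 * `*_img_info` structs are placeholders for whatever was packed into the host-visible buffer):
 *   MargaretCommandForImageCopying copies[] = {
 *       { .host_mem_buff_offset = 0,           .dst_image = &diffuse_img_info },
 *       { .host_mem_buff_offset = 64 * 64 * 4, .dst_image = &normal_img_info  },
 *   };
 *   margaret_rerecord_cmd_buff_for_texture_init(cmd_buf, staging_buffer,
 *       (SpanMargaretCommandForImageCopying){ .data = copies, .len = ARRAY_SIZE(copies) },
 *       VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);
 */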
// Creates a 2D image view (e.g. for a texture)
VkImageView margaret_create_view_for_image (
VkDevice device, const MargaretImageInMemoryInfo* image, VkImageAspectFlags aspect_flags
) {
VkImageViewCreateInfo crinfo = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.image = image->image,
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = image->format,
.subresourceRange = (VkImageSubresourceRange){
.aspectMask = aspect_flags,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
VkDevice device, VkImage image, VkFormat format, VkImageAspectFlags aspect_flags
){
VkImageView view;
if (vkCreateImageView(device, &crinfo, NULL, &view) != VK_SUCCESS)
abortf("vkCreateImageView");
check(vkCreateImageView(device, &(VkImageViewCreateInfo){
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.image = image,
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = format,
.subresourceRange = (VkImageSubresourceRange){
.aspectMask = aspect_flags, .baseMipLevel = 0, .levelCount = 1,
.baseArrayLayer = 0, .layerCount = 1,
},
}, NULL, &view) == VK_SUCCESS);
return view;
}
@ -1246,11 +932,12 @@ VkSampler margaret_create_sampler(VkPhysicalDevice physical_device, VkDevice dev
vkGetPhysicalDeviceProperties(physical_device, &physical_device_properties);
VkPhysicalDeviceFeatures physical_device_features;
vkGetPhysicalDeviceFeatures(physical_device, &physical_device_features);
VkSamplerCreateInfo crinfo = {
VkSampler sampler;
check(vkCreateSampler(device, &(VkSamplerCreateInfo){
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.magFilter = make_linear ? VK_FILTER_LINEAR : VK_FILTER_NEAREST,
.minFilter = make_linear ? VK_FILTER_LINEAR : VK_FILTER_NEAREST,
.mipmapMode = make_linear? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST,
.mipmapMode = make_linear ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
@ -1264,37 +951,37 @@ VkSampler margaret_create_sampler(VkPhysicalDevice physical_device, VkDevice dev
.maxLod = 0.f,
.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK,
.unnormalizedCoordinates = VK_FALSE,
};
VkSampler sampler;
if (vkCreateSampler(device, &crinfo, NULL, &sampler) != VK_SUCCESS)
abortf("vkCreateSampler");
}, NULL, &sampler) == VK_SUCCESS);
return sampler;
}
VkDescriptorPool margaret_create_descriptor_set_pool(VkDevice device,
uint32_t ubo_descriptor_count, uint32_t image_sampler_descriptor_count, uint32_t max_sets
VkDescriptorPool margaret_create_descriptor_set_pool(
VkDevice device, uint32_t ubo_descriptor_count, uint32_t image_sampler_descriptor_count, uint32_t max_sets
) {
VecVkDescriptorPoolSize sizes = VecVkDescriptorPoolSize_new_reserved(2);
if (ubo_descriptor_count > 0)
VecVkDescriptorPoolSize_append(&sizes, (VkDescriptorPoolSize){
VkDescriptorPoolSize sizes[2];
int sizes_c = 0;
if (ubo_descriptor_count > 0) {
sizes[sizes_c] = (VkDescriptorPoolSize){
.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.descriptorCount = ubo_descriptor_count
});
if (image_sampler_descriptor_count > 0)
VecVkDescriptorPoolSize_append(&sizes, (VkDescriptorPoolSize){
};
sizes_c++;
}
if (image_sampler_descriptor_count > 0) {
sizes[sizes_c] = (VkDescriptorPoolSize){
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = image_sampler_descriptor_count
});
VkDescriptorPoolCreateInfo crinfo = {
};
sizes_c++;
}
VkDescriptorPool descriptor_pool;
check(vkCreateDescriptorPool(device, &(VkDescriptorPoolCreateInfo){
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.maxSets = max_sets,
.poolSizeCount = sizes.len,
.pPoolSizes = sizes.buf,
};
VkDescriptorPool descriptor_pool;
if (vkCreateDescriptorPool(device, &crinfo, NULL, &descriptor_pool) != VK_SUCCESS)
abortf("vkCreateDescriptorPool");
VecVkDescriptorPoolSize_drop(sizes);
.poolSizeCount = sizes_c,
.pPoolSizes = sizes,
}, NULL, &descriptor_pool) == VK_SUCCESS);
return descriptor_pool;
}
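/* For example, margaret_create_descriptor_set_pool(device, 4, 16, 8) creates a pool that can hand out
 * up to 8 sets, drawing from 4 UNIFORM_BUFFER descriptors and 16 COMBINED_IMAGE_SAMPLER descriptors. */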
@ -1311,4 +998,245 @@ VkDescriptorSet margaret_allocate_descriptor_set(VkDevice device, VkDescriptorPo
return descriptor_set;
}
/* Aborts on error */
void margaret_reset_and_begin_command_buffer(VkCommandBuffer command_buffer){
check(vkResetCommandBuffer(command_buffer, 0) == VK_SUCCESS);
check(vkBeginCommandBuffer(command_buffer,
&(VkCommandBufferBeginInfo){ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO }) == VK_SUCCESS);
}
/* Aborts on error */
void margaret_end_command_buffer(VkCommandBuffer command_buffer){
check(vkEndCommandBuffer(command_buffer) == VK_SUCCESS);
}
typedef struct {
VkPipelineLayout pipeline_layout;
VecU8 vertex_shader_code;
VecU8 geometry_shader_code;
VecU8 fragment_shader_code;
U32 vertexBindingDescriptionCount;
VkVertexInputBindingDescription* pVertexBindingDescriptions;
U32 vertexAttributeDescriptionCount;
VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
bool depthTestEnable;
bool depthWriteEnable;
bool blendEnable;
} MargaretMostImportantPipelineOptions;
VkPipeline margaret_create_triangle_pipeline_one_attachment(
VkDevice device, VkRenderPass render_pass, U32 renderpass_subpass,
MargaretMostImportantPipelineOptions op
){
VkPipelineShaderStageCreateInfo shader_modules[3] = {
(VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.module = margaret_VkShaderModule_new(device, op.vertex_shader_code),
.stage = VK_SHADER_STAGE_VERTEX_BIT, .pName = "main",
},
(VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.module = margaret_VkShaderModule_new(device, op.fragment_shader_code),
.stage = VK_SHADER_STAGE_FRAGMENT_BIT, .pName = "main",
},
};
U32 shader_modules_c = 2;
if (op.geometry_shader_code.len > 0) {
shader_modules[shader_modules_c] = (VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.module = margaret_VkShaderModule_new(device, op.geometry_shader_code),
.stage = VK_SHADER_STAGE_GEOMETRY_BIT, .pName = "main",
};
shader_modules_c++;
}
VkGraphicsPipelineCreateInfo pipeline_crinfo = {
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.stageCount = shader_modules_c,
.pStages = shader_modules,
.pVertexInputState = &(VkPipelineVertexInputStateCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.vertexBindingDescriptionCount = op.vertexBindingDescriptionCount,
.pVertexBindingDescriptions = op.pVertexBindingDescriptions,
.vertexAttributeDescriptionCount = op.vertexAttributeDescriptionCount,
.pVertexAttributeDescriptions = op.pVertexAttributeDescriptions,
},
.pInputAssemblyState = &(VkPipelineInputAssemblyStateCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
.primitiveRestartEnable = VK_FALSE,
},
.pViewportState = &(VkPipelineViewportStateCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
// We are using a dynamic viewport and scissor, which is why we do not attach viewport/scissor values
// when creating the rendering pipeline. We will set them later, at draw time
.viewportCount = 1,
.scissorCount = 1,
},
.pRasterizationState = &(VkPipelineRasterizationStateCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
.depthClampEnable = VK_FALSE,
.polygonMode = VK_POLYGON_MODE_FILL,
// .cullMode = VK_CULL_MODE_BACK_BIT,
.cullMode = VK_CULL_MODE_NONE,
.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
.depthBiasEnable = VK_FALSE,
.depthBiasConstantFactor = 0.0f,
.depthBiasClamp = 0.0f,
.depthBiasSlopeFactor = 0.0f,
.lineWidth = 1.0f,
},
.pMultisampleState = &(VkPipelineMultisampleStateCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
.sampleShadingEnable = VK_FALSE,
.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT,
.minSampleShading = 1.0f,
.pSampleMask = NULL,
.alphaToCoverageEnable = VK_FALSE,
.alphaToOneEnable = VK_FALSE,
},
.pDepthStencilState = &(VkPipelineDepthStencilStateCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
.depthTestEnable = op.depthTestEnable,
.depthWriteEnable = op.depthWriteEnable,
.depthCompareOp = VK_COMPARE_OP_LESS
},
.pColorBlendState = &(VkPipelineColorBlendStateCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
.logicOpEnable = VK_FALSE,
.logicOp = VK_LOGIC_OP_COPY,
.attachmentCount = 1,
.pAttachments = &(VkPipelineColorBlendAttachmentState){
.blendEnable = op.blendEnable,
.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA,
.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
.colorBlendOp = VK_BLEND_OP_ADD,
.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE,
.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO,
.alphaBlendOp = VK_BLEND_OP_ADD,
.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT
},
},
.pDynamicState = &(VkPipelineDynamicStateCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
.dynamicStateCount = 2,
.pDynamicStates = (VkDynamicState[]){VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR},
},
.layout = op.pipeline_layout,
.renderPass = render_pass,
.subpass = renderpass_subpass,
.basePipelineHandle = VK_NULL_HANDLE,
};
VkPipeline pipeline;
check(vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipeline_crinfo, NULL, &pipeline) == VK_SUCCESS);
for (U32 i = 0; i < shader_modules_c; i++) {
vkDestroyShaderModule(device, shader_modules[i].module, NULL);
}
return pipeline;
}
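/* Sketch of a typical call (illustrative: `my_layout`, `binding`, `attrs` and the hypothetical
 * shader-loading helper are placeholders, not real symbols from this codebase):
 *   VkPipeline pipe = margaret_create_triangle_pipeline_one_attachment(device, render_pass, 0,
 *       (MargaretMostImportantPipelineOptions){
 *           .pipeline_layout      = my_layout,
 *           .vertex_shader_code   = load_spirv_as_VecU8("shaders/a.vert.spv"),
 *           .geometry_shader_code = (VecU8){0},   // len == 0 -> no geometry stage
 *           .fragment_shader_code = load_spirv_as_VecU8("shaders/a.frag.spv"),
 *           .vertexBindingDescriptionCount = 1,   .pVertexBindingDescriptions   = &binding,
 *           .vertexAttributeDescriptionCount = 2, .pVertexAttributeDescriptions = attrs,
 *           .depthTestEnable = true, .depthWriteEnable = true, .blendEnable = false,
 *       });
 * The shader VecU8s are consumed (dropped) inside margaret_VkShaderModule_new. */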
#include "vulkan_memory.h"
// todo: move image copying function here
typedef struct {
VkDevice device;
VkPhysicalDevice physical_device;
VkCommandBuffer transfer_cmd_buffer;
MargaretImgAllocator* dev_local_images;
MargaretBufAllocator* dev_local_buffers;
MargaretBufAllocator* staging_buffers;
VkDescriptorPool descriptor_pool;
VkSampler linear_sampler;
VkSampler nearest_sampler;
} MargaretEngineReference;
void margaret_rec_cmd_copy_buffer(
VkCommandBuffer cmd_buf,
const MargaretSubbuf* src_allocation, U64 src_offset,
const MargaretSubbuf* dst_allocation, U64 dst_offset, U64 length){
vkCmdCopyBuffer(cmd_buf,
MargaretSubbuf_get_buffer(src_allocation), MargaretSubbuf_get_buffer(dst_allocation),
1, &(VkBufferCopy){
.srcOffset = src_allocation->start + src_offset, .dstOffset = dst_allocation->start + dst_offset,
.size = length});
}
void margaret_rec_cmd_copy_buffer_one_to_one_part(
VkCommandBuffer cmd_buf,
const MargaretSubbuf* src_allocation,
const MargaretSubbuf* dst_allocation, U64 offset, U64 length){
assert(offset + length <= src_allocation->len);
assert(offset + length <= dst_allocation->len);
vkCmdCopyBuffer(cmd_buf,
MargaretSubbuf_get_buffer(src_allocation), MargaretSubbuf_get_buffer(dst_allocation),
1, &(VkBufferCopy){
.srcOffset = src_allocation->start + offset, .dstOffset = dst_allocation->start + offset, .size = length});
}
void margaret_rec_cmd_copy_buffer_one_to_one(
VkCommandBuffer cmd_buf, const MargaretSubbuf* src_allocation, const MargaretSubbuf* dst_allocation){
U64 copying_len = MIN_U64(src_allocation->len, dst_allocation->len);
vkCmdCopyBuffer(cmd_buf,
MargaretSubbuf_get_buffer(src_allocation), MargaretSubbuf_get_buffer(dst_allocation),
1, &(VkBufferCopy){
.srcOffset = src_allocation->start, .dstOffset = dst_allocation->start, .size = copying_len});
}
/* (destination_stage_mask, destination_access_mask) are probably
* (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT) */
void margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(
VkCommandBuffer cmd_buf, const MargaretSubbuf* src, MargaretImg* dst,
VkImageLayout dst_new_layout,
VkPipelineStageFlags destination_stage_mask, VkAccessFlags destination_access_mask){
vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
0 /* Flags */, 0, NULL, 0, NULL, 1, &(VkImageMemoryBarrier){
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = 0,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = dst->a.image,
.subresourceRange = (VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0,
.levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
},
});
vkCmdCopyBufferToImage(cmd_buf, MargaretSubbuf_get_buffer(src), dst->a.image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &(VkBufferImageCopy){
.bufferOffset = src->start,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = (VkImageSubresourceLayers){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel = 0, .baseArrayLayer = 0, .layerCount = 1,
},
.imageOffset = {0, 0, 0},
.imageExtent = { .width = dst->width, .height = dst->height, .depth = 1 },
});
vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TRANSFER_BIT, destination_stage_mask,
0 /* Flags */, 0, NULL, 0, NULL, 1, &(VkImageMemoryBarrier){
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = destination_access_mask,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = dst_new_layout,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = dst->a.image,
.subresourceRange = (VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0,
.levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
},
});
dst->current_layout = dst_new_layout;
}
#endif

View File

@ -91,5 +91,12 @@ vec3 marie_normal_from_tang_space_gradient(float delt_x, float delta_z) {
return (vec3){-delt_x * N, N, -delta_z * N};
}
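/* Reasoning sketch, assuming N is the usual normalizing factor 1 / sqrt(delt_x^2 + 1 + delta_z^2):
 * for a height field y = h(x, z) the two tangents are (1, dh/dx, 0) and (0, dh/dz, 1), and their
 * upward-pointing cross product is (-dh/dx, 1, -dh/dz); normalizing it gives exactly
 * (-delt_x * N, N, -delta_z * N). */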
mat4 marie_3d_scal_mat4(float scale){
return mat4_new(scale, 0, 0, 0,
0, scale, 0, 0,
0, 0, scale, 0,
0, 0, 0, 1);
}
#endif

View File

@ -361,7 +361,8 @@ void vkCmdCopyImage(
const VkImageCopy* pRegions);
#include "../../margaret/vulkan_memory_claire.h"
// #include "../../margaret/vulkan_memory_claire.h"
// #include "../../margaret/vulkan_me"
int main(){
return 0;

Binary file not shown.

File diff suppressed because it is too large

View File

@ -13,26 +13,33 @@
typedef struct {
vec3 pos;
vec2 tex;
} GenericMeshVertex;
#include "../../../../gen/l1/eve/r0/VecAndSpan_GenericMeshVertex.h"
} GenericMeshVertexInc;
#include "../../../../gen/l1/eve/r0/VecAndSpan_GenericMeshVertexInc.h"
typedef struct {
VecGenericMeshVertex vertices;
GenericMeshVertexInc base;
vec3 norm;
vec3 tang_U;
vec3 tang_V;
} GenericMeshVertex;
typedef struct {
VecGenericMeshVertexInc vertices;
VecU32 indexes;
} GenericMeshTopology;
void GenericMeshTopology_drop(GenericMeshTopology self) {
VecGenericMeshVertex_drop(self.vertices);
VecGenericMeshVertexInc_drop(self.vertices);
VecU32_drop(self.indexes);
}
GenericMeshTopology GenericMeshTopology_clone(const GenericMeshTopology* self) {
return (GenericMeshTopology){.vertices = VecGenericMeshVertex_clone(&self->vertices), .indexes = VecU32_clone(&self->indexes)};
return (GenericMeshTopology){.vertices = VecGenericMeshVertexInc_clone(&self->vertices), .indexes = VecU32_clone(&self->indexes)};
}
typedef struct {
GenericMeshTopology topology;
U32 max_instance_count;
VecU8 diffuse_texture_path;
VecU8 normal_texture_path;
VecU8 specular_texture_path;
@ -46,62 +53,61 @@ void GenericMeshInSceneTemplate_drop(GenericMeshInSceneTemplate self) {
}
GenericMeshInSceneTemplate GenericMeshInSceneTemplate_clone(const GenericMeshInSceneTemplate* self) {
return (GenericMeshInSceneTemplate){.topology = GenericMeshTopology_clone(&self->topology), .max_instance_count = self->max_instance_count};
return (GenericMeshInSceneTemplate){.topology = GenericMeshTopology_clone(&self->topology),
.diffuse_texture_path = VecU8_clone(&self->diffuse_texture_path),
.normal_texture_path = VecU8_clone(&self->normal_texture_path),
.specular_texture_path = VecU8_clone(&self->specular_texture_path)};
}
#include "../../../../gen/l1/eve/r0/VecGenericMeshInSceneTemplate.h"
typedef struct {
mat4 model_t;
} GenericMeshInstance;
#include "../../../../gen/l1/eve/r0/VecGenericMeshInstance.h"
} GenericMeshInstanceInc;
typedef struct {
GenericMeshInstanceInc base;
mat3 normal_t;
} GenericMeshInstance;
typedef struct {
vec3 pos;
vec3 normal;
} ShinyMeshVertex;
#include "../../../../gen/l1/eve/r0/VecAndSpan_ShinyMeshVertex.h"
} ShinyMeshVertexInc;
typedef struct {
VecShinyMeshVertex vertices;
ShinyMeshVertexInc base;
vec3 normal;
} ShinyMeshVertex;
#include "../../../../gen/l1/eve/r0/VecAndSpan_ShinyMeshVertexInc.h"
typedef struct {
VecShinyMeshVertexInc vertices;
VecU32 indexes;
} ShinyMeshTopology;
void ShinyMeshTopology_drop(ShinyMeshTopology self) {
VecShinyMeshVertex_drop(self.vertices);
VecShinyMeshVertexInc_drop(self.vertices);
VecU32_drop(self.indexes);
}
ShinyMeshTopology ShinyMeshTopology_clone(const ShinyMeshTopology* self) {
return (ShinyMeshTopology){.vertices = VecShinyMeshVertex_clone(&self->vertices), VecU32_clone(&self->indexes)};
return (ShinyMeshTopology){.vertices = VecShinyMeshVertexInc_clone(&self->vertices),
VecU32_clone(&self->indexes)};
}
typedef struct {
ShinyMeshTopology topology;
U32 max_instance_count;
} ShinyMeshInSceneTemplate;
#include "../../../../gen/l1/eve/r0/VecShinyMeshTopology.h"
void ShinyMeshInSceneTemplate_drop(ShinyMeshInSceneTemplate self) {
ShinyMeshTopology_drop(self.topology);
}
ShinyMeshInSceneTemplate ShinyMeshInSceneTemplate_clone(const ShinyMeshInSceneTemplate* self) {
return (ShinyMeshInSceneTemplate){.topology = ShinyMeshTopology_clone(&self->topology),
.max_instance_count = self->max_instance_count};
}
#include "../../../../gen/l1/eve/r0/VecShinyMeshInSceneTemplate.h"
typedef struct {
typedef struct{
mat4 model_t;
vec3 color_off;
vec3 color_on;
} ShinyMeshInstanceInc;
typedef struct {
ShinyMeshInstanceInc base;
mat3 normal_t;
} ShinyMeshInstance;
#include "../../../../gen/l1/eve/r0/VecShinyMeshInstance.h"
typedef struct {
vec2 win_scale;
@ -124,7 +130,6 @@ typedef struct {
float d;
char _padding_3[12];
} Pipeline0Spotlight;
#include "../../../../gen/l1/eve/r0/VecPipeline0Spotlight.h"
typedef struct {
vec3 pos;
@ -132,54 +137,18 @@ typedef struct {
vec3 color;
char _padding_1[4];
} Pipeline0PointLight;
#include "../../../../gen/l1/eve/r0/VecPipeline0PointLight.h"
typedef struct {
VecGenericMeshInSceneTemplate generic_models;
VecShinyMeshInSceneTemplate shiny_models;
size_t point_lights_max_count;
size_t spotlights_max_count;
VecShinyMeshTopology shiny_models;
} SceneTemplate;
void SceneTemplate_drop(SceneTemplate self) {
VecGenericMeshInSceneTemplate_drop(self.generic_models);
}
size_t SceneTemplate_get_space_needed_for_all_instance_attributes(const SceneTemplate* self) {
size_t s = 0;
for (size_t mi = 0; mi < self->generic_models.len; mi++) {
const GenericMeshInSceneTemplate* M = VecGenericMeshInSceneTemplate_at(&self->generic_models, mi);
s += M->max_instance_count * sizeof(GenericMeshInstance);
}
for (size_t mi = 0; mi < self->shiny_models.len; mi++) {
const ShinyMeshInSceneTemplate* M = VecShinyMeshInSceneTemplate_at(&self->shiny_models, mi);
s += M->max_instance_count * sizeof(ShinyMeshInstance);
}
return s;
}
size_t SceneTemplate_get_space_needed_for_widest_state_transfer(const SceneTemplate* self) {
return self->point_lights_max_count * sizeof(Pipeline0PointLight) +
self->spotlights_max_count * sizeof(Pipeline0Spotlight) +
SceneTemplate_get_space_needed_for_all_instance_attributes(self);
}
size_t SceneTemplate_get_space_for_initial_model_topology_transfer(const SceneTemplate* self) {
size_t s = 0;
for (size_t mi = 0; mi < self->generic_models.len; mi++) {
const GenericMeshInSceneTemplate* M = VecGenericMeshInSceneTemplate_at(&self->generic_models, mi);
s += M->topology.vertices.len * sizeof(GenericMeshVertex) + M->topology.indexes.len * sizeof(U32);
}
for (size_t mi = 0; mi < self->shiny_models.len; mi++) {
const ShinyMeshInSceneTemplate* M = VecShinyMeshInSceneTemplate_at(&self->shiny_models, mi);
s += M->topology.vertices.len * sizeof(ShinyMeshVertex) + M->topology.indexes.len * sizeof(U32);
}
return s;
}
#define pipeline_0_ubo_point_light_max_count 20
#define pipeline_0_ubo_spotlight_max_count 120
#define pipeline_0_ubo_point_light_max_count 120
#define pipeline_0_ubo_spotlight_max_count 20
typedef struct {
int point_light_count;
@ -189,9 +158,7 @@ typedef struct {
Pipeline0Spotlight spotlight_arr[pipeline_0_ubo_spotlight_max_count];
} Pipeline0UBO;
size_t GenericMeshTopology_get_space_needed_for_staging_buffer(const GenericMeshTopology* self) {
return MAX_U64(self->vertices.len * sizeof(GenericMeshVertex), self->indexes.len * sizeof(U32));
}
/* generating my cool textures2 */
void TextureDataR8_pixel_maxing(TextureDataR8* self, S32 x, S32 y, U8 val) {
if (x < 0 || y < 0 || (size_t)x >= self->width)
@ -225,52 +192,62 @@ GenericMeshTopology generate_one_fourth_of_a_cylinder(float w, float r, U32 k) {
assert(k >= 1);
const float a = M_PI_2f / (float)k;
const float l = 2 * r * sinf(M_PI_4f / (float)k);
const vec2 v0tex = {r / (2 * r + w), r / (2 * r + (float)k * l)};
const vec2 v1tex = {(r + w) / (2 * r + w), r / (2 * r + (float)k * l)};
const vec2 v2tex = {r / (2 * r + w), 2 * r / (2 * r + (float)k * l)};
const vec2 v3tex = {(r + w) / (2 * r + w), 2 * r / (2 * r + (float)k * l)};
VecGenericMeshVertex vertices = VecGenericMeshVertex_new_reserved(4 * k + 6);
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {0, 0, 0}, .tex = v0tex});
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {w, 0, 0}, .tex = v1tex});
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {0, r, 0}, .tex = v2tex});
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {w, r, 0}, .tex = v3tex});
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {0, 0, -r}, .tex = {r / (2 * r + w), 0}});
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {w, 0, -r}, .tex = {(r + w) / (2 * r + w), 0}});
for (U32 i = 1; i <= k; i++) {
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){
float tex_width = 2 * r + w;
float tex_height = 2 * r + (float)k * l;
const vec2 v0tex = {r / tex_width, r / tex_height};
const vec2 v1tex = {(r + w) / tex_width, r / tex_height};
const vec2 v2tex = {r / tex_width, 2 * r / tex_height};
const vec2 v3tex = {(r + w) / tex_width, 2 * r / tex_height};
VecGenericMeshVertexInc vertices = VecGenericMeshVertexInc_new_reserved(8 + 4 * k + (k + 2) * 2);
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {0, 0, 0}, .tex = v0tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {w, 0, 0}, .tex = v1tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {0, r, 0}, .tex = v2tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {w, r, 0}, .tex = v3tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {0, 0, 0}, .tex = v0tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {w, 0, 0}, .tex = v1tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {0, 0, -r}, .tex = {r / tex_width, 0}});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {w, 0, -r}, .tex = {(r + w) / tex_width, 0}});
for (U32 i = 0; i < k; i++) {
for (int j = 0; j < 2; j++) {
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){
.pos = {0, cosf(a * (float)(i + j)) * r, -sinf(a * (float)(i + j)) * r},
.tex = {v2tex.x, v2tex.y + (float)(i + j) * l / tex_height}
});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){
.pos = {w, cosf(a * (float)(i + j)) * r, -sinf(a * (float)(i + j)) * r},
.tex = {v3tex.x, v3tex.y + (float)(i + j) * l / tex_height}
});
}
}
assert(vertices.len == 8 + 4 * k);
for (U32 i = 0; i <= k; i++) {
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){
.pos = {0, cosf(a * (float)i) * r, -sinf(a * (float)i) * r},
.tex = vec2_add_vec2(v0tex, (vec2){r / (2 * r + w) * -sinf(a * (float)i), r / (2*r + (float)k * l) * cosf(a * (float)i)})
.tex = (vec2){ (r - r *sinf(a * (float)i)) / tex_width, (r + r * cosf(a * (float)i)) / tex_height},
});
}
for (U32 i = 1; i <= k; i++) {
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {0, 0, 0}, .tex = v0tex});
for (U32 i = 0; i <= k; i++) {
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){
.pos = {w, cosf(a * (float)i) * r, -sinf(a * (float)i) * r},
.tex = vec2_add_vec2(v1tex, (vec2){r / (2 * r + w) * sinf(a * (float)i), r / (2*r + (float)k * l) * cosf(a * (float)i)})
.tex = (vec2){ (r + w + r * sinf(a * (float)i)) / tex_width, (r + r * cosf(a * (float)i)) / tex_height},
});
}
for (U32 i = 1; i <= k; i++) {
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){
.pos = {0, cosf(a * (float)i) * r, -sinf(a * (float)i) * r},
.tex = {v2tex.x, v2tex.y + (float)i * l / (2*r + (float)k * l)}
});
}
for (U32 i = 1; i <= k; i++) {
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){
.pos = {w, cosf(a * (float)i) * r, -sinf(a * (float)i) * r},
.tex = {v3tex.x, v3tex.y + (float)i * l / (2*r + (float)k * l)}
});
}
VecU32 indexes = VecU32_new_reserved(3*(2+2+2*k+2*k));
{
U32 _span_0[] = {5, 1, 0, 5, 0, 4, 1, 3, 0, 3, 2, 0};
VecU32_append_span(&indexes, (SpanU32){.data = _span_0, .len = ARRAY_SIZE(_span_0)});
}
for (U32 i = 1; i <= k; i++) {
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {w, 0, 0}, .tex = v1tex});
assert(vertices.len == 8 + 4 * k + (k + 2) * 2);
VecU32 indexes = VecU32_new_reserved(3*(4+2*k+2*k));
U32 _span_0[] = {7, 5, 4, 7, 4, 6, 1, 3, 0, 3, 2, 0};
VecU32_append_span(&indexes, (SpanU32){.data = _span_0, .len = ARRAY_SIZE(_span_0)});
for (U32 i = 0; i < k; i++) {
U32 _span_1[] = {
0, i > 1 ? 5 + i - 1 : 2, 5 + i,
1, 5 + k + i, i > 1 ? 5 + k + i - 1 : 3,
i > 1 ? 5 + 2 * k + i - 1 : 2, i > 1 ? 5 + 3 * k + i - 1 : 3, 5 + 2 * k + i,
5 + 3 * k + i, 5 + 2 * k + i, i > 1 ? 5 + 3 * k + i - 1 : 3,
8 + 4 * k + k + 1, 8 + 4 * k + i, 8 + 4 * k + i + 1,
8 + 4 * k + 2 * k + 3, 8 + 4 * k + (k + 2) + i + 1, 8 + 4 * k + (k + 2) + i,
8 + 4 * i + 0, 8 + 4 * i + 1, 8 + 4 * i + 3,
8 + 4 * i + 0, 8 + 4 * i + 3, 8 + 4 * i + 2,
};
VecU32_append_span(&indexes, (SpanU32){.data = _span_1, .len = ARRAY_SIZE(_span_1)});
}
@ -496,8 +473,8 @@ cvec3 Bublazhuzhka_get_color(const Bublazhuzhka* self, vec2 v) {
return (cvec3){121 - p * 2, 30 + p, 65 - p};
}
cvec3 compress_normal_vec_into_norm_texel(vec3 n) {
return (cvec3){(U32)roundf(255 * (n.x + 1) / 2), (U32)roundf(255 * (n.y + 1) / 2), (U32)roundf(255 * (n.z + 1) / 2)};
cvec4 compress_normal_vec_into_norm_texel(vec3 n) {
return (cvec4){(U32)roundf(255 * (n.x + 1) / 2), (U32)roundf(255 * (n.y + 1) / 2), (U32)roundf(255 * (n.z + 1) / 2), 255};
}
@ -515,8 +492,7 @@ typedef struct {
void draw_polygon_on_normal_texture_smooth_param_surf_h_draw_cb(void* ug, S32 x, S32 y, vec4 attr) {
draw_polygon_on_normal_texture_smooth_param_surf_H_DrawGuest* g = ug;
vec3 normal = g->my_client.fn(g->my_client.guest, (vec2){attr.x, attr.y});
cvec3 ans = compress_normal_vec_into_norm_texel(normal);
*TextureDataR8G8B8A8_mat(g->tex, x, y) = (cvec4){ans.x, ans.y, ans.z, 255};
*TextureDataR8G8B8A8_mat(g->tex, x, y) = compress_normal_vec_into_norm_texel(normal);
}
void draw_polygon_on_normal_texture_smooth_param_surf(
@ -539,21 +515,27 @@ typedef struct {
} FnNormalVectorGenExaggParamCallback;
typedef struct {
TextureDataR8G8B8* tex;
TextureDataR8G8B8A8* tex;
FnNormalVectorGenExaggParamCallback my_client;
mat3 BNT_trans;
} draw_polygon_on_normal_texture_exaggerated_param_surf_H_DrawGuest;
void draw_polygon_on_normal_texture_exaggerated_param_surf_draw_cb(void* ug, S32 x, S32 y, vec4 attr) {
draw_polygon_on_normal_texture_exaggerated_param_surf_H_DrawGuest* g = ug;
vec3 normal = g->my_client.fn(g->my_client.guest, (vec3){attr.x, attr.y, attr.z});
*TextureDataR8G8B8_mat(g->tex, x, y) = compress_normal_vec_into_norm_texel(normal);
vec3 tang_normal = mat3_mul_vec3(g->BNT_trans, normal);
*TextureDataR8G8B8A8_mat(g->tex, x, y) = compress_normal_vec_into_norm_texel(tang_normal);
}
/* We can't derive texture coordinates from parameter space coordinates, you have to do it yourself */
/* We can't derive texture coordinates from parameter-space coordinates, you have to do that yourself.
 * Also, the normal vector is converted here from world space into the triangle's tangent space.
 * You specify an orthogonal basis of that tangent space: BNT = { tangent_U, normal, tangent_V } */
void draw_polygon_on_normal_texture_nat_cords_exaggerated_param_surf(
TextureDataR8G8B8* tex, vec2 ta, vec2 tb, vec2 tc, vec3 pa, vec3 pb, vec3 pc, FnNormalVectorGenExaggParamCallback cb
TextureDataR8G8B8A8* tex, vec2 ta, vec2 tb, vec2 tc, vec3 pa, vec3 pb, vec3 pc, FnNormalVectorGenExaggParamCallback cb,
mat3 BNT
) {
draw_polygon_on_normal_texture_exaggerated_param_surf_H_DrawGuest aboba = {.tex = tex, .my_client = cb};
draw_polygon_on_normal_texture_exaggerated_param_surf_H_DrawGuest aboba = {.tex = tex, .my_client = cb,
.BNT_trans = mat3_transpose(BNT)};
marie_rasterize_triangle_with_attr(
(MariePlaneVertAttr){.pos = ta, .attr = {pa.x, pa.y, pa.z, 0} },
(MariePlaneVertAttr){.pos = tb, .attr = {pb.x, pb.y, pb.z, 0} },
@ -561,32 +543,33 @@ void draw_polygon_on_normal_texture_nat_cords_exaggerated_param_surf(
(FnMarieRasterizerCallback){draw_polygon_on_normal_texture_exaggerated_param_surf_draw_cb, (void*)&aboba});
}
// todo: add a version for that function with non-native coordinate system (on vertex) (like I did with absolutely flat surface)
// todo: also, maybe, add a function to derive BNT and do cool stuff with trop mat3x2
typedef struct {
TextureDataR8G8B8A8* tex;
cvec3 normal_compr;
} draw_polygon_on_normal_texture_absolutely_flat_H_DrawGuest;
void draw_polygon_on_normal_texture_absolutely_flat_h_draw_cb(void* ug, S32 x, S32 y, vec4 attr) {
draw_polygon_on_normal_texture_absolutely_flat_H_DrawGuest* g = ug;
*TextureDataR8G8B8A8_mat(g->tex, x, y) = (cvec4){g->normal_compr.x, g->normal_compr.y, g->normal_compr.z, 255};
*TextureDataR8G8B8A8_mat(g->tex, x, y) = compress_normal_vec_into_norm_texel((vec3){0, 1, 0});
}
void draw_polygon_on_normal_texture_nat_cords_absolutely_flat(TextureDataR8G8B8A8* tex,
vec2 ta, vec2 tb, vec2 tc, vec3 c_normal
vec2 ta, vec2 tb, vec2 tc
) {
draw_polygon_on_normal_texture_absolutely_flat_H_DrawGuest aboba = {tex, compress_normal_vec_into_norm_texel(c_normal)};
draw_polygon_on_normal_texture_absolutely_flat_H_DrawGuest aboba = {tex};
marie_rasterize_triangle_with_attr((MariePlaneVertAttr){.pos = ta}, (MariePlaneVertAttr){.pos = tb},
(MariePlaneVertAttr){.pos = tc}, (FnMarieRasterizerCallback){
.fn = draw_polygon_on_normal_texture_absolutely_flat_h_draw_cb, .guest = (void*)&aboba});
}
// todo: replace it with a "color everything in one color" function
void draw_polygon_on_normal_texture_absolutely_flat(TextureDataR8G8B8A8* tex,
vec2 pa, vec2 pb, vec2 pc, mat3x2 trop, vec3 c_normal
) {
vec2 pa, vec2 pb, vec2 pc, mat3x2 trop
) {
draw_polygon_on_normal_texture_nat_cords_absolutely_flat(tex, mat3x2_mul_vec3(trop, vec2_and_one(pa)),
mat3x2_mul_vec3(trop, vec2_and_one(pb)), mat3x2_mul_vec3(trop, vec2_and_one(pc)), c_normal);
mat3x2_mul_vec3(trop, vec2_and_one(pb)), mat3x2_mul_vec3(trop, vec2_and_one(pc)));
}
@ -597,21 +580,20 @@ typedef struct {
} FnHeightMapGradFlatSurfCallback;
typedef struct {
mat3 surf_orient;
FnHeightMapGradFlatSurfCallback my_client;
} draw_polygon_on_normal_texture_flat_param_surf_H_DrawGuest;
vec3 draw_polygon_on_normal_texture_flat_param_surf_h_draw_cb(void* ug, vec2 p) {
draw_polygon_on_normal_texture_flat_param_surf_H_DrawGuest* g = ug;
vec2 grad = g->my_client.fn(g->my_client.guest, p);
return mat3_mul_vec3(g->surf_orient, marie_normal_from_tang_space_gradient(grad.x, grad.y));
return marie_normal_from_tang_space_gradient(grad.x, grad.y); // todo: remove this cluster, leaving only this function where it's needed
}
/* The simplest case of normal texture generation: for a smooth flat surface of a polygon */
void draw_polygon_on_normal_texture_flat_param_surf(TextureDataR8G8B8A8* tex, vec2 pa, vec2 pb, vec2 pc, mat3x2 trop,
mat3 surf_orient, FnHeightMapGradFlatSurfCallback height_map_cb
FnHeightMapGradFlatSurfCallback height_map_cb
) {
draw_polygon_on_normal_texture_flat_param_surf_H_DrawGuest aboba = {surf_orient, height_map_cb};
draw_polygon_on_normal_texture_flat_param_surf_H_DrawGuest aboba = {height_map_cb};
draw_polygon_on_normal_texture_smooth_param_surf(tex, pa, pb, pc, trop, (FnNormalVectorGenCallback){
.fn = draw_polygon_on_normal_texture_flat_param_surf_h_draw_cb, .guest = (void*)&aboba});
}
@ -721,34 +703,32 @@ TextureDataR8G8B8A8 generate_normal_tex_for_one_fourth_of_a_cylinder(float s_res
Bublazhuzhka crap_on_the_back_side = fill_rectangle_with_crap(w, r);
mat3x2 trop_back_side = {.x.x = cord_resol.x, .y.y = cord_resol.y, .z = vec2_mul_vec2((vec2){r, r}, cord_resol)};
mat3 orient_back_side = {.x = {1, 0, 0}, .y = {0, 0, 1}, .z = {0, 1, 0}};
draw_polygon_on_normal_texture_flat_param_surf(&res, (vec2){0, 0}, (vec2){w, 0}, (vec2){w, r}, trop_back_side, orient_back_side,
draw_polygon_on_normal_texture_flat_param_surf(&res, (vec2){0, 0}, (vec2){w, 0}, (vec2){w, r}, trop_back_side,
(FnHeightMapGradFlatSurfCallback){.fn = height_map_cb_that_uses_bublazhuzhka, .guest = &crap_on_the_back_side});
draw_polygon_on_normal_texture_flat_param_surf(&res, (vec2){0, 0}, (vec2){0, r}, (vec2){w, r}, trop_back_side, orient_back_side,
draw_polygon_on_normal_texture_flat_param_surf(&res, (vec2){0, 0}, (vec2){0, r}, (vec2){w, r}, trop_back_side,
(FnHeightMapGradFlatSurfCallback){.fn = height_map_cb_that_uses_bublazhuzhka, .guest = &crap_on_the_back_side});
Bublazhuzhka_drop(crap_on_the_back_side);
mat3x2 str = {.x.x = cord_resol.x, .y.y = cord_resol.y};
draw_polygon_on_normal_texture_absolutely_flat(&res, v0tex, v1tex, v4tex, str, (vec3){0, -1, 0});
draw_polygon_on_normal_texture_absolutely_flat(&res, v1tex, v4tex, v5tex, str, (vec3){0, -1, 0});
draw_polygon_on_normal_texture_absolutely_flat(&res, v0tex, v1tex, v4tex, str);
draw_polygon_on_normal_texture_absolutely_flat(&res, v1tex, v4tex, v5tex, str);
for (size_t i = 0; i < k; i++) {
vec2 A = {r - sinf((float)i * a) * r, r + cosf((float)i * a) * r};
vec2 B = {r - sinf((float)(i + 1) * a) * r, r + cosf((float)(i + 1) * a) * r};
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, (vec2){r, r}, str, (vec3){-1, 0, 0});
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, (vec2){r, r}, str);
}
for (size_t i = 0; i < k; i++) {
vec2 A = {r + w + sinf((float)i * a) * r, r + cosf((float)i * a) * r};
vec2 B = {r + w + sinf((float)(i + 1) * a) * r, r + cosf((float)(i + 1) * a) * r};
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, (vec2){r + w, r}, str, (vec3){1, 0, 0});
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, (vec2){r + w, r}, str);
}
for (size_t i = 0; i < k; i++) {
vec2 A = {r, 2 * r + (float)i * l};
vec2 B = {r + w, 2 * r + (float)i * l};
vec2 C = {r, 2 * r + (float)i * l + l};
vec2 D = {r + w, 2 * r + (float)i * l + l};
vec3 n = {0, cosf(a / 2 + a * (float)i), -sinf(a / 2 + a * (float)i)};
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, C, str, n);
draw_polygon_on_normal_texture_absolutely_flat(&res, D, B, C, str, n);
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, C, str);
draw_polygon_on_normal_texture_absolutely_flat(&res, D, B, C, str);
}
return res;
}
@ -756,39 +736,39 @@ TextureDataR8G8B8A8 generate_normal_tex_for_one_fourth_of_a_cylinder(float s_res
U32 quad_to_triangles_conv_arr[6] = {0, 1, 2, 0, 2, 3};
ShinyMeshTopology generate_shiny_cube(float r) {
ShinyMeshVertex vert[24] = {
{{+r, +r, +r}, {1, 0, 0}},
{{+r, -r, +r}, {1, 0, 0}},
{{+r, -r, -r}, {1, 0, 0}},
{{+r, +r, -r}, {1, 0, 0}},
ShinyMeshVertexInc vert[24] = {
{{+r, +r, +r}},
{{+r, -r, +r}},
{{+r, -r, -r}},
{{+r, +r, -r}},
{{-r, -r, -r}, {-1, 0, 0}},
{{-r, -r, +r}, {-1, 0, 0}},
{{-r, +r, +r}, {-1, 0, 0}},
{{-r, +r, -r}, {-1, 0, 0}},
{{-r, -r, -r}},
{{-r, -r, +r}},
{{-r, +r, +r}},
{{-r, +r, -r}},
{{+r, +r, +r}, {0, 1, 0}},
{{+r, +r, -r}, {0, 1, 0}},
{{-r, +r, -r}, {0, 1, 0}},
{{-r, +r, +r}, {0, 1, 0}},
{{+r, +r, +r}},
{{+r, +r, -r}},
{{-r, +r, -r}},
{{-r, +r, +r}},
{{-r, -r, -r}, {0, -1, 0}},
{{+r, -r, -r}, {0, -1, 0}},
{{+r, -r, +r}, {0, -1, 0}},
{{-r, -r, +r}, {0, -1, 0}},
{{-r, -r, -r}},
{{+r, -r, -r}},
{{+r, -r, +r}},
{{-r, -r, +r}},
{{+r, +r, +r}, {0, 0, 1}},
{{-r, +r, +r}, {0, 0, 1}},
{{-r, -r, +r}, {0, 0, 1}},
{{+r, -r, +r}, {0, 0, 1}},
{{+r, +r, +r}},
{{-r, +r, +r}},
{{-r, -r, +r}},
{{+r, -r, +r}},
{{-r, -r, -r}, {0, 0, -1}},
{{-r, +r, -r}, {0, 0, -1}},
{{+r, +r, -r}, {0, 0, -1}},
{{+r, -r, -r}, {0, 0, -1}},
{{-r, -r, -r}},
{{-r, +r, -r}},
{{+r, +r, -r}},
{{+r, -r, -r}},
};
VecShinyMeshVertex vertices_vec = VecShinyMeshVertex_new_zeroinit(24);
memcpy(vertices_vec.buf, vert, sizeof(vert));
VecShinyMeshVertexInc vertices_vec = VecShinyMeshVertexInc_from_span(
(SpanShinyMeshVertexInc){ .data = vert, .len = ARRAY_SIZE(vert) });
VecU32 indexes_vec = VecU32_new_reserved(36);
for (U32 f = 0; f < 6; f++) {
for (U32 j = 0; j < 6; j++)
@ -823,52 +803,8 @@ CubeVertOfFace CubeVertOfFace_next(CubeVertOfFace vert) {
return (CubeVertOfFace){vert.face, (vert.vert_on_it + 1) % 4};
}
ShinyMeshTopology generate_shiny_rhombicuboctahedron(float r) {
ShinyMeshTopology res = generate_shiny_cube(r);
for (int f = 0; f < 6; f++) {
vec3 growth = vec3_mul_scal((*VecShinyMeshVertex_at(&res.vertices, f * 4)).normal, M_SQRT1_2);
for (int i = 0; i < 4; i++) {
vec3* pos = &VecShinyMeshVertex_mat(&res.vertices, f * 4 + i)->pos;
*pos = vec3_add_vec3(*pos, growth);
}
}
for (int f = 0; f < 6; f++) {
for (int i = 0; i < 2; i++) {
CubeVertOfFace vof = {f, 2*i+(f%2)};
ShinyMeshVertex A = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(vof));
ShinyMeshVertex B = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(CubeVertOfFace_next(CubeVertOfFace_jump(vof))));
ShinyMeshVertex C = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(CubeVertOfFace_jump(vof)));
ShinyMeshVertex D = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(CubeVertOfFace_next(vof)));
vec3 norm = vec3_normalize(vec3_add_vec3(A.normal, B.normal));
ShinyMeshVertex quad_v[4] = {{A.pos, norm}, {B.pos, norm}, {C.pos, norm}, {D.pos, norm}};
size_t b = res.vertices.len;
VecShinyMeshVertex_append_span(&res.vertices, (SpanShinyMeshVertex){quad_v, ARRAY_SIZE(quad_v)});
for (U32 j = 0; j < 6; j++)
VecU32_append(&res.indexes, b + quad_to_triangles_conv_arr[j]);
}
}
for (int f = 0; f < 2; f++) {
for (int e = 0; e < 4; e++) {
CubeVertOfFace vof = {f, e};
ShinyMeshVertex A = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(CubeVertOfFace_next(vof)));
ShinyMeshVertex B = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(CubeVertOfFace_jump(vof)));
ShinyMeshVertex C = *VecShinyMeshVertex_at(&res.vertices,
CubeVertOfFace_to_vid(CubeVertOfFace_next(CubeVertOfFace_jump(CubeVertOfFace_next(vof)))));
vec3 norm = vec3_normalize(vec3_add_vec3(A.normal, vec3_add_vec3(B.normal, C.normal)));
ShinyMeshVertex ang_v[3] = {{A.pos, norm}, {B.pos, norm}, {C.pos, norm}};
size_t b = res.vertices.len;
VecShinyMeshVertex_append_span(&res.vertices, (SpanShinyMeshVertex){ang_v, ARRAY_SIZE(ang_v)});
for (int i = 0; i < 3; i++)
VecU32_append(&res.indexes, b + i);
}
}
return res;
}
GenericMeshInSceneTemplate GenericMeshInSceneTemplate_for_log(U32 w, U32 r, U32 k, U32 max_instance_count) {
GenericMeshInSceneTemplate GenericMeshInSceneTemplate_for_log(U32 w, U32 r, U32 k) {
return (GenericMeshInSceneTemplate){.topology = generate_one_fourth_of_a_cylinder((float)w, (float)r, k),
.max_instance_count = max_instance_count,
.diffuse_texture_path = VecU8_format("textures/log_%u_%u_%u_diffuse.png", w, r, k),
.normal_texture_path = VecU8_format("textures/log_%u_%u_%u_NORMAL.png", w, r, k),
.specular_texture_path = VecU8_format("textures/log_%u_%u_%u_specular.png", w, r, k),

View File

@ -3,53 +3,87 @@
#include "r0_assets.h"
/* No offset yet.
 * Contains references to Vulkan handles for buffers */
#include "../../margaret/vulkan_utils.h"
#include "../../lucy/glyph_render.h"
typedef struct {
U64 count;
MargaretSubbuf staging_busy;
MargaretSubbuf staging_updatable;
MargaretSubbuf device_local;
U64 cap;
// todo: delete this; the double-buffered staging scheme turned out to be completely useless
// todo: remove updatable buffer, fill staging buffer in main thread
} PatriciaBuf;
void PatriciaBuf_swap_staging(PatriciaBuf* self){
MargaretSubbuf t = self->staging_updatable;
self->staging_updatable = self->staging_busy;
self->staging_busy = t;
}
typedef struct {
VkBuffer vbo;
VkBuffer ebo;
size_t indexes;
VkBuffer instance_attr_buf;
VkDeviceSize instance_attr_buf_offset;
U32 limit_max_instance_count;
VkImage diffuse_texture;
VkImage normal_texture;
VkImage specular_texture;
MargaretSubbuf staging_vbo;
MargaretSubbuf staging_ebo;
// todo: replace TextureDataXXX with MargaretPngPromises
TextureDataR8G8B8A8 pixels_diffuse;
TextureDataR8G8B8A8 pixels_normal;
TextureDataR8 pixels_specular;
MargaretSubbuf staging_diffuse_tex_buf;
MargaretSubbuf staging_normal_tex_buf;
MargaretSubbuf staging_specular_tex_buf;
MargaretSubbuf vbo;
MargaretSubbuf ebo;
PatriciaBuf instance_attr;
// todo: store dimensions of these images
MargaretImg diffuse_texture;
MargaretImg normal_texture;
MargaretImg specular_texture;
} GenericModelOnSceneMem;
/* Contains both the data for model instance attributes and the buffer (+offset) where it is stored */
/* Also, I made it non-clonable. Thus */
typedef struct {
GenericModelOnSceneMem model;
VecGenericMeshInstance instances;
} UsedGenericModelOnScene;
#include "../../../../gen/l1/eve/r0/VecGenericModelOnSceneMem.h"
void UsedGenericModelOnScene_drop(UsedGenericModelOnScene self) {
VecGenericMeshInstance_drop(self.instances);
void GenericModelOnSceneMem_set(GenericModelOnSceneMem* self, size_t instance, GenericMeshInstanceInc uncomp){
assert(instance < self->instance_attr.count);
GenericMeshInstance* staging = (GenericMeshInstance*)MargaretSubbuf_get_mapped(&self->instance_attr.staging_updatable);
staging[instance].base = uncomp;
mat4 tr_inv = mat4_transpose(mat4_inverse(uncomp.model_t));
staging[instance].normal_t = mat3_new(
tr_inv.x.x, tr_inv.y.x, tr_inv.z.x,
tr_inv.x.y, tr_inv.y.y, tr_inv.z.y,
tr_inv.x.z, tr_inv.y.z, tr_inv.z.z );
}
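/* Note on normal_t above: normals must be transformed by the transpose of the inverse of the model
 * matrix (restricted to its upper-left 3x3); multiplying them by the model matrix directly would
 * break perpendicularity under non-uniform scaling. For a pure rotation plus uniform scale the two
 * agree up to a scale factor, which the shaders remove with normalize(). The same applies to
 * ShinyModelOnSceneMem_set below. */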
#include "../../../../gen/l1/eve/r0/VecUsedGenericModelOnScene.h"
typedef struct {
VkBuffer vbo;
VkBuffer ebo;
size_t indexes;
VkBuffer instance_attr_buf;
VkDeviceSize instance_attr_buf_offset;
U32 limit_max_instance_count;
MargaretSubbuf staging_vbo;
MargaretSubbuf staging_ebo;
MargaretSubbuf vbo;
MargaretSubbuf ebo;
PatriciaBuf instance_attr;
} ShinyModelOnSceneMem;
typedef struct {
ShinyModelOnSceneMem model;
VecShinyMeshInstance instances;
} UsedShinyModelOnScene;
#include "../../../../gen/l1/eve/r0/VecShinyModelOnSceneMem.h"
void UsedShinyModelOnScene_drop(UsedShinyModelOnScene self) {
VecShinyMeshInstance_drop(self.instances);
void ShinyModelOnSceneMem_set(ShinyModelOnSceneMem* self, size_t instance, ShinyMeshInstanceInc uncomp){
assert(instance < self->instance_attr.count);
ShinyMeshInstance* staging = (ShinyMeshInstance*)MargaretSubbuf_get_mapped(&self->instance_attr.staging_updatable);
staging[instance].base = uncomp;
mat4 tr_inv = mat4_transpose(mat4_inverse(uncomp.model_t));
staging[instance].normal_t = mat3_new(
tr_inv.x.x, tr_inv.y.x, tr_inv.z.x,
tr_inv.x.y, tr_inv.y.y, tr_inv.z.y,
tr_inv.x.z, tr_inv.y.z, tr_inv.z.z );
}
#include "../../../../gen/l1/eve/r0/VecUsedShinyModelOnScene.h"
typedef struct {
float fov;
mat3 cam_basis;
@ -87,7 +121,7 @@ void CamControlInfo_up(CamControlInfo* self, float fl) {
CamControlInfo CamControlInfo_new() {
return (CamControlInfo){
.fov = 1.5f, .cam_basis = marie_simple_camera_rot_m_basis_in_cols(0, 0, 0), .pos = {0, 0, 0},
.speed = 2.7f, .sensitivity = 0.5f * M_PIf / 180, .pitch_cap = M_PIf * 0.49f
.speed = 6.7f, .sensitivity = 0.5f * M_PIf / 180, .pitch_cap = M_PIf * 0.49f
};
}
@ -100,81 +134,195 @@ void CamControlInfo_update_direction(CamControlInfo* self, int win_width, int wi
self->cam_basis = marie_simple_camera_rot_m_basis_in_cols(yaw, pitch, 0);
}
typedef struct {
MargaretSubbuf staging_busy;
MargaretSubbuf staging_updatable;
MargaretSubbuf device_local;
} Pipeline0Transfer;
// Just for a test in r0
typedef struct {
mat3 rotation;
vec3 pos;
float scale;
vec3 color_on;
} ObjectInfo;
#include "../../../../gen/l1/eve/r0/VecObjectInfo.h"
/* Non copyable */
typedef struct {
VecUsedGenericModelOnScene generic_models;
VecUsedShinyModelOnScene shiny_models;
VecGenericModelOnSceneMem generic_models;
VecShinyModelOnSceneMem shiny_models;
VkClearColorValue color;
float gamma_correction_factor;
float hdr_factor;
float lsd_factor;
float anim_time; // A timer, passed to functions that push push constants
VecPipeline0Spotlight spotlights;
VecPipeline0PointLight point_lights;
/* point_light_vec_len and spotlight_vec_len are stored in staging (and also device local) buffers */
Pipeline0Transfer pipeline0_ubo;
CamControlInfo cam;
VecObjectInfo smeshnyavka_1;
VecObjectInfo smeshnyavka_3;
VecU8 text_on_screen;
} Scene;
Scene Scene_new() {
return (Scene){.generic_models = VecUsedGenericModelOnScene_new(), .shiny_models = VecUsedShinyModelOnScene_new(),
ShinyMeshInstanceInc ShinyMeshInstanceInc_from_ObjectInfo(const ObjectInfo* oi){
return (ShinyMeshInstanceInc){
.model_t = mat4_mul_mat4(marie_translation_mat4(oi->pos),
mat4_mul_mat4(marie_3d_scal_mat4(oi->scale), marie_mat3_to_mat4(oi->rotation))),
.color_on = oi->color_on, .color_off = {1, 0.4f, 0.5f}
};
}
// todo: remove this shit
void Scene_add_smeshnyavka_3(Scene* self, ObjectInfo oi){
ShinyModelOnSceneMem* model_sh = VecShinyModelOnSceneMem_mat(&self->shiny_models, 0);
size_t ni = self->smeshnyavka_3.len;
assert(ni < model_sh->instance_attr.cap);
VecObjectInfo_append(&self->smeshnyavka_3, oi);
model_sh->instance_attr.count = ni + 1;
ShinyModelOnSceneMem_set(model_sh, ni, ShinyMeshInstanceInc_from_ObjectInfo(&oi));
}
// todo: remove this shit (and rewrite everything in haskell)
void Scene_update_smeshnyavka_3(Scene* self, size_t sh_id){
assert(sh_id < self->smeshnyavka_3.len);
const ObjectInfo* oi = VecObjectInfo_at(&self->smeshnyavka_3, sh_id);
ShinyModelOnSceneMem* model_sh = VecShinyModelOnSceneMem_mat(&self->shiny_models, 0);
ShinyModelOnSceneMem_set(model_sh, sh_id, ShinyMeshInstanceInc_from_ObjectInfo(oi));
}
GenericMeshInstanceInc GenericMeshInstanceInc_from_ObjectInfo(const ObjectInfo* oi){
return (GenericMeshInstanceInc){
.model_t = mat4_mul_mat4(marie_translation_mat4(oi->pos),
mat4_mul_mat4(marie_3d_scal_mat4(oi->scale), marie_mat3_to_mat4(oi->rotation))),
};
}
// todo: remove this shit
void Scene_add_smeshnyavka_1(Scene* self, ObjectInfo oi){
GenericModelOnSceneMem* model = VecGenericModelOnSceneMem_mat(&self->generic_models, 0);
size_t ni = self->smeshnyavka_1.len;
assert(ni < model->instance_attr.cap);
VecObjectInfo_append(&self->smeshnyavka_1, oi);
model->instance_attr.count = ni + 1;
GenericModelOnSceneMem_set(model, ni, GenericMeshInstanceInc_from_ObjectInfo(&oi));
}
// todo: remove this shit
void Scene_update_smeshnyavka_1(Scene* self, size_t sh_id){
assert(sh_id < self->smeshnyavka_1.len);
const ObjectInfo* oi = VecObjectInfo_at(&self->smeshnyavka_1, sh_id);
GenericModelOnSceneMem* model = VecGenericModelOnSceneMem_mat(&self->generic_models, 0);
GenericModelOnSceneMem_set(model, sh_id, GenericMeshInstanceInc_from_ObjectInfo(oi));
}
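A minimal usage sketch for the smeshnyavka helpers above; it assumes the scene already holds generic model 0 and that VecObjectInfo_mat is the generated mutable accessor of VecObjectInfo (both assumptions, not shown in this diff):
void example_spawn_and_move(Scene* scene) {
    ObjectInfo oi = {.rotation = mat3_new(1, 0, 0,  0, 1, 0,  0, 0, 1),
                     .pos = {0, 1, 0}, .scale = 0.5f, .color_on = {1, 1, 1}};
    Scene_add_smeshnyavka_1(scene, oi);                 /* writes instance 0 into staging_updatable */
    VecObjectInfo_mat(&scene->smeshnyavka_1, 0)->pos.y += 0.1f;
    Scene_update_smeshnyavka_1(scene, 0);               /* re-uploads instance 0 with the new transform */
}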
Scene Scene_new(VecGenericModelOnSceneMem generic_models, VecShinyModelOnSceneMem shiny_models,
Pipeline0Transfer pipeline0_ubo) {
return (Scene){.generic_models = generic_models, .shiny_models = shiny_models,
.color = {.float32 = {0, 0, 0, 1}},
.gamma_correction_factor = 2.2f, .hdr_factor = 1, .lsd_factor = 0, .anim_time = 0,
.spotlights = VecPipeline0Spotlight_new(), .point_lights = VecPipeline0PointLight_new()
.pipeline0_ubo = pipeline0_ubo, .cam = CamControlInfo_new(),
.smeshnyavka_1 = VecObjectInfo_new(),
.smeshnyavka_3 = VecObjectInfo_new(), // todo: remove this shit and rewrite everything in haskell
.text_on_screen = VecU8_new(),
};
}
void Scene_drop(Scene self) {
VecUsedGenericModelOnScene_drop(self.generic_models);
VecUsedShinyModelOnScene_drop(self.shiny_models);
VecPipeline0Spotlight_drop(self.spotlights);
VecPipeline0PointLight_drop(self.point_lights);
VecGenericModelOnSceneMem_drop(self.generic_models);
VecShinyModelOnSceneMem_drop(self.shiny_models);
}
void SceneTemplate_copy_initial_model_topology_and_rerecord_transfer_cmd(
const SceneTemplate* scene_template, const Scene* scene, char* host_mem_buffer_mem,
VkCommandBuffer command_buffer, VkBuffer host_memory_buffer
) {
/* No buffer rerecording, no buffer beginning, no buffer ending */
void SceneTemplate_copy_initial_model_topology_cmd_buf_recording(
const SceneTemplate* scene_template, const Scene* scene, VkCommandBuffer command_buffer) {
assert(scene_template->generic_models.len == scene->generic_models.len);
assert(scene_template->shiny_models.len == scene->shiny_models.len);
assert(scene_template->generic_models.len == scene->generic_models.len);
if (vkResetCommandBuffer(command_buffer, 0) != VK_SUCCESS)
abortf("vkResetCommandBuffer");
VkCommandBufferBeginInfo info_begin = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
if (vkBeginCommandBuffer(command_buffer, &info_begin) != VK_SUCCESS)
abortf("vkBeginCommandBuffer");
size_t offset = 0;
// todo: use BufferCopyCmd 2 (to perform all the copying in one command)
// todo: use one buffer for all the data
for (size_t mi = 0; mi < scene_template->generic_models.len; mi++) {
const GenericMeshInSceneTemplate* mt = VecGenericMeshInSceneTemplate_at(&scene_template->generic_models, mi);
const GenericModelOnSceneMem *m_buf = &VecUsedGenericModelOnScene_at(&scene->generic_models, mi)->model;
size_t vbo_len = mt->topology.vertices.len * sizeof(GenericMeshVertex);
memcpy(host_mem_buffer_mem + offset, mt->topology.vertices.buf, vbo_len);
VkBufferCopy ra = {.srcOffset = offset, .dstOffset = 0, .size = vbo_len};
vkCmdCopyBuffer(command_buffer, host_memory_buffer, m_buf->vbo, 1, &ra);
offset += vbo_len;
const GenericModelOnSceneMem *mm = VecGenericModelOnSceneMem_at(&scene->generic_models, mi);
assert(mm->staging_vbo.len >= mt->topology.vertices.len * sizeof(GenericMeshVertex));
assert(mm->vbo.len >= mt->topology.vertices.len * sizeof(GenericMeshVertex));
GenericMeshVertex* staging_vbo = (GenericMeshVertex*)MargaretSubbuf_get_mapped(&mm->staging_vbo);
for (U64 i = 0; i < mt->topology.vertices.len; i++) {
staging_vbo[i].base = mt->topology.vertices.buf[i];
}
assert(mt->topology.indexes.len % 3 == 0);
for (size_t ti = 0; ti * 3 < mt->topology.indexes.len; ti++) {
U32 v0 = mt->topology.indexes.buf[ti * 3 + 0];
U32 v1 = mt->topology.indexes.buf[ti * 3 + 1];
U32 v2 = mt->topology.indexes.buf[ti * 3 + 2];
const GenericMeshVertexInc* A0 = VecGenericMeshVertexInc_at(&mt->topology.vertices, v0);
const GenericMeshVertexInc* A1 = VecGenericMeshVertexInc_at(&mt->topology.vertices, v1);
const GenericMeshVertexInc* A2 = VecGenericMeshVertexInc_at(&mt->topology.vertices, v2);
vec3 dp1 = vec3_minus_vec3(A1->pos, A0->pos);
vec3 dp2 = vec3_minus_vec3(A2->pos, A0->pos);
float du1 = A1->tex.x - A0->tex.x;
float dv1 = A1->tex.y - A0->tex.y;
float du2 = A2->tex.x - A0->tex.x;
float dv2 = A2->tex.y - A0->tex.y;
vec3 norm = vec3_normalize(vec3_cross(dp1, dp2));
mat2x3 tang_U_V = mat3x2_transpose(mat2_mul_mat3x2(
mat2_inverse(mat2_new(du1, dv1, du2, dv2)),
mat2x3_transpose((mat2x3){.x = dp1, .y = dp2})
));
staging_vbo[v0].norm = staging_vbo[v1].norm = staging_vbo[v2].norm = norm;
staging_vbo[v0].tang_U = staging_vbo[v1].tang_U = staging_vbo[v2].tang_U = tang_U_V.x;
staging_vbo[v0].tang_V = staging_vbo[v1].tang_V = staging_vbo[v2].tang_V = tang_U_V.y;
}
margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_vbo, &mm->vbo);
assert(mt->topology.indexes.len == mm->indexes);
size_t ebo_len = mt->topology.indexes.len * sizeof(U32);
memcpy(host_mem_buffer_mem + offset, mt->topology.indexes.buf, ebo_len);
VkBufferCopy rb = {.srcOffset = offset, .dstOffset = 0, .size = ebo_len};
vkCmdCopyBuffer(command_buffer, host_memory_buffer, m_buf->ebo, 1, &rb);
offset += ebo_len;
}
for (size_t mi = 0; mi < scene_template->shiny_models.len; mi++) {
const ShinyMeshInSceneTemplate* mt = VecShinyMeshInSceneTemplate_at(&scene_template->shiny_models, mi);
const ShinyModelOnSceneMem *m_buf = &VecUsedShinyModelOnScene_at(&scene->shiny_models, mi)->model;
size_t vbo_len = mt->topology.vertices.len * sizeof(ShinyMeshVertex);
memcpy(host_mem_buffer_mem + offset, mt->topology.vertices.buf, vbo_len);
VkBufferCopy ra = {.srcOffset = offset, .dstOffset = 0, .size = vbo_len};
vkCmdCopyBuffer(command_buffer, host_memory_buffer, m_buf->vbo, 1, &ra);
offset += vbo_len;
size_t ebo_len = mt->topology.indexes.len * sizeof(U32);
memcpy(host_mem_buffer_mem + offset, mt->topology.indexes.buf, ebo_len);
VkBufferCopy rb = {.srcOffset = offset, .dstOffset = 0, .size = ebo_len};
vkCmdCopyBuffer(command_buffer, host_memory_buffer, m_buf->ebo, 1, &rb);
offset += ebo_len;
assert(mm->ebo.len >= ebo_len);
U32* staging_ebo = (U32*)MargaretSubbuf_get_mapped(&mm->staging_ebo);
memcpy(staging_ebo, mt->topology.indexes.buf, ebo_len);
margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_ebo, &mm->ebo);
}
if (vkEndCommandBuffer(command_buffer) != VK_SUCCESS)
abortf("vkEndCommandBuffer");
for (size_t mi = 0; mi < scene_template->shiny_models.len; mi++) {
const ShinyMeshTopology* mt = VecShinyMeshTopology_at(&scene_template->shiny_models, mi);
const ShinyModelOnSceneMem *mm = VecShinyModelOnSceneMem_at(&scene->shiny_models, mi);
assert(mm->staging_vbo.len >= mt->vertices.len * sizeof(ShinyMeshVertex));
assert(mm->vbo.len >= mt->vertices.len * sizeof(ShinyMeshVertex));
ShinyMeshVertex* staging_vbo = (ShinyMeshVertex*)MargaretSubbuf_get_mapped(&mm->staging_vbo);
for (U64 i = 0; i < mt->vertices.len; i++) {
staging_vbo[i].base = mt->vertices.buf[i];
}
assert(mt->indexes.len % 3 == 0);
for (size_t ti = 0; ti * 3 < mt->indexes.len; ti++) {
U32 v0 = mt->indexes.buf[ti * 3 + 0];
U32 v1 = mt->indexes.buf[ti * 3 + 1];
U32 v2 = mt->indexes.buf[ti * 3 + 2];
vec3 p0 = VecShinyMeshVertexInc_at(&mt->vertices, v0)->pos;
vec3 p1 = VecShinyMeshVertexInc_at(&mt->vertices, v1)->pos;
vec3 p2 = VecShinyMeshVertexInc_at(&mt->vertices, v2)->pos;
vec3 norm = vec3_normalize(vec3_cross(vec3_minus_vec3(p1, p0), vec3_minus_vec3(p2, p0)));
staging_vbo[v0].normal = staging_vbo[v1].normal = staging_vbo[v2].normal = norm;
}
margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_vbo, &mm->vbo);
assert(mt->indexes.len == mm->indexes);
size_t ebo_len = mt->indexes.len * sizeof(U32);
assert(mm->ebo.len >= ebo_len);
U32* staging_ebo = (U32*)MargaretSubbuf_get_mapped(&mm->staging_ebo);
memcpy(staging_ebo, mt->indexes.buf, ebo_len);
margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_ebo, &mm->ebo);
}
}
#endif
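For reference, the per-triangle tangent computation above solves dp1 = du1*T + dv1*B and dp2 = du2*T + dv2*B for the tangent along the texture U axis (T, stored as tang_U) and along V (B, tang_V). A scalar restatement of that solve via Cramer's rule, using a local vec3_scale helper (not an engine function) and assuming vec3 exposes x/y/z fields:
static vec3 vec3_scale(vec3 v, float s) {
    return (vec3){.x = v.x * s, .y = v.y * s, .z = v.z * s};
}
/* dp1, dp2: triangle edge vectors; (du1, dv1), (du2, dv2): the matching UV deltas */
static void triangle_tangent_basis(vec3 dp1, vec3 dp2,
                                   float du1, float dv1, float du2, float dv2,
                                   vec3* tang_U, vec3* tang_V) {
    float det = du1 * dv2 - dv1 * du2;   /* determinant of the 2x2 UV-delta matrix */
    float inv_det = 1.0f / det;          /* assumes non-degenerate UVs (det != 0) */
    *tang_U = vec3_scale(vec3_minus_vec3(vec3_scale(dp1, dv2), vec3_scale(dp2, dv1)), inv_det);
    *tang_V = vec3_scale(vec3_minus_vec3(vec3_scale(dp2, du1), vec3_scale(dp1, du2)), inv_det);
}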

View File

@ -1,6 +1,5 @@
#include "r0_assets.h"
#include "../../marie/rasterization.h"
// #include "../../margaret/png_pixel_masses.h" // todo: delete this file
#include "../../../../gen/l1/margaret/png_pixel_masses.h"
#include "../../marie/texture_processing.h"

View File

@ -1,9 +1,12 @@
#version 450
layout(location = 0) in vec2 fsin_tex;
layout(location = 1) in vec3 fsin_pos;
layout(location = 0) in vec3 tang_norm;
layout(location = 1) in vec3 tang_U;
layout(location = 2) in vec3 tang_V;
layout(location = 3) in vec2 tex;
layout(location = 4) in vec3 pos;
/* Righ now all in set 0 */
/* Right now all in set 0 */
layout(location = 0) out vec4 fin_color;
/* Yes, even these guys */
layout(binding = 1) uniform sampler2D color_tex;
@ -14,6 +17,11 @@ layout(push_constant, std430) uniform pc {
layout(offset = 64) vec3 camera_pos;
};
struct Pipeline0PointLight {
vec3 pos;
vec3 color;
};
struct Pipeline0Spotlight {
vec3 pos;
vec3 dir;
@ -21,16 +29,11 @@ struct Pipeline0Spotlight {
float range;
};
struct Pipeline0PointLight {
vec3 pos;
vec3 color;
};
layout(std140, binding = 0) uniform Pipeline0UBO {
int point_light_count;
int spotlight_count;
Pipeline0PointLight point_light_arr[20];
Pipeline0Spotlight spotlight_arr [120];
Pipeline0PointLight point_light_arr[120];
Pipeline0Spotlight spotlight_arr [20];
};
float get_intensity(float dist){
@ -38,18 +41,19 @@ float get_intensity(float dist){
}
void main(){
vec3 compressed_normal = texture(normal_map, fsin_tex).xyz;
vec3 norm = compressed_normal * 2 - 1;
vec3 compressed_normal = texture(normal_map, tex).xyz;
vec3 correct_norm_on_tang = compressed_normal * 2 - 1;
vec3 norm = normalize(mat3(tang_U, tang_norm, tang_V) * correct_norm_on_tang);
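// the normal map stores a tangent-space normal remapped into [0, 1]; "* 2 - 1" undoes that, and
// mat3(tang_U, tang_norm, tang_V) is the TBN basis (as columns) that carries it into world space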
vec3 diffuse_illumination = vec3(0);
vec3 specular_illumination = vec3(0);
for (int i = 0; i < point_light_count; i++) {
Pipeline0PointLight lamp = point_light_arr[i];
vec3 to_light = -fsin_pos + lamp.pos;
vec3 to_light = -pos + lamp.pos;
float dist = length(to_light);
vec3 U = to_light / dist;
diffuse_illumination += get_intensity(dist) * max(0.02, dot(U, norm)) * lamp.color;
diffuse_illumination += get_intensity(dist) * max(0, dot(U, norm)) * lamp.color;
vec3 A = reflect(-U, norm);
vec3 to_cam = -fsin_pos+camera_pos;
vec3 to_cam = -pos+camera_pos;
float dist_to_cam = length(to_cam);
vec3 B = to_cam / dist_to_cam;
specular_illumination += get_intensity(dist) * pow(max(0, dot(A, B)), 32) * lamp.color;
@ -57,9 +61,8 @@ void main(){
for (int i = 0; i < spotlight_count; i++) {
Pipeline0Spotlight lamp = spotlight_arr[i];
}
vec3 natural_color = texture(color_tex, fsin_tex).xyz;
float specular_c = texture(specular_map, fsin_tex).x;
vec3 natural_color = texture(color_tex, tex).xyz;
float specular_c = texture(specular_map, tex).x;
vec3 color = natural_color * diffuse_illumination + specular_c * specular_illumination;
fin_color = vec4(color, 1);
// fin_color = vec4(specular_c, 0, 0, 1);
}

View File

@ -2,19 +2,31 @@
layout(location = 0) in vec3 pos;
layout(location = 1) in vec2 tex;
layout(location = 2) in mat4 model_t;
/* 2 <- 3, 4, 5 */
layout(location = 2) in vec3 norm;
layout(location = 3) in vec3 tang_U;
layout(location = 4) in vec3 tang_V;
layout(location = 0) out vec2 vsout_tex;
layout(location = 1) out vec3 vsout_pos;
layout(location = 5) in mat4 model_t;
/* 5 <- 6, 7, 8 */
layout(location = 9) in mat3 normal_t;
/* 9 <- 10, 11 */
layout(location = 0) out vec3 out_norm;
layout(location = 1) out vec3 out_tang_U;
layout(location = 2) out vec3 out_tang_V;
layout(location = 3) out vec2 out_tex;
layout(location = 4) out vec3 out_pos;
layout(push_constant, std430) uniform pc {
mat4 proj_cam_t;
};
void main(){
vsout_tex = tex;
out_norm = normalize(normal_t * norm);
out_tang_U = normalize(normal_t * tang_U);
out_tang_V = normalize(normal_t * tang_V);
out_tex = tex;
vec4 real_pos = model_t * vec4(pos, 1);
vsout_pos = real_pos.xyz;
out_pos = real_pos.xyz;
gl_Position = proj_cam_t * real_pos;
}

View File

@ -11,6 +11,11 @@ layout(push_constant, std430) uniform pc {
layout(offset = 64) vec3 camera_pos;
};
struct Pipeline0PointLight {
vec3 pos;
vec3 color;
};
struct Pipeline0Spotlight {
vec3 pos;
vec3 dir;
@ -18,16 +23,11 @@ struct Pipeline0Spotlight {
float range;
};
struct Pipeline0PointLight {
vec3 pos;
vec3 color;
};
layout(std140, binding = 0) uniform Pipeline0UBO {
int point_light_count;
int spotlight_count;
Pipeline0PointLight point_light_arr[20];
Pipeline0Spotlight spotlight_arr [120];
Pipeline0PointLight point_light_arr[120];
Pipeline0Spotlight spotlight_arr [20];
};
float get_intensity(float dist){
@ -42,15 +42,14 @@ void main(){
vec3 to_light = -pos + lamp.pos;
float dist = length(to_light);
vec3 U = to_light / dist;
diffuse_illumination += get_intensity(dist) * max(0.02, dot(U, norm)) * lamp.color;
diffuse_illumination += get_intensity(dist) * max(0, dot(U, norm)) * lamp.color;
vec3 A = reflect(-U, norm);
vec3 B = normalize(-pos+camera_pos);
specular_illumination += get_intensity(dist) * pow(max(0, dot(A, B)), 256) * lamp.color;
// specular_illumination += get_intensity(dist) * pow(max(0, dot(A, B)), 256) * lamp.color;
}
for (int i = 0; i < spotlight_count; i++) {
Pipeline0Spotlight lamp = spotlight_arr[i];
}
vec3 color = color_off * diffuse_illumination + 0.5 * specular_illumination + color_on;
fin_color = vec4(color, 1);
// fin_color = vec4(length(norm) / 2, 0, 0, 1);
}

View File

@ -4,9 +4,11 @@ layout(location = 0) in vec3 pos;
layout(location = 1) in vec3 normal;
layout(location = 2) in mat4 model_t;
/* 2 <- 3,4,5 */
/* 2 <- 3, 4, 5 */
layout(location = 6) in vec3 color_off;
layout(location = 7) in vec3 color_on;
layout(location = 8) in mat3 normal_t;
/* 8 <- 9, 10 */
layout(location = 0) out vec3 vsout_normal;
layout(location = 1) out vec3 vsout_color_off;
@ -18,7 +20,7 @@ layout(push_constant, std430) uniform pc {
};
void main(){
vsout_normal = normal;
vsout_normal = normalize(normal_t * normal);
vsout_color_off = color_off;
vsout_color_on = color_on;
vec4 real_pos = model_t * vec4(pos, 1);

Binary file not shown. Before: 76 KiB, After: 91 KiB

View File

@ -1 +0,0 @@
#version 450

View File

@ -1 +0,0 @@
#version 450

View File

@ -1,68 +0,0 @@
/* We switch on the light */
#include "../../../l1_5/core/stringsearch.h"
#include "../../../l1_5/core/input_olproga.h"
#include "../../../../gen/l1/VecAndSpan_VecU64.h"
#include "../../../../gen/l1/VecAndSpan_U64.h"
// Aborts on errors
U64 fast_solution(U64 n, U64 a, U64 b, const VecU8* S){
VecVecU64 Z = VecVecU64_new();
for (size_t i = 0; i < n; i++) {
VecU64 z = z_function(VecU8_span(S, i, n - i));
VecVecU64_append(&Z, z);
}
VecU64 dp = VecU64_new_filled(n + 1, UINT64_MAX);
dp.buf[0] = 0;
for (size_t k = 0; k < n; k++) {
size_t score_here = dp.buf[k];
dp.buf[k + 1] = MIN_U64(dp.buf[k + 1], score_here + a);
size_t lets = 0;
for (size_t sss = 0; sss <= k; sss++) {
size_t before = k - sss;
size_t reach = Z.buf[sss].buf[before];
lets = MAX_U64(lets, MIN_U64(reach, before));
}
for (size_t after = 1; after <= lets; after++) {
dp.buf[k + after] = MIN_U64(dp.buf[k + after], score_here + b);
}
}
return dp.buf[n];
}
U64 correct_solution(U64 n, U64 a, U64 b, const VecU8* S){
VecU64 dp = VecU64_new_filled(n + 1, UINT64_MAX);
dp.buf[0] = 0;
for (size_t i = 1; i <= n; i++) {
dp.buf[i] = dp.buf[i - 1] + a;
for (size_t gb = 1; gb <= i; gb++) {
for (size_t start = 0; start + gb * 2 <= i; start++) {
for (size_t j = 0; j < gb; j++) {
if (S->buf[start + j] != S->buf[i - gb + j])
goto incorrect;
}
/* It was correct */
dp.buf[i] = MIN_U64(dp.buf[i], dp.buf[i - gb] + b);
incorrect:
continue;
}
}
}
return dp.buf[n];
}
int main(){
U64 n = stdin_read_U64_nofail();
U64 a = stdin_read_U64_nofail();
U64 b = stdin_read_U64_nofail();
VecU8 S = stdin_read_VecU8_nospace();
assert(n != 0 && S.len == n);
assert(n <= 5000);
assert(a <= 5000 && b <= 5000);
printf("%lu\n", fast_solution(n, a, b, &S));
return 0;
}

View File

@ -1,304 +0,0 @@
/* Your death is near */
#include "../../../../gen/l1/VecAndSpan_U8.h"
#include "../../../../gen/l1/VecAndSpan_VecU8.h"
#include "../../../../gen/l1/VecAndSpan_VecU32.h"
#include "../../../../gen/l1/VecAndSpan_S64.h"
#include "../../../../gen/l1/VecAndSpan_U64.h"
// #include "../../../l1_5/core/input_olproga.h"
#include "../../../../gen/l1/OptionU64.h"
#define BUF_SIZE 4096
static unsigned char buf[BUF_SIZE];
static size_t pos = 0, sz = 0;
static int pushback = EOF;
int fast_getc() {
if (pushback != EOF) {
int c = pushback;
pushback = EOF;
return c;
}
if (pos == sz) {
sz = fread(buf, 1, BUF_SIZE, stdin);
pos = 0;
if (sz == 0) return EOF;
}
return (int)buf[pos++];
}
void fast_ungetc(int c) {
if (pushback != EOF) {
abort(); // Multiple pushbacks not supported (not needed in your functions)
}
pushback = c;
}
void stdin_skip_whitespaces() {
while (true) {
int ch = fast_getc();
if (ch == EOF)
return;
if (ch != ' ' && ch != '\t' && ch != '\n') {
fast_ungetc(ch);
break;
}
}
}
// Aborts on error
OptionU64 stdin_read_U64() {
stdin_skip_whitespaces();
U64 x = 0;
int i = 0;
for (;; i++) {
int ch = fast_getc();
if (ch == EOF)
break;
if (!('0' <= ch && ch <= '9')) {
fast_ungetc(ch);
break;
}
U64 d = (U64)(ch - '0');
if (x == 0 && i > 0)
abortf("Bad integer input\n");
if (x > UINT64_MAX / 10)
abortf("Integer input exceeds UINT64_MAX\n");
x *= 10;
if (x > UINT64_MAX - d)
abortf("Integer input exceeds UINT64_MAX\n");
x += d;
}
if (i > 0)
return Some_U64(x);
return None_U64();
}
/* If empty string is returned it means EOF was reached */
NODISCARD VecU8 stdin_read_VecU8_nospace() {
stdin_skip_whitespaces();
VecU8 str = VecU8_new();
while (true) {
int ch = fast_getc();
if (ch == EOF)
break;
if (ch == ' ' || ch == '\t' || ch == '\n') {
fast_ungetc(ch);
break;
}
VecU8_append(&str, ch);
}
return str;
}
// Aborts if non-integer input or EOF was encountered before my integer
U64 stdin_read_U64_nofail() {
OptionU64 x = stdin_read_U64();
if (x.variant == Option_None)
abortf("No number found\n");
return x.some;
}
typedef struct {
U64 suf_link;
U64 ans_up_link;
U64 transition[26];
S64 class_ref;
} I_FishNode;
I_FishNode I_FishNode_new(){
/* C++ is fucking lame*/
return (I_FishNode){.suf_link = 0, .ans_up_link = 0, .transition = {0}, .class_ref = -1};
// return (I_FishNode){.class_ref = -1};
}
#include "../../../../gen/l1/eve/r_alg/VecI_FishNode.h"
typedef struct {
VecS64 next_same_ref_on_strings;
VecI_FishNode nodes;
} Fish;
Fish incomplete_Fish_from_VecU8(SpanVecU8 S){
VecS64 next_same_ref_on_strings = VecS64_new_filled(S.len, -1);
VecI_FishNode nodes = VecI_FishNode_new_reserved(1000001);
VecI_FishNode_append(&nodes, I_FishNode_new());
assert(nodes.buf[0].suf_link == 0 && nodes.buf[0].ans_up_link == 0 && nodes.buf[0].transition[0] == 0);
for (size_t j = 0; j < S.len; j++) {
SpanU8 str = VecU8_to_span(&S.data[j]);
U64 cur = 0; /* cur trie node */
for (size_t i = 0; i < str.len; i++) {
U8 ch = str.data[i];
assert('a' <= ch && ch <= 'z');
U8 d = ch - 'a';
assert(d < 26);
assert(cur < nodes.len);
if (nodes.buf[cur].transition[d] == 0) {
U64 nid = nodes.len;
VecI_FishNode_append(&nodes, I_FishNode_new());
nodes.buf[cur].transition[d] = nid;
}
assert(nodes.buf[cur].transition[d] < nodes.len);
assert(nodes.buf[cur].transition[d] != 0);
cur = nodes.buf[cur].transition[d];
}
if (nodes.buf[cur].class_ref != -1) {
assert(next_same_ref_on_strings.buf[j] == -1);
next_same_ref_on_strings.buf[j] = nodes.buf[cur].class_ref;
}
nodes.buf[cur].class_ref = (S64)j;
}
return (Fish){.next_same_ref_on_strings=next_same_ref_on_strings, .nodes=nodes};
}
/* Debug function */
void Fish_debug_print(const Fish* self){
printf("next_same_ref_on_strings:\n");
for (size_t i = 0; i < self->next_same_ref_on_strings.len; i++)
printf("%3ld ", self->next_same_ref_on_strings.buf[i]);
printf("\n");
size_t nc = self->nodes.len;
for (size_t i = 0; i < nc; i++)
printf("=== ");
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("%3ld ", self->nodes.buf[i].class_ref);
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("--- ");
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("%3lu ", self->nodes.buf[i].suf_link);
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("--- ");
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("%3lu ", self->nodes.buf[i].ans_up_link);
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("--- ");
printf("\n");
for (U8 d = 0; d < 26; d++) {
for (size_t i = 0; i < nc; i++) {
printf("%3lu ", self->nodes.buf[i].transition[d]);
}
printf("\n");
}
}
void complete_Fish(Fish* fish){
/* trie unpacked */
size_t nc = fish->nodes.len;
assert(nc >= 1);
/* At first, the fields suf_link and ans_up_link are filled with garbage */
// Except for root. Root is already almost initialized
assert(fish->nodes.buf[0].suf_link == 0 && fish->nodes.buf[0].ans_up_link == 0);
// Some transitions are already complete. Those that contain 0 are yet to be filled
// transitions to 0 can't occur naturally in Trie. (Incomplete Fish = Trie)
VecU64 bfs_cur = VecU64_new_zeroinit(1); /* Initialize with one entry pointing to the root (node 0) */
VecU64 bfs_next = VecU64_new();
while (bfs_cur.len > 0) {
do {
U64 pu = VecU64_pop(&bfs_cur);
for (U8 d = 0; d < 26; d++) {
if (fish->nodes.buf[pu].transition[d] != 0) {
U64 u = fish->nodes.buf[pu].transition[d];
U64 u_suf_link = pu == 0 ? 0 : fish->nodes.buf[fish->nodes.buf[pu].suf_link].transition[d];
fish->nodes.buf[u].suf_link = u_suf_link;
if (fish->nodes.buf[u_suf_link].class_ref != -1) {
fish->nodes.buf[u].ans_up_link = u_suf_link;
} else {
fish->nodes.buf[u].ans_up_link = fish->nodes.buf[u_suf_link].ans_up_link;
}
// fish->nodes.buf[pu].transition[d] = u;
VecU64_append(&bfs_next, u);
} else if (pu == 0) {
fish->nodes.buf[pu].transition[d] = 0;
} else {
fish->nodes.buf[pu].transition[d] = fish->nodes.buf[fish->nodes.buf[pu].suf_link].transition[d];
}
}
} while (bfs_cur.len > 0);
VecU64 t = bfs_cur;
bfs_cur = bfs_next;
bfs_next = t;
}
VecU64_drop(bfs_cur);
VecU64_drop(bfs_next);
}
void Fish_drop(Fish self){
VecS64_drop(self.next_same_ref_on_strings);
VecI_FishNode_drop(self.nodes);
}
int main() {
// #ifndef RUNNING_HERE
freopen("inputik.txt", "r", stdin);
freopen("outputik.txt", "w", stdout);
// #endif
VecU8 T = stdin_read_VecU8_nospace();
U64 N = stdin_read_U64_nofail();
VecVecU8 S = VecVecU8_new_of_size(N);
for (size_t i = 0; i < N; i++) {
VecU8 s = stdin_read_VecU8_nospace();
assert(S.buf[i].buf == NULL);
S.buf[i] = s;
}
Fish fish = incomplete_Fish_from_VecU8(VecVecU8_to_span(&S));
// Fish_debug_print(&fish);
complete_Fish(&fish);
VecVecU32 answer = VecVecU32_new_of_size(N);
/* going through T to fill answer */
U64 fish_v = 0;
for (size_t i = 0;; i++) {
int VIBE_CHECK = 0; // Can be safely removed
U64 CUR = fish_v;
while (true) {
S64 j_in_class = fish.nodes.buf[CUR].class_ref;
if (j_in_class == -1) // Can be safely removed
VIBE_CHECK++; // Can be safely removed
while (j_in_class != -1) {
assert(j_in_class < (S64)N);
size_t slen = S.buf[j_in_class].len;
assert(slen <= i);
VecU32_append(&answer.buf[j_in_class], (U32)(i - slen));
j_in_class = fish.next_same_ref_on_strings.buf[j_in_class];
}
/* We give the root a chance to execute, yet this is where we stop */
if (CUR == 0)
break;
CUR = fish.nodes.buf[CUR].ans_up_link;
}
if (VIBE_CHECK > 2) // Can be safely removed
abort(); // Can be safely removed
if (i == T.len)
break;
U8 ch = T.buf[i];
assert('a' <= ch && ch <= 'z');
U8 d = ch - 'a';
assert(d < 26);
fish_v = fish.nodes.buf[fish_v].transition[d];
}
for (size_t i = 0; i < N; i++) {
printf("%lu", answer.buf[i].len);
for (size_t e = 0; e < answer.buf[i].len; e++) {
printf(" %u", answer.buf[i].buf[e] + 1);
}
printf("\n");
}
VecVecU8_drop(S);
VecVecU32_drop(answer);
Fish_drop(fish);
VecU8_drop(T);
return 0;
}

View File

@ -1,287 +0,0 @@
/* __You get millions of volts__ */
#include "../../../../gen/l1/VecAndSpan_U8.h"
#include "../../../../gen/l1/VecAndSpan_VecU8.h"
#include "../../../../gen/l1/VecAndSpan_S64.h"
#include "../../../../gen/l1/VecAndSpan_U64.h"
#include "../../../l1_5/core/input_olproga.h"
typedef struct {
U64 suf_link;
U64 ans_up_link;
U64 transition[26];
S64 class_ref;
} I_FishNode;
I_FishNode I_FishNode_new(){
/* C++ is fucking lame*/
return (I_FishNode){.suf_link = 0, .ans_up_link = 0, .transition = {0}, .class_ref = -1};
// return (I_FishNode){.class_ref = -1};
}
#include "../../../../gen/l1/eve/r_alg/VecI_FishNode.h"
typedef struct {
VecS64 next_same_ref_on_strings;
VecI_FishNode nodes;
} Fish;
Fish incomplete_Fish_from_VecU8(SpanVecU8 S){
VecS64 next_same_ref_on_strings = VecS64_new_filled(S.len, -1);
VecI_FishNode nodes = VecI_FishNode_new_reserved(1000001);
VecI_FishNode_append(&nodes, I_FishNode_new());
assert(nodes.buf[0].suf_link == 0 && nodes.buf[0].ans_up_link == 0 && nodes.buf[0].transition[0] == 0);
for (size_t j = 0; j < S.len; j++) {
SpanU8 str = VecU8_to_span(&S.data[j]);
U64 cur = 0; /* cur trie node */
for (size_t i = 0; i < str.len; i++) {
U8 ch = str.data[i];
assert('a' <= ch && ch <= 'z');
U8 d = ch - 'a';
assert(d < 26);
assert(cur < nodes.len);
if (nodes.buf[cur].transition[d] == 0) {
U64 nid = nodes.len;
VecI_FishNode_append(&nodes, I_FishNode_new());
nodes.buf[cur].transition[d] = nid;
}
assert(nodes.buf[cur].transition[d] < nodes.len);
assert(nodes.buf[cur].transition[d] != 0);
cur = nodes.buf[cur].transition[d];
}
if (nodes.buf[cur].class_ref != -1) {
assert(next_same_ref_on_strings.buf[j] == -1);
next_same_ref_on_strings.buf[j] = nodes.buf[cur].class_ref;
}
nodes.buf[cur].class_ref = (S64)j;
}
return (Fish){.next_same_ref_on_strings=next_same_ref_on_strings, .nodes=nodes};
}
/* Debug function */
void Fish_debug_print(const Fish* self){
printf("next_same_ref_on_strings:\n");
for (size_t i = 0; i < self->next_same_ref_on_strings.len; i++)
printf("%3ld ", self->next_same_ref_on_strings.buf[i]);
printf("\n");
size_t nc = self->nodes.len;
for (size_t i = 0; i < nc; i++)
printf("=== ");
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("%3ld ", self->nodes.buf[i].class_ref);
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("--- ");
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("%3lu ", self->nodes.buf[i].suf_link);
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("--- ");
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("%3lu ", self->nodes.buf[i].ans_up_link);
printf("\n");
for (size_t i = 0; i < nc; i++)
printf("--- ");
printf("\n");
for (U8 d = 0; d < 26; d++) {
for (size_t i = 0; i < nc; i++) {
printf("%3lu ", self->nodes.buf[i].transition[d]);
}
printf("\n");
}
}
void complete_Fish(Fish* fish){
/* trie unpacked */
size_t nc = fish->nodes.len;
assert(nc >= 1);
/* At first, the fields suf_link and ans_up_link are filled with garbage */
// Except for root. Root is already almost initialized
assert(fish->nodes.buf[0].suf_link == 0 && fish->nodes.buf[0].ans_up_link == 0);
// Some transitions are already complete. Those that contain 0 are yet to be filled
// transitions to 0 can't occur naturally in Trie. (Incomplete Fish = Trie)
VecU64 bfs_cur = VecU64_new_zeroinit(1); /* Initialize with one entry pointing to the root (node 0) */
VecU64 bfs_next = VecU64_new();
while (bfs_cur.len > 0) {
do {
U64 pu = VecU64_pop(&bfs_cur);
for (U8 d = 0; d < 26; d++) {
if (fish->nodes.buf[pu].transition[d] != 0) {
U64 u = fish->nodes.buf[pu].transition[d];
U64 u_suf_link = pu == 0 ? 0 : fish->nodes.buf[fish->nodes.buf[pu].suf_link].transition[d];
fish->nodes.buf[u].suf_link = u_suf_link;
if (fish->nodes.buf[u_suf_link].class_ref != -1) {
fish->nodes.buf[u].ans_up_link = u_suf_link;
} else {
fish->nodes.buf[u].ans_up_link = fish->nodes.buf[u_suf_link].ans_up_link;
}
// fish->nodes.buf[pu].transition[d] = u;
VecU64_append(&bfs_next, u);
} else if (pu == 0) {
fish->nodes.buf[pu].transition[d] = 0;
} else {
fish->nodes.buf[pu].transition[d] = fish->nodes.buf[fish->nodes.buf[pu].suf_link].transition[d];
}
}
} while (bfs_cur.len > 0);
VecU64 t = bfs_cur;
bfs_cur = bfs_next;
bfs_next = t;
}
VecU64_drop(bfs_cur);
VecU64_drop(bfs_next);
}
void Fish_drop(Fish self){
VecS64_drop(self.next_same_ref_on_strings);
VecI_FishNode_drop(self.nodes);
}
typedef struct {
U64 trans[26];
} J_AlphaVertex;
#include "../../../../gen/l1/eve/r_alg/VecJ_AlphaVertex.h"
typedef struct{
size_t only_trash_state;
size_t start;
VecJ_AlphaVertex states;
} AntiGraph;
void AntiGraph_drop(AntiGraph self){
VecJ_AlphaVertex_drop(self.states);
}
U64 Fish_anti_automaton_dfs(U64 fv, VecS64* map, VecJ_AlphaVertex* graph, const VecI_FishNode* fish){
assert(fv < map->len);
assert(fv < fish->len);
/* Priority number 1: check if it is terminal */
if (fish->buf[fv].class_ref != -1 || fish->buf[fish->buf[fv].ans_up_link].class_ref != -1)
return 0; // Trash vertex
if (map->buf[fv] == -1) {
size_t mid = graph->len;
map->buf[fv] = (S64)mid;
/* Right now it is filled with trash. The important point is that it is marked as visited */
VecJ_AlphaVertex_append(graph, (J_AlphaVertex){0});
for (U8 d = 0; d < 26; d++) {
U64 nnon = Fish_anti_automaton_dfs(fish->buf[fv].transition[d], map, graph, fish);
graph->buf[mid].trans[d] = nnon;
}
}
return map->buf[fv];
}
AntiGraph Fish_anti_automaton(Fish fish){
size_t nc = fish.nodes.len;
assert(nc > 0);
/* starting with just the trash vertex; it points to itself, looping back onto itself */
VecJ_AlphaVertex graph = VecJ_AlphaVertex_new_zeroinit(1);
VecS64 map = VecS64_new_filled(nc, -1);
U64 start = Fish_anti_automaton_dfs(0, &map, &graph, &fish.nodes);
Fish_drop(fish);
VecS64_drop(map);
return (AntiGraph){.only_trash_state = 0, .start = start, .states = graph};
}
#define MOD (1000000007)
NODISCARD VecU64 mat_mul(uint32_t N, const VecU64* A, const VecU64* B){
assert(A->len == N * N && B->len == N * N);
VecU64 C = VecU64_new_zeroinit(N * N);
for (size_t y = 0; y < N; y++) {
for (size_t x = 0; x < N; x++) {
size_t s = 0;
for (size_t k = 0; k < N; k++) {
s = (s + (A->buf[y * N + k] * B->buf[k * N + x]) % MOD) % MOD;
}
C.buf[y * N + x] = s;
}
}
return C;
}
NODISCARD VecU64 pow_matrix(uint32_t N, const VecU64* A, uint64_t B){
if (B == 0) {
VecU64 E = VecU64_new_zeroinit(N * N);
for (size_t k = 0; k < N; k++)
E.buf[N * k + k] = 1;
return E;
}
if (B == 1)
return VecU64_clone(A);
uint64_t b = B / 2;
VecU64 e = pow_matrix(N, A, b);
assert(e.len == N * N);
VecU64 E = mat_mul(N, &e, &e);
VecU64_drop(e); // e is no more
if (B % 2 == 0)
return E;
VecU64 F = mat_mul(N, &E, A);
VecU64_drop(E);
return F;
}
void VecU64_debug_print_cool_matrix(size_t N, const VecU64* matrix){
assert(matrix->len == N * N);
for (size_t y = 0; y < N; y++) {
for (size_t x = 0; x < N; x++)
printf(" %2lu", matrix->buf[y * N + x]);
printf("\n");
}
}
int main(){
U64 K = stdin_read_U64_nofail();
U64 m = stdin_read_U64_nofail();
VecVecU8 S = VecVecU8_new_of_size(m);
for (size_t i = 0; i < m; i++) {
U64 sn = stdin_read_U64_nofail();
if (sn == 0)
abort();
assert(S.buf[i].buf == NULL);
S.buf[i] = stdin_read_VecU8_nospace();
// assert(S.buf[i].len == sn);
}
// I_Trie_debug_print(&trie);
Fish fish = incomplete_Fish_from_VecU8(VecVecU8_to_span(&S));
complete_Fish(&fish);
// Fish_debug_print(&fish);
AntiGraph antigraph = Fish_anti_automaton(fish);
size_t N = antigraph.states.len;
assert(1 < N);
assert(0 == antigraph.only_trash_state);
VecU64 graph_matrix = VecU64_new_zeroinit(N * N);
for (size_t v = 0; v < N; v++) {
for (U8 d = 0; d < 26; d++) {
size_t t = antigraph.states.buf[v].trans[d];
assert(t < N);
graph_matrix.buf[t * N + v]++;
}
}
// VecU64_debug_print_cool_matrix(N, &graph_matrix);
VecU64 K_graph_matrix = pow_matrix(N, &graph_matrix, K);
U64 K_paths = 0;
for (size_t endpoint = 1; endpoint < N; endpoint++) {
K_paths = (K_paths + K_graph_matrix.buf[endpoint * N + antigraph.start]) % MOD;
}
printf("%lu\n", K_paths);
VecU64_drop(graph_matrix);
VecU64_drop(K_graph_matrix);
AntiGraph_drop(antigraph);
VecVecU8_drop(S);
}

View File

@ -1,8 +0,0 @@
#ifndef PROTOTYPE1_SRC_L3_FUN_MACHINE_BUBINA_H
#define PROTOTYPE1_SRC_L3_FUN_MACHINE_BUBINA_H
typedef struct {
} BubinaState;
#endif

View File

@ -1,185 +0,0 @@
#ifndef PROTOTYPE1_SRC_L3_FUN_MACHINE_STATE_H
#define PROTOTYPE1_SRC_L3_FUN_MACHINE_STATE_H
#include "../../../gen/l1/VecAndSpan_U8.h"
#include "../../../gen/l1/VecAndSpan_U16.h"
// todo: recheck this structure
const U8 FunMachine_LRU_states[24][4] ={
/* x = 0 1 2 3 */
{ 3, 0, 8, 16}, /* state 0 : (1 2 3 0) LRU=0 */
{ 1, 11, 9, 17}, /* state 1 : (0 2 3 1) LRU=1 */
{ 2, 10, 19, 18}, /* state 2 : (0 1 3 2) LRU=2 */
{ 3, 11, 19, 18}, /* state 3 : (0 1 2 3) LRU=3 */
{ 2, 4, 8, 16},
{ 5, 10, 9, 17},
{ 6, 10, 9, 18},
{ 7, 11, 19, 17},
{ 7, 0, 8, 20},
{ 1, 15, 9, 21},
{ 2, 10, 23, 22},
{ 3, 11, 23, 22},
{ 1, 0, 12, 20},
{ 1, 0, 13, 21},
{ 2, 14, 8, 22},
{ 3, 15, 23, 16},
{ 6, 4, 12, 16},
{ 5, 14, 13, 17},
{ 6, 14, 13, 18},
{ 7, 15, 19, 21},
{ 5, 4, 12, 20},
{ 5, 4, 13, 21},
{ 6, 14, 12, 22},
{ 7, 15, 23, 20}
};
#define FunMachine_levers_count 16
#define FunMachine_keys_count 50
#define FunMachine_cache_banks_count 4
#define FunMachine_cache_sets_count 32
#define FunMachine_cache_sets_pow 5
#define FunMachine_cache_line_size 16
#define FunMachine_cache_line_pow 4
#define FunMachine_disk_drives 2
#define FunMachine_disk_io_block_pow 7
#define FunMachine_disk_io_block_size 128
typedef struct {
U64 timeout_remaining;
bool powered;
U16 levers;
bool keys[FunMachine_keys_count];
// History of cache bank usage (from 0 to 23)
U8 lru;
// Our simulation assumes complete cache consistency and does not separate cache storage from
// memory storage.
// We have 4 banks; memory blocks of size 2^4 (there are 2^12 of them) are separated into
// 2^5 interleaved sets (at any time each set can point to 4 of the 2^7 possible indexes)
U8 cache_indexes[FunMachine_cache_banks_count][FunMachine_cache_sets_count];
// We store the last cache line that was used for instruction decoding right inside JSM3C
U16 last_command_line_mask; // Last 4 bits in IP are zeroed out
// We store the following 3 flags:
// 0 - Z (all bits of last op result were 0)
// 1 - S (msb of last op result was 1)
// 2 - O (last operation resulted in overflow)
U16 flags;
U16 AX;
U16 BX;
U16 CX;
U16 DX;
U16 EX;
U16 FX;
U16 IP;
VecU16 memory;
} FunMachineState;
FunMachineState FunMachineState_from_image(SpanU16 image) {
assert(image.len <= UINT16_MAX);
FunMachineState res = (FunMachineState){
.timeout_remaining = UINT64_MAX,
.memory = VecU16_new_zeroinit(UINT16_MAX)};
return res;
}
void FunMachine_drop(FunMachineState self) {
VecU16_drop(self.memory);
}
typedef enum {
FunMachineIM_power_on,
FunMachineIM_power_off,
FunMachineIM_auto_wake,
FunMachineIM_button_press,
FunMachineIM_button_release,
FunMachineIM_lever_changed,
FunMachineIM_disk_drive_connected,
FunMachineIM_disk_drive_disconnected,
FunMachineIM_disk_drive_not_busy,
FunMachineIM_disk_drive_synchronized,
} FunMachineIM_variant;
typedef struct {
U8 drive_id; // From
} FunMachine_DriveConnectionInfo;
typedef struct {
FunMachineIM_variant variant;
union {
U8 id; // For button and for lever event
FunMachine_DriveConnectionInfo drive; // For all the disk drive events
};
} FunMachineIM;
// (U64, funMachineIM)
typedef struct{
U64 tp;
FunMachineIM im;
} FunMachineIM_TP;
typedef enum {
FunMachineIMR_None,
FunMachineIMR_LampUpdate,
FunMachineIMR_ShutAllDown,
} FunMachineIMR_variant;
typedef struct {
FunMachineIMR_variant variant;
// For FunMachineIMR_LampUpdate
U8 changed_row;
U16 nav; // new value set for that row
} FunMachineIMR;
// (FunMachineIMR, int, U64)
typedef struct {
FunMachineIMR r;
/* Right now there is only one inner hardware error:
 * if the small physical interrupt queue overflows and some events get chewed up */
int inner_hardware_error;
U64 awt;
} FunMachineIMR_AWT;
// 80K TPS
const U64 tick_time = 12500;
void FunMachine_boot(FunMachineState* self, SpanU16 image) {
assert(image.len <= UINT16_MAX);
self->powered = true;
self->AX = self->BX = self->CX = self->DX = self->EX = self->FX = self->IP = self->flags = 0;
self->lru = 0;
self->last_command_line_mask = 0;
memcpy(self->memory.buf, image.data, image.len * sizeof(U16));
memset(self->memory.buf + image.len, 0, (UINT16_MAX - image.len) * sizeof(U16));
for (int i = 0; i < FunMachine_cache_banks_count; i++)
for (int j = 0; j < FunMachine_cache_sets_count; j++)
self->cache_indexes[i][j] = i;
}
void FunMachine_sync_time_progress(FunMachineState* self, U64 tp) {
assert(self->timeout_remaining >= tp);
if (self->timeout_remaining < UINT64_MAX)
self->timeout_remaining -= tp;
}
FunMachineIMR_AWT FunMachine_execute_instruction(FunMachineState* self) {
// todo: actually write this function
return (FunMachineIMR_AWT){ .r.variant = FunMachineIMR_None, .awt = self->timeout_remaining };
}
FunMachineIMR_AWT FunMachine_runtime_reaction(FunMachineState* self) {
if (self->timeout_remaining)
return (FunMachineIMR_AWT){ .r.variant = FunMachineIMR_None, .awt = self->timeout_remaining };
return FunMachine_execute_instruction(self);
}
FunMachineIMR_AWT FunMachine_act(FunMachineState* self, FunMachineIM_TP imtp) {
FunMachineIM im = imtp.im;
FunMachine_sync_time_progress(self, imtp.tp);
if (im.variant == FunMachineIM_power_off && !self->powered)
return (FunMachineIMR_AWT){ .r.variant = FunMachineIMR_None, .awt = self->timeout_remaining };
// todo: actually write this function
return (FunMachineIMR_AWT){ .r.variant = FunMachineIMR_None, .awt = self->timeout_remaining };
}
#endif

View File

@ -1,357 +0,0 @@
/* This shit was written by chatgpt */
// todo: remove this crap. Rewrite it in wayland. Get rid of ncurses client
#include "../fun_machine/fun_machine.h"
#include <ncurses.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <signal.h>
#include <string.h>
#include <stdbool.h>
#include <term.h>
static void enter_altscreen(void)
{
const char *smcup = tigetstr("smcup");
if (smcup)
putp(smcup);
}
static void leave_altscreen(void)
{
const char *rmcup = tigetstr("rmcup");
if (rmcup)
putp(rmcup);
}
#define BOARD_SIZE 32
#define BOARD_OUTER_SIZE (BOARD_SIZE + 2)
#define TIMEOUT_USEC 100000 // Won't be used
#define LEVER_COUNT 16
#define KEYBOARD_ROW_SIZE 10
#define KEYBOARD_ROWS 5
const short keyboard_style[KEYBOARD_ROWS] = {2, 1, 0, 2, 1};
typedef struct {
int lamps[BOARD_SIZE][BOARD_SIZE];
U16 levers; // MSB is the left one, LSB is the right one. false is down, true is up
/* Numeration of key statuses in this array:
* 0 1 ... 9
* 10 11 ... 19
* 20 ... 29
* 30 ... 39
* 40 ... 49
*/
U8 keys[KEYBOARD_ROW_SIZE * KEYBOARD_ROWS]; // 0 - released, 1 - pressed
bool on; // false - machine is off, on - machine is on
} VisibleState;
VisibleState VisibleState_new() {
return (VisibleState){0};
}
/* Pos on screen is opposite to bit pos (we represent lever word in big endian) */
void VisibleState_set_lever(VisibleState* self, int pos, bool v) {
U16 mask = (1u << (LEVER_COUNT - 1 - pos));
self->levers = (self->levers & (~mask)) | ((short)v << LEVER_COUNT - 1 - pos);
}
bool VisibleState_get_lever(VisibleState* self, int pos) {
return self->levers & (1u << (LEVER_COUNT - 1 - pos));
}
const char key_marks[KEYBOARD_ROW_SIZE * KEYBOARD_ROWS] = {
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'?', 'q', 'w', 'e', 'r', 't', 'y', 'u', 'p', '"',
'$', 'f', 'g', 'h', 'j', 'k', 'l', 'i', 'o', '\'',
'%', 'a', 's', 'd', 'z', 'x', 'c', 'v', 'n', 'm',
'*', 'b', '!', '/', ' ', '(', ')', '=', '-', '+',
};
#define LOGO_WIDTH 5
#define LOGO_HEIGHT (7 + 2 + 7)
U8 logo_bitmap[LOGO_HEIGHT][LOGO_WIDTH] = {
{1, 1, 1, 1, 1},
{1, 0, 0, 0, 0},
{1, 0, 0, 0, 0},
{1, 1, 1, 1, 1},
{1, 0, 0, 0, 0},
{1, 0, 0, 0, 0},
{1, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
{1, 1, 0, 1, 1},
{1, 0, 1, 0, 1},
{1, 0, 1, 0, 1},
{1, 0, 1, 0, 1},
{1, 0, 0, 0, 1},
{1, 0, 0, 0, 1},
{1, 0, 0, 0, 1},
};
char engraving_char = '#';
// char engraving_char = 176; // can be 177 or 178
typedef enum {
pair_lampboard_border = 1,
pair_unlit_lamp,
pair_lit_lamp,
pair_lever,
pair_released_key_border,
pair_released_key_symbol,
pair_pressed_key_border,
pair_pressed_key_symbol,
pair_power_button_outer,
pair_power_button_inner,
pair_unlit_led,
pair_lit_led,
pair_engraving
} ncurses_color_pairs;
static void init_colors(void)
{
start_color();
use_default_colors();
init_pair(pair_lampboard_border, -1, 245);
init_pair(pair_unlit_lamp, 236, -1);
init_pair(pair_lit_lamp, COLOR_WHITE, COLOR_YELLOW);
init_pair(pair_lever, COLOR_WHITE, -1);
init_pair(pair_released_key_border, 233, 239);
init_pair(pair_released_key_symbol, 7, 0);
init_pair(pair_pressed_key_border, 237, 245);
init_pair(pair_pressed_key_symbol, 203, 88);
init_pair(pair_power_button_outer, 124, 9);
init_pair(pair_power_button_inner, COLOR_BLACK, 160);
init_pair(pair_unlit_led, 52, -1);
init_pair(pair_lit_led, COLOR_WHITE, 9);
init_pair(pair_engraving, 136, -1);
}
/*--------------------------------------------------------------*/
// Won't be used
static void seed_dots(VisibleState* state)
{
for (int y = 0; y < BOARD_SIZE; ++y)
for (int x = 0; x < BOARD_SIZE; ++x)
state->lamps[y][x] = rand() & 1;
}
static void toggle_dot(VisibleState* state, int y, int x)
{
if (y >= 0 && y < BOARD_SIZE && x >= 0 && x < BOARD_SIZE)
state->lamps[y][x] ^= 1;
}
/*--------------------------------------------------------------*
* Drawing
*--------------------------------------------------------------*/
static void draw_board(VisibleState* state, int X, int Y)
{
attron(COLOR_PAIR(pair_lampboard_border));
for (int i = 0; i < BOARD_OUTER_SIZE; i++) {
mvaddch(Y, X + i, ' ');
mvaddch(Y + BOARD_OUTER_SIZE - 1, X + i, ' ');
mvaddch(Y + i, X, ' ');
mvaddch(Y + i, X + BOARD_OUTER_SIZE - 1, ' ');
}
for (int y = 0; y < BOARD_SIZE; ++y)
for (int x = 0; x < BOARD_SIZE; ++x) {
int pair = state->lamps[y][x] ? pair_lit_lamp : pair_unlit_lamp;
attron(COLOR_PAIR(pair));
mvaddch(Y + y + 1, X + x + 1, '.');
attroff(COLOR_PAIR(pair));
}
refresh();
}
static void draw_power_button(int X, int Y) {
attron(COLOR_PAIR(pair_power_button_outer));
mvaddch(Y + 0, X + 1, '-');
mvaddch(Y + 0, X + 2, '-');
mvaddch(Y + 0, X + 3, '-');
mvaddch(Y + 1, X + 3, ' ');
mvaddch(Y + 1, X + 4, '|');
mvaddch(Y + 2, X + 4, '|');
mvaddch(Y + 3, X + 4, '|');
mvaddch(Y + 3, X + 3, ' ');
mvaddch(Y + 4, X + 3, '-');
mvaddch(Y + 4, X + 2, '-');
mvaddch(Y + 4, X + 1, '-');
mvaddch(Y + 4, X + 1, '-');
mvaddch(Y + 3, X + 1, ' ');
mvaddch(Y + 3, X + 0, '|');
mvaddch(Y + 2, X + 0, '|');
mvaddch(Y + 1, X + 0, '|');
mvaddch(Y + 1, X + 1, ' ');
attron(COLOR_PAIR(pair_power_button_inner));
mvaddch(Y + 1, X + 2, ' ');
mvaddch(Y + 2, X + 1, ' ');
mvaddch(Y + 3, X + 2, ' ');
mvaddch(Y + 2, X + 3, ' ');
mvaddch(Y + 2, X + 2, 'P');
}
void draw_power_led(VisibleState* state, int X, int Y) {
attron(COLOR_PAIR(state->on ? pair_lit_led : pair_unlit_led));
mvaddch(Y, X, state->on ? '.' : 'O');
}
void draw_levers(VisibleState* state, int X, int Y) {
attron(COLOR_PAIR(pair_lever));
for (int i = 0; i < LEVER_COUNT; i++) {
mvaddch(Y + 1, X + 2 * i, 'O');
mvaddch(Y + (VisibleState_get_lever(state, i) ? 0 : 2), X + 2 * i, '|');
}
}
void draw_keyboard(VisibleState* state, int X, int Y) {
for (int row = 0; row < KEYBOARD_ROWS; row++) {
for (int s = 0; s < KEYBOARD_ROW_SIZE; s++) {
int x = X + keyboard_style[row] + 4 * s;
int y = Y + row * 2;
bool pressed = state->keys[KEYBOARD_ROW_SIZE * row + s];
attron(COLOR_PAIR(pressed ? pair_pressed_key_border : pair_released_key_border));
mvaddch(y, x, '[');
mvaddch(y, x + 2, ']');
attron(COLOR_PAIR(pressed ? pair_pressed_key_symbol : pair_released_key_symbol));
mvaddch(y, x + 1, key_marks[KEYBOARD_ROW_SIZE * row + s]);
}
}
}
void draw_logo_engraving(int X, int Y) {
attron(COLOR_PAIR(pair_engraving));
for (int y = 0; y < LOGO_HEIGHT; y++) {
for (int x = 0; x < LOGO_WIDTH; x++) {
if (logo_bitmap[y][x])
mvaddch(Y + y, X + x, engraving_char);
}
}
}
#define LAMPBOARD_X 16
#define LAMPBOARD_Y 0
#define POWER_BUTTON_X 0
#define POWER_BUTTON_Y (BOARD_OUTER_SIZE + 1)
#define POWER_LED_X (5 + 2)
#define POWER_LED_Y 32
#define LEVER_ROW_X (5 + 12)
#define LEVER_ROW_Y (BOARD_OUTER_SIZE + 1)
#define KEYBOARD_X 10
#define KEYBOARD_Y 40
#define LOGO_X 3
#define LOGO_Y 2
static void draw_fun_machine(VisibleState* state) {
erase();
draw_board(state, LAMPBOARD_X, LAMPBOARD_Y);
draw_power_button(POWER_BUTTON_X, POWER_BUTTON_Y);
draw_power_led(state, POWER_LED_X, POWER_LED_Y);
draw_levers(state, LEVER_ROW_X, LEVER_ROW_Y);
draw_keyboard(state, KEYBOARD_X, KEYBOARD_Y);
draw_logo_engraving(LOGO_X, LOGO_Y);
}
int lever_intersection(int evx, int evy) {
for (int i = 0; i < LEVER_COUNT; i++) {
int lcx = LEVER_ROW_X + 2 * i;
int lcy = LEVER_ROW_Y + 1;
if (evx == lcx && abs(lcy - evy) <= 1)
return i;
}
return -1;
}
int key_intersection(int evx, int evy) {
for (int row = 0; row < KEYBOARD_ROWS; row++) {
for (int s = 0; s < KEYBOARD_ROW_SIZE; s++) {
int kcx = KEYBOARD_X + keyboard_style[row] + 4 * s + 1;
int kcy = KEYBOARD_Y + 2 * row;
if (abs(kcx - evx) <= 1 && kcy == evy)
return row * KEYBOARD_ROW_SIZE + s;
}
}
return -1;
}
/*--------------------------------------------------------------*
* Main
*--------------------------------------------------------------*/
static volatile sig_atomic_t keep_running = 1;
static void sigint_handler(int sig) { (void)sig; keep_running = 0; }
int main(void)
{
srand((unsigned)time(NULL));
VisibleState mach_disp = VisibleState_new();
seed_dots(&mach_disp);
setupterm(NULL, fileno(stdout), NULL);
enter_altscreen();
atexit(leave_altscreen);
initscr();
cbreak();
noecho();
keypad(stdscr, TRUE);
mousemask(ALL_MOUSE_EVENTS, NULL);
curs_set(0);
init_colors();
signal(SIGINT, sigint_handler);
timeout(TIMEOUT_USEC / 1000);
while (keep_running) {
draw_fun_machine(&mach_disp);
int ch = getch();
if (ch == ERR)
continue; /* timeout */
if (ch == 'q' || ch == 'Q')
break; /* quit */
if (ch == KEY_MOUSE) {
MEVENT ev;
if (getmouse(&ev) == OK) {
if (ev.bstate & BUTTON1_CLICKED) {
toggle_dot(&mach_disp, ev.y - LAMPBOARD_Y - 1, ev.x - LAMPBOARD_X - 1);
int li = lever_intersection(ev.x, ev.y);
if (li >= 0)
VisibleState_set_lever(&mach_disp, li, !VisibleState_get_lever(&mach_disp, li));
int ki = key_intersection(ev.x, ev.y);
if (ki >= 0) {
// do something
}
} else if (ev.bstate & BUTTON3_CLICKED) {
int ki = key_intersection(ev.x, ev.y);
if (ki >= 0) {
if (mach_disp.keys[ki]) {
// Releasing
mach_disp.keys[ki] = 0;
} else {
// Pressing
mach_disp.keys[ki] = 1;
}
}
}
}
} else {
seed_dots(&mach_disp); /* any other key scramble */
}
}
endwin();
return 0;
}

View File

@ -0,0 +1,16 @@
#version 460
#extension GL_EXT_nonuniform_qualifier : require
layout (location=0) in vec4 color;
layout (location=1) in vec2 tex_cord;
layout (location=2) flat in uint tex_ind;
layout (location=0) out vec4 fin_color;
layout (binding=0) uniform sampler2D images[];
void main(){
float I = texture(images[nonuniformEXT(tex_ind)], tex_cord).r;
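// I is the glyph coverage sampled from the red channel of the atlas image selected per glyph via
// nonuniformEXT descriptor indexing; below it scales the alpha of the requested text color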
fin_color = vec4(color.rgb, color.a * I);
}

View File

@ -0,0 +1,30 @@
#version 450
layout(location = 0) in vec4 color;
layout(location = 1) in vec2 pos;
layout(location = 2) in vec2 tex_cord;
layout(location = 3) in uint tex_ind;
layout(push_constant, std430) uniform pc {
float width;
float height;
};
layout (location=0) flat out vec4 vsout_color;
layout (location=1) out vec2 vsout_tex_cord;
layout (location=2) flat out uint vsout_tex_ind;
float lint(float A1, float B1, float A2, float B2, float x){
return A2 + (B2 - A2) * (x - A1) / (B1 - A1);
}
float deng(float B1, float x){
return lint(0, B1, -1, 1, x);
}
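// lint() linearly remaps x from [A1, B1] to [A2, B2]; deng() uses it to map a pixel coordinate in
// [0, width] or [0, height] to normalized device coordinates in [-1, 1]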
void main(){
vsout_color = color;
vsout_tex_cord = tex_cord;
vsout_tex_ind = tex_ind;
gl_Position = vec4(deng(width, pos.x), deng(height, pos.y), 0, 1);
}