Compare commits
8 commits: f80dc0ded0 ... 516fab6ff6

516fab6ff6
d4f9ed214f
0f59830bdf
5615594762
8e3a306459
aadc346f43
6f418827dc
202e11ab56
@ -27,12 +27,14 @@ add_compile_definitions(_POSIX_C_SOURCE=200112L)
|
||||
add_compile_definitions(_GNU_SOURCE)
|
||||
add_compile_options(-fno-trapping-math)
|
||||
|
||||
#add_executable(codegen_l1 src/l1/anne/codegen.c)
|
||||
#target_compile_definitions(codegen_l1
|
||||
# PRIVATE PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8)
|
||||
add_executable(codegen_l1 src/l1/anne/codegen.c)
|
||||
target_compile_definitions(codegen_l1
|
||||
PRIVATE PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8)
|
||||
|
||||
#add_executable(0_test src/l1_4/tests/t0.c)
|
||||
#add_executable(1_test src/l1_4/tests/t1.c)
|
||||
add_executable(3_test src/l1_4/tests/t3.c)
|
||||
target_link_libraries(3_test -lm)
|
||||
#
|
||||
#add_executable(l1_4_t2 src/l1_4/tests/t2.c)
|
||||
|
||||
@ -41,8 +43,8 @@ add_executable(codegen_l1_5 src/l1_5/anne/codegen.c)
|
||||
add_executable(0_render_test src/l2/tests/r0/r0.c gen/l_wl_protocols/xdg-shell-private.c)
|
||||
target_link_libraries(0_render_test -lvulkan -lwayland-client -lm -lxkbcommon -lpng -lfreetype)
|
||||
|
||||
#add_executable(0r_tex_init_prep src/l2/tests/r0/r0_tex_init_prep.c)
|
||||
#target_link_libraries(0r_tex_init_prep -lm -lpng)
|
||||
add_executable(0r_tex_init_prep src/l2/tests/r0/r0_tex_init_prep.c)
|
||||
target_link_libraries(0r_tex_init_prep -lm -lpng)
|
||||
|
||||
#add_executable(1_render_test src/l2/tests/r1/r1.c gen/l_wl_protocols/xdg-shell-private.c)
|
||||
#target_link_libraries(1_render_test -lwayland-client -lrt -lm -lxkbcommon)
|
||||
@ -56,7 +58,7 @@ target_link_libraries(0_render_test -lvulkan -lwayland-client -lm -lxkbcommon -l
|
||||
#add_executable(l2t0_2 src/l2/tests/data_structures/t0_2.c) // todo: I will get back to this
|
||||
add_executable(l2t0 src/l2/tests/data_structures/t0.c)
|
||||
add_executable(l2t0_3 src/l2/tests/data_structures/t0_3.c)
|
||||
add_executable(l2t2 src/l2/tests/data_structures/t2.c)
|
||||
#add_executable(l2t2 src/l2/tests/data_structures/t2.c)
|
||||
|
||||
#add_executable(l2t0 src/l2/tests/data_structures/t0.c)
|
||||
#add_executable(l2t1 src/l2/tests/data_structures/t1.c)
|
||||
|
||||
@ -337,6 +337,81 @@ NODISCARD VecU8 generate_square_xmatn_methods(SpanU8 xmat, SpanU8 xvec, SpanU8 m
|
||||
return res;
|
||||
}
|
||||
|
||||
NODISCARD VecU8 generate_xmat_inverse_methods(SpanU8 xmat, SpanU8 xvec, SpanU8 memb){
|
||||
VecU8 res = VecU8_fmt("%s4 %s4_inverse(%s4 A) {\n", xmat, xmat, xmat);
|
||||
VecU8_append_vec(&res, VecU8_fmt(SPACE "%s m2[6][6] = {\n", memb));
|
||||
SpanU8 first_of_pair[6] = {cstr("x"), cstr("x"), cstr("x"), cstr("y"), cstr("y"), cstr("z")};
|
||||
SpanU8 second_of_pair[6] = {cstr("y"), cstr("z"), cstr("w"), cstr("z"), cstr("w"), cstr("w")};
|
||||
for (int w_col = 0; w_col < 6; w_col++) {
|
||||
VecU8_append_span(&res, cstr(SPACE SPACE "{ "));
|
||||
for (int w_row = 0; w_row < 6; w_row++) {
|
||||
if (w_row)
|
||||
VecU8_append_span(&res, cstr(", "));
|
||||
/* first first = A second first = B
|
||||
* first second = C second second = D
|
||||
* A * D - B * C */
|
||||
VecU8_append_vec(&res, VecU8_fmt("A.%s.%s * A.%s.%s - A.%s.%s * A.%s.%s",
|
||||
first_of_pair[w_col], first_of_pair[w_row], second_of_pair[w_col], second_of_pair[w_row],
|
||||
second_of_pair[w_col], first_of_pair[w_row], first_of_pair[w_col], second_of_pair[w_row]
|
||||
));
|
||||
}
|
||||
VecU8_append_span(&res, cstr(" },\n"));
|
||||
}
|
||||
VecU8_append_span(&res, cstr(SPACE "};\n"));
|
||||
|
||||
U64 a0_contr[4] = {5, 5, 4, 3};
|
||||
U64 a1_contr[4] = {4, 2, 2, 1};
|
||||
U64 a2_contr[4] = {3, 1, 0, 0};
|
||||
SpanU8 a0[4] = {cstr("y"), cstr("x"), cstr("x"), cstr("x")};
|
||||
SpanU8 a1[4] = {cstr("z"), cstr("z"), cstr("y"), cstr("y")};
|
||||
SpanU8 a2[4] = {cstr("w"), cstr("w"), cstr("w"), cstr("z")};
|
||||
VecU8_append_vec(&res, VecU8_fmt(SPACE "%s m3[4][4] = {\n", memb));
|
||||
for (int no_col = 0; no_col < 4; no_col++) {
|
||||
SpanU8 walking_column = a0[no_col];
|
||||
U64 minor_col_pair = a0_contr[no_col];
|
||||
VecU8_append_span(&res, cstr(SPACE SPACE "{ "));
|
||||
for (int no_row = 0; no_row < 4; no_row++) {
|
||||
if (no_row)
|
||||
VecU8_append_span(&res, cstr(", \n" SPACE SPACE));
|
||||
VecU8_append_vec(&res, VecU8_fmt(
|
||||
"A.%s.%s * m2[%u][%u] - A.%s.%s * m2[%u][%u] + A.%s.%s * m2[%u][%u]",
|
||||
walking_column, a0[no_row], minor_col_pair, a0_contr[no_row],
|
||||
walking_column, a1[no_row], minor_col_pair, a1_contr[no_row],
|
||||
walking_column, a2[no_row], minor_col_pair, a2_contr[no_row]));
|
||||
}
|
||||
VecU8_append_span(&res, cstr(" },\n"));
|
||||
}
|
||||
VecU8_append_span(&res, cstr(SPACE "};\n"));
|
||||
VecU8_append_vec(&res, VecU8_fmt(
|
||||
SPACE "%s d = 1 / (A.x.x * m3[0][0] - A.x.y * m3[0][1] + A.x.z * m3[0][2] - A.x.w * m3[0][3]);\n"
|
||||
SPACE "return (mat4){ "
|
||||
, memb));
|
||||
for (U64 i = 0; i < 4; i++) {
|
||||
if (i)
|
||||
VecU8_append_span(&res, cstr(",\n" SPACE SPACE ));
|
||||
VecU8_append_vec(&res, VecU8_fmt(".%s={ ", vec_field_name((int)i)));
|
||||
for (U64 j = 0; j < 4; j++) {
|
||||
if (j)
|
||||
VecU8_append_span(&res, cstr(", "));
|
||||
VecU8_append_vec(&res, VecU8_fmt("%sm3[%u][%u] * d",
|
||||
(i + j) % 2 ? cstr("-") : cstr(""), j, i));
|
||||
}
|
||||
VecU8_append_span(&res, cstr(" }"));
|
||||
}
|
||||
VecU8_append_span(&res, cstr(" };\n}\n\n"));
|
||||
|
||||
VecU8_append_vec(&res, VecU8_fmt(
|
||||
"%s2 %s2_inverse(%s2 A) {\n" /* xmat, xmat, xmat */
|
||||
SPACE "%s d = 1 / (A.x.x * A.y.y - A.y.x * A.x.y);\n" /* memb */
|
||||
SPACE "return (%s2){ .x = { A.y.y * d, -A.x.y * d}, .y = {-A.y.x * d, A.x.x * d}};\n" /* xmat */
|
||||
"}\n\n", xmat, xmat, xmat, memb, xmat));
|
||||
|
||||
// VecU8_append_vec(&res, VecU8_fmt( "%s3 %s3_inverse(%s3 A) {\n", xmat, xmat, xmat));
|
||||
// VecU8_append_vec(&res, VecU8_fmt(SPACE "%s d = 1 / ("));
|
||||
// VecU8_append_span(&res, cstr("}\n"));
|
||||
return res;
|
||||
}
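For reference, the 2x2 branch above (the VecU8_fmt call near the end of this function) emits code of the following shape. This is a minimal standalone sketch assuming xmat = "mat" and memb = "float"; the vec2/mat2 structs here are stand-ins for the generated geom.h types, not the project's exact definitions.

#include <assert.h>
#include <math.h>

typedef struct { float x, y; } vec2;
typedef struct { vec2 x, y; } mat2; /* column-major: .x and .y are columns */

/* Shape of the emitted 2x2 inverse: adjugate divided by the determinant. */
mat2 mat2_inverse(mat2 A) {
    float d = 1 / (A.x.x * A.y.y - A.y.x * A.x.y);
    return (mat2){ .x = { A.y.y * d, -A.x.y * d}, .y = {-A.y.x * d, A.x.x * d}};
}

int main(void) {
    mat2 A = {.x = {4, 2}, .y = {7, 6}};
    mat2 iA = mat2_inverse(A);
    /* A * A^-1 should be the identity; spot-check one diagonal and one off-diagonal entry. */
    assert(fabsf(A.x.x * iA.x.x + A.y.x * iA.x.y - 1.0f) < 1e-5f);
    assert(fabsf(A.x.x * iA.y.x + A.y.x * iA.y.y) < 1e-5f);
    return 0;
}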
|
||||
|
||||
NODISCARD VecU8 generate_xmatnm_method_mul_xmatkn(SpanU8 xmat, int n, int m, int k) {
|
||||
VecU8 g_xmatkm = codegen_name_xmatnm(xmat, k, m);
|
||||
VecU8 g_xmatnm = codegen_name_xmatnm(xmat, n, m);
|
||||
@ -414,6 +489,7 @@ NODISCARD VecU8 generate_xmat234x234_structs_methods(SpanU8 xmat, SpanU8 xvec, S
|
||||
}
|
||||
}
|
||||
}
|
||||
VecU8_append_vec(&res, generate_xmat_inverse_methods(xmat, xvec, memb));
|
||||
return res;
|
||||
}
|
||||
|
||||
@ -428,7 +504,7 @@ void generate_geom_header() {
|
||||
VecU8_append_vec(&res.result, generate_xvec234_structs_and_cool_methods(cstr("vec"), cstr("float"), cstr("sqrtf")));
|
||||
VecU8_append_vec(&res.result, generate_xvec234_structs_and_cool_methods(cstr("dvec"), cstr("double"), cstr("sqrt")));
|
||||
VecU8_append_vec(&res.result, generate_xmat234x234_structs_methods(cstr("mat"), cstr("vec"), cstr("float"), sizeof(float)));
|
||||
VecU8_append_vec(&res.result, generate_xmat234x234_structs_methods(cstr("dmat"), cstr("dvec"), cstr("double"), sizeof(double)));
|
||||
/* VecU8_append_vec(&res.result, generate_xmat234x234_structs_methods(cstr("dmat"), cstr("dvec"), cstr("double"), sizeof(double))); */
|
||||
finish_header(res);
|
||||
}
|
||||
|
||||
|
||||
@ -13,25 +13,20 @@ void generate_margaret_eve_for_vulkan_utils() {
|
||||
.mut_span = true, .collab_vec_span = true, .span_sort = true
|
||||
});
|
||||
|
||||
/* For l2/margaret/vulkan_memory_claire.h */
|
||||
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){.T = cstr("MargaretMemAllocatorOneBlock")}, false);
|
||||
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMemAllocatorRequestFreeOccupant"), true, false);
|
||||
generate_util_templ_inst_eve_header(l, ns, (util_templates_instantiation_options){
|
||||
.T = cstr("MargaretMemAllocatorRequestResizeBuffer"), .t_primitive = true,
|
||||
.vec_extended = true /* We need unordered_pop to do some tomfoolery */});
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMemAllocatorRequestAllocBuffer"), true, false);
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMemAllocatorRequestAllocImage"), true, false);
|
||||
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretFreeMemSegment"), true, false);
|
||||
/* For l2/margaret/{ vulkan_img_claire.h , vulkan_buffer_claire.h } */
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretIAFreeSegment"), true, false);
|
||||
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
|
||||
.T = cstr("MargaretFreeMemSegment"), .t_primitive = true});
|
||||
.T = cstr("MargaretIAFreeSegment"), .t_primitive = true});
|
||||
// todo: add to BufRBTree instantiator option to create necessary shit by itself
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretBAFreeSegment"), true, false);
|
||||
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
|
||||
.T = cstr("MargaretBAFreeSegment"), .t_primitive = true});
|
||||
|
||||
generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
|
||||
.T = cstr("BufRBTreeByLenRespAlign_SetMargaretFreeMemSegment")});
|
||||
.T = cstr("BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment")});
|
||||
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMABufferExpansionRecord"), true, false);
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMANewMovedBufRecord"), true, false);
|
||||
generate_eve_span_company_for_non_primitive_non_clonable(l, ns, cstr("MargaretImgAllocatorOneBlock"), true, false);
|
||||
generate_List_templ_inst_eve_header(l, ns, (list_instantiation_op){.T = cstr("MargaretBufAllocatorOneBlock")}, true);
|
||||
}
|
||||
|
||||
|
||||
|
||||
@ -100,7 +100,7 @@ NODISCARD VecU8 generate_texture_data_struct_and_necc_methods(SpanU8 tex, SpanU8
|
||||
"}\n\n", resoftex, tex, resoftex, resoftex, memb, resoftex, tex, tex, resoftex));
|
||||
/* Method _read_from_file */
|
||||
VecU8_append_vec(&res, VecU8_fmt(
|
||||
"%s %s_read_from_file(const char* path) {\n"
|
||||
"%s %s_read_from_file(SpanU8 path) {\n"
|
||||
SPACE "VecU8 data = read_whole_file_or_abort(path);\n"
|
||||
SPACE "%s res = %s_from_bitmap_text(VecU8_to_span(&data));\n"
|
||||
SPACE "if (res.variant != Result_Ok) {\n"
|
||||
|
||||
@ -8,22 +8,19 @@ void generate_headers_for_r0_r1_r2_r3() {
|
||||
mkdir_nofail("l1/eve/r0");
|
||||
{ /* Needed in r0_assets.h */
|
||||
SpanU8 ns = cstr("r0");
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("GenericMeshVertex"), true, true);
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("GenericMeshVertexInc"), true, true);
|
||||
generate_eve_span_company_for_non_primitive_clonable(l, ns, cstr("GenericMeshInSceneTemplate"), true, false);
|
||||
// generate_eve_span_company_for_primitive(l, ns, cstr("GenericMeshInstance"), true, false);
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyMeshVertex"), true, true);
|
||||
// generate_eve_span_company_for_primitive(l, ns, cstr("ShinyMeshInstance"), true, false);
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyMeshVertexInc"), true, true);
|
||||
generate_eve_span_company_for_non_primitive_clonable(l, ns, cstr("ShinyMeshTopology"), true, false);
|
||||
// generate_eve_span_company_for_primitive(l, ns, cstr("Pipeline0Spotlight"), true, false);
|
||||
// generate_eve_span_company_for_primitive(l, ns, cstr("Pipeline0PointLight"), true, false);
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("Wimbzle"), true, false);
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("Nibzle"), true, false);
|
||||
/* r0_scene.h */
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("GenericModelOnSceneMem"), true, false);
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("ShinyModelOnSceneMem"), true, false);
|
||||
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("ObjectInfo"), true, false);
|
||||
/* r0 */
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("GenericModelTexVulkPointers"), true, false);
|
||||
generate_eve_span_company_for_primitive(l, ns, cstr("CommandForImageCopying"), true, true);
|
||||
}
|
||||
mkdir_nofail("l1/eve/r2");
|
||||
{ /* r2 */
|
||||
|
||||
@ -48,24 +48,25 @@ void generate_util_temp_very_base_headers() {
|
||||
VecU8_drop(SpanT);
|
||||
VecU8_drop(dependency);
|
||||
}
|
||||
generate_guarded_span_company_for_primitive(cstr("l1"), cstr(""),
|
||||
cstr("CSTR"), cstr(""), true, false);
|
||||
generate_guarded_span_company_for_primitive(l, ns, cstr("CSTR"), cstr(""), true, false);
|
||||
|
||||
generate_ResultType_templ_inst_guarded_header(cstr("l1"), cstr(""),
|
||||
generate_ResultType_templ_inst_guarded_header(l, ns,
|
||||
cstr(""), cstr("VecU8"), cstr("#include \"VecAndSpan_U8.h\""), true, false);
|
||||
generate_ResultType_templ_inst_guarded_header(cstr("l1"), cstr(""),
|
||||
generate_ResultType_templ_inst_guarded_header(l, ns,
|
||||
cstr(""), cstr("SpanU8"), cstr("#include \"VecAndSpan_U8.h\""), true, true);
|
||||
|
||||
generate_guarded_span_company_for_primitive(cstr("l1"), cstr(""), cstr("U32Segment"),
|
||||
generate_guarded_span_company_for_primitive(l, ns, cstr("U32Segment"),
|
||||
cstr("#include \"../../src/l1/core/uint_segments.h\""), true, true);
|
||||
|
||||
/* Not very basic but definitely very common */
|
||||
generate_guarded_span_company_for_non_primitive_clonable(cstr("l1"), cstr(""), cstr("TextureDataR8G8B8A8"),
|
||||
generate_guarded_span_company_for_non_primitive_clonable(l, ns, cstr("TextureDataR8G8B8A8"),
|
||||
cstr("#include \"../../gen/l1/pixel_masses.h\"\n"), true, false);
|
||||
generate_guarded_span_company_for_non_primitive_clonable(cstr("l1"), cstr(""), cstr("TextureDataR8G8B8"),
|
||||
generate_guarded_span_company_for_non_primitive_clonable(l, ns, cstr("TextureDataR8G8B8"),
|
||||
cstr("#include \"../../gen/l1/pixel_masses.h\"\n"), true, false);
|
||||
generate_guarded_span_company_for_non_primitive_clonable(cstr("l1"), cstr(""), cstr("TextureDataR8"),
|
||||
generate_guarded_span_company_for_non_primitive_clonable(l, ns, cstr("TextureDataR8"),
|
||||
cstr("#include \"../../gen/l1/pixel_masses.h\"\n"), true, false);
|
||||
|
||||
generate_guarded_span_company_for_primitive(l, ns, cstr("KVPU64ToU64"), cstr(""), true, false);
|
||||
}
|
||||
|
||||
#endif
|
||||
@ -91,7 +91,8 @@ NODISCARD VecU8 generate_List_template_instantiation(list_instantiation_op op, b
|
||||
}
|
||||
|
||||
|
||||
void generate_List_templ_inst_eve_header(SpanU8 layer, SpanU8 bonus_ns, list_instantiation_op op, bool gen_node_declaration) {
|
||||
void generate_List_templ_inst_eve_header(
|
||||
SpanU8 layer, SpanU8 bonus_ns, list_instantiation_op op, bool gen_node_declaration) {
|
||||
generate_SOME_templ_inst_eve_header(layer, bonus_ns,
|
||||
generate_List_template_instantiation(op, gen_node_declaration), VecU8_fmt("List%s", op.T));
|
||||
}
|
||||
|
||||
@ -135,7 +135,12 @@ void U64_stringification_into_buf(U64 x, VecU8* targ){
|
||||
}
|
||||
}
|
||||
|
||||
// todo: add %d (when I figure out how to do it)
|
||||
/* %s - SpanU8
|
||||
* %v - VecU8
|
||||
* %u - U64
|
||||
* %c - int (one byte character)
|
||||
* %i - S64
|
||||
*/
|
||||
NODISCARD VecU8 VecU8_fmt(const char* fmt, ...) {
|
||||
assert(fmt);
|
||||
size_t k = 0;
|
||||
|
||||
@ -87,6 +87,11 @@ typedef struct {
|
||||
U32 height;
|
||||
} SizeOfRectangleU32;
|
||||
|
||||
typedef struct{
|
||||
U64 key;
|
||||
U64 value;
|
||||
} KVPU64ToU64;
|
||||
|
||||
#define check(expr) if (!(expr)) { abortf("Assertion failed at %s : %d : " #expr "\n", __FILE__, __LINE__); }
|
||||
|
||||
#endif
|
||||
|
||||
@ -28,11 +28,12 @@ typedef struct {
|
||||
|
||||
void Result_ok_or_int_drop(Result_ok_or_int obj) {}
|
||||
|
||||
NODISCARD VecU8 read_whole_file_or_abort(const char* filename) {
|
||||
FILE* fp = fopen(filename, "rb");
|
||||
if (!fp) {
|
||||
abortf("Can't open file %s: %s\n", filename, strerror(errno));
|
||||
}
|
||||
NODISCARD VecU8 read_whole_file_or_abort(SpanU8 path) {
|
||||
VecU8 filename = VecU8_fmt("%s%c", path, 0);
|
||||
FILE* fp = fopen((const char*)filename.buf, "rb");
|
||||
if (!fp)
|
||||
abortf("Can't open file %s: %s\n", (const char*)filename.buf, strerror(errno));
|
||||
VecU8_drop(filename);
|
||||
if (fseek(fp, 0, SEEK_END) != 0) {
|
||||
abortf("fseek: %s\n", strerror(errno));
|
||||
}
|
||||
@ -52,6 +53,12 @@ NODISCARD VecU8 read_whole_file_or_abort(const char* filename) {
|
||||
return result;
|
||||
}
|
||||
|
||||
NODISCARD VecU8 read_file_by_path(VecU8 path){
|
||||
VecU8 content = read_whole_file_or_abort(VecU8_to_span(&path));
|
||||
VecU8_drop(path);
|
||||
return content;
|
||||
}
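Because the path Vec is consumed, a formatted path can be built inline at the call site. A usage fragment, mirroring the call in LucyGlyphRenderer_new further down this diff (root_dir is assumed to be a SpanU8 holding the project root):

/* root_dir: SpanU8 with the project root; the formatted VecU8 is dropped inside read_file_by_path. */
VecU8 vert_spv = read_file_by_path(VecU8_fmt("%s/gen/l_adele/lucy/vert.spv", root_dir));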
|
||||
|
||||
void write_whole_file_or_abort(const char* filename, SpanU8 content) {
|
||||
FILE* fd = fopen(filename, "wb");
|
||||
if (!fd) {
|
||||
|
||||
src/l1_4/tests/t3.c (new file, 75 lines added)
@ -0,0 +1,75 @@
|
||||
#include "../../../gen/l1/geom.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
|
||||
float random_float(float a, float b){
|
||||
int r = rand();
|
||||
return a + (b - a) * ((float)r / (float)RAND_MAX);
|
||||
}
|
||||
|
||||
float random_float100(){
|
||||
return random_float(-100, 100);
|
||||
}
|
||||
|
||||
mat4 random_big_matrix(){
|
||||
return mat4_new(random_float100(), random_float100(), random_float100(), random_float100(),
|
||||
random_float100(), random_float100(), random_float100(), random_float100(),
|
||||
random_float100(), random_float100(), random_float100(), random_float100(),
|
||||
random_float100(), random_float100(), random_float100(), random_float100());
|
||||
}
|
||||
|
||||
mat2 random_smol_matrix(){
|
||||
return mat2_new(random_float100(), random_float100(), random_float100(), random_float100());
|
||||
}
|
||||
|
||||
#define flPr "%02.05f"
|
||||
|
||||
void test_mat4(mat4 A){
|
||||
mat4 iA = mat4_inverse(A);
|
||||
mat4 product = mat4_mul_mat4(iA, A);
|
||||
printf(flPr " " flPr " " flPr " " flPr "\n"
|
||||
flPr " " flPr " " flPr " " flPr "\n"
|
||||
flPr " " flPr " " flPr " " flPr "\n"
|
||||
flPr " " flPr " " flPr " " flPr "\n",
|
||||
product.x.x, product.y.x, product.z.x, product.w.x,
|
||||
product.x.y, product.y.y, product.z.y, product.w.y,
|
||||
product.x.z, product.y.z, product.z.z, product.w.z,
|
||||
product.x.w, product.y.w, product.z.w, product.w.w);
|
||||
}
|
||||
|
||||
void test_mat2(mat2 A){
|
||||
mat2 iA = mat2_inverse(A);
|
||||
mat2 product = mat2_mul_mat2(iA, A);
|
||||
printf(flPr " " flPr "\n"
|
||||
flPr " " flPr "\n",
|
||||
product.x.x, product.y.x,
|
||||
product.x.y, product.y.y);
|
||||
}
|
||||
|
||||
void test(){
|
||||
mat2x3 A = (mat2x3){.x = {1, 2, 3}, .y = {4, 5, 6}};
|
||||
mat3x2 At = mat2x3_transpose(A);
|
||||
assert(At.x.x == 1);
|
||||
assert(At.x.y == 4);
|
||||
assert(At.y.x == 2);
|
||||
assert(At.y.y == 5);
|
||||
assert(At.z.x == 3);
|
||||
assert(At.z.y == 6);
|
||||
}
|
||||
|
||||
int main() {
|
||||
test();
|
||||
test_mat4(random_big_matrix());
|
||||
test_mat4(random_big_matrix());
|
||||
test_mat4(random_big_matrix());
|
||||
test_mat4(random_big_matrix());
|
||||
test_mat4(random_big_matrix());
|
||||
test_mat4(random_big_matrix());
|
||||
test_mat4(random_big_matrix());
|
||||
test_mat2(random_smol_matrix());
|
||||
test_mat2(random_smol_matrix());
|
||||
test_mat2(random_smol_matrix());
|
||||
test_mat2(random_smol_matrix());
|
||||
}
|
||||
@ -11,11 +11,15 @@ void generate_l1_5_template_instantiation_for_base_types(){
|
||||
generate_buf_rbtree_Set_templ_inst_guarded_header(l, ns, cstr("#include \"../l1/VecAndSpan_S64.h\""),
|
||||
(set_instantiation_op){.T = cstr("S64"), .t_integer = true});
|
||||
|
||||
// l1/core/int_primitives is included in l1_5/core/rb_tree_node.h, hence no additional dependencies needed
|
||||
/* l1/core/utils.h is included in l1_5/core/rb_tree_node.h, hence no additional dependencies needed */
|
||||
generate_rbtree_Set_templ_inst_guarded_header(l, ns, cstr(""), (set_instantiation_op){
|
||||
.T = cstr("U64"), .t_integer = true }, true);
|
||||
generate_rbtree_Set_templ_inst_guarded_header(l, ns, cstr(""), (set_instantiation_op){
|
||||
.T = cstr("S64"), .t_integer = true }, true);
|
||||
|
||||
// todo: move vector declaration HERE
|
||||
generate_buf_rbtree_Map_templ_inst_guarded_header(l, ns, cstr("#include \"../../gen/l1/VecKVPU64ToU64.h\"\n"),
|
||||
(map_instantiation_op){.K = cstr("U64"), .k_integer = true, .V = cstr("U64"), .v_integer = true,});
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
@ -9,17 +9,20 @@ void generate_l1_5_template_instantiations_for_margaret(){
|
||||
mkdir_nofail("l1_5/eve");
|
||||
mkdir_nofail("l1_5/eve/margaret");
|
||||
|
||||
/* For MargaretMemAllocator */
|
||||
/* For l2/margaret/{ vulkan_img_claire.h , vulkan_buffer_claire.h } */
|
||||
generate_buf_rbtree_Set_templ_inst_eve_header(l, ns, (set_instantiation_op){
|
||||
.T = cstr("MargaretFreeMemSegment"), .t_primitive = true,
|
||||
.T = cstr("MargaretIAFreeSegment"), .t_primitive = true,
|
||||
/* comparison takes additional U8 parameter */
|
||||
.alternative_less = cstr("MargaretFreeMemSegment_less_resp_align"),
|
||||
.alternative_less = cstr("MargaretIAFreeSegment_less_resp_align"),
|
||||
.alternative_comp_set_name_embed = cstr("LenRespAlign"),
|
||||
.guest_data_T = cstr("U8"),
|
||||
});
|
||||
generate_rbtree_Map_templ_inst_eve_header(l, ns, (map_instantiation_op){
|
||||
.K = cstr("U64"), .k_integer = true, .V = cstr("MargaretMAOccupation"), .v_primitive = true,
|
||||
}, true /* We want RBTreeNode_KVPU64ToMargaretMemoryOccupation to be generated here for us */ );
|
||||
generate_buf_rbtree_Set_templ_inst_eve_header(l, ns, (set_instantiation_op){
|
||||
.T = cstr("MargaretBAFreeSegment"), .t_primitive = true,
|
||||
/* comparison takes additional U8 parameter */
|
||||
.alternative_less = cstr("MargaretBAFreeSegment_less_len"),
|
||||
.alternative_comp_set_name_embed = cstr("Len"),
|
||||
});
|
||||
}
|
||||
|
||||
#endif
|
||||
@ -471,7 +471,7 @@ void codegen_append_buff_rbtree_map__method_at_iter(VecU8* res, map_instantiatio
|
||||
op.k_integer ? VecU8_from_span(op.K) : VecU8_fmt("const %s*", op.K),
|
||||
mut ? VecU8_fmt("%s*", op.V) : (op.v_integer ? VecU8_from_span(op.V) : VecU8_fmt("const %s*", op.V)),
|
||||
|
||||
op.k_integer ? cstr("") : cstr("&"), op.v_integer ? cstr("") : cstr("&")));
|
||||
op.k_integer ? cstr("") : cstr("&"), (op.v_integer && !mut) ? cstr("") : cstr("&")));
|
||||
}
|
||||
|
||||
NODISCARD VecU8 get_name_of_buf_rbtree_map_structure(map_instantiation_op op){
|
||||
|
||||
@ -7,9 +7,10 @@
|
||||
#include "../../../gen/l1/VecAndSpan_U32Segment.h"
|
||||
|
||||
#include "../../l1_5/core/buff_rb_tree_node.h"
|
||||
#include "../../l1_5/core/rb_tree_node.h"
|
||||
|
||||
typedef struct {
|
||||
MargaretMAIterator img;
|
||||
MargaretImg img;
|
||||
U64 usage;
|
||||
U64 pos_in_desc_array;
|
||||
} LucyImage;
|
||||
@ -41,8 +42,6 @@ typedef struct{
|
||||
|
||||
typedef struct {
|
||||
MargaretEngineReference ve;
|
||||
VkCommandBuffer transfer_cmd_buffer;
|
||||
MargaretMAIterator staging_buffer;
|
||||
ListLucyImage images;
|
||||
VkDescriptorSetLayout descriptor_set_layout;
|
||||
VkDescriptorSet descriptor_set;
|
||||
@ -50,10 +49,9 @@ typedef struct {
|
||||
|
||||
|
||||
// todo: write
|
||||
LucyGlyphCache LucyGlyphCache_new(MargaretEngineReference ve, VkCommandBuffer transfer_cmd_buffer, VkDescriptorSetLayout descriptor_set_layout){
|
||||
LucyGlyphCache LucyGlyphCache_new(MargaretEngineReference ve, VkDescriptorSetLayout descriptor_set_layout){
|
||||
VkDescriptorSet descriptor_set = margaret_allocate_descriptor_set(ve.device, ve.descriptor_pool, descriptor_set_layout);
|
||||
MargaretMAIterator staging_buffer = MargaretMemAllocatorRequests_alloc_buf(ve.host_visible_mem_requests, 8192, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, false);
|
||||
return (LucyGlyphCache){.ve = ve, .transfer_cmd_buffer = transfer_cmd_buffer, .staging_buffer = staging_buffer,
|
||||
return (LucyGlyphCache){.ve = ve,
|
||||
.images = ListLucyImage_new(), .descriptor_set_layout = descriptor_set_layout, .descriptor_set = descriptor_set};
|
||||
}
|
||||
|
||||
|
||||
@ -2,10 +2,18 @@
|
||||
#define prototype1_src_l2_lucy_rendering_h
|
||||
|
||||
#include "glyph_cache.h"
|
||||
#include "../../../gen/l1/pixel_masses.h"
|
||||
#include "../../../gen/l1/geom.h"
|
||||
|
||||
typedef struct{
|
||||
vec4 color;
|
||||
vec2 pos;
|
||||
vec2 tex_cord;
|
||||
U32 tex_ind;
|
||||
} LucyVertex;
|
||||
|
||||
typedef struct{
|
||||
LucyGlyphCache cache;
|
||||
|
||||
VkPipelineLayout pipeline_layout;
|
||||
VkPipeline pipeline;
|
||||
} LucyGlyphRenderer;
|
||||
@ -13,7 +21,7 @@ typedef struct{
|
||||
#define LUCY_MAX_DESCRIPTOR_COUNT 10
|
||||
|
||||
LucyGlyphRenderer LucyGlyphRenderer_new(
|
||||
MargaretEngineReference engine_reference, VkCommandBuffer transfer_command_buffer,
|
||||
MargaretEngineReference engine_reference, SpanU8 root_dir,
|
||||
VkRenderPass render_pass, U32 renderpass_subpass){
|
||||
VkDescriptorSetLayout descriptor_set_layout;
|
||||
check(vkCreateDescriptorSetLayout(engine_reference.device, &(VkDescriptorSetLayoutCreateInfo){
|
||||
@ -28,8 +36,7 @@ LucyGlyphRenderer LucyGlyphRenderer_new(
|
||||
}, NULL, &descriptor_set_layout) == VK_SUCCESS);
|
||||
|
||||
|
||||
|
||||
LucyGlyphCache cache = LucyGlyphCache_new(engine_reference, transfer_command_buffer, descriptor_set_layout);
|
||||
LucyGlyphCache cache = LucyGlyphCache_new(engine_reference, descriptor_set_layout);
|
||||
|
||||
VkPipelineLayout pipeline_layout;
|
||||
check(vkCreatePipelineLayout(engine_reference.device, &(VkPipelineLayoutCreateInfo){
|
||||
@ -42,11 +49,29 @@ LucyGlyphRenderer LucyGlyphRenderer_new(
|
||||
}},
|
||||
}, NULL, &pipeline_layout) == VK_SUCCESS);
|
||||
|
||||
/* Configuring font pipeline */
|
||||
VkPipeline pipeline = margaret_create_triangle_pipeline_one_attachment(engine_reference.device,
|
||||
render_pass, renderpass_subpass, (MargaretMostImportantPipelineOptions){
|
||||
.pipeline_layout = pipeline_layout,
|
||||
.vertex_shader_code = read_file_by_path(VecU8_fmt("%s/gen/l_adele/lucy/vert.spv", root_dir)),
|
||||
.fragment_shader_code = read_file_by_path(VecU8_fmt("%s/gen/l_adele/lucy/frag.spv", root_dir)),
|
||||
.vertexBindingDescriptionCount = 1,
|
||||
.pVertexBindingDescriptions = (VkVertexInputBindingDescription[]){
|
||||
{ .binding = 0, .stride = sizeof(LucyVertex), .inputRate = VK_VERTEX_INPUT_RATE_VERTEX } },
|
||||
.vertexAttributeDescriptionCount = 4,
|
||||
.pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]){
|
||||
{.location = 0, .binding = 0,
|
||||
.format = VK_FORMAT_R32G32B32A32_SFLOAT, .offset = offsetof(LucyVertex, color)},
|
||||
{.location = 1, .binding = 0,
|
||||
.format = VK_FORMAT_R32G32_SFLOAT, .offset = offsetof(LucyVertex, pos)},
|
||||
{.location = 2, .binding = 0,
|
||||
.format = VK_FORMAT_R32G32_SFLOAT, .offset = offsetof(LucyVertex, tex_cord)},
|
||||
{.location = 3, .binding = 0,
|
||||
.format = VK_FORMAT_R32_UINT, .offset = offsetof(LucyVertex, tex_ind)},
|
||||
},
|
||||
.depthTestEnable = false, .depthWriteEnable = false, .blendEnable = true,
|
||||
});
|
||||
|
||||
|
||||
// todo: create the actual pipeline
|
||||
return (LucyGlyphRenderer){};
|
||||
return (LucyGlyphRenderer){.cache = cache, .pipeline_layout = pipeline_layout, .pipeline = pipeline};
|
||||
}
|
||||
|
||||
#endif
|
||||
src/l2/margaret/allocator_base.h (new file, 1 line added)
@ -0,0 +1 @@
|
||||
|
||||
src/l2/margaret/vulkan_buffer_claire.h (new file, 316 lines added)
@ -0,0 +1,316 @@
|
||||
// Same dependencies as vulkan memory allocator
|
||||
#include "../../l1/core/uint_segments.h"
|
||||
#include "../../l1/core/util.h"
|
||||
#include "../../l1_5/core/buff_rb_tree_node.h"
|
||||
#include "../../../gen/l1_5/BufRBTree_MapU64ToU64.h"
|
||||
|
||||
typedef struct MargaretBufAllocatorOneBlock MargaretBufAllocatorOneBlock;
|
||||
|
||||
typedef struct {
|
||||
MargaretBufAllocatorOneBlock* block;
|
||||
U64 start;
|
||||
U64 len;
|
||||
} MargaretBAFreeSegment;
|
||||
|
||||
bool MargaretBAFreeSegment_less_len(const MargaretBAFreeSegment* A, const MargaretBAFreeSegment* B){
|
||||
if (A->len == B->len) {
|
||||
if (A->block == B->block) {
|
||||
return A->start < B->start;
|
||||
}
|
||||
return (uintptr_t)A->block < (uintptr_t)B->block;
|
||||
}
|
||||
return A->len < B->len;
|
||||
}
|
||||
|
||||
U64 margaret_bump_buffer_size_to_alignment(U64 A, U8 alignment_exp){
|
||||
if (A & ((1ull << alignment_exp) - 1))
|
||||
A = A - (A & ((1ull << alignment_exp) - 1)) + (1ull << alignment_exp);
|
||||
return A;
|
||||
}
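A minimal standalone check of the rounding rule above, as a sketch: it rounds A up to the next multiple of 2^alignment_exp and leaves exact multiples untouched. stdint types stand in for U64/U8 here since the project's int_primitives header is not part of this hunk, and the mask-clearing form used below is equivalent to the subtraction in the original.

#include <assert.h>
#include <stdint.h>

static uint64_t bump_to_alignment(uint64_t A, uint8_t alignment_exp) {
    uint64_t mask = (1ull << alignment_exp) - 1;
    if (A & mask)
        A = (A & ~mask) + (1ull << alignment_exp); /* clear low bits, then step up one unit */
    return A;
}

int main(void) {
    assert(bump_to_alignment(100, 4) == 112); /* 100 -> next multiple of 16 */
    assert(bump_to_alignment(96, 4) == 96);   /* already aligned, unchanged  */
    assert(bump_to_alignment(1, 0) == 1);     /* 2^0 alignment is a no-op    */
    return 0;
}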
|
||||
|
||||
typedef struct {
|
||||
MargaretBufAllocatorOneBlock* block;
|
||||
U64 start;
|
||||
U64 len;
|
||||
} MargaretSubbuf;
|
||||
|
||||
struct MargaretBufAllocatorOneBlock{
|
||||
BufRBTree_MapU64ToU64 occupants;
|
||||
U64 capacity;
|
||||
U64 occupation_counter;
|
||||
VkDeviceMemory mem_hand;
|
||||
VkBuffer buf_hand;
|
||||
void* mapped_memory;
|
||||
};
|
||||
|
||||
void MargaretBufAllocatorOneBlock_drop(MargaretBufAllocatorOneBlock self){
|
||||
BufRBTree_MapU64ToU64_drop(self.occupants);
|
||||
}
|
||||
#include "../../../gen/l1/eve/margaret/ListMargaretBufAllocatorOneBlock.h"
|
||||
|
||||
#include "../../../gen/l1/eve/margaret/OptionMargaretBAFreeSegment.h"
|
||||
#include "../../../gen/l1/eve/margaret/VecMargaretBAFreeSegment.h"
|
||||
#include "../../../gen/l1_5/eve/margaret/BufRBTreeByLen_SetMargaretBAFreeSegment.h"
|
||||
|
||||
typedef struct {
|
||||
ListMargaretBufAllocatorOneBlock blocks;
|
||||
BufRBTreeByLen_SetMargaretBAFreeSegment mem_free_space;
|
||||
VkDevice device;
|
||||
VkPhysicalDevice physical_device;
|
||||
VkBufferUsageFlags usage;
|
||||
U8 memory_type_id;
|
||||
U8 alignment_exp;
|
||||
bool host_visible;
|
||||
} MargaretBufAllocator;
|
||||
|
||||
|
||||
void MargaretBufAllocator__erase_gap(
|
||||
MargaretBufAllocator* self, MargaretBufAllocatorOneBlock* block, U64 start, U64 len){
|
||||
if (len == 0)
|
||||
return;
|
||||
bool eret = BufRBTreeByLen_SetMargaretBAFreeSegment_erase(&self->mem_free_space,
|
||||
&(MargaretBAFreeSegment){.block = block, .start = start, .len = len});
|
||||
assert(eret);
|
||||
block->occupation_counter += len;
|
||||
assert(block->occupation_counter <= block->capacity);
|
||||
}
|
||||
|
||||
void MargaretBufAllocator__insert_gap(
|
||||
MargaretBufAllocator* self, MargaretBufAllocatorOneBlock* block, U64 start, U64 len){
|
||||
if (len == 0)
|
||||
return;
|
||||
bool iret = BufRBTreeByLen_SetMargaretBAFreeSegment_insert(&self->mem_free_space,
|
||||
(MargaretBAFreeSegment){.block = block, .start = start, .len = len});
|
||||
assert(iret);
|
||||
assert(len <= block->occupation_counter);
|
||||
block->occupation_counter -= len;
|
||||
}
|
||||
|
||||
OptionMargaretBAFreeSegment MargaretBufAllocator__search_gap(MargaretBufAllocator* self, U64 req_size){
|
||||
assert(req_size % (1ull << self->alignment_exp) == 0);
|
||||
U64 sit = BufRBTreeByLen_SetMargaretBAFreeSegment_find_min_grtr_or_eq(&self->mem_free_space,
|
||||
&(MargaretBAFreeSegment){.len = req_size});
|
||||
if (sit == 0)
|
||||
return None_MargaretBAFreeSegment();
|
||||
return Some_MargaretBAFreeSegment(*BufRBTreeByLen_SetMargaretBAFreeSegment_at_iter(&self->mem_free_space, sit));
|
||||
}
|
||||
|
||||
void MargaretBufAllocator__add_block(MargaretBufAllocator* self, U64 capacity){
|
||||
VkBuffer buffer;
|
||||
check(vkCreateBuffer(self->device, &(VkBufferCreateInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
|
||||
.size = capacity,
|
||||
.usage = self->usage,
|
||||
.sharingMode = VK_SHARING_MODE_EXCLUSIVE
|
||||
}, NULL, &buffer) == VK_SUCCESS);
|
||||
VkMemoryRequirements memory_requirements;
|
||||
vkGetBufferMemoryRequirements(self->device, buffer, &memory_requirements);
|
||||
VkDeviceMemory memory;
|
||||
check(vkAllocateMemory(self->device, &(VkMemoryAllocateInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
|
||||
.allocationSize = memory_requirements.size,
|
||||
.memoryTypeIndex = self->memory_type_id
|
||||
}, NULL, &memory) == VK_SUCCESS);
|
||||
check(vkBindBufferMemory(self->device, buffer, memory, 0) == VK_SUCCESS);
|
||||
void* mapped_memory = NULL;
|
||||
if (self->host_visible) {
|
||||
check(vkMapMemory(self->device, memory, 0, capacity, 0, &mapped_memory) == VK_SUCCESS);
|
||||
}
|
||||
ListMargaretBufAllocatorOneBlock_insert(&self->blocks, (MargaretBufAllocatorOneBlock){
|
||||
.occupants = BufRBTree_MapU64ToU64_new_reserved(1),
|
||||
.capacity = capacity,
|
||||
.occupation_counter = capacity,
|
||||
.mem_hand = memory, .buf_hand = buffer, .mapped_memory = mapped_memory
|
||||
});
|
||||
}
|
||||
|
||||
MargaretBufAllocator MargaretBufAllocator_new(
|
||||
VkDevice device, VkPhysicalDevice physical_device,
|
||||
VkBufferUsageFlags usage, U8 memory_type_id, U8 alignment_exp, bool host_visible, U64 initial_block_size
|
||||
){
|
||||
MargaretBufAllocator self = {
|
||||
.blocks = ListMargaretBufAllocatorOneBlock_new(),
|
||||
.mem_free_space = BufRBTreeByLen_SetMargaretBAFreeSegment_new_reserved(1),
|
||||
.device = device, .physical_device = physical_device, .usage = usage, .memory_type_id = memory_type_id,
|
||||
.alignment_exp = alignment_exp, .host_visible = host_visible
|
||||
};
|
||||
MargaretBufAllocator__add_block(&self, initial_block_size);
|
||||
MargaretBufAllocator__insert_gap(&self, &self.blocks.first->el, 0, initial_block_size);
|
||||
return self;
|
||||
}
|
||||
|
||||
void MargaretBufAllocator__put_buf_to_a_gap(MargaretBufAllocator* self, MargaretBAFreeSegment segment, U64 req_size){
|
||||
assert(req_size <= segment.len);
|
||||
MargaretBufAllocator__erase_gap(self, segment.block, segment.start, segment.len);
|
||||
MargaretBufAllocator__insert_gap(self, segment.block,
|
||||
segment.start + req_size, segment.len - req_size);
|
||||
BufRBTree_MapU64ToU64* occupants = &segment.block->occupants;
|
||||
bool iret = BufRBTree_MapU64ToU64_insert(occupants, segment.start, req_size);
|
||||
assert(iret);
|
||||
}
|
||||
|
||||
U64Segment MargaretBufAllocator__get_left_free_space(
|
||||
const MargaretBufAllocator* self, const MargaretSubbuf* allocation){
|
||||
U64 occ_start = allocation->start;
|
||||
U64 prev_occ_it = BufRBTree_MapU64ToU64_find_max_less(&allocation->block->occupants, allocation->start);
|
||||
if (prev_occ_it != 0) {
|
||||
U64 prev_occ_start;
|
||||
U64 prev_occ_taken_size;
|
||||
BufRBTree_MapU64ToU64_at_iter(&allocation->block->occupants, prev_occ_it, &prev_occ_start, &prev_occ_taken_size);
|
||||
|
||||
assert(prev_occ_start + prev_occ_taken_size <= occ_start);
|
||||
return (U64Segment){
|
||||
.start = prev_occ_start + prev_occ_taken_size,
|
||||
.len = occ_start - (prev_occ_start + prev_occ_taken_size)};
|
||||
}
|
||||
return (U64Segment){.start = 0, .len = occ_start};
|
||||
}
|
||||
|
||||
U64Segment MargaretBufAllocator__get_right_free_space(
|
||||
const MargaretBufAllocator* self, const MargaretSubbuf* allocation){
|
||||
U64 occ_start = allocation->start;
|
||||
U64 occ_taken_size = allocation->len;
|
||||
|
||||
U64 next_occ_it = BufRBTree_MapU64ToU64_find_min_grtr(&allocation->block->occupants, allocation->start);
|
||||
if (next_occ_it != 0) {
|
||||
U64 next_occ_start;
|
||||
U64 next_occ_taken_size;
|
||||
BufRBTree_MapU64ToU64_at_iter(&allocation->block->occupants, next_occ_it, &next_occ_start, &next_occ_taken_size);
|
||||
assert(occ_start + occ_taken_size <= next_occ_start);
|
||||
return (U64Segment){.start = occ_start + occ_taken_size, .len = next_occ_start - (occ_start + occ_taken_size)};
|
||||
}
|
||||
return (U64Segment){.start = occ_start + occ_taken_size, .len = allocation->block->capacity - (occ_start + occ_taken_size)};
|
||||
}
|
||||
|
||||
|
||||
void MargaretBufAllocator_drop(MargaretBufAllocator self){
|
||||
for (ListNodeMargaretBufAllocatorOneBlock* bi = self.blocks.first; bi; bi = bi->next) {
|
||||
vkDestroyBuffer(self.device, bi->el.buf_hand, NULL);
|
||||
vkFreeMemory(self.device, bi->el.mem_hand, NULL);
|
||||
}
|
||||
ListMargaretBufAllocatorOneBlock_drop(self.blocks);
|
||||
BufRBTreeByLen_SetMargaretBAFreeSegment_drop(self.mem_free_space);
|
||||
}
|
||||
|
||||
/* Free one subbuffer, not a whole MBA :) */
|
||||
void MargaretBufAllocator_free(MargaretBufAllocator* self, MargaretSubbuf allocation){
|
||||
U64Segment left_free_space = MargaretBufAllocator__get_left_free_space(self, &allocation);
|
||||
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, &allocation);
|
||||
|
||||
MargaretBufAllocator__erase_gap(self, allocation.block, left_free_space.start, left_free_space.len);
|
||||
MargaretBufAllocator__erase_gap(self, allocation.block, right_free_space.start, right_free_space.len);
|
||||
MargaretBufAllocator__insert_gap(self, allocation.block,
|
||||
left_free_space.start,
|
||||
right_free_space.start + right_free_space.len - left_free_space.start);
|
||||
}
|
||||
|
||||
/* Idk how to hide this monster */
|
||||
void MargaretBufAllocator_debug(const MargaretBufAllocator* self){
|
||||
printf(" ======== MargaretBufAllocator state ======== \n");
|
||||
int n_segments = (int)self->mem_free_space.el.len;
|
||||
printf("Blocks:\n");
|
||||
for (ListNodeMargaretBufAllocatorOneBlock* block_it = self->blocks.first; block_it; block_it = block_it->next) {
|
||||
U64 free_space_acc_segs = 0;
|
||||
U64 occ_space_acc_occ = 0;
|
||||
MargaretBufAllocatorOneBlock* block = &block_it->el;
|
||||
int n_occupants = (int)block->occupants.el.len;
|
||||
printf("-*- occupied: %lu/%lu, occupants: %d\n", block->occupation_counter, block->capacity, n_occupants);
|
||||
for (int si = 0; si < n_segments; si++) {
|
||||
MargaretBAFreeSegment fseg = self->mem_free_space.el.buf[si];
|
||||
if (fseg.block == block) {
|
||||
assert(fseg.start + fseg.len <= block->capacity);
|
||||
free_space_acc_segs += fseg.len;
|
||||
}
|
||||
}
|
||||
for (int oi = 0; oi < n_occupants; oi++) {
|
||||
KVPU64ToU64 occ = block->occupants.el.buf[oi];
|
||||
assert(occ.key + occ.value <= block->capacity);
|
||||
occ_space_acc_occ += occ.value;
|
||||
for (int sc = 0; sc < n_occupants; sc++) {
|
||||
KVPU64ToU64 occ2 = block->occupants.el.buf[sc];
|
||||
if (sc != oi) {
|
||||
assert(occ.key + occ.value <= occ2.key || occ2.key + occ2.value <= occ.key);
|
||||
}
|
||||
}
|
||||
}
|
||||
assert(free_space_acc_segs == block->capacity - block->occupation_counter);
|
||||
assert(occ_space_acc_occ == block->occupation_counter);
|
||||
}
|
||||
}
|
||||
|
||||
NODISCARD MargaretSubbuf MargaretBufAllocator_alloc(MargaretBufAllocator* self, U64 req_size){
|
||||
req_size = margaret_bump_buffer_size_to_alignment(req_size, self->alignment_exp);
|
||||
|
||||
VkPhysicalDeviceMaintenance3Properties maintenance3_properties = {
|
||||
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
|
||||
};
|
||||
VkPhysicalDeviceProperties2 properties = {
|
||||
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
|
||||
.pNext = &maintenance3_properties,
|
||||
};
|
||||
vkGetPhysicalDeviceProperties2(self->physical_device, &properties);
|
||||
|
||||
check(req_size <= maintenance3_properties.maxMemoryAllocationSize);
|
||||
OptionMargaretBAFreeSegment free_gap = MargaretBufAllocator__search_gap(self, req_size);
|
||||
if (free_gap.variant == Option_None) {
|
||||
assert(self->blocks.first != NULL);
|
||||
U64 pitch = self->blocks.first->el.capacity;
|
||||
// Old blocks remain intact
|
||||
U64 new_capacity = MAX_U64(req_size, MIN_U64(2 * pitch, maintenance3_properties.maxMemoryAllocationSize));
|
||||
MargaretBufAllocator__add_block(self, new_capacity);
|
||||
MargaretBufAllocatorOneBlock* new_block = &self->blocks.first->el;
|
||||
MargaretBufAllocator__insert_gap(self, new_block, req_size, new_capacity - req_size);
|
||||
new_block->occupation_counter = req_size;
|
||||
bool iret = BufRBTree_MapU64ToU64_insert(&new_block->occupants, 0, req_size);
|
||||
assert(iret);
|
||||
return (MargaretSubbuf){.block = &self->blocks.first->el, 0, req_size};
|
||||
}
|
||||
MargaretBufAllocator__put_buf_to_a_gap(self, free_gap.some, req_size);
|
||||
return (MargaretSubbuf){.block = free_gap.some.block, .start = free_gap.some.start, req_size};
|
||||
}
|
||||
|
||||
void MargaretBufAllocator_shrink(MargaretBufAllocator* self, MargaretSubbuf* allocation, U64 smaller_size){
|
||||
smaller_size = margaret_bump_buffer_size_to_alignment(smaller_size, self->alignment_exp);
|
||||
assert(smaller_size > 0);
|
||||
assert(smaller_size <= allocation->len);
|
||||
|
||||
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, allocation);
|
||||
MargaretBufAllocator__erase_gap(self, allocation->block, right_free_space.start, right_free_space.len);
|
||||
MargaretBufAllocator__insert_gap(self, allocation->block,
|
||||
allocation->start + smaller_size,
|
||||
right_free_space.len + (allocation->len - smaller_size));
|
||||
|
||||
allocation->len = smaller_size;
|
||||
}
|
||||
|
||||
/* May return a 'null' MargaretSubbuf allocation: if the .len field of the return value is zero,
 * the expansion happened in place, the `allocation` argument was updated to the new size, and nothing
 * new was allocated. If the .len field is non-zero, a fresh MargaretSubbuf was returned and the
 * `allocation` argument was left untouched; it remains a valid object that you must deallocate yourself.
 */
|
||||
NODISCARD MargaretSubbuf MargaretBufAllocator_expand(MargaretBufAllocator* self, MargaretSubbuf* allocation, U64 bigger_size){
|
||||
bigger_size = margaret_bump_buffer_size_to_alignment(bigger_size, self->alignment_exp);
|
||||
|
||||
U64Segment right_free_space = MargaretBufAllocator__get_right_free_space(self, allocation);
|
||||
if (allocation->start + bigger_size > right_free_space.start + right_free_space.len){
|
||||
return MargaretBufAllocator_alloc(self, bigger_size);
|
||||
}
|
||||
MargaretBufAllocator__erase_gap(self, allocation->block, right_free_space.start, right_free_space.len);
|
||||
MargaretBufAllocator__insert_gap(self, allocation->block,
|
||||
allocation->start + bigger_size,
|
||||
right_free_space.len + (allocation->len - bigger_size));
|
||||
|
||||
allocation->len = bigger_size;
|
||||
return (MargaretSubbuf){0};
|
||||
}
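Given the contract described above, a caller typically branches on the returned .len. A caller-side sketch using only types and functions from this header; grow_subbuf is an illustrative name, not part of the allocator, and the data-copy step is elided.

/* Hypothetical caller of MargaretBufAllocator_expand, following the in-place / relocated contract. */
void grow_subbuf(MargaretBufAllocator* alloc, MargaretSubbuf* buf, U64 bigger_size) {
    MargaretSubbuf moved = MargaretBufAllocator_expand(alloc, buf, bigger_size);
    if (moved.len == 0)
        return; /* grew in place; *buf already carries the new size */
    /* A new sub-buffer was handed back: copy data from *buf to moved here (elided),
     * then release the old allocation and adopt the new one. */
    MargaretBufAllocator_free(alloc, *buf);
    *buf = moved;
}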
|
||||
|
||||
char* MargaretSubbuf_get_mapped(const MargaretSubbuf* allocation){
|
||||
assert(allocation->block->mapped_memory);
|
||||
assert(allocation->start + allocation->len <= allocation->block->capacity);
|
||||
return (char*)allocation->block->mapped_memory + allocation->start;
|
||||
}
|
||||
|
||||
VkBuffer MargaretSubbuf_get_buffer(const MargaretSubbuf* allocation){
|
||||
assert(allocation->start + allocation->len <= allocation->block->capacity);
|
||||
return allocation->block->buf_hand;
|
||||
}
|
||||
src/l2/margaret/vulkan_images_claire.h (new file, 521 lines added)
@ -0,0 +1,521 @@
|
||||
/* This is a Claire header. Do not include it in more than one place.
 * This Claire requires the following Vulkan API:
|
||||
*
|
||||
*
|
||||
* typedef integer VkResult
|
||||
*
|
||||
* const VkResult VK_SUCCESS
|
||||
*
|
||||
* typedef integer VkStructureType
|
||||
*
|
||||
* const VkStructureType VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
|
||||
* const VkStructureType VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
|
||||
* const VkStructureType VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO
|
||||
*
|
||||
* typedef integer VkBufferCreateFlags
|
||||
* typedef integer VkDeviceSize
|
||||
* typedef integer VkBufferUsageFlags
|
||||
* typedef integer VkSharingMode
|
||||
*
|
||||
* const VkSharingMode VK_SHARING_MODE_EXCLUSIVE
|
||||
*
|
||||
*
|
||||
* typedef handler VkPhysicalDevice
|
||||
* typedef handler VkDevice
|
||||
* typedef handler VkBuffer
|
||||
* typedef handler VkImage
|
||||
* typedef handler VkDeviceMemory
|
||||
* typedef handler VkCommandBuffer
|
||||
*
|
||||
* typedef struct {
|
||||
* VkStructureType sType;
|
||||
* const void* pNext;
|
||||
* VkBufferCreateFlags flags;
|
||||
* VkDeviceSize size;
|
||||
* VkBufferUsageFlags usage;
|
||||
* VkSharingMode sharingMode;
|
||||
* uint32_t queueFamilyIndexCount;
|
||||
* const uint32_t* pQueueFamilyIndices;
|
||||
* } VkBufferCreateInfo
|
||||
*
|
||||
* typedef integer VkMemoryPropertyFlags
|
||||
*
|
||||
* const VkMemoryPropertyFlags VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
|
||||
*
|
||||
* typedef struct {
|
||||
* VkMemoryPropertyFlags propertyFlags;
|
||||
* ...
|
||||
* } VkMemoryType
|
||||
*
|
||||
* #define VK_MAX_MEMORY_TYPES 32
|
||||
*
|
||||
* typedef struct {
|
||||
* uint32_t memoryTypeCount;
|
||||
* VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES];
|
||||
* ...
|
||||
* } VkPhysicalDeviceMemoryProperties
|
||||
*
|
||||
* void vkGetPhysicalDeviceMemoryProperties(
|
||||
* VkPhysicalDevice physicalDevice,
|
||||
* VkPhysicalDeviceMemoryProperties* pMemoryProperties)
|
||||
*
|
||||
* typedef void VkAllocationCallbacks
|
||||
 * (seriously, this type is only used as a pointer in an exposed API function, and it always takes a NULL value)
|
||||
*
|
||||
* VkResult vkCreateBuffer(
|
||||
* VkDevice device,
|
||||
* const VkBufferCreateInfo* pCreateInfo,
|
||||
* const VkAllocationCallbacks* pAllocator,
|
||||
* VkBuffer* pBuffer)
|
||||
*
|
||||
* typedef struct {
|
||||
* VkDeviceSize size;
|
||||
* VkDeviceSize alignment;
|
||||
* uint32_t memoryTypeBits;
|
||||
* } VkMemoryRequirements
|
||||
*
|
||||
* void vkGetBufferMemoryRequirements(
|
||||
* VkDevice device,
|
||||
* VkBuffer buffer,
|
||||
* VkMemoryRequirements* pMemoryRequirements)
|
||||
*
|
||||
* typedef integer VkImageCreateFlags
|
||||
* typedef integer VkImageType
|
||||
*
|
||||
* const VkImageType VK_IMAGE_TYPE_2D
|
||||
*
|
||||
* typedef integer VkFormat
|
||||
*
|
||||
* typedef struct {
|
||||
* uint32_t width;
|
||||
* uint32_t height;
|
||||
* uint32_t depth;
|
||||
* } VkExtent3D
|
||||
*
|
||||
* typedef integer VkSampleCountFlagBits
|
||||
*
|
||||
* const VkSampleCountFlagBits VK_SAMPLE_COUNT_1_BIT
|
||||
*
|
||||
* typedef integer VkImageTiling
|
||||
*
|
||||
* const VkImageTiling VK_IMAGE_TILING_LINEAR
|
||||
*
|
||||
* const VkImageTiling VK_IMAGE_TILING_OPTIMAL
|
||||
*
|
||||
* typedef integer VkImageUsageFlags
|
||||
* typedef integer VkImageLayout
|
||||
*
|
||||
* const VkImageLayout VK_IMAGE_LAYOUT_UNDEFINED
|
||||
*
|
||||
* typedef struct {
|
||||
* VkStructureType sType;
|
||||
* const void* pNext;
|
||||
* VkImageCreateFlags flags;
|
||||
* VkImageType imageType;
|
||||
* VkFormat format;
|
||||
* VkExtent3D extent;
|
||||
* uint32_t mipLevels;
|
||||
* uint32_t arrayLayers;
|
||||
* VkSampleCountFlagBits samples;
|
||||
* VkImageTiling tiling;
|
||||
* VkImageUsageFlags usage;
|
||||
* VkSharingMode sharingMode;
|
||||
* uint32_t queueFamilyIndexCount;
|
||||
* const uint32_t* pQueueFamilyIndices;
|
||||
* VkImageLayout initialLayout;
|
||||
* } VkImageCreateInfo
|
||||
*
|
||||
* VkResult vkCreateImage(
|
||||
* VkDevice device,
|
||||
* const VkImageCreateInfo* pCreateInfo,
|
||||
* const VkAllocationCallbacks* pAllocator,
|
||||
* VkImage* pImage)
|
||||
*
|
||||
* void vkGetImageMemoryRequirements(
|
||||
* VkDevice device,
|
||||
* VkImage image,
|
||||
* VkMemoryRequirements* pMemoryRequirements)
|
||||
*
|
||||
* typedef struct {
|
||||
* VkStructureType sType;
|
||||
* const void* pNext;
|
||||
* VkDeviceSize allocationSize;
|
||||
* uint32_t memoryTypeIndex;
|
||||
* } VkMemoryAllocateInfo
|
||||
*
|
||||
* VkResult vkAllocateMemory(
|
||||
* VkDevice device,
|
||||
* const VkMemoryAllocateInfo* pAllocateInfo,
|
||||
* const VkAllocationCallbacks* pAllocator,
|
||||
* VkDeviceMemory* pMemory)
|
||||
*
|
||||
* VkResult vkBindBufferMemory(
|
||||
* VkDevice device,
|
||||
* VkBuffer buffer,
|
||||
* VkDeviceMemory memory,
|
||||
* VkDeviceSize memoryOffset)
|
||||
*
|
||||
* VkResult vkBindImageMemory(
|
||||
* VkDevice device,
|
||||
* VkImage image,
|
||||
* VkDeviceMemory memory,
|
||||
* VkDeviceSize memoryOffset)
|
||||
*
|
||||
* void vkDestroyBuffer(
|
||||
* VkDevice device,
|
||||
* VkBuffer buffer,
|
||||
* const VkAllocationCallbacks* pAllocator)
|
||||
*
|
||||
* void vkDestroyImage(
|
||||
* VkDevice device,
|
||||
* VkImage image,
|
||||
* const VkAllocationCallbacks* pAllocator)
|
||||
*/
|
||||
|
||||
// todo: get rid of this whole VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT crap. MargaretMA is for non-host-visible
|
||||
// todo: for staging buffers you better use MargaretBufferAllocator. Ou, yeah, I have yet to write them
|
||||
|
||||
|
||||
// todo: fucking rewrite all of this. Yes, I want all of this shit rewritten
|
||||
|
||||
#include "../../l1/core/util.h"
|
||||
#include "../../l1_5/core/buff_rb_tree_node.h"
|
||||
#include "../../../gen/l1_5/BufRBTree_MapU64ToU64.h"
|
||||
|
||||
typedef struct{
|
||||
U64 block;
|
||||
U64 start;
|
||||
U64 len;
|
||||
} MargaretIAFreeSegment;
|
||||
|
||||
#include "../../l1/core/uint_segments.h"
|
||||
// todo: replace U64Segment_get_length_resp_alignment with my own function
|
||||
bool MargaretIAFreeSegment_less_resp_align(const MargaretIAFreeSegment* A, const MargaretIAFreeSegment* B, U8 alignment_exp){
|
||||
U64 A_len = U64Segment_get_length_resp_alignment((U64Segment){A->start, A->len}, alignment_exp);
|
||||
U64 B_len = U64Segment_get_length_resp_alignment((U64Segment){B->start, B->len}, alignment_exp);
|
||||
if (A_len == B_len) {
|
||||
if (A->block == B->block) {
|
||||
return A->start < B->start;
|
||||
}
|
||||
return A->block < B->block;
|
||||
}
|
||||
return A_len < B_len;
|
||||
}
|
||||
|
||||
|
||||
/* Does not include all parameters needed for relocation, because relocation is only needed
 * during controlled defragmentation */
|
||||
typedef struct {
|
||||
U64 block;
|
||||
VkImage image;
|
||||
U64 start;
|
||||
} MargaretImgAllocation;
|
||||
|
||||
/* Not primitive */
|
||||
typedef struct {
|
||||
BufRBTree_MapU64ToU64 images;
|
||||
U64 capacity;
|
||||
U64 occupation_counter;
|
||||
VkDeviceMemory mem_hand;
|
||||
void* mapped_memory;
|
||||
} MargaretImgAllocatorOneBlock;
|
||||
|
||||
void MargaretImgAllocatorOneBlock_drop(MargaretImgAllocatorOneBlock self){
|
||||
BufRBTree_MapU64ToU64_drop(self.images);
|
||||
}
|
||||
|
||||
#include "../../../gen/l1/eve/margaret/VecMargaretImgAllocatorOneBlock.h"
|
||||
|
||||
#include "../../../gen/l1/VecAndSpan_U8.h"
|
||||
#include "../../../gen/l1/eve/margaret/VecMargaretIAFreeSegment.h"
|
||||
#include "../../../gen/l1/eve/margaret/OptionMargaretIAFreeSegment.h"
|
||||
#include "../../../gen/l1_5/eve/margaret/BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment.h"
|
||||
#include "../../../gen/l1/eve/margaret/OptionBufRBTreeByLenRespAlign_SetMargaretIAFreeSegment.h"
|
||||
|
||||
#define MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP 28
|
||||
|
||||
/* Superstructure for managing free segments of memory of some type in ALL BLOCKS */
|
||||
typedef struct {
|
||||
OptionBufRBTreeByLenRespAlign_SetMargaretIAFreeSegment free_space_in_memory[MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP];
|
||||
VecU8 set_present;
|
||||
} MargaretMemFreeSpaceManager;
|
||||
|
||||
MargaretMemFreeSpaceManager MargaretMemFreeSpaceManager_new(){
|
||||
MargaretMemFreeSpaceManager res = {.set_present = VecU8_new_zeroinit(1)};
|
||||
res.set_present.buf[0] = 3;
|
||||
for (U8 algn = 0; algn < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP; algn++)
|
||||
res.free_space_in_memory[algn] = None_BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment();
|
||||
res.free_space_in_memory[3] = Some_BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment(
|
||||
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_new_reserved(3, 1));
|
||||
return res;
|
||||
}
|
||||
|
||||
void MargaretMemFreeSpaceManager_drop(MargaretMemFreeSpaceManager self){
|
||||
for (U8 alignment_exp = 0; alignment_exp < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP; alignment_exp++)
|
||||
OptionBufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_drop(self.free_space_in_memory[alignment_exp]);
|
||||
VecU8_drop(self.set_present);
|
||||
}
|
||||
|
||||
void MargaretMemFreeSpaceManager_erase(MargaretMemFreeSpaceManager* man, U64 block, U64 start, U64 len){
|
||||
if (len == 0)
|
||||
return;
|
||||
assert(man->set_present.len > 0);
|
||||
for (size_t aj = 0; aj < man->set_present.len; aj++) {
|
||||
U8 alignment = man->set_present.buf[aj];
|
||||
assert(alignment < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP);
|
||||
assert(man->free_space_in_memory[alignment].variant == Option_Some);
|
||||
bool eret = BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_erase(&
|
||||
man->free_space_in_memory[alignment].some,
|
||||
&(MargaretIAFreeSegment){.block = block, .start = start, .len = len});
|
||||
assert(eret);
|
||||
}
|
||||
}
|
||||
|
||||
void MargaretMemFreeSpaceManager_insert(MargaretMemFreeSpaceManager* man, U64 block, U64 start, U64 len){
|
||||
if (len == 0)
|
||||
return;
|
||||
assert(man->set_present.len > 0); /* MargaretMemFreeSpaceManager_new() pre-seeds the 2^3 alignment set, so this is never empty */
|
||||
for (size_t aj = 0; aj < man->set_present.len; aj++) {
|
||||
U8 alignment = man->set_present.buf[aj];
|
||||
assert(alignment < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP);
|
||||
assert(man->free_space_in_memory[alignment].variant == Option_Some);
|
||||
bool iret = BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_insert(&
|
||||
man->free_space_in_memory[alignment].some, (MargaretIAFreeSegment){.block = block, .start = start, .len = len});
|
||||
assert(iret);
|
||||
}
|
||||
}
|
||||
|
||||
OptionMargaretIAFreeSegment MargaretMemFreeSpaceManager_search(
|
||||
MargaretMemFreeSpaceManager* man, U8 alignment_exp, U64 req_size) {
|
||||
check(alignment_exp < MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP);
|
||||
if (man->free_space_in_memory[alignment_exp].variant == Option_None) {
|
||||
assert(man->set_present.len > 0);
|
||||
assert(man->free_space_in_memory[man->set_present.buf[0]].variant == Option_Some);
|
||||
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment* have = &man->free_space_in_memory[man->set_present.buf[0]].some;
|
||||
man->free_space_in_memory[alignment_exp] = Some_BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment(
|
||||
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_new_reserved(alignment_exp, have->el.len));
|
||||
for (size_t i = 0; i < have->el.len; i++) {
|
||||
BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_insert(
|
||||
&man->free_space_in_memory[alignment_exp].some, *VecMargaretIAFreeSegment_at(&have->el, i));
|
||||
}
|
||||
}
|
||||
assert(man->free_space_in_memory[alignment_exp].variant == Option_Some);
|
||||
U64 sit = BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_find_min_grtr_or_eq(
|
||||
&man->free_space_in_memory[alignment_exp].some, &(MargaretIAFreeSegment){.len = req_size,});
|
||||
if (sit == 0)
|
||||
return None_MargaretIAFreeSegment();
|
||||
return Some_MargaretIAFreeSegment(*BufRBTreeByLenRespAlign_SetMargaretIAFreeSegment_at_iter(
|
||||
&man->free_space_in_memory[alignment_exp].some, sit));
|
||||
}
|
||||
|
||||
/* The VkDevice and VkPhysicalDevice handles are stored inside this struct; keep that in mind */
|
||||
typedef struct {
|
||||
VecMargaretImgAllocatorOneBlock blocks;
|
||||
MargaretMemFreeSpaceManager mem_free_space;
|
||||
VkDevice device;
|
||||
VkPhysicalDevice physical_device;
|
||||
U8 memory_type_id;
|
||||
} MargaretImgAllocator;
|
||||
|
||||
void MargaretImgAllocator__erase_gap(MargaretImgAllocator* self, U64 block_id, U64 start, U64 len){
|
||||
MargaretMemFreeSpaceManager_erase(&self->mem_free_space, block_id, start, len);
|
||||
MargaretImgAllocatorOneBlock* BLOCK = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, block_id);
|
||||
BLOCK->occupation_counter += len;
|
||||
assert(BLOCK->occupation_counter <= BLOCK->capacity);
|
||||
}
|
||||
|
||||
void MargaretImgAllocator__insert_gap(MargaretImgAllocator* self, U64 block_id, U64 start, U64 len){
|
||||
MargaretMemFreeSpaceManager_insert(&self->mem_free_space, block_id, start, len);
|
||||
MargaretImgAllocatorOneBlock* BLOCK = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, block_id);
|
||||
assert(len <= BLOCK->occupation_counter);
|
||||
BLOCK->occupation_counter -= len;
|
||||
}
|
||||
|
||||
void MargaretImgAllocator__add_block(MargaretImgAllocator* self, U64 capacity){
|
||||
VkDeviceMemory memory;
|
||||
check(vkAllocateMemory(self->device, &(VkMemoryAllocateInfo){
|
||||
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
|
||||
.allocationSize = capacity, .memoryTypeIndex = self->memory_type_id
|
||||
}, NULL, &memory) == VK_SUCCESS);
|
||||
VecMargaretImgAllocatorOneBlock_append(&self->blocks, (MargaretImgAllocatorOneBlock){
|
||||
.images = BufRBTree_MapU64ToU64_new_reserved(1),
|
||||
.capacity = capacity,
|
||||
.occupation_counter = capacity,
|
||||
.mem_hand = memory,
|
||||
.mapped_memory = NULL /* not supported */});
|
||||
}
|
||||
|
||||
MargaretImgAllocator MargaretImgAllocator_new(
|
||||
VkDevice device, VkPhysicalDevice physical_device, U8 memory_type_id, U64 initial_block_size
|
||||
){
|
||||
MargaretImgAllocator self = {
|
||||
.blocks = VecMargaretImgAllocatorOneBlock_new(),
|
||||
.mem_free_space = MargaretMemFreeSpaceManager_new(),
|
||||
.device = device,
|
||||
.physical_device = physical_device,
|
||||
.memory_type_id = memory_type_id,
|
||||
};
|
||||
MargaretImgAllocator__add_block(&self, initial_block_size);
|
||||
MargaretImgAllocator__insert_gap(&self, 0, 0, initial_block_size);
|
||||
return self;
|
||||
}
|
||||
|
||||
U64 margaret_get_alignment_left_padding(U64 unaligned_start, U8 alignment_exp){
|
||||
U64 hit = unaligned_start & (1ull << alignment_exp) - 1;
|
||||
return (hit ? (1ull << alignment_exp) - hit : 0);
|
||||
}
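The helper above just measures how far unaligned_start is from the next 2^alignment_exp boundary. A minimal standalone sketch of the same bit trick with plain stdint types (an illustration, not part of the diff; U64/U8 are assumed to be the usual fixed-width aliases):

#include <assert.h>
#include <stdint.h>
/* sketch: same computation as margaret_get_alignment_left_padding */
static uint64_t left_padding(uint64_t unaligned_start, uint8_t alignment_exp) {
    uint64_t hit = unaligned_start & ((1ull << alignment_exp) - 1); /* offset inside the 2^exp window */
    return hit ? (1ull << alignment_exp) - hit : 0;                 /* distance up to the next boundary */
}
int main(void) {
    assert(left_padding(5, 4) == 11);  /* 5 needs 11 more bytes to reach 16 */
    assert(left_padding(32, 4) == 0);  /* already 16-aligned */
    return 0;
}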

U64 MargaretImgAllocator__add_img_given_gap(
MargaretImgAllocator* self, MargaretIAFreeSegment segment, U64 required_size, U8 alignment_exp
){
U64 gap_start = segment.start;
U64 gap_len = segment.len;

U64 af = margaret_get_alignment_left_padding(gap_start, alignment_exp);
U64 aligned_start = gap_start + af;
assert(aligned_start + required_size <= gap_start + gap_len);
MargaretImgAllocator__erase_gap(self, segment.block, gap_start, gap_len);
MargaretImgAllocator__insert_gap(self, segment.block, gap_start, af);
MargaretImgAllocator__insert_gap(self, segment.block, aligned_start + required_size,
gap_start + gap_len - (aligned_start + required_size));

BufRBTree_MapU64ToU64* images = &VecMargaretImgAllocatorOneBlock_mat(&self->blocks, segment.block)->images;
bool iret = BufRBTree_MapU64ToU64_insert(images, aligned_start, required_size);
assert(iret);
return aligned_start;
}

U64Segment MargaretImgAllocator__get_left_free_space(
const MargaretImgAllocator* self, MargaretImgAllocation allocation){
const MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_at(&self->blocks, allocation.block);
U64 occ_start = allocation.start;

U64 prev_occ_it = BufRBTree_MapU64ToU64_find_max_less(&block->images, allocation.start);
if (prev_occ_it != 0) {
U64 prev_occ_start;
U64 prev_occ_taken_size;
BufRBTree_MapU64ToU64_at_iter(&block->images, prev_occ_it, &prev_occ_start, &prev_occ_taken_size);

assert(prev_occ_start + prev_occ_taken_size <= occ_start);
return (U64Segment){
.start = prev_occ_start + prev_occ_taken_size,
.len = occ_start - (prev_occ_start + prev_occ_taken_size)};
}
return (U64Segment){.start = 0, .len = occ_start};
}

U64Segment MargaretImgAllocator__get_right_free_space(
const MargaretImgAllocator* self, MargaretImgAllocation allocation){
const MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_at(&self->blocks, allocation.block);
U64 occ_start = allocation.start;
VkMemoryRequirements occ_memory_requirements;
vkGetImageMemoryRequirements(self->device, allocation.image, &occ_memory_requirements);
U64 occ_taken_size = occ_memory_requirements.size;

U64 next_occ_it = BufRBTree_MapU64ToU64_find_min_grtr(&block->images, allocation.start);
if (next_occ_it != 0) {
U64 next_occ_start;
U64 next_occ_taken_size;
BufRBTree_MapU64ToU64_at_iter(&block->images, next_occ_it, &next_occ_start, &next_occ_taken_size);
assert(occ_start + occ_taken_size <= next_occ_start);
return (U64Segment){.start = occ_start + occ_taken_size, .len = next_occ_start - (occ_start + occ_taken_size)};
}
return (U64Segment){.start = occ_start + occ_taken_size, .len = block->capacity - (occ_start + occ_taken_size)};
}

void MargaretImgAllocator_drop(MargaretImgAllocator self){
for (size_t bi = 0; bi < self.blocks.len; bi++) {
vkFreeMemory(self.device, self.blocks.buf[bi].mem_hand, NULL);
}
VecMargaretImgAllocatorOneBlock_drop(self.blocks);
MargaretMemFreeSpaceManager_drop(self.mem_free_space);
}

void MargaretImgAllocator_free(MargaretImgAllocator* self, MargaretImgAllocation allocation){
U64Segment left_free_space = MargaretImgAllocator__get_left_free_space(self, allocation);
U64Segment right_free_space = MargaretImgAllocator__get_right_free_space(self, allocation);

vkDestroyImage(self->device, allocation.image, NULL);

MargaretImgAllocator__erase_gap(self, allocation.block, left_free_space.start, left_free_space.len);
MargaretImgAllocator__erase_gap(self, allocation.block, right_free_space.start, right_free_space.len);
MargaretImgAllocator__insert_gap(self, allocation.block,
left_free_space.start,
right_free_space.start + right_free_space.len - left_free_space.start);
}

NODISCARD MargaretImgAllocation MargaretImgAllocator__alloc(
MargaretImgAllocator* self, U64 width, U64 height, VkFormat format,
VkImageUsageFlags usage_flags
){
VkPhysicalDeviceMaintenance3Properties maintenance3_properties = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
};
VkPhysicalDeviceProperties2 properties = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
.pNext = &maintenance3_properties,
};
vkGetPhysicalDeviceProperties2(self->physical_device, &properties);

VkImage fresh_img;
check(vkCreateImage(self->device, &(VkImageCreateInfo){
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.imageType = VK_IMAGE_TYPE_2D,
.format = format,
.extent = (VkExtent3D){.width = width, .height = height,.depth = 1,},
.mipLevels = 1,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = usage_flags,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
}, NULL, &fresh_img) == VK_SUCCESS);
VkMemoryRequirements mem_requirements;
vkGetImageMemoryRequirements(self->device, fresh_img, &mem_requirements);

check(U64_is_2pow(mem_requirements.alignment));
U8 alignment_exp = U64_2pow_log(mem_requirements.alignment);
check(mem_requirements.size <= maintenance3_properties.maxMemoryAllocationSize);
OptionMargaretIAFreeSegment free_gap =
MargaretMemFreeSpaceManager_search(&self->mem_free_space, alignment_exp, mem_requirements.size);

if (free_gap.variant == Option_None) {
assert(self->blocks.len > 0);
U64 pitch = self->blocks.buf[self->blocks.len - 1].capacity;
// Old blocks remain intact
U64 new_capacity = MAX_U64(mem_requirements.size, MIN_U64(2 * pitch, maintenance3_properties.maxMemoryAllocationSize));
MargaretImgAllocator__add_block(self, new_capacity);
U64 bid = self->blocks.len - 1;
MargaretImgAllocator__insert_gap(self, bid, mem_requirements.size, new_capacity - mem_requirements.size);
MargaretImgAllocatorOneBlock* block = VecMargaretImgAllocatorOneBlock_mat(&self->blocks, bid);
block->occupation_counter = mem_requirements.size;
bool iret = BufRBTree_MapU64ToU64_insert(&block->images, 0, mem_requirements.size);
assert(iret);
check(vkBindImageMemory(self->device, fresh_img, block->mem_hand, 0) == VK_SUCCESS);
return (MargaretImgAllocation){.block = bid, fresh_img, 0};
}
U64 aligned_pos = MargaretImgAllocator__add_img_given_gap(self, free_gap.some, mem_requirements.size, alignment_exp);
VkDeviceMemory memory = VecMargaretImgAllocatorOneBlock_at(&self->blocks, free_gap.some.block)->mem_hand;
check(vkBindImageMemory(self->device, fresh_img, memory, aligned_pos) == VK_SUCCESS);
return (MargaretImgAllocation){.block = free_gap.some.block, .image = fresh_img, .start = aligned_pos};
}

typedef struct{
MargaretImgAllocation a;
U64 width;
U64 height;
VkFormat format;
VkImageUsageFlags usage_flags;
VkImageLayout current_layout;
} MargaretImg;

NODISCARD MargaretImg MargaretImgAllocator_alloc(
MargaretImgAllocator* self, U64 width, U64 height, VkFormat format,
VkImageUsageFlags usage_flags
){
return (MargaretImg){.a = MargaretImgAllocator__alloc(self, width, height, format, usage_flags),
.width = width, .height = height, .format = format, .usage_flags = usage_flags,
.current_layout = VK_IMAGE_LAYOUT_UNDEFINED};
}
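Put together, a typical lifecycle of the allocator is roughly the sketch below (an illustration only, not code from the diff; device, physical_device and memory_type_id are assumed to come from the existing device-setup code elsewhere in the repo):

/* sketch: create the allocator, place one sampled image in it, then release everything */
MargaretImgAllocator ia = MargaretImgAllocator_new(device, physical_device, memory_type_id,
    64ull * 1024 * 1024 /* initial block size, arbitrary here */);
MargaretImg tex = MargaretImgAllocator_alloc(&ia, 1024, 1024, VK_FORMAT_R8G8B8A8_SRGB,
    VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
/* ... record uploads, bind tex.a.image in descriptors ... */
MargaretImgAllocator_free(&ia, tex.a); /* destroys the VkImage and merges the freed gap with its neighbours */
MargaretImgAllocator_drop(ia);         /* frees every VkDeviceMemory block */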
@ -2,6 +2,7 @@
#define prototype1_src_l2_margaret_vulkan_memory_h

#include <vulkan/vulkan.h>
#include "vulkan_memory_claire.h"
#include "vulkan_images_claire.h"
#include "vulkan_buffer_claire.h"

#endif
File diff suppressed because it is too large
@ -329,6 +329,7 @@ VkDevice margaret_create_logical_device(VkPhysicalDevice physical_device, Margar
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
.pNext = (void*)&used_synchronization2_features,
.features = (VkPhysicalDeviceFeatures) {
.geometryShader = true,
.samplerAnisotropy = physical_features.samplerAnisotropy,
},
};
@ -565,7 +566,7 @@ MargaretScoredPhysicalDevice margaret_score_physical_device(
else if (properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU)
score += 100;
if (!features2.features.geometryShader)
return (MargaretScoredPhysicalDevice){dev, -1, cstr("No geometry shader")};
return (MargaretScoredPhysicalDevice){dev, -1, cstr("No geometry shaders")};
if (!synchronization2_features.synchronization2)
return (MargaretScoredPhysicalDevice){dev, -1, cstr("No synchronization2")};
if (features2.features.samplerAnisotropy)
@ -997,23 +998,10 @@ void margaret_end_command_buffer(VkCommandBuffer command_buffer){
check(vkEndCommandBuffer(command_buffer) == VK_SUCCESS);
}

VkPipelineShaderStageCreateInfo margaret_shader_stage_vertex_crinfo(VkShaderModule module) {
return (VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .module = module,
.stage = VK_SHADER_STAGE_VERTEX_BIT, .pName = "main",
};
}

VkPipelineShaderStageCreateInfo margaret_shader_stage_fragment_crinfo(VkShaderModule module) {
return (VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .module = module,
.stage = VK_SHADER_STAGE_FRAGMENT_BIT, .pName = "main",
};
}

typedef struct {
VkPipelineLayout pipeline_layout;
VecU8 vertex_shader_code;
VecU8 geometry_shader_code;
VecU8 fragment_shader_code;
U32 vertexBindingDescriptionCount;
VkVertexInputBindingDescription* pVertexBindingDescriptions;
@ -1028,18 +1016,32 @@ VkPipeline margaret_create_triangle_pipeline_one_attachment(
VkDevice device, VkRenderPass render_pass, U32 renderpass_subpass,
MargaretMostImportantPipelineOptions op
){

VkShaderModule vert_module = margaret_VkShaderModule_new(device, op.vertex_shader_code);
VkShaderModule frag_module = margaret_VkShaderModule_new(device, op.fragment_shader_code);

VkPipelineShaderStageCreateInfo shader_modules[3] = {
(VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.module = margaret_VkShaderModule_new(device, op.vertex_shader_code),
.stage = VK_SHADER_STAGE_VERTEX_BIT, .pName = "main",
},
(VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.module = margaret_VkShaderModule_new(device, op.fragment_shader_code),
.stage = VK_SHADER_STAGE_FRAGMENT_BIT, .pName = "main",
},
};
U32 shader_modules_c = 2;
if (op.geometry_shader_code.len > 0) {
shader_modules[shader_modules_c] = (VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.module = margaret_VkShaderModule_new(device, op.geometry_shader_code),
.stage = VK_SHADER_STAGE_GEOMETRY_BIT, .pName = "main",
};
shader_modules_c++;
}

VkGraphicsPipelineCreateInfo pipeline_crinfo = {
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.stageCount = 2,
.pStages = (VkPipelineShaderStageCreateInfo[]){
margaret_shader_stage_vertex_crinfo(vert_module),
margaret_shader_stage_fragment_crinfo(frag_module)
},
.stageCount = shader_modules_c,
.pStages = shader_modules,
.pVertexInputState = &(VkPipelineVertexInputStateCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.vertexBindingDescriptionCount = op.vertexBindingDescriptionCount,
@ -1063,8 +1065,8 @@ VkPipeline margaret_create_triangle_pipeline_one_attachment(
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
.depthClampEnable = VK_FALSE,
.polygonMode = VK_POLYGON_MODE_FILL,
// .cullMode = VK_CULL_MODE_BACK_BIT,
.cullMode = VK_CULL_MODE_NONE,
.cullMode = VK_CULL_MODE_BACK_BIT,
// .cullMode = VK_CULL_MODE_NONE,
.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
.depthBiasEnable = VK_FALSE,
.depthBiasConstantFactor = 0.0f,
@ -1118,8 +1120,9 @@ VkPipeline margaret_create_triangle_pipeline_one_attachment(
VkPipeline pipeline;
check(vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipeline_crinfo, NULL, &pipeline) == VK_SUCCESS);

vkDestroyShaderModule(device, frag_module, NULL);
vkDestroyShaderModule(device, vert_module, NULL);
for (U32 i = 0; i < shader_modules_c; i++) {
vkDestroyShaderModule(device, shader_modules[i].module, NULL);
}
return pipeline;
}

@ -1127,15 +1130,99 @@ VkPipeline margaret_create_triangle_pipeline_one_attachment(

// todo: move image copying function here

// for users of memory that should be aware whether we are using two memory types or one
typedef struct {
VkDevice device;
MargaretMemAllocator* host_visible_mem;
MargaretMemAllocatorRequests* host_visible_mem_requests;
MargaretMemAllocator* device_local_mem;
MargaretMemAllocatorRequests* device_local_mem_requests;
bool device_local_is_host_visible;
VkPhysicalDevice physical_device;
VkCommandBuffer transfer_cmd_buffer;
MargaretImgAllocator* dev_local_images;
MargaretBufAllocator* dev_local_buffers;
MargaretBufAllocator* staging_buffers;
VkDescriptorPool descriptor_pool;
} MargaretEngineReference;

void margaret_rec_cmd_copy_buffer(
VkCommandBuffer cmd_buf,
const MargaretSubbuf* src_allocation, U64 src_offset,
const MargaretSubbuf* dst_allocation, U64 dst_offset, U64 length){
vkCmdCopyBuffer(cmd_buf,
MargaretSubbuf_get_buffer(src_allocation), MargaretSubbuf_get_buffer(dst_allocation),
1, &(VkBufferCopy){
.srcOffset = src_allocation->start + src_offset, .dstOffset = dst_allocation->start + dst_offset,
.size = length});
}

void margaret_rec_cmd_copy_buffer_one_to_one_part(
VkCommandBuffer cmd_buf,
const MargaretSubbuf* src_allocation,
const MargaretSubbuf* dst_allocation, U64 offset, U64 length){
assert(offset + length <= src_allocation->len);
assert(offset + length <= dst_allocation->len);
vkCmdCopyBuffer(cmd_buf,
MargaretSubbuf_get_buffer(src_allocation), MargaretSubbuf_get_buffer(dst_allocation),
1, &(VkBufferCopy){
.srcOffset = src_allocation->start + offset, .dstOffset = dst_allocation->start + offset, .size = length});
}

void margaret_rec_cmd_copy_buffer_one_to_one(
VkCommandBuffer cmd_buf, const MargaretSubbuf* src_allocation, const MargaretSubbuf* dst_allocation){
U64 copying_len = MIN_U64(src_allocation->len, dst_allocation->len);
vkCmdCopyBuffer(cmd_buf,
MargaretSubbuf_get_buffer(src_allocation), MargaretSubbuf_get_buffer(dst_allocation),
1, &(VkBufferCopy){
.srcOffset = src_allocation->start, .dstOffset = dst_allocation->start, .size = copying_len});
}

/* (destination_stage_mask, destination_access_mask) are probably
* (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT) */
void margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(
VkCommandBuffer cmd_buf, const MargaretSubbuf* src, MargaretImg* dst,
VkImageLayout dst_new_layout,
VkPipelineStageFlags destination_stage_mask, VkAccessFlags destination_access_mask){

vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
0 /* Flags */, 0, NULL, 0, NULL, 1, &(VkImageMemoryBarrier){
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = 0,
.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = dst->a.image,
.subresourceRange = (VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0,
.levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
},
});

vkCmdCopyBufferToImage(cmd_buf, MargaretSubbuf_get_buffer(src), dst->a.image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &(VkBufferImageCopy){
.bufferOffset = src->start,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = (VkImageSubresourceLayers){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel = 0, .baseArrayLayer = 0, .layerCount = 1,
},
.imageOffset = {0, 0, 0},
.imageExtent = { .width = dst->width, .height = dst->height, .depth = 1 },
});

vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_TRANSFER_BIT, destination_stage_mask,
0 /* Flags */, 0, NULL, 0, NULL, 1, &(VkImageMemoryBarrier){
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
.dstAccessMask = destination_access_mask,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = dst_new_layout,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = dst->a.image,
.subresourceRange = (VkImageSubresourceRange){
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0,
.levelCount = 1, .baseArrayLayer = 0, .layerCount = 1,
},
});
dst->current_layout = dst_new_layout;
}
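In use, the intended call for uploading a staged texture appears to be along these lines (a sketch, not code from the diff; the field names come from the GenericModelOnSceneMem struct later in this same diff, and the stage/access masks are the ones the comment above suggests):

/* sketch: pixels were already memcpy'd into the host-visible staging subbuffer */
margaret_rec_cmd_copy_buffer_to_image_one_to_one_color_aspect(
    transfer_cmd_buffer, &mm->staging_diffuse_tex_buf, &mm->diffuse_texture,
    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT);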

#endif

@ -91,5 +91,12 @@ vec3 marie_normal_from_tang_space_gradient(float delt_x, float delta_z) {
return (vec3){-delt_x * N, N, -delta_z * N};
}

mat4 marie_3d_scal_mat4(float scale){
return mat4_new(scale, 0, 0, 0,
0, scale, 0, 0,
0, 0, scale, 0,
0, 0, 0, 1);
}


#endif

@ -361,7 +361,8 @@ void vkCmdCopyImage(
const VkImageCopy* pRegions);


#include "../../margaret/vulkan_memory_claire.h"
// #include "../../margaret/vulkan_memory_claire.h"
// #include "../../margaret/vulkan_me"

int main(){
return 0;

File diff suppressed because it is too large
@ -13,21 +13,29 @@
typedef struct {
vec3 pos;
vec2 tex;
} GenericMeshVertex;
#include "../../../../gen/l1/eve/r0/VecAndSpan_GenericMeshVertex.h"
} GenericMeshVertexInc;

#include "../../../../gen/l1/eve/r0/VecAndSpan_GenericMeshVertexInc.h"

typedef struct {
VecGenericMeshVertex vertices;
GenericMeshVertexInc base;
vec3 norm;
vec3 tang_U;
vec3 tang_V;
} GenericMeshVertex;

typedef struct {
VecGenericMeshVertexInc vertices;
VecU32 indexes;
} GenericMeshTopology;

void GenericMeshTopology_drop(GenericMeshTopology self) {
VecGenericMeshVertex_drop(self.vertices);
VecGenericMeshVertexInc_drop(self.vertices);
VecU32_drop(self.indexes);
}

GenericMeshTopology GenericMeshTopology_clone(const GenericMeshTopology* self) {
return (GenericMeshTopology){.vertices = VecGenericMeshVertex_clone(&self->vertices), .indexes = VecU32_clone(&self->indexes)};
return (GenericMeshTopology){.vertices = VecGenericMeshVertexInc_clone(&self->vertices), .indexes = VecU32_clone(&self->indexes)};
}

typedef struct {
@ -55,35 +63,50 @@ GenericMeshInSceneTemplate GenericMeshInSceneTemplate_clone(const GenericMeshInS

typedef struct {
mat4 model_t;
} GenericMeshInstanceInc;

typedef struct {
GenericMeshInstanceInc base;
mat3 normal_t;
} GenericMeshInstance;


typedef struct {
vec3 pos;
vec3 normal;
} ShinyMeshVertex;
#include "../../../../gen/l1/eve/r0/VecAndSpan_ShinyMeshVertex.h"
} ShinyMeshVertexInc;

typedef struct {
VecShinyMeshVertex vertices;
ShinyMeshVertexInc base;
vec3 normal;
} ShinyMeshVertex;
#include "../../../../gen/l1/eve/r0/VecAndSpan_ShinyMeshVertexInc.h"

typedef struct {
VecShinyMeshVertexInc vertices;
VecU32 indexes;
} ShinyMeshTopology;

void ShinyMeshTopology_drop(ShinyMeshTopology self) {
VecShinyMeshVertex_drop(self.vertices);
VecShinyMeshVertexInc_drop(self.vertices);
VecU32_drop(self.indexes);
}

ShinyMeshTopology ShinyMeshTopology_clone(const ShinyMeshTopology* self) {
return (ShinyMeshTopology){.vertices = VecShinyMeshVertex_clone(&self->vertices), VecU32_clone(&self->indexes)};
return (ShinyMeshTopology){.vertices = VecShinyMeshVertexInc_clone(&self->vertices),
VecU32_clone(&self->indexes)};
}

#include "../../../../gen/l1/eve/r0/VecShinyMeshTopology.h"

typedef struct {
typedef struct{
mat4 model_t;
vec3 color_off;
vec3 color_on;
} ShinyMeshInstanceInc;

typedef struct {
ShinyMeshInstanceInc base;
mat3 normal_t;
} ShinyMeshInstance;

typedef struct {
@ -169,52 +192,62 @@ GenericMeshTopology generate_one_fourth_of_a_cylinder(float w, float r, U32 k) {
assert(k >= 1);
const float a = M_PI_2f / (float)k;
const float l = 2 * r * sinf(M_PI_4f / (float)k);
const vec2 v0tex = {r / (2 * r + w), r / (2 * r + (float)k * l)};
const vec2 v1tex = {(r + w) / (2 * r + w), r / (2 * r + (float)k * l)};
const vec2 v2tex = {r / (2 * r + w), 2 * r / (2 * r + (float)k * l)};
const vec2 v3tex = {(r + w) / (2 * r + w), 2 * r / (2 * r + (float)k * l)};
VecGenericMeshVertex vertices = VecGenericMeshVertex_new_reserved(4 * k + 6);
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {0, 0, 0}, .tex = v0tex});
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {w, 0, 0}, .tex = v1tex});
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {0, r, 0}, .tex = v2tex});
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {w, r, 0}, .tex = v3tex});
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {0, 0, -r}, .tex = {r / (2 * r + w), 0}});
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){.pos = {w, 0, -r}, .tex = {(r + w) / (2 * r + w), 0}});
for (U32 i = 1; i <= k; i++) {
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){
float tex_width = 2 * r + w;
float tex_height = 2 * r + (float)k * l;

const vec2 v0tex = {r / tex_width, r / tex_height};
const vec2 v1tex = {(r + w) / tex_width, r / tex_height};
const vec2 v2tex = {r / tex_width, 2 * r / tex_height};
const vec2 v3tex = {(r + w) / tex_width, 2 * r / tex_height};
VecGenericMeshVertexInc vertices = VecGenericMeshVertexInc_new_reserved(8 + 4 * k + (k + 2) * 2);
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {0, 0, 0}, .tex = v0tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {w, 0, 0}, .tex = v1tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {0, r, 0}, .tex = v2tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {w, r, 0}, .tex = v3tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {0, 0, 0}, .tex = v0tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {w, 0, 0}, .tex = v1tex});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {0, 0, -r}, .tex = {r / tex_width, 0}});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {w, 0, -r}, .tex = {(r + w) / tex_width, 0}});

for (U32 i = 0; i < k; i++) {
for (int j = 0; j < 2; j++) {
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){
.pos = {0, cosf(a * (float)(i + j)) * r, -sinf(a * (float)(i + j)) * r},
.tex = {v2tex.x, v2tex.y + (float)(i + j) * l / tex_height}
});
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){
.pos = {w, cosf(a * (float)(i + j)) * r, -sinf(a * (float)(i + j)) * r},
.tex = {v3tex.x, v3tex.y + (float)(i + j) * l / tex_height}
});
}
}
assert(vertices.len == 8 + 4 * k);

for (U32 i = 0; i <= k; i++) {
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){
.pos = {0, cosf(a * (float)i) * r, -sinf(a * (float)i) * r},
.tex = vec2_add_vec2(v0tex, (vec2){r / (2 * r + w) * -sinf(a * (float)i), r / (2*r + (float)k * l) * cosf(a * (float)i)})
.tex = (vec2){ (r - r *sinf(a * (float)i)) / tex_width, (r + r * cosf(a * (float)i)) / tex_height},
});
}
for (U32 i = 1; i <= k; i++) {
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {0, 0, 0}, .tex = v0tex});
for (U32 i = 0; i <= k; i++) {
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){
.pos = {w, cosf(a * (float)i) * r, -sinf(a * (float)i) * r},
.tex = vec2_add_vec2(v1tex, (vec2){r / (2 * r + w) * sinf(a * (float)i), r / (2*r + (float)k * l) * cosf(a * (float)i)})
.tex = (vec2){ (r + w + r * sinf(a * (float)i)) / tex_width, (r + r * cosf(a * (float)i)) / tex_height},
});
}
for (U32 i = 1; i <= k; i++) {
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){
.pos = {0, cosf(a * (float)i) * r, -sinf(a * (float)i) * r},
.tex = {v2tex.x, v2tex.y + (float)i * l / (2*r + (float)k * l)}
});
}
for (U32 i = 1; i <= k; i++) {
VecGenericMeshVertex_append(&vertices, (GenericMeshVertex){
.pos = {w, cosf(a * (float)i) * r, -sinf(a * (float)i) * r},
.tex = {v3tex.x, v3tex.y + (float)i * l / (2*r + (float)k * l)}
});
}
VecU32 indexes = VecU32_new_reserved(3*(2+2+2*k+2*k));
{
U32 _span_0[] = {5, 1, 0, 5, 0, 4, 1, 3, 0, 3, 2, 0};
VecU32_append_span(&indexes, (SpanU32){.data = _span_0, .len = ARRAY_SIZE(_span_0)});
}
for (U32 i = 1; i <= k; i++) {
VecGenericMeshVertexInc_append(&vertices, (GenericMeshVertexInc){.pos = {w, 0, 0}, .tex = v1tex});
assert(vertices.len == 8 + 4 * k + (k + 2) * 2);

VecU32 indexes = VecU32_new_reserved(3*(4+2*k+2*k));
U32 _span_0[] = {7, 5, 4, 7, 4, 6, 1, 3, 0, 3, 2, 0};
VecU32_append_span(&indexes, (SpanU32){.data = _span_0, .len = ARRAY_SIZE(_span_0)});
for (U32 i = 0; i < k; i++) {
U32 _span_1[] = {
0, i > 1 ? 5 + i - 1 : 2, 5 + i,
1, 5 + k + i, i > 1 ? 5 + k + i - 1 : 3,
i > 1 ? 5 + 2 * k + i - 1 : 2, i > 1 ? 5 + 3 * k + i - 1 : 3, 5 + 2 * k + i,
5 + 3 * k + i, 5 + 2 * k + i, i > 1 ? 5 + 3 * k + i - 1 : 3,
8 + 4 * k + k + 1, 8 + 4 * k + i, 8 + 4 * k + i + 1,
8 + 4 * k + 2 * k + 3, 8 + 4 * k + (k + 2) + i + 1, 8 + 4 * k + (k + 2) + i,
8 + 4 * i + 0, 8 + 4 * i + 1, 8 + 4 * i + 3,
8 + 4 * i + 0, 8 + 4 * i + 3, 8 + 4 * i + 2,
};
VecU32_append_span(&indexes, (SpanU32){.data = _span_1, .len = ARRAY_SIZE(_span_1)});
}
@ -440,8 +473,8 @@ cvec3 Bublazhuzhka_get_color(const Bublazhuzhka* self, vec2 v) {
return (cvec3){121 - p * 2, 30 + p, 65 - p};
}

cvec3 compress_normal_vec_into_norm_texel(vec3 n) {
return (cvec3){(U32)roundf(255 * (n.x + 1) / 2), (U32)roundf(255 * (n.y + 1) / 2), (U32)roundf(255 * (n.z + 1) / 2)};
cvec4 compress_normal_vec_into_norm_texel(vec3 n) {
return (cvec4){(U32)roundf(255 * (n.x + 1) / 2), (U32)roundf(255 * (n.y + 1) / 2), (U32)roundf(255 * (n.z + 1) / 2), 255};
}
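For example, under the new cvec4 variant a straight-up normal n = (0, 1, 0) compresses to the texel (128, 255, 128, 255): each component goes through roundf(255 * (c + 1) / 2) and the alpha channel is pinned to 255.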


@ -459,8 +492,7 @@ typedef struct {
void draw_polygon_on_normal_texture_smooth_param_surf_h_draw_cb(void* ug, S32 x, S32 y, vec4 attr) {
draw_polygon_on_normal_texture_smooth_param_surf_H_DrawGuest* g = ug;
vec3 normal = g->my_client.fn(g->my_client.guest, (vec2){attr.x, attr.y});
cvec3 ans = compress_normal_vec_into_norm_texel(normal);
*TextureDataR8G8B8A8_mat(g->tex, x, y) = (cvec4){ans.x, ans.y, ans.z, 255};
*TextureDataR8G8B8A8_mat(g->tex, x, y) = compress_normal_vec_into_norm_texel(normal);
}

void draw_polygon_on_normal_texture_smooth_param_surf(
@ -483,21 +515,27 @@ typedef struct {
} FnNormalVectorGenExaggParamCallback;

typedef struct {
TextureDataR8G8B8* tex;
TextureDataR8G8B8A8* tex;
FnNormalVectorGenExaggParamCallback my_client;
mat3 BNT_trans;
} draw_polygon_on_normal_texture_exaggerated_param_surf_H_DrawGuest;

void draw_polygon_on_normal_texture_exaggerated_param_surf_draw_cb(void* ug, S32 x, S32 y, vec4 attr) {
draw_polygon_on_normal_texture_exaggerated_param_surf_H_DrawGuest* g = ug;
vec3 normal = g->my_client.fn(g->my_client.guest, (vec3){attr.x, attr.y, attr.z});
*TextureDataR8G8B8_mat(g->tex, x, y) = compress_normal_vec_into_norm_texel(normal);
vec3 tang_normal = mat3_mul_vec3(g->BNT_trans, normal);
*TextureDataR8G8B8A8_mat(g->tex, x, y) = compress_normal_vec_into_norm_texel(tang_normal);
}

/* We can't derive texture coordinates from parameter space coordinates, you have to do it yourself */
/* We can't derive texture coordinates from parameter space coordinates, you have to do it yourself.
* Also, we have to convert normal vector in world space to normal space in tangent space.
* You specify an orthogonal basis of tangent space of that triangle: BNT - { tangent_U, normal vector tangent_ } */
void draw_polygon_on_normal_texture_nat_cords_exaggerated_param_surf(
TextureDataR8G8B8* tex, vec2 ta, vec2 tb, vec2 tc, vec3 pa, vec3 pb, vec3 pc, FnNormalVectorGenExaggParamCallback cb
TextureDataR8G8B8A8* tex, vec2 ta, vec2 tb, vec2 tc, vec3 pa, vec3 pb, vec3 pc, FnNormalVectorGenExaggParamCallback cb,
mat3 BNT
) {
draw_polygon_on_normal_texture_exaggerated_param_surf_H_DrawGuest aboba = {.tex = tex, .my_client = cb};
draw_polygon_on_normal_texture_exaggerated_param_surf_H_DrawGuest aboba = {.tex = tex, .my_client = cb,
.BNT_trans = mat3_transpose(BNT)};
marie_rasterize_triangle_with_attr(
(MariePlaneVertAttr){.pos = ta, .attr = {pa.x, pa.y, pa.z, 0} },
(MariePlaneVertAttr){.pos = tb, .attr = {pb.x, pb.y, pb.z, 0} },
@ -505,32 +543,33 @@ void draw_polygon_on_normal_texture_nat_cords_exaggerated_param_surf(
(FnMarieRasterizerCallback){draw_polygon_on_normal_texture_exaggerated_param_surf_draw_cb, (void*)&aboba});
}
// todo: add a version for that function with non-native coordinate system (on vertex) (like I did with absolutely flat surface)
// todo: also, maybe, add a function to derive BNT and do cool stuff with trop mat3x2


typedef struct {
TextureDataR8G8B8A8* tex;
cvec3 normal_compr;
} draw_polygon_on_normal_texture_absolutely_flat_H_DrawGuest;

void draw_polygon_on_normal_texture_absolutely_flat_h_draw_cb(void* ug, S32 x, S32 y, vec4 attr) {
draw_polygon_on_normal_texture_absolutely_flat_H_DrawGuest* g = ug;
*TextureDataR8G8B8A8_mat(g->tex, x, y) = (cvec4){g->normal_compr.x, g->normal_compr.y, g->normal_compr.z, 255};
*TextureDataR8G8B8A8_mat(g->tex, x, y) = compress_normal_vec_into_norm_texel((vec3){0, 1, 0});
}

void draw_polygon_on_normal_texture_nat_cords_absolutely_flat(TextureDataR8G8B8A8* tex,
vec2 ta, vec2 tb, vec2 tc, vec3 c_normal
vec2 ta, vec2 tb, vec2 tc
) {
draw_polygon_on_normal_texture_absolutely_flat_H_DrawGuest aboba = {tex, compress_normal_vec_into_norm_texel(c_normal)};
draw_polygon_on_normal_texture_absolutely_flat_H_DrawGuest aboba = {tex};
marie_rasterize_triangle_with_attr((MariePlaneVertAttr){.pos = ta}, (MariePlaneVertAttr){.pos = tb},
(MariePlaneVertAttr){.pos = tc}, (FnMarieRasterizerCallback){
.fn = draw_polygon_on_normal_texture_absolutely_flat_h_draw_cb, .guest = (void*)&aboba});
}

// todo: replace it with a "color everything in one color" function
void draw_polygon_on_normal_texture_absolutely_flat(TextureDataR8G8B8A8* tex,
vec2 pa, vec2 pb, vec2 pc, mat3x2 trop, vec3 c_normal
) {
vec2 pa, vec2 pb, vec2 pc, mat3x2 trop
) {
draw_polygon_on_normal_texture_nat_cords_absolutely_flat(tex, mat3x2_mul_vec3(trop, vec2_and_one(pa)),
mat3x2_mul_vec3(trop, vec2_and_one(pb)), mat3x2_mul_vec3(trop, vec2_and_one(pc)), c_normal);
mat3x2_mul_vec3(trop, vec2_and_one(pb)), mat3x2_mul_vec3(trop, vec2_and_one(pc)));
}


@ -541,21 +580,20 @@ typedef struct {
} FnHeightMapGradFlatSurfCallback;

typedef struct {
mat3 surf_orient;
FnHeightMapGradFlatSurfCallback my_client;
} draw_polygon_on_normal_texture_flat_param_surf_H_DrawGuest;

vec3 draw_polygon_on_normal_texture_flat_param_surf_h_draw_cb(void* ug, vec2 p) {
draw_polygon_on_normal_texture_flat_param_surf_H_DrawGuest* g = ug;
vec2 grad = g->my_client.fn(g->my_client.guest, p);
return mat3_mul_vec3(g->surf_orient, marie_normal_from_tang_space_gradient(grad.x, grad.y));
return marie_normal_from_tang_space_gradient(grad.x, grad.y); // todo: remove this cluster, while leaving only this function where it's nee
}

/* The simplest case of normal texture generation: for a smooth flat surface of a polygon */
void draw_polygon_on_normal_texture_flat_param_surf(TextureDataR8G8B8A8* tex, vec2 pa, vec2 pb, vec2 pc, mat3x2 trop,
mat3 surf_orient, FnHeightMapGradFlatSurfCallback height_map_cb
FnHeightMapGradFlatSurfCallback height_map_cb
) {
draw_polygon_on_normal_texture_flat_param_surf_H_DrawGuest aboba = {surf_orient, height_map_cb};
draw_polygon_on_normal_texture_flat_param_surf_H_DrawGuest aboba = {height_map_cb};
draw_polygon_on_normal_texture_smooth_param_surf(tex, pa, pb, pc, trop, (FnNormalVectorGenCallback){
.fn = draw_polygon_on_normal_texture_flat_param_surf_h_draw_cb, .guest = (void*)&aboba});
}
@ -665,34 +703,32 @@ TextureDataR8G8B8A8 generate_normal_tex_for_one_fourth_of_a_cylinder(float s_res

Bublazhuzhka crap_on_the_back_side = fill_rectangle_with_crap(w, r);
mat3x2 trop_back_side = {.x.x = cord_resol.x, .y.y = cord_resol.y, .z = vec2_mul_vec2((vec2){r, r}, cord_resol)};
mat3 orient_back_side = {.x = {1, 0, 0}, .y = {0, 0, 1}, .z = {0, 1, 0}};
draw_polygon_on_normal_texture_flat_param_surf(&res, (vec2){0, 0}, (vec2){w, 0}, (vec2){w, r}, trop_back_side, orient_back_side,
draw_polygon_on_normal_texture_flat_param_surf(&res, (vec2){0, 0}, (vec2){w, 0}, (vec2){w, r}, trop_back_side,
(FnHeightMapGradFlatSurfCallback){.fn = height_map_cb_that_uses_bublazhuzhka, .guest = &crap_on_the_back_side});
draw_polygon_on_normal_texture_flat_param_surf(&res, (vec2){0, 0}, (vec2){0, r}, (vec2){w, r}, trop_back_side, orient_back_side,
draw_polygon_on_normal_texture_flat_param_surf(&res, (vec2){0, 0}, (vec2){0, r}, (vec2){w, r}, trop_back_side,
(FnHeightMapGradFlatSurfCallback){.fn = height_map_cb_that_uses_bublazhuzhka, .guest = &crap_on_the_back_side});
Bublazhuzhka_drop(crap_on_the_back_side);

mat3x2 str = {.x.x = cord_resol.x, .y.y = cord_resol.y};
draw_polygon_on_normal_texture_absolutely_flat(&res, v0tex, v1tex, v4tex, str, (vec3){0, -1, 0});
draw_polygon_on_normal_texture_absolutely_flat(&res, v1tex, v4tex, v5tex, str, (vec3){0, -1, 0});
draw_polygon_on_normal_texture_absolutely_flat(&res, v0tex, v1tex, v4tex, str);
draw_polygon_on_normal_texture_absolutely_flat(&res, v1tex, v4tex, v5tex, str);
for (size_t i = 0; i < k; i++) {
vec2 A = {r - sinf((float)i * a) * r, r + cosf((float)i * a) * r};
vec2 B = {r - sinf((float)(i + 1) * a) * r, r + cosf((float)(i + 1) * a) * r};
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, (vec2){r, r}, str, (vec3){-1, 0, 0});
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, (vec2){r, r}, str);
}
for (size_t i = 0; i < k; i++) {
vec2 A = {r + w + sinf((float)i * a) * r, r + cosf((float)i * a) * r};
vec2 B = {r + w + sinf((float)(i + 1) * a) * r, r + cosf((float)(i + 1) * a) * r};
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, (vec2){r + w, r}, str, (vec3){1, 0, 0});
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, (vec2){r + w, r}, str);
}
for (size_t i = 0; i < k; i++) {
vec2 A = {r, 2 * r + (float)i * l};
vec2 B = {r + w, 2 * r + (float)i * l};
vec2 C = {r, 2 * r + (float)i * l + l};
vec2 D = {r + w, 2 * r + (float)i * l + l};
vec3 n = {0, cosf(a / 2 + a * (float)i), -sinf(a / 2 + a * (float)i)};
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, C, str, n);
draw_polygon_on_normal_texture_absolutely_flat(&res, D, B, C, str, n);
draw_polygon_on_normal_texture_absolutely_flat(&res, A, B, C, str);
draw_polygon_on_normal_texture_absolutely_flat(&res, D, B, C, str);
}
return res;
}
@ -700,39 +736,39 @@ TextureDataR8G8B8A8 generate_normal_tex_for_one_fourth_of_a_cylinder(float s_res
U32 quad_to_triangles_conv_arr[6] = {0, 1, 2, 0, 2, 3};

ShinyMeshTopology generate_shiny_cube(float r) {
ShinyMeshVertex vert[24] = {
{{+r, +r, +r}, {1, 0, 0}},
{{+r, -r, +r}, {1, 0, 0}},
{{+r, -r, -r}, {1, 0, 0}},
{{+r, +r, -r}, {1, 0, 0}},
ShinyMeshVertexInc vert[24] = {
{{+r, +r, +r}},
{{+r, -r, +r}},
{{+r, -r, -r}},
{{+r, +r, -r}},

{{-r, -r, -r}, {-1, 0, 0}},
{{-r, -r, +r}, {-1, 0, 0}},
{{-r, +r, +r}, {-1, 0, 0}},
{{-r, +r, -r}, {-1, 0, 0}},
{{-r, -r, -r}},
{{-r, -r, +r}},
{{-r, +r, +r}},
{{-r, +r, -r}},

{{+r, +r, +r}, {0, 1, 0}},
{{+r, +r, -r}, {0, 1, 0}},
{{-r, +r, -r}, {0, 1, 0}},
{{-r, +r, +r}, {0, 1, 0}},
{{+r, +r, +r}},
{{+r, +r, -r}},
{{-r, +r, -r}},
{{-r, +r, +r}},

{{-r, -r, -r}, {0, -1, 0}},
{{+r, -r, -r}, {0, -1, 0}},
{{+r, -r, +r}, {0, -1, 0}},
{{-r, -r, +r}, {0, -1, 0}},
{{-r, -r, -r}},
{{+r, -r, -r}},
{{+r, -r, +r}},
{{-r, -r, +r}},

{{+r, +r, +r}, {0, 0, 1}},
{{-r, +r, +r}, {0, 0, 1}},
{{-r, -r, +r}, {0, 0, 1}},
{{+r, -r, +r}, {0, 0, 1}},
{{+r, +r, +r}},
{{-r, +r, +r}},
{{-r, -r, +r}},
{{+r, -r, +r}},

{{-r, -r, -r}, {0, 0, -1}},
{{-r, +r, -r}, {0, 0, -1}},
{{+r, +r, -r}, {0, 0, -1}},
{{+r, -r, -r}, {0, 0, -1}},
{{-r, -r, -r}},
{{-r, +r, -r}},
{{+r, +r, -r}},
{{+r, -r, -r}},
};
VecShinyMeshVertex vertices_vec = VecShinyMeshVertex_new_zeroinit(24);
memcpy(vertices_vec.buf, vert, sizeof(vert));
VecShinyMeshVertexInc vertices_vec = VecShinyMeshVertexInc_from_span(
(SpanShinyMeshVertexInc){ .data = vert, .len = ARRAY_SIZE(vert) });
VecU32 indexes_vec = VecU32_new_reserved(36);
for (U32 f = 0; f < 6; f++) {
for (U32 j = 0; j < 6; j++)
@ -767,49 +803,6 @@ CubeVertOfFace CubeVertOfFace_next(CubeVertOfFace vert) {
return (CubeVertOfFace){vert.face, (vert.vert_on_it + 1) % 4};
}

ShinyMeshTopology generate_shiny_rhombicuboctahedron(float r) {
ShinyMeshTopology res = generate_shiny_cube(r);
for (int f = 0; f < 6; f++) {
vec3 growth = vec3_mul_scal((*VecShinyMeshVertex_at(&res.vertices, f * 4)).normal, M_SQRT1_2);
for (int i = 0; i < 4; i++) {
vec3* pos = &VecShinyMeshVertex_mat(&res.vertices, f * 4 + i)->pos;
*pos = vec3_add_vec3(*pos, growth);
}
}
for (int f = 0; f < 6; f++) {
for (int i = 0; i < 2; i++) {
CubeVertOfFace vof = {f, 2*i+(f%2)};
ShinyMeshVertex A = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(vof));
ShinyMeshVertex B = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(CubeVertOfFace_next(CubeVertOfFace_jump(vof))));
ShinyMeshVertex C = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(CubeVertOfFace_jump(vof)));
ShinyMeshVertex D = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(CubeVertOfFace_next(vof)));
vec3 norm = vec3_normalize(vec3_add_vec3(A.normal, B.normal));
ShinyMeshVertex quad_v[4] = {{A.pos, norm}, {B.pos, norm}, {C.pos, norm}, {D.pos, norm}};
size_t b = res.vertices.len;
VecShinyMeshVertex_append_span(&res.vertices, (SpanShinyMeshVertex){quad_v, ARRAY_SIZE(quad_v)});
for (U32 j = 0; j < 6; j++)
VecU32_append(&res.indexes, b + quad_to_triangles_conv_arr[j]);
}
}
for (int f = 0; f < 2; f++) {
for (int e = 0; e < 4; e++) {
CubeVertOfFace vof = {f, e};
ShinyMeshVertex A = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(CubeVertOfFace_next(vof)));
ShinyMeshVertex B = *VecShinyMeshVertex_at(&res.vertices, CubeVertOfFace_to_vid(CubeVertOfFace_jump(vof)));
ShinyMeshVertex C = *VecShinyMeshVertex_at(&res.vertices,
CubeVertOfFace_to_vid(CubeVertOfFace_next(CubeVertOfFace_jump(CubeVertOfFace_next(vof)))));
vec3 norm = vec3_normalize(vec3_add_vec3(A.normal, vec3_add_vec3(B.normal, C.normal)));

ShinyMeshVertex ang_v[3] = {{A.pos, norm}, {B.pos, norm}, {C.pos, norm}};
size_t b = res.vertices.len;
VecShinyMeshVertex_append_span(&res.vertices, (SpanShinyMeshVertex){ang_v, ARRAY_SIZE(ang_v)});
for (int i = 0; i < 3; i++)
VecU32_append(&res.indexes, b + i);
}
}
return res;
}

GenericMeshInSceneTemplate GenericMeshInSceneTemplate_for_log(U32 w, U32 r, U32 k) {
return (GenericMeshInSceneTemplate){.topology = generate_one_fourth_of_a_cylinder((float)w, (float)r, k),
.diffuse_texture_path = VecU8_format("textures/log_%u_%u_%u_diffuse.png", w, r, k),

@ -5,50 +5,82 @@

#include "../../margaret/vulkan_utils.h"

typedef struct {
U64 count;
MargaretSubbuf staging_busy;
MargaretSubbuf staging_updatable;
MargaretSubbuf device_local;
U64 cap; /* All 3 buffers are synced to the same capacity */
} PatriciaBuf;

void PatriciaBuf_swap_staging(PatriciaBuf* self){
MargaretSubbuf t = self->staging_updatable;
self->staging_updatable = self->staging_busy;
self->staging_busy = t;
}
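Read together with the struct, PatriciaBuf looks like a double-buffered staging scheme: one staging subbuffer can still be consumed by an in-flight transfer while the other is written by the CPU, and device_local is the copy destination the GPU actually reads. A plausible per-frame update, as a sketch rather than code from the diff (model is assumed to be a GenericModelOnSceneMem defined just below):

/* sketch: fill the CPU-side half, record the copy into the device-local buffer, then swap */
GenericMeshInstance* dst = (GenericMeshInstance*)MargaretSubbuf_get_mapped(&model->instance_attr.staging_updatable);
/* ... write dst[0 .. model->instance_attr.count) ... */
margaret_rec_cmd_copy_buffer_one_to_one(cmd_buf,
    &model->instance_attr.staging_updatable, &model->instance_attr.device_local);
PatriciaBuf_swap_staging(&model->instance_attr);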

typedef struct {
size_t indexes;
U64 instance_vec_len;
U64 instance_vec_capacity;

MargaretSubbuf staging_vbo;
MargaretSubbuf staging_ebo;

// todo: replace TextureDataXXX with MargaretPngPromises
MargaretMAIterator staging_vbo;
MargaretMAIterator staging_ebo;
MargaretMAIterator staging_instance_attr_buf;

TextureDataR8G8B8A8 pixels_diffuse;
TextureDataR8G8B8A8 pixels_normal;
TextureDataR8 pixels_specular;

MargaretMAIterator staging_diffuse_tex_buf;
MargaretMAIterator staging_normal_tex_buf;
MargaretMAIterator staging_specular_tex_buf;
MargaretSubbuf staging_diffuse_tex_buf;
MargaretSubbuf staging_normal_tex_buf;
MargaretSubbuf staging_specular_tex_buf;

MargaretMAIterator vbo;
MargaretMAIterator ebo;
MargaretMAIterator instance_attr_buf;
MargaretMAIterator diffuse_texture;
MargaretMAIterator normal_texture;
MargaretMAIterator specular_texture;
MargaretSubbuf vbo;
MargaretSubbuf ebo;
PatriciaBuf instance_attr;

// todo: store dimensions of these images
MargaretImg diffuse_texture;
MargaretImg normal_texture;
MargaretImg specular_texture;
} GenericModelOnSceneMem;

#include "../../../../gen/l1/eve/r0/VecGenericModelOnSceneMem.h"

void GenericModelOnSceneMem_set(GenericModelOnSceneMem* self, size_t instance, GenericMeshInstanceInc uncomp){
assert(instance < self->instance_attr.count);
GenericMeshInstance* staging = (GenericMeshInstance*)MargaretSubbuf_get_mapped(&self->instance_attr.staging_updatable);
staging[instance].base = uncomp;
mat4 tr_inv = mat4_transpose(mat4_inverse(uncomp.model_t));
staging[instance].normal_t = mat3_new(
tr_inv.x.x, tr_inv.y.x, tr_inv.z.x,
tr_inv.x.y, tr_inv.y.y, tr_inv.z.y,
tr_inv.x.z, tr_inv.y.z, tr_inv.z.z );
}

typedef struct {
size_t indexes;
U64 instance_vec_capacity;
U64 instance_vec_len;

MargaretMAIterator staging_vbo;
MargaretMAIterator staging_ebo;
MargaretMAIterator staging_instance_attr_buf;
MargaretSubbuf staging_vbo;
MargaretSubbuf staging_ebo;

MargaretMAIterator vbo;
MargaretMAIterator ebo;
MargaretMAIterator instance_attr_buf;
MargaretSubbuf vbo;
MargaretSubbuf ebo;
PatriciaBuf instance_attr;
} ShinyModelOnSceneMem;

#include "../../../../gen/l1/eve/r0/VecShinyModelOnSceneMem.h"

void ShinyModelOnSceneMem_set(ShinyModelOnSceneMem* self, size_t instance, ShinyMeshInstanceInc uncomp){
assert(instance < self->instance_attr.count);
ShinyMeshInstance* staging = (ShinyMeshInstance*)MargaretSubbuf_get_mapped(&self->instance_attr.staging_updatable);
staging[instance].base = uncomp;
mat4 tr_inv = mat4_transpose(mat4_inverse(uncomp.model_t));
staging[instance].normal_t = mat3_new(
tr_inv.x.x, tr_inv.y.x, tr_inv.z.x,
tr_inv.x.y, tr_inv.y.y, tr_inv.z.y,
tr_inv.x.z, tr_inv.y.z, tr_inv.z.z );
}

typedef struct {
float fov;
mat3 cam_basis;
@ -86,7 +118,7 @@ void CamControlInfo_up(CamControlInfo* self, float fl) {
CamControlInfo CamControlInfo_new() {
return (CamControlInfo){
.fov = 1.5f, .cam_basis = marie_simple_camera_rot_m_basis_in_cols(0, 0, 0), .pos = {0, 0, 0},
.speed = 2.7f, .sensitivity = 0.5f * M_PIf / 180, .pitch_cap = M_PIf * 0.49f
.speed = 6.7f, .sensitivity = 0.5f * M_PIf / 180, .pitch_cap = M_PIf * 0.49f
};
}

@ -99,30 +131,104 @@ void CamControlInfo_update_direction(CamControlInfo* self, int win_width, int wi
self->cam_basis = marie_simple_camera_rot_m_basis_in_cols(yaw, pitch, 0);
}

typedef struct {
MargaretSubbuf staging_busy;
MargaretSubbuf staging_updatable;
MargaretSubbuf device_local;
} Pipeline0Transfer;

// Just for a test in r0
typedef struct {
mat3 rotation;
vec3 pos;
float scale;
vec3 color_on;
} ObjectInfo;

#include "../../../../gen/l1/eve/r0/VecObjectInfo.h"

/* Non copyable */
typedef struct {
VecGenericModelOnSceneMem generic_models;
VecShinyModelOnSceneMem shiny_models;

VkClearColorValue color;
float gamma_correction_factor;
float hdr_factor;
float lsd_factor;
float anim_time; // A timer, passed to functions that push push constants
MargaretMAIterator pipeline0_staging_ubo;
MargaretMAIterator pipeline0_ubo;

/* point_light_vec_len and spotlight_vec_len are stored in staging (and also device local) buffers */
Pipeline0Transfer pipeline0_ubo;

CamControlInfo cam;
vec3 funny_vector;

VecObjectInfo smeshnyavka_1;
VecObjectInfo smeshnyavka_2;
VecObjectInfo smeshnyavka_3;
} Scene;

ShinyMeshInstanceInc ShinyMeshInstanceInc_from_ObjectInfo(const ObjectInfo* oi){
return (ShinyMeshInstanceInc){
.model_t = mat4_mul_mat4(marie_translation_mat4(oi->pos),
mat4_mul_mat4(marie_3d_scal_mat4(oi->scale), marie_mat3_to_mat4(oi->rotation))),
.color_on = oi->color_on, .color_off = {1, 0.4f, 0.5f}
};
}

// todo: remove this shit
void Scene_add_smeshnyavka_3(Scene* self, ObjectInfo oi){
ShinyModelOnSceneMem* model_sh = VecShinyModelOnSceneMem_mat(&self->shiny_models, 0);
size_t ni = self->smeshnyavka_3.len;
assert(ni < model_sh->instance_attr.cap);
VecObjectInfo_append(&self->smeshnyavka_3, oi);
model_sh->instance_attr.count = ni + 1;
ShinyModelOnSceneMem_set(model_sh, ni, ShinyMeshInstanceInc_from_ObjectInfo(&oi));
}

// todo: remove this shit
void Scene_update_smeshnyavka_3(Scene* self, size_t sh_id){
assert(sh_id < self->smeshnyavka_3.len);
const ObjectInfo* oi = VecObjectInfo_at(&self->smeshnyavka_3, sh_id);
ShinyModelOnSceneMem* model_sh = VecShinyModelOnSceneMem_mat(&self->shiny_models, 0);
ShinyModelOnSceneMem_set(model_sh, sh_id, ShinyMeshInstanceInc_from_ObjectInfo(oi));
}

GenericMeshInstanceInc GenericMeshInstanceInc_from_ObjectInfo(const ObjectInfo* oi){
return (GenericMeshInstanceInc){
.model_t = mat4_mul_mat4(marie_translation_mat4(oi->pos),
mat4_mul_mat4(marie_3d_scal_mat4(oi->scale), marie_mat3_to_mat4(oi->rotation))),
};
}

// todo: remove this shit
void Scene_add_smeshnyavka_1(Scene* self, ObjectInfo oi){
GenericModelOnSceneMem* model = VecGenericModelOnSceneMem_mat(&self->generic_models, 0);
size_t ni = self->smeshnyavka_1.len;
assert(ni < model->instance_attr.cap);
VecObjectInfo_append(&self->smeshnyavka_1, oi);
model->instance_attr.count = ni + 1;
GenericModelOnSceneMem_set(model, ni, GenericMeshInstanceInc_from_ObjectInfo(&oi));
}

// todo: remove this shit
void Scene_update_smeshnyavka_1(Scene* self, size_t sh_id){
assert(sh_id < self->smeshnyavka_1.len);
const ObjectInfo* oi = VecObjectInfo_at(&self->smeshnyavka_1, sh_id);
GenericModelOnSceneMem* model = VecGenericModelOnSceneMem_mat(&self->generic_models, 0);
GenericModelOnSceneMem_set(model, sh_id, GenericMeshInstanceInc_from_ObjectInfo(oi));
}



Scene Scene_new(VecGenericModelOnSceneMem generic_models, VecShinyModelOnSceneMem shiny_models,
MargaretMAIterator pipeline0_staging_ubo, MargaretMAIterator pipeline0_ubo) {
Pipeline0Transfer pipeline0_ubo) {
return (Scene){.generic_models = generic_models, .shiny_models = shiny_models,
.color = {.float32 = {0, 0, 0, 1}},
.gamma_correction_factor = 2.2f, .hdr_factor = 1, .lsd_factor = 0, .anim_time = 0,
.pipeline0_staging_ubo = pipeline0_staging_ubo, .pipeline0_ubo = pipeline0_ubo,
.cam = CamControlInfo_new(), .funny_vector = {0, 0, 0}
.pipeline0_ubo = pipeline0_ubo, .cam = CamControlInfo_new(),
.smeshnyavka_1 = VecObjectInfo_new(), .smeshnyavka_2 = VecObjectInfo_new(),
.smeshnyavka_3 = VecObjectInfo_new(), // todo: remove this shit and rewrite everything in haskell
};
}

@ -142,54 +248,75 @@ void SceneTemplate_copy_initial_model_topology_cmd_buf_recording(
const GenericMeshInSceneTemplate* mt = VecGenericMeshInSceneTemplate_at(&scene_template->generic_models, mi);
const GenericModelOnSceneMem *mm = VecGenericModelOnSceneMem_at(&scene->generic_models, mi);

size_t vbo_len = mt->topology.vertices.len * sizeof(GenericMeshVertex);
GenericMeshVertex* staging_vbo = (GenericMeshVertex*)MargaretMAIterator_get_mapped(mm->staging_vbo);
memcpy(staging_vbo, mt->topology.vertices.buf, vbo_len);
vkCmdCopyBuffer(command_buffer, mm->staging_vbo->value.me.buf.buffer, mm->vbo->value.me.buf.buffer,
1, &(VkBufferCopy){ .srcOffset = 0, .dstOffset = 0, .size = vbo_len});
assert(mm->staging_vbo.len >= mt->topology.vertices.len * sizeof(GenericMeshVertex));
assert(mm->vbo.len >= mt->topology.vertices.len * sizeof(GenericMeshVertex));
GenericMeshVertex* staging_vbo = (GenericMeshVertex*)MargaretSubbuf_get_mapped(&mm->staging_vbo);
for (U64 i = 0; i < mt->topology.vertices.len; i++) {
staging_vbo[i].base = mt->topology.vertices.buf[i];
}
assert(mt->topology.indexes.len % 3 == 0);
for (size_t ti = 0; ti * 3 < mt->topology.indexes.len; ti++) {
U32 v0 = mt->topology.indexes.buf[ti * 3 + 0];
U32 v1 = mt->topology.indexes.buf[ti * 3 + 1];
U32 v2 = mt->topology.indexes.buf[ti * 3 + 2];
const GenericMeshVertexInc* A0 = VecGenericMeshVertexInc_at(&mt->topology.vertices, v0);
const GenericMeshVertexInc* A1 = VecGenericMeshVertexInc_at(&mt->topology.vertices, v1);
const GenericMeshVertexInc* A2 = VecGenericMeshVertexInc_at(&mt->topology.vertices, v2);
vec3 dp1 = vec3_minus_vec3(A1->pos, A0->pos);
vec3 dp2 = vec3_minus_vec3(A2->pos, A0->pos);
float du1 = A1->tex.x - A0->tex.x;
float dv1 = A1->tex.y - A0->tex.y;
float du2 = A2->tex.x - A0->tex.x;
float dv2 = A2->tex.y - A0->tex.y;
vec3 norm = vec3_normalize(vec3_cross(dp1, dp2));
mat2x3 tang_U_V = mat3x2_transpose(mat2_mul_mat3x2(
mat2_inverse(mat2_new(du1, dv1, du2, dv2)),
mat2x3_transpose((mat2x3){.x = dp1, .y = dp2})
));
staging_vbo[v0].norm = staging_vbo[v1].norm = staging_vbo[v2].norm = norm;
staging_vbo[v0].tang_U = staging_vbo[v1].tang_U = staging_vbo[v2].tang_U = tang_U_V.x;
staging_vbo[v0].tang_V = staging_vbo[v1].tang_V = staging_vbo[v2].tang_V = tang_U_V.y;
}
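The tang_U / tang_V expression above is the standard per-triangle tangent solve: with dp1 = du1*T + dv1*B and dp2 = du2*T + dv2*B, the tangent pair is [T; B] = [[du1, dv1], [du2, dv2]]^-1 * [dp1; dp2], and the mat2_inverse / mat2_mul_mat3x2 / transpose chain evaluates exactly that, with tang_U_V.x playing the role of T and tang_U_V.y the role of B.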
|
||||
margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_vbo, &mm->vbo);
|
||||
|
||||
assert(mt->topology.indexes.len == mm->indexes);
|
||||
size_t ebo_len = mt->topology.indexes.len * sizeof(U32);
|
||||
U32* staging_ebo = (U32*)MargaretMAIterator_get_mapped(mm->staging_ebo);
|
||||
assert(mm->ebo.len >= ebo_len);
|
||||
U32* staging_ebo = (U32*)MargaretSubbuf_get_mapped(&mm->staging_ebo);
|
||||
memcpy(staging_ebo, mt->topology.indexes.buf, ebo_len);
|
||||
vkCmdCopyBuffer(command_buffer, mm->staging_ebo->value.me.buf.buffer, mm->ebo->value.me.buf.buffer,
|
||||
1, &(VkBufferCopy){.srcOffset = 0, .dstOffset = 0, .size = ebo_len});
|
||||
margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_ebo, &mm->ebo);
|
||||
}
|
||||
|
||||
    for (size_t mi = 0; mi < scene_template->shiny_models.len; mi++) {
        const ShinyMeshTopology* mt = VecShinyMeshTopology_at(&scene_template->shiny_models, mi);
        const ShinyModelOnSceneMem *mm = VecShinyModelOnSceneMem_at(&scene->shiny_models, mi);

        size_t vbo_len = mt->vertices.len * sizeof(ShinyMeshVertex);
        ShinyMeshVertex* staging_vbo = (ShinyMeshVertex*)MargaretMAIterator_get_mapped(mm->staging_vbo);
        memcpy(staging_vbo, mt->vertices.buf, vbo_len);
        vkCmdCopyBuffer(command_buffer, mm->staging_vbo->value.me.buf.buffer, mm->vbo->value.me.buf.buffer,
                        1, &(VkBufferCopy){ .srcOffset = 0, .dstOffset = 0, .size = vbo_len});
        assert(mm->staging_vbo.len >= mt->vertices.len * sizeof(ShinyMeshVertex));
        assert(mm->vbo.len >= mt->vertices.len * sizeof(ShinyMeshVertex));
        ShinyMeshVertex* staging_vbo = (ShinyMeshVertex*)MargaretSubbuf_get_mapped(&mm->staging_vbo);
        for (U64 i = 0; i < mt->vertices.len; i++) {
            staging_vbo[i].base = mt->vertices.buf[i];
        }
        assert(mt->indexes.len % 3 == 0);
        for (size_t ti = 0; ti * 3 < mt->indexes.len; ti++) {
            U32 v0 = mt->indexes.buf[ti * 3 + 0];
            U32 v1 = mt->indexes.buf[ti * 3 + 1];
            U32 v2 = mt->indexes.buf[ti * 3 + 2];
            vec3 p0 = VecShinyMeshVertexInc_at(&mt->vertices, v0)->pos;
            vec3 p1 = VecShinyMeshVertexInc_at(&mt->vertices, v1)->pos;
            vec3 p2 = VecShinyMeshVertexInc_at(&mt->vertices, v2)->pos;
            vec3 norm = vec3_normalize(vec3_cross(vec3_minus_vec3(p1, p0), vec3_minus_vec3(p2, p0)));
            staging_vbo[v0].normal = staging_vbo[v1].normal = staging_vbo[v2].normal = norm;
        }

        margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_vbo, &mm->vbo);

        assert(mt->indexes.len == mm->indexes);
        size_t ebo_len = mt->indexes.len * sizeof(U32);
        U32* staging_ebo = (U32*)MargaretMAIterator_get_mapped(mm->staging_ebo);
        assert(mm->ebo.len >= ebo_len);
        U32* staging_ebo = (U32*)MargaretSubbuf_get_mapped(&mm->staging_ebo);
        memcpy(staging_ebo, mt->indexes.buf, ebo_len);
        vkCmdCopyBuffer(command_buffer, mm->staging_ebo->value.me.buf.buffer, mm->ebo->value.me.buf.buffer,
                        1, &(VkBufferCopy){.srcOffset = 0, .dstOffset = 0, .size = ebo_len});
    }

    for (size_t mi = 0; mi < scene_template->generic_models.len; mi++) {
        const GenericMeshInSceneTemplate* mt = VecGenericMeshInSceneTemplate_at(&scene_template->generic_models, mi);
        const GenericModelOnSceneMem *mm = VecGenericModelOnSceneMem_at(&scene->generic_models, mi);

        size_t vbo_len = mt->topology.vertices.len * sizeof(GenericMeshVertex);
        GenericMeshVertex* staging_vbo = (GenericMeshVertex*)MargaretMAIterator_get_mapped(mm->staging_vbo);
        memcpy(staging_vbo, mt->topology.vertices.buf, vbo_len);
        vkCmdCopyBuffer(command_buffer, mm->staging_vbo->value.me.buf.buffer, mm->vbo->value.me.buf.buffer,
                        1, &(VkBufferCopy){ .srcOffset = 0, .dstOffset = 0, .size = vbo_len});

        assert(mt->topology.indexes.len == mm->indexes);
        size_t ebo_len = mt->topology.indexes.len * sizeof(U32);
        U32* staging_ebo = (U32*)MargaretMAIterator_get_mapped(mm->staging_ebo);
        memcpy(staging_ebo, mt->topology.indexes.buf, ebo_len);
        vkCmdCopyBuffer(command_buffer, mm->staging_ebo->value.me.buf.buffer, mm->ebo->value.me.buf.buffer,
                        1, &(VkBufferCopy){.srcOffset = 0, .dstOffset = 0, .size = ebo_len});
        margaret_rec_cmd_copy_buffer_one_to_one(command_buffer, &mm->staging_ebo, &mm->ebo);
    }
}

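Note on the tangent setup in the SceneTemplate_copy_initial_model_topology_cmd_buf_recording hunk above: for each triangle the position deltas and UV deltas satisfy dp1 = du1*T + dv1*B and dp2 = du2*T + dv2*B, so the tangent T (tang_U) and bitangent B (tang_V) are recovered by applying the inverse of the 2x2 UV-delta matrix, which is what the mat2_inverse / mat2_mul_mat3x2 chain computes. Below is a minimal standalone sketch of that solve with plain float[3] vectors; the helper name solve_tangent_bitangent is illustrative and not part of this codebase.

#include <stdio.h>

/* Solve [dp1; dp2] = [[du1, dv1], [du2, dv2]] * [T; B] for the tangent T and bitangent B. */
static void solve_tangent_bitangent(const float dp1[3], const float dp2[3],
                                    float du1, float dv1, float du2, float dv2,
                                    float T[3], float B[3]) {
    float det = du1 * dv2 - dv1 * du2;   /* determinant of the UV-delta matrix */
    float inv = 1.0f / det;              /* assumes non-degenerate UVs */
    for (int i = 0; i < 3; i++) {
        T[i] = inv * ( dv2 * dp1[i] - dv1 * dp2[i]);
        B[i] = inv * (-du2 * dp1[i] + du1 * dp2[i]);
    }
}

int main(void) {
    /* Axis-aligned example: edges line up with the UV axes, so T = dp1 and B = dp2. */
    float dp1[3] = {1, 0, 0}, dp2[3] = {0, 0, 1}, T[3], B[3];
    solve_tangent_bitangent(dp1, dp2, 1, 0, 0, 1, T, B);
    printf("T = (%g, %g, %g), B = (%g, %g, %g)\n", T[0], T[1], T[2], B[0], B[1], B[2]);
    return 0;
}
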
@ -1,6 +1,5 @@
#include "r0_assets.h"
#include "../../marie/rasterization.h"
// #include "../../margaret/png_pixel_masses.h" // todo: delete this file
#include "../../../../gen/l1/margaret/png_pixel_masses.h"
#include "../../marie/texture_processing.h"

@ -1,9 +1,12 @@
#version 450

layout(location = 0) in vec2 fsin_tex;
layout(location = 1) in vec3 fsin_pos;
layout(location = 0) in vec3 tang_norm;
layout(location = 1) in vec3 tang_U;
layout(location = 2) in vec3 tang_V;
layout(location = 3) in vec2 tex;
layout(location = 4) in vec3 pos;

/* Righ now all in set 0 */
/* Right now all in set 0 */
layout(location = 0) out vec4 fin_color;
/* Yes, even these guys */
layout(binding = 1) uniform sampler2D color_tex;
@ -38,18 +41,19 @@ float get_intensity(float dist){
}

void main(){
    vec3 compressed_normal = texture(normal_map, fsin_tex).xyz;
    vec3 norm = compressed_normal * 2 - 1;
    vec3 compressed_normal = texture(normal_map, tex).xyz;
    vec3 correct_norm_on_tang = compressed_normal * 2 - 1;
    vec3 norm = normalize(mat3(tang_U, tang_norm, tang_V) * correct_norm_on_tang);
    vec3 diffuse_illumination = vec3(0);
    vec3 specular_illumination = vec3(0);
    for (int i = 0; i < point_light_count; i++) {
        Pipeline0PointLight lamp = point_light_arr[i];
        vec3 to_light = -fsin_pos + lamp.pos;
        vec3 to_light = -pos + lamp.pos;
        float dist = length(to_light);
        vec3 U = to_light / dist;
        diffuse_illumination += get_intensity(dist) * max(0.02, dot(U, norm)) * lamp.color;
        diffuse_illumination += get_intensity(dist) * max(0, dot(U, norm)) * lamp.color;
        vec3 A = reflect(-U, norm);
        vec3 to_cam = -fsin_pos+camera_pos;
        vec3 to_cam = -pos+camera_pos;
        float dist_to_cam = length(to_cam);
        vec3 B = to_cam / dist_to_cam;
        specular_illumination += get_intensity(dist) * pow(max(0, dot(A, B)), 32) * lamp.color;
@ -57,8 +61,8 @@ void main(){
    for (int i = 0; i < spotlight_count; i++) {
        Pipeline0Spotlight lamp = spotlight_arr[i];
    }
    vec3 natural_color = texture(color_tex, fsin_tex).xyz;
    float specular_c = texture(specular_map, fsin_tex).x;
    vec3 natural_color = texture(color_tex, tex).xyz;
    float specular_c = texture(specular_map, tex).x;
    vec3 color = natural_color * diffuse_illumination + specular_c * specular_illumination;
    fin_color = vec4(color, 1);
}

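For context on the fragment shader change above: the normal map stores a tangent-space normal remapped into [0, 1], so the shader decodes it with n = 2*t - 1 and rotates it into world space through the interpolated basis mat3(tang_U, tang_norm, tang_V). A small C sketch of the same decode follows, assuming plain float[3] vectors; the names are illustrative only, not the project's API.

#include <math.h>
#include <stdio.h>

/* Decode a tangent-space normal stored in [0, 1] and rotate it into world space.
 * Column order (U, N, V) mirrors mat3(tang_U, tang_norm, tang_V) in the shader. */
static void decode_normal(const float texel[3], const float tang_U[3],
                          const float tang_norm[3], const float tang_V[3],
                          float out[3]) {
    float n[3], len = 0;
    for (int i = 0; i < 3; i++)
        n[i] = texel[i] * 2.0f - 1.0f;   /* [0, 1] -> [-1, 1] */
    for (int i = 0; i < 3; i++) {
        out[i] = n[0] * tang_U[i] + n[1] * tang_norm[i] + n[2] * tang_V[i];
        len += out[i] * out[i];
    }
    len = sqrtf(len);
    for (int i = 0; i < 3; i++)
        out[i] /= len;                   /* renormalize after interpolation */
}

int main(void) {
    /* A texel of (0.5, 1, 0.5) decodes to (0, 1, 0): the unperturbed surface normal. */
    float U[3] = {1, 0, 0}, N[3] = {0, 1, 0}, V[3] = {0, 0, 1}, out[3];
    decode_normal((float[3]){0.5f, 1.0f, 0.5f}, U, N, V, out);
    printf("world normal = (%g, %g, %g)\n", out[0], out[1], out[2]);
    return 0;
}
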
@ -2,19 +2,31 @@

layout(location = 0) in vec3 pos;
layout(location = 1) in vec2 tex;
layout(location = 2) in mat4 model_t;
/* 2 <- 3, 4, 5 */
layout(location = 2) in vec3 norm;
layout(location = 3) in vec3 tang_U;
layout(location = 4) in vec3 tang_V;

layout(location = 0) out vec2 vsout_tex;
layout(location = 1) out vec3 vsout_pos;
layout(location = 5) in mat4 model_t;
/* 5 <- 6, 7, 8 */
layout(location = 9) in mat3 normal_t;
/* 9 <- 10, 11 */

layout(location = 0) out vec3 out_norm;
layout(location = 1) out vec3 out_tang_U;
layout(location = 2) out vec3 out_tang_V;
layout(location = 3) out vec2 out_tex;
layout(location = 4) out vec3 out_pos;

layout(push_constant, std430) uniform pc {
    mat4 proj_cam_t;
};

void main(){
    vsout_tex = tex;
    out_norm = normalize(normal_t * norm);
    out_tang_U = normalize(normal_t * tang_U);
    out_tang_V = normalize(normal_t * tang_V);
    out_tex = tex;
    vec4 real_pos = model_t * vec4(pos, 1);
    vsout_pos = real_pos.xyz;
    out_pos = real_pos.xyz;
    gl_Position = proj_cam_t * real_pos;
}

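The normal_t instance attribute introduced above is the usual normal matrix: normals and tangents are directions, so under a model matrix with non-uniform scale they must be transformed by the inverse-transpose of its upper-left 3x3 block (and renormalized) to stay perpendicular to the surface. Below is a hedged CPU-side sketch using the cofactor form inverse(M)^T = cof(M) / det(M); the Mat3 layout here is an assumption, not the project's actual type.

#include <stdio.h>

typedef struct { float c[3][3]; } Mat3;   /* c[col][row]: columns c0, c1, c2 */

static void cross3(const float a[3], const float b[3], float out[3]) {
    out[0] = a[1] * b[2] - a[2] * b[1];
    out[1] = a[2] * b[0] - a[0] * b[2];
    out[2] = a[0] * b[1] - a[1] * b[0];
}

/* Normal matrix = inverse-transpose of M = cofactor(M) / det(M).
 * For M = [c0 c1 c2], the cofactor columns are c1 x c2, c2 x c0, c0 x c1. */
static Mat3 normal_matrix(Mat3 m) {
    Mat3 n;
    cross3(m.c[1], m.c[2], n.c[0]);
    cross3(m.c[2], m.c[0], n.c[1]);
    cross3(m.c[0], m.c[1], n.c[2]);
    float det = m.c[0][0] * n.c[0][0] + m.c[0][1] * n.c[0][1] + m.c[0][2] * n.c[0][2];
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
            n.c[i][j] /= det;
    return n;
}

int main(void) {
    /* Non-uniform scale (2, 1, 1): the normal matrix comes out as diag(1/2, 1, 1). */
    Mat3 m = {{{2, 0, 0}, {0, 1, 0}, {0, 0, 1}}};
    Mat3 n = normal_matrix(m);
    printf("diag = %g %g %g\n", n.c[0][0], n.c[1][1], n.c[2][2]);
    return 0;
}
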
@ -42,15 +42,14 @@ void main(){
        vec3 to_light = -pos + lamp.pos;
        float dist = length(to_light);
        vec3 U = to_light / dist;
        diffuse_illumination += get_intensity(dist) * max(0.02, dot(U, norm)) * lamp.color;
        diffuse_illumination += get_intensity(dist) * max(0, dot(U, norm)) * lamp.color;
        vec3 A = reflect(-U, norm);
        vec3 B = normalize(-pos+camera_pos);
        specular_illumination += get_intensity(dist) * pow(max(0, dot(A, B)), 256) * lamp.color;
        // specular_illumination += get_intensity(dist) * pow(max(0, dot(A, B)), 256) * lamp.color;
    }
    for (int i = 0; i < spotlight_count; i++) {
        Pipeline0Spotlight lamp = spotlight_arr[i];
    }
    vec3 color = color_off * diffuse_illumination + 0.5 * specular_illumination + color_on;
    fin_color = vec4(color, 1);
    // fin_color = vec4(length(norm) / 2, 0, 0, 1);
}

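The lighting loop above is a standard point-light Phong accumulation: per light, diffuse scales with max(0, dot(N, L)), specular with pow(max(0, dot(R, V)), shininess), and both are attenuated with distance via get_intensity (the specular term is commented out for the shiny pipeline in this revision). A small C sketch of one light's contribution follows, with a plain inverse-square falloff standing in for get_intensity, which is defined elsewhere in the shader.

#include <math.h>
#include <stdio.h>

static float dot3(const float a[3], const float b[3]) {
    return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}

/* One point light's Phong contribution at a surface point.
 * N, L_dir, V_dir are unit vectors; attenuation is a 1/d^2 stand-in. */
static void shade_point_light(const float N[3], const float L_dir[3], float dist,
                              const float V_dir[3], float shininess,
                              float *diffuse, float *specular) {
    float atten = 1.0f / (dist * dist);
    float ndl = dot3(N, L_dir);
    *diffuse = atten * fmaxf(0.0f, ndl);
    /* reflect(-L, N) = 2 * dot(N, L) * N - L */
    float R[3];
    for (int i = 0; i < 3; i++)
        R[i] = 2.0f * ndl * N[i] - L_dir[i];
    *specular = atten * powf(fmaxf(0.0f, dot3(R, V_dir)), shininess);
}

int main(void) {
    /* Light straight above and viewer along the reflection: full diffuse and specular. */
    float N[3] = {0, 1, 0}, L[3] = {0, 1, 0}, V[3] = {0, 1, 0};
    float d, s;
    shade_point_light(N, L, 2.0f, V, 32.0f, &d, &s);
    printf("diffuse = %g, specular = %g\n", d, s);
    return 0;
}
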
@ -4,9 +4,11 @@ layout(location = 0) in vec3 pos;
layout(location = 1) in vec3 normal;

layout(location = 2) in mat4 model_t;
/* 2 <- 3,4,5 */
/* 2 <- 3, 4, 5 */
layout(location = 6) in vec3 color_off;
layout(location = 7) in vec3 color_on;
layout(location = 8) in mat3 normal_t;
/* 8 <- 9, 10 */

layout(location = 0) out vec3 vsout_normal;
layout(location = 1) out vec3 vsout_color_off;
@ -18,7 +20,7 @@ layout(push_constant, std430) uniform pc {
};

void main(){
    vsout_normal = normal;
    vsout_normal = normalize(normal_t * normal);
    vsout_color_off = color_off;
    vsout_color_on = color_on;
    vec4 real_pos = model_t * vec4(pos, 1);

Binary file not shown.
Before Width: | Height: | Size: 76 KiB  After Width: | Height: | Size: 91 KiB