Saving: generating Vec, Span, Option template instances in a codegen file. Renamed {ConstSpan,Span} to {Span,MutSpan}. Added VecU8_fmt function

This commit is contained in:
Андреев Григорий 2025-08-15 00:24:35 +03:00
parent 78c33325d4
commit 42a64c6e15
19 changed files with 811 additions and 438 deletions

View File

@ -8,24 +8,28 @@ add_compile_definitions(_POSIX_C_SOURCE=200112L)
add_compile_definitions(_GNU_SOURCE)
add_compile_options(-fno-trapping-math)
add_executable(0_test src/l1/tests/t0.c)
add_executable(1_test src/l1/tests/t1.c)
add_executable(codegen_l1 src/l1/codegen/codegen.c)
target_compile_definitions(codegen_l1
PRIVATE PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8)
add_executable(codegen_l2 src/l2/codegen.c)
add_executable(0_render_test src/l2/tests/r0/r0.c)
target_link_libraries(0_render_test -lvulkan -lX11 -lm)
add_executable(0_render_test_tex_init_prep src/l2/tests/r0/r0_tex_init_prep.c)
target_link_libraries(0_render_test_tex_init_prep -lm)
add_executable(1_render_test src/l2/tests/r1/r1.c gen/l_wl_protocols/xdg-shell-private.c)
target_link_libraries(1_render_test -lwayland-client -lrt -lm -lxkbcommon)
add_executable(0_play_test src/l3/tests/p0.c)
target_link_libraries(0_play_test -lncurses)
# Recursively collect all .h files in the src directory.
file(GLOB_RECURSE HEADER_FILES "${CMAKE_SOURCE_DIR}/src/*.h")
# Do not build utku
add_executable(utka src/l1/tests/t0.c ${HEADER_FILES})
#add_executable(0_test src/l1/tests/t0.c)
#add_executable(1_test src/l1/tests/t1.c)
#
#add_executable(codegen_l2 src/l2/codegen/codegen.c)
#
#add_executable(0_render_test src/l2/tests/r0/r0.c)
#target_link_libraries(0_render_test -lvulkan -lX11 -lm)
#
#add_executable(0_render_test_tex_init_prep src/l2/tests/r0/r0_tex_init_prep.c)
#target_link_libraries(0_render_test_tex_init_prep -lm)
#
#add_executable(1_render_test src/l2/tests/r1/r1.c gen/l_wl_protocols/xdg-shell-private.c)
#target_link_libraries(1_render_test -lwayland-client -lrt -lm -lxkbcommon)
#
#add_executable(0_play_test src/l3/tests/p0.c)
#target_link_libraries(0_play_test -lncurses)
#
## Recursively collect all .h files in the src directory.
#file(GLOB_RECURSE HEADER_FILES "${CMAKE_SOURCE_DIR}/src/*.h")
## Do not build utku
#add_executable(utka src/l1/tests/t0.c ${HEADER_FILES})

View File

@ -6,13 +6,9 @@ cc := 'gcc'
wl_protocols := $(shell pkg-config --variable=pkgdatadir wayland-protocols)
gen/l_wl_protocols/xdg-shell-client.h: $(wl_protocols)/stable/xdg-shell/xdg-shell.xml
mkdir -p gen/l_wl_protocols
wayland-scanner client-header $< $@
gen/l_wl_protocols/xdg-shell-private.c: $(wl_protocols)/stable/xdg-shell/xdg-shell.xml
mkdir -p gen/l_wl_protocols
wayland-scanner private-code $< $@
out/l1/codegen_l1: src/l1/codegen/codegen.c $(HEADERS)
mkdir -p out/l1
$(cc) $(cflags) -o $@ $<
out/l1/t0: src/l1/tests/t0.c $(HEADERS)
mkdir -p out/l1
@ -22,10 +18,22 @@ out/l1/t1: src/l1/tests/t1.c $(HEADERS)
mkdir -p out/l1
$(cc) $(cflags) -o $@ $<
out/l2/codegen_l: src/l2/codegen.c $(HEADERS)
out/l2/codegen_l2: src/l2/codegen/codegen.c $(HEADERS)
mkdir -p out/l2
$(cc) $(cflags) -o $@ $<
gen/l_wl_protocols/xdg-shell-client.h: $(wl_protocols)/stable/xdg-shell/xdg-shell.xml
mkdir -p gen/l_wl_protocols
wayland-scanner client-header $< $@
gen/l_wl_protocols/xdg-shell-private.c: $(wl_protocols)/stable/xdg-shell/xdg-shell.xml
mkdir -p gen/l_wl_protocols
wayland-scanner private-code $< $@
out/l2/r0: src/l2/tests/r0/r0.c $(HEADERS)
mkdir -p out/l2
$(cc) $(cflags) -o $@ $< -lvulkan -lX11 -lm

44
src/l1/codegen/codegen.c Normal file
View File

@ -0,0 +1,44 @@
#include "util_template_inst.h"
#include "../system/fsmanip.h"
int main() {
/* quick smoke test of the new VecU8_fmt */
VecU8 e = VecU8_fmt("%sA%%%s\n", cstr("AA"), cstr("BB"));
SpanU8_print(VecU8_to_SpanU8(&e));
VecU8_drop(e);
make_dir_nofail("l1");
{
VecU8 head = begin_header(cstr("PROTOTYPE1_L1_VECANDSPANANDOPTION_INT_PRIMITIVES_H"));
SpanU8 T[4] = {cstr("U8"), cstr("U16"), cstr("U32"), cstr("U64")};
for (size_t i = 0; i < ARRAY_SIZE(T); i++) {
VecU8_append_vec(&head, generate_util_templates_instantiation(T[i], (util_templates_instantiation_options){
.t_integer = true, .t_primitive = true, .vec = true, .vec_equal = true, .span = true, .mut_span = true,
.collab_vec_span = true, .option = true
}));
}
finish_header(head, "l1/VecAndSpanAndOption_int_primitives.h");
}
{
VecU8 head = begin_header(cstr("PROTOTYPE1_L1_VECANDSPAN_VEC_INT_PRIMITIVES_H"));
SpanU8 T[4] = {cstr("VecU8"), cstr("VecU16"), cstr("VecU32"), cstr("VecU64")};
for (size_t i = 0; i < ARRAY_SIZE(T); i++) {
VecU8_append_vec(&head, generate_util_templates_instantiation(T[i], (util_templates_instantiation_options){
.t_integer = false, .t_primitive = false, .vec = true, .vec_equal = true, .span = true, .mut_span = true,
.collab_vec_span = true, .option = true
}));
}
finish_header(head, "l1/VecAndSpan_Vec_int_primitives.h");
}
{
VecU8 head = begin_header(cstr("PROTOTYPE1_L1_VECANDSPAN_SPAN_INT_PRIMITIVES_H"));
SpanU8 T[4] = {cstr("VecU8")};
for (size_t i = 0; i < ARRAY_SIZE(T); i++) {
VecU8_append_vec(&head, generate_util_templates_instantiation(T[i], (util_templates_instantiation_options){
.t_integer = false, .t_primitive = false, .vec = true, .vec_equal = true, .span = true, .mut_span = true,
.collab_vec_span = true, .option = true
}));
}
finish_header(head, "l1/VecAndSpan_Span_int_primitives.h");
}
return 0;
}
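Once codegen_l1 has run (with `gen` as its working directory, as codegen.h notes), downstream code is expected to consume the generated API roughly as in the sketch below. This is illustrative only; the include paths and the availability of util.h's Option_variant and safe_* helpers are assumptions, not part of this commit.

#include "../core/util.h"   /* U32, Option_variant, safe_calloc, ... (assumed location) */
#include "../../../gen/l1/VecAndSpanAndOption_int_primitives.h"

int main(void) {
    VecU32 v = VecU32_new();
    for (U32 i = 0; i < 4; i++)
        VecU32_append(&v, i * i);
    SpanU32 s = VecU32_to_span(&v);                 /* read-only view over the vector */
    OptionU32 last = s.len ? Some_U32(*SpanU32_at(s, s.len - 1)) : None_U32();
    (void)OptionU32_expect(last);                   /* aborts if None */
    VecU32_drop(v);
    return 0;
}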

View File

@ -3,9 +3,9 @@
#include <stdio.h>
#include "../system/fileio.h"
#include "../core/VecU8_format.h"
#include "../core/VecU8_as_str.h"
NODISCARD VecU8 begin_header(ConstSpanU8 guard) {
NODISCARD VecU8 begin_header(SpanU8 guard) {
VecU8 res = VecU8_new();
VecU8_append_span(&res, cstr("#ifndef "));
VecU8_append_span(&res, guard);
@ -18,7 +18,7 @@ NODISCARD VecU8 begin_header(ConstSpanU8 guard) {
/* Codegen script's working directory should be `gen` */
void finish_header(VecU8 text_before_endif, const char* filename) {
VecU8_append_span(&text_before_endif, cstr("#endif\n"));
write_whole_file_or_abort(filename, VecU8_to_ConstSpanU8(&text_before_endif));
write_whole_file_or_abort(filename, VecU8_to_SpanU8(&text_before_endif));
VecU8_drop(text_before_endif);
}
@ -27,24 +27,8 @@ void finish_header(VecU8 text_before_endif, const char* filename) {
#define SPACE12 " "
#define SPACE16 " "
NODISCARD VecU8 generate_type_triv_methods_and_vec(ConstSpanU8 member) {
VecU8 res = VecU8_from_cstr("#define ");
VecU8_append_span(&res, member);
VecU8_append_span(&res, cstr("_drop(x) {}\n#define "));
VecU8_append_span(&res, member);
VecU8_append_span(&res, cstr("_clone(xp) (*(xp))\n\n"));
VecU8_append_span(&res, cstr("VecT_trivmove_struct_Definition("));
VecU8_append_span(&res, member);
VecU8_append_span(&res, cstr(")\nVecT_trivmove_method_Definition("));
VecU8_append_span(&res, member);
VecU8_append_span(&res, cstr(")\nVecT_primitive_zeroinit_method_Definition("));
VecU8_append_span(&res, member);
VecU8_append_span(&res, cstr(")\n\n"));
return res;
}
/* Code generation helper: appends the vector type name ("Vec" + t) to the output string, not any vector data */
void VecU8_append_vecoft(VecU8* str, ConstSpanU8 t) {
void VecU8_append_vecoft(VecU8* str, SpanU8 t) {
VecU8_append_span(str, cstr("Vec"));
VecU8_append_span(str, t);
}

View File

@ -0,0 +1,471 @@
#ifndef PROTOTYPE1_SRC_L1_CODEGEN_UTIL_TEMPLATES_H
#define PROTOTYPE1_SRC_L1_CODEGEN_UTIL_TEMPLATES_H
#include "codegen.h"
/* if !primitive, requires methods T_clone, T_drop */
NODISCARD VecU8 generate_VecT_struct_and_base_methods(SpanU8 T, bool primitive) {
VecU8 g_VecT = VecU8_fmt("Vec%s", T);
SpanU8 VecT = VecU8_to_SpanU8(&g_VecT);
VecU8 res = VecU8_fmt(
"typedef struct {\n"
SPACE4 "%s* buf;\n"
SPACE4 "size_t len;\n"
SPACE4 "size_t capacity;\n"
"} %s;\n\n", T, VecT);
VecU8_append_vec(&res, VecU8_fmt("#define %s_new() (%s){ 0 }\n\n", VecT, VecT));
VecU8_append_vec(&res, VecU8_fmt("void %s_drop(%s self) {\n", VecT, VecT));
if (!primitive) {
VecU8_append_vec(&res, VecU8_fmt(
SPACE4 "for (size_t i = 0; i < self.len; i++) \n"
SPACE4 SPACE4 "%s_drop(self.buf[i]);\n", T));
}
VecU8_append_vec(&res, VecU8_fmt(
SPACE4 "free(self->buf);\n"
"}\n\n"));
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_new_reserved(size_t n) {\n"
SPACE4 "return (%s){ .buf = safe_calloc(n, sizeof(%s)), .len = 0, .capacity = n };\n"
"}\n\n", VecT, VecT, VecT, T));
VecU8_append_vec(&res, VecU8_fmt(
"void %s_append(%s* self, %s el) {\n"
SPACE4 "size_t new_length = self->len + 1;\n"
SPACE4 "if (new_length > self->capacity) {\n"
SPACE4 SPACE4 "size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length);\n"
SPACE4 SPACE4 "self->buf = safe_realloc(self->buf, new_capacity * sizeof(%s));\n"
SPACE4 SPACE4 "self->capacity = new_capacity;\n"
SPACE4 "}\n"
SPACE4 "self->buf[self->len] = el;\n"
SPACE4 "self->len = new_length;\n"
"}\n\n", VecT, VecT, T, T));
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_mat(%s* self, size_t i) {\n"
SPACE4 "assert(i < self->len);\n"
SPACE4 "return &self->buf[i];\n"
"}\n\n", T, VecT, VecT));
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_at(const %s* self, size_t i) {\n"
SPACE4 "assert(i < self->len);\n"
SPACE4 "return &self->buf[i];\n"
"}\n\n", T, VecT, VecT));
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_clone(const %s* self) {\n"
SPACE4 "%s res = (%s){.buf = safe_calloc(self->len, sizeof(%s)), .len = self->len, .capacity = self->len};",
VecT, VecT, VecT, VecT, VecT, T));
if (primitive) {
VecU8_append_vec(&res, VecU8_fmt(
SPACE4 "memcpy(res.buf, self->buf, self->len * sizeof(%s));", T));
} else {
VecU8_append_vec(&res, VecU8_fmt(
SPACE4 "for (size_t i = 0; i < self->len; i++)\n"
SPACE4 SPACE4 "res.buf[i] = %s_clone(&self->buf[i]);\n", T));
}
VecU8_append_span(&res, cstr("}\n\n"));
VecU8_append_vec(&res, VecU8_fmt(
"void %s_append_vec(%s* self, %s b) {\n"
SPACE4 "size_t new_length = self->len + b.len;\n"
SPACE4 "if (new_length > self->capacity) {\n"
SPACE4 SPACE4 "size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length);\n"
SPACE4 SPACE4 "self->buf = safe_realloc(self->buf, new_capacity * sizeof(%s));\n"
SPACE4 SPACE4 "self->capacity = new_capacity;\n"
SPACE4 "}\n"
SPACE4 "for (size_t i = 0; i < b.len; i++){\n"
SPACE4 SPACE4 "self->buf[self->len + i] = b.buf[i];\n"
SPACE4 "}\n"
SPACE4 "self->len = new_length;\n"
SPACE4 "free(b.buf);\n"
"}\n\n", VecT, VecT, VecT, T));
if (primitive) {
VecU8_append_vec(&res, VecU8_fmt(
"NODISCARD %s %s_new_zeroinit(size_t len) {\n"
SPACE4 "return (%s){.buf = safe_calloc(len, sizeof(%s)), .len = len, .capacity = len};\n"
"}\n\n", VecT, VecT, VecT, T));
}
VecU8_drop(g_VecT); // VecT invalidated too
return res;
}
/* if !primitive, requires methods T_clone, T_drop */
NODISCARD VecU8 generate_VecT_trivmove_extended_methods(SpanU8 T, bool primitive) {
VecU8 g_VecT = VecU8_fmt("Vec%s", T);
SpanU8 VecT = VecU8_to_SpanU8(&g_VecT);
VecU8 res = VecU8_new();
VecU8_append_vec(&res, VecU8_fmt(
"%s%s %s_pop(%s* self) {\n"
SPACE4 "assert(self->len > 0);\n"
SPACE4 "self->len--;\n"
SPACE4 "return self->buf[self->len];\n"
"}\n\n", primitive ? cstr("") : cstr("NODISCARD "), T, VecT, VecT));
if (!primitive) {
VecU8_append_vec(&res, VecU8_fmt(
"void %s_pop_and_drop(%s* self) {\n"
SPACE4 "%s_drop(%s_pop(self));\n"
"}\n\n", VecT, VecT, T, VecT));
}
VecU8_append_vec(&res, VecU8_fmt(
"NODISCARD %s %s_swap_with_empty(%s* cell) {\n"
SPACE4 "%s res = *cell;\n"
SPACE4 "*cell = (%s){NULL, 0, 0};\n"
SPACE4 "return val;\n"
"}\n\n", VecT, VecT, VecT, VecT, VecT));
if (primitive) {
VecU8_append_vec(&res, VecU8_fmt(
"NODISCARD %s %s_new_filled(size_t len, %s el) {\n"
SPACE4 "%s res = (%s){.buf = safe_calloc(len, sizeof(%s)), .len = len, .capacity = len};\n"
SPACE4 "for (size_t i = 0; i < len; i++)\n"
SPACE4 SPACE4 "res.buf[i] = el;\n"
SPACE4 "return res;\n"
"}\n\n", VecT, VecT, T, VecT, VecT, T));
} else {
VecU8_append_vec(&res, VecU8_fmt(
"NODISCARD %s %s_new_filled(size_t len, const %s* el) {\n"
SPACE4 "%s res = (%s){.buf = safe_calloc(len, sizeof(%s)), .len = len, .capacity = len};\n"
SPACE4 "for (size_t i = 0; i < len; i++)\n"
SPACE4 SPACE4 "res.buf[i] = %s_clone(el);\n"
SPACE4 "return res;\n"
"}\n\n", VecT, VecT, T, VecT, VecT, T, T));
}
VecU8_drop(g_VecT); // VecT invalidated
return res;
}
/* if !integer requires method T_equal_T */
NODISCARD VecU8 generate_VecT_equal_method(SpanU8 T, bool integer) {
VecU8 g_VecT = VecU8_fmt("Vec%s", T);
SpanU8 VecT = VecU8_to_SpanU8(&g_VecT);
VecU8 res = VecU8_fmt(
"bool %s_equal_%s(const %s* A, const %s* B) {\n"
SPACE4 "if (A->len != B->len)\n"
SPACE4 SPACE4 "return false;\n"
SPACE4 "for (size_t i = 0; i < A->len; i++) {\n", VecT, VecT, VecT, VecT);
if (integer) {
VecU8_append_span(&res, cstr(SPACE8 "if (A->buf[i] != B->buf[i])\n"));
} else {
VecU8_append_vec(&res, VecU8_fmt(SPACE8 "if (!%s_equal_%s(A->buf + i, B->buf + i))\n", T, T));
}
VecU8_append_span(&res, cstr(
SPACE4 SPACE4 SPACE4 "return false;\n"
SPACE4 "}\n"
SPACE4 "return true;\n"
"}\n"));
VecU8_drop(g_VecT);
return res;
}
/* requires method T_new */
NODISCARD VecU8 generate_VecT_new_of_size_method(SpanU8 T) {
VecU8 g_VecT = VecU8_fmt("Vec%s", T);
SpanU8 VecT = VecU8_to_SpanU8(&g_VecT);
VecU8 res = VecU8_fmt(
"NODISCARD %s %s_new_of_size(size_t len) {\n"
SPACE4 "%s res = (%s){.buf = safe_calloc(len, sizeof(%s)), .len = len, .capacity = len};\n"
SPACE4 "for (size_t i = 0; i < len; i++)\n"
SPACE4 SPACE4 "res.buf[i] = %s_new();\n"
SPACE4 "return res;\n"
"}\n", VecT, VecT, VecT, VecT, T, T);
VecU8_drop(g_VecT);
return res;
}
/* helper function. SpanT is either SpanT or MutSpanT */
void codegen_append_some_span_equal_method(VecU8* res, SpanU8 SpanT) {
VecU8_append_vec(res, VecU8_fmt(
"bool %s_%s(%s A, %s B) {\n"
SPACE4 "return A->data == B->data && A->len == B->len;\n"
"}\n\n", SpanT, SpanT, SpanT, SpanT));
}
/* helper function. (SpanT, mod) is either (SpanT, "const ") or (MutSpanT, "") */
void codegen_append_some_span_struct(VecU8* res, SpanU8 T, SpanU8 SpanT, SpanU8 mod) {
VecU8_append_vec(res, VecU8_fmt(
"typedef struct {\n"
SPACE4 "%s%s* data;\n"
SPACE4 "size_t len;\n"
"} %s\n\n", mod, T, SpanT));
}
/* helper function. (SpanT, mod) is either (SpanT, "const ") or (MutSpanT, "") */
void codegen_append_some_span_at_method(VecU8* res, SpanU8 T, SpanU8 SpanT, SpanU8 mod) {
VecU8_append_vec(res, VecU8_fmt(
"%s%s* %s_at(%s self, size_t i) {\n"
SPACE4 "assert(i < self.len);\n"
SPACE4 "return self.data + i;\n"
"}\n\n", mod, T, SpanT, SpanT));
}
/* helper function. SpanT is either SpanT or MutSpanT
* span method retrieves subspan */
void codegen_append_some_span_span_method(VecU8* res, SpanU8 SpanT) {
VecU8_append_vec(res, VecU8_fmt(
"%s %s_span(%s self, size_t start, size_t len){\n"
"assert(start < SIZE_MAX - len && start + len <= self.len);\n"
"return (%s){.data = self.data + start, .len = len};\n"
"}\n\n", SpanT, SpanT, SpanT, SpanT));
}
/* T must be sized. Option `add_sort` requires option `add_mutable` and method T_less
* add_mutable option generates MutSpanT.
* add_equal option generates equal method. add_extended option generates extended methods
* add_sort option generates T_qcompare and MutSpanT_sort methods */
NODISCARD VecU8 generate_SpanT_struct_and_methods(
SpanU8 T, bool add_mutable, bool add_equal, bool add_extended, bool add_sort
) {
VecU8 g_SpanT = VecU8_fmt("Span%s", T);
VecU8 g_MutSpanT = VecU8_fmt("MutSpan%s", T);
SpanU8 SpanT = VecU8_to_SpanU8(&g_SpanT);
SpanU8 MutSpanT = VecU8_to_SpanU8(&g_MutSpanT);
VecU8 res = VecU8_new();
codegen_append_some_span_struct(&res, T, SpanT, cstr("const "));
if (add_mutable)
codegen_append_some_span_struct(&res, T, MutSpanT, cstr(""));
if (add_equal) {
codegen_append_some_span_equal_method(&res, SpanT);
if (add_mutable)
codegen_append_some_span_equal_method(&res, MutSpanT);
}
if (add_mutable) {
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_to_%s(%s self) {\n"
SPACE4 "return (%s){.data = self.data, .len = self.len};\n"
"}\n\n", SpanT, MutSpanT, SpanT, MutSpanT, SpanT));
}
codegen_append_some_span_at_method(&res, T, SpanT, cstr("const "));
if (add_mutable)
codegen_append_some_span_at_method(&res, T, MutSpanT, cstr(""));
if (add_extended) {
codegen_append_some_span_span_method(&res, SpanT);
if (add_mutable)
codegen_append_some_span_span_method(&res, MutSpanT);
}
if (add_sort) {
assert(add_mutable);
VecU8_append_vec(&res, VecU8_fmt(
"int %s_qcompare(const void* a, const void* b) {\n"
SPACE4 "const %s* A = a;\n"
SPACE4 "const %s* B = b;\n"
SPACE4 "return (int)%s_less_%s(B, A) - (int)%s_less_%s(A, B);\n"
"}\n\n", T, T, T, T, T, T, T));
VecU8_append_vec(&res, VecU8_fmt(
"void %s_sort(%s self) {\n"
SPACE4 "qsort(self.data, self.len, sizeof(%s), %s_qcompare);\n"
"}\n\n", MutSpanT, MutSpanT, T, T));
}
VecU8_drop(g_MutSpanT);
VecU8_drop(g_SpanT);
return res;
}
// void codegen_append_vec_some_span_method(VecU8* res, SpanU8 mod, SpanU8 )
/* T must be trivially movable. If !primitive, requires methods T_drop (implicitly), T_clone */
NODISCARD VecU8 generate_SpanT_VecT_trivmove_collab(SpanU8 T, bool primitive, bool add_mutable, bool add_extended) {
VecU8 g_SpanT = VecU8_fmt("Span%s", T);
VecU8 g_MutSpanT = VecU8_fmt("MutSpan%s", T);
VecU8 g_VecT = VecU8_fmt("Vec%s", T);
SpanU8 SpanT = VecU8_to_SpanU8(&g_SpanT);
SpanU8 MutSpanT = VecU8_to_SpanU8(&g_MutSpanT);
SpanU8 VecT = VecU8_to_SpanU8(&g_VecT);
VecU8 res = VecU8_new();
VecU8_append_vec(&res, VecU8_fmt(
"NODISCARD %s %s_from_span(%s src){\n"
SPACE4 "%s res = (%s){ .buf = safe_calloc(src.len, sizeof(%s)), .len = src.len, .capacity = src.len };\n",
VecT, VecT, SpanT, VecT, VecT, T));
if (primitive) {
VecU8_append_vec(&res, VecU8_fmt(
SPACE4 "memcpy(res.buf, src.data, src.len * sizeof(%s));\n", T));
} else {
VecU8_append_vec(&res, VecU8_fmt(
SPACE4 "for (size_t i = 0; i < src.len; i++)\n"
SPACE8 "res.buf[i] = %s_clone(&src.data[i]);\n", T));
}
VecU8_append_span(&res, cstr(SPACE4 "return res;\n}\n\n"));
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_to_span(const %s* vec){\n"
SPACE4 "return (%s){vec->buf, vec->len};\n"
"}\n\n", SpanT, VecT, VecT, SpanT));
if (add_mutable) {
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_to_mspan(%s* vec){\n"
SPACE4 "return (%s){vec->buf, vec->len};\n"
"}\n\n", MutSpanT, VecT, VecT, MutSpanT));
}
VecU8_append_vec(&res, VecU8_fmt(
"void %s_append_span(%s* self, %s b) {\n"
SPACE4 "size_t new_length = self->len + b.len;\n"
SPACE4 "if (new_length > self->capacity) {\n"
SPACE4 SPACE4 "size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length);\n"
SPACE4 SPACE4 "self->buf = safe_realloc(self->buf, new_capacity * sizeof(%s));\n"
SPACE4 SPACE4 "self->capacity = new_capacity;\n"
SPACE4 "}\n", VecT, VecT, SpanT, T));
if (primitive) {
VecU8_append_vec(&res, VecU8_fmt(
SPACE4 "memcpy(self->buf + i, b.data, b.len * sizeof(%s));\n", T));
} else {
VecU8_append_vec(&res, VecU8_fmt(
SPACE4 "for (size_t i = 0; i < b.len; i++)\n"
SPACE4 SPACE4 "self->buf[self->len + i] = %s_clone(&b.data[i]);\n", T));
}
VecU8_append_span(&res, cstr(
SPACE4 "self->len = new_length;\n"
"}\n\n"));
if (add_extended) {
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_span(const %s* vec, size_t start, size_t len) {\n"
SPACE4 "assert(start < SIZE_MAX - len && start + len <= vec->len);\n"
SPACE4 "return (%s){.data = vec->buf + start, .len = len};\n"
"}\n\n", SpanT, VecT, VecT, SpanT));
if (add_mutable) {
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_span(%s* vec, size_t start, size_t len) {\n"
SPACE4 "assert(start < SIZE_MAX - len && start + len <= vec->len);\n"
SPACE4 "return (%s){.data = vec->buf + start, .len = len};\n"
"}\n\n", MutSpanT, VecT, VecT, MutSpanT));
}
}
VecU8_drop(g_VecT);
VecU8_drop(g_MutSpanT);
VecU8_drop(g_SpanT);
return res;
}
NODISCARD VecU8 generate_OptionT_struct_and_methods(SpanU8 T) {
VecU8 g_OptionT = VecU8_fmt("Option%s", T);
SpanU8 OptionT = VecU8_to_SpanU8(&g_OptionT);
VecU8 res = VecU8_fmt(
"typedef struct {\n"
SPACE4 "Option_variant variant;\n"
SPACE4 "%s some;\n"
"} %s;\n\n", T, OptionT);
VecU8_append_vec(&res, VecU8_fmt(
"#define None_%s() (%s){ .variant = Option_None }\n"
"#define Some_%s(expr) (%s){ .variant = Option_Some, .some = (expr) }\n\n",
T, OptionT, T, OptionT));
VecU8_append_vec(&res, VecU8_fmt(
"const %s* %s_expect_ref(const %s* self){\n"
SPACE4 "if (self->variant == Option_None)\n"
SPACE4 SPACE4 "abortf(\"Expected something in const %s* got None\\n\");\n"
SPACE4 "return &self->some;\n"
"}\n\n", T, OptionT, OptionT, OptionT));
VecU8_append_vec(&res, VecU8_fmt(
"%s* %s_expect_mut_ref(%s* self){\n"
SPACE4 "if (self->variant == Option_None)\n"
SPACE4 SPACE4 "abortf(\"Expected something in %s* got None\\n\");\n"
SPACE4 "return &self->some;\n"
"}\n\n", T, OptionT, OptionT, OptionT));
VecU8_append_vec(&res, VecU8_fmt(
"%s %s_expect(%s self){\n"
SPACE4 "if (self.variant == Option_None)\n"
SPACE4 SPACE4 "abortf(\"Expected something in %s got None\\n\");\n"
SPACE4 "return self.some;\n"
"}\n\n", T, OptionT, OptionT, OptionT));
VecU8_drop(g_OptionT);
return res;
}
/* The only reason this struct exists is that in C it is easier to supply many arguments
 * through a brace-initializer list than through a long parameter list. It is the options
 * argument for generate_util_templates_instantiation. It is assumed that all necessary
 * properties of T are met, including that T is sized (everywhere) and trivially movable (for VecT).
 */
typedef struct {
bool t_integer;
bool t_primitive;
bool vec;
bool vec_extended;
bool vec_equal;
bool vec_new_of_size;
bool span;
bool mut_span;
bool span_extended;
bool span_sort;
bool collab_vec_span;
bool collab_vec_span_extended;
bool option;
} util_templates_instantiation_options;
NODISCARD VecU8 generate_util_templates_instantiation(SpanU8 T, util_templates_instantiation_options op) {
VecU8 res = VecU8_new();
assert(!op.t_primitive || op.t_integer);
if (op.vec) {
VecU8_append_vec(&res, generate_VecT_struct_and_base_methods(T, op.t_primitive));
}
if (op.vec_extended) {
assert(op.vec);
VecU8_append_vec(&res, generate_VecT_trivmove_extended_methods(T, op.t_primitive));
}
if (op.vec_equal) {
assert(op.vec);
VecU8_append_vec(&res, generate_VecT_equal_method(T, op.t_integer));
}
if (op.vec_new_of_size) {
assert(op.vec);
VecU8_append_vec(&res, generate_VecT_new_of_size_method(T));
}
if (op.span) {
VecU8_append_vec(&res, generate_SpanT_struct_and_methods(T, op.mut_span, false, op.span_extended, op.span_sort));
}
if (op.collab_vec_span) {
assert(op.vec && op.span);
VecU8_append_vec(&res, generate_SpanT_VecT_trivmove_collab(T, op.t_primitive, op.mut_span, op.collab_vec_span_extended));
}
if (op.option) {
VecU8_append_vec(&res, generate_OptionT_struct_and_methods(T));
}
return res;
}
void generate_lynda_header(SpanU8 name_pref, SpanU8 T, util_templates_instantiation_options op) {
if (op.vec_extended)
op.vec = true;
if (op.vec_equal)
op.vec = true;
if (op.span_extended)
op.span = true;
if (op.span_sort)
op.span = true;
if (op.mut_span)
op.span = true;
if (op.collab_vec_span_extended)
op.collab_vec_span = true;
if (op.collab_vec_span) {
op.span = true;
op.vec = true;
}
assert(op.vec || op.span || op.option);
VecU8 text = generate_util_templates_instantiation(T, op);
VecU8 filename = VecU8_fmt("lynda/%s/%s%s%s%s%s""%s%s.h", name_pref,
cstr(op.vec ? "Vec" : ""), cstr(op.vec && op.span ? "And" : ""), cstr(op.span ? "Span" : ""),
cstr(op.span && op.option ? "And" : ""), cstr(op.option ? "Option" : ""),
cstr((int)op.vec + (int)op.span + (int)op.option > 1 ? "_" : ""), T);
}
#endif
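To make the string templates above concrete: instantiated for T = U8, generate_VecT_struct_and_base_methods and the Span helpers emit code along these lines (assembled by hand from the format strings as a sketch, not captured from actual generator output).

typedef struct {
    U8* buf;
    size_t len;
    size_t capacity;
} VecU8;

#define VecU8_new() (VecU8){ 0 }

void VecU8_append(VecU8* self, U8 el) {
    size_t new_length = self->len + 1;
    if (new_length > self->capacity) {
        size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length);
        self->buf = safe_realloc(self->buf, new_capacity * sizeof(U8));
        self->capacity = new_capacity;
    }
    self->buf[self->len] = el;
    self->len = new_length;
}

typedef struct {
    const U8* data;
    size_t len;
} SpanU8;

const U8* SpanU8_at(SpanU8 self, size_t i) {
    assert(i < self.len);
    return self.data + i;
}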

View File

@ -1,9 +0,0 @@
#ifndef PROTOTYPE1_SRC_L1_CORE_OPTION_INT_PRIMITIVES_H
#define PROTOTYPE1_SRC_L1_CORE_OPTION_INT_PRIMITIVES_H
#include "util.h"
OptionT_struct_Definition(U32)
OptionT_method_Definition(U32)
#endif

View File

@ -1,12 +0,0 @@
#ifndef PROTOTYPE1_SRC_L1_CORE_SPAN_SPAN_INT_PRIMITIVES_H
#define PROTOTYPE1_SRC_L1_CORE_SPAN_SPAN_INT_PRIMITIVES_H
#include "VecSpan_int_primitives.h"
// todo: generate all this shit by a codegen script into separate files
// todo: completely get rid of these dumb macroses and these stupid files
SpanT_struct_Definition(ConstSpanU8)
SpanT_method_Definition(ConstSpanU8)
#endif

View File

@ -1,13 +0,0 @@
#ifndef PROTOTYPE1_SRC_CORE_VECSPAN_VECSPAN_INT_PRIMITIVES_H
#define PROTOTYPE1_SRC_CORE_VECSPAN_VECSPAN_INT_PRIMITIVES_H
#include "VecSpan_int_primitives.h"
SpanT_VecT_trivmove_COMPLETE_Definition(VecU8)
VecT_with_default_method_Definition(VecU8)
SpanT_VecT_trivmove_COMPLETE_Definition(VecU32)
VecT_with_default_method_Definition(VecU32)
SpanT_VecT_trivmove_COMPLETE_Definition(VecU64)
VecT_with_default_method_Definition(VecU64)
#endif

View File

@ -1,49 +0,0 @@
#ifndef PROTOTYPE1_SRC_CORE_VECU8_H
#define PROTOTYPE1_SRC_CORE_VECU8_H
#include "util.h"
SpanT_VecT_trivmove_COMPLETE_Definition(U8)
VecT_primitive_zeroinit_method_Definition(U8)
SpanT_comparable_method_Definition(U8)
VecU8 VecU8_from_cstr(const char* dc) {
size_t n = strlen(dc);
VecU8 res = (VecU8){ .buf = safe_calloc(n, 1), .len = n, .capacity = n };
memcpy(res.buf, dc, n);
return res;
}
#define vcstr(dc) VecU8_from_cstr(dc)
ConstSpanU8 ConstSpanU8_from_cstr(const char* dc) {
return (ConstSpanU8){.data = (U8*)dc, .len = strlen(dc)};
}
#define cstr(dc) ConstSpanU8_from_cstr(dc)
/* Not thread safe (for stdout) !*/
void ConstSpanU8_print(ConstSpanU8 str) {
for (size_t i = 0; i < str.len; i++)
putc((int)*ConstSpanU8_at(str, i), stdout);
}
/* Not thread safe (for `stream`) ! */
void ConstSpanU8_fprint( ConstSpanU8 str, FILE* stream) {
for (size_t i = 0; i < str.len; i++)
putc((int)*ConstSpanU8_at(str, i), stream);
}
SpanT_VecT_trivmove_COMPLETE_Definition(U16)
VecT_primitive_zeroinit_method_Definition(U16)
SpanT_comparable_method_Definition(U16)
SpanT_VecT_trivmove_COMPLETE_Definition(U32)
VecT_primitive_zeroinit_method_Definition(U32)
SpanT_comparable_method_Definition(U32)
SpanT_VecT_trivmove_COMPLETE_Definition(U64)
VecT_primitive_zeroinit_method_Definition(U64)
SpanT_comparable_method_Definition(U64)
#endif

103
src/l1/core/VecU8_as_str.h Normal file
View File

@ -0,0 +1,103 @@
#ifndef PROTOTYPE1_SRC_L1_CORE_VECU8_AS_STR_H
#define PROTOTYPE1_SRC_L1_CORE_VECU8_AS_STR_H
#include <stdarg.h>
#ifdef PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8
#include "chicken_VecU8.h"
#else
#include "../../../gen/l1/VecAndSpan_int_primitives.h"
#endif
VecU8 VecU8_from_cstr(const char* dc) {
size_t n = strlen(dc);
VecU8 res = (VecU8){ .buf = safe_calloc(n, 1), .len = n, .capacity = n };
memcpy(res.buf, dc, n);
return res;
}
#define vcstr(dc) VecU8_from_cstr(dc)
SpanU8 SpanU8_from_cstr(const char* dc) {
return (SpanU8){.data = (const U8*)dc, .len = strlen(dc)};
}
#define cstr(dc) SpanU8_from_cstr(dc)
/* Not thread safe (for stdout) !*/
void SpanU8_print(SpanU8 str) {
for (size_t i = 0; i < str.len; i++)
putc((int)*SpanU8_at(str, i), stdout);
}
/* Not thread safe (for `stream`) ! */
void SpanU8_fprint(SpanU8 str, FILE* stream) {
for (size_t i = 0; i < str.len; i++)
putc((int)*SpanU8_at(str, i), stream);
}
NODISCARD VecU8 VecU8_format(const char *fmt, ...) {
assert(fmt);
/* first pass: figure out required length */
va_list ap;
va_start(ap, fmt);
va_list ap_copy;
va_copy(ap_copy, ap);
/* bytes *without* NUL */
const int needed = vsnprintf(NULL, 0, fmt, ap);
va_end(ap);
if (needed < 0)
abortf("Formatting error\n");
const size_t len = (size_t)needed;
/* allocate buffer (+1 so vsnprintf can add its NUL) */
U8 *buf = safe_malloc(len + 1);
/* second pass: actually format */
vsnprintf((char *)buf, len + 1, fmt, ap_copy);
va_end(ap_copy);
return (VecU8){ .buf = buf, .len = len, .capacity = len + 1 };
}
NODISCARD VecU8 VecU8_fmt(const char* fmt, ...) {
assert(fmt);
size_t k = 0;
va_list args;
va_start(args, fmt);
for (const char *ch = fmt; *ch; ) {
if (*ch == '%') {
ch++;
if (*ch == '%') {
k++;
} else if (*ch == 's') {
SpanU8 s = va_arg(args, SpanU8);
k += s.len;
} else
abortf("Format syntax error at pos %lu! Watch out, be careful", (size_t)(ch - fmt));
} else {
k++;
}
ch++;
}
va_end(args);
VecU8 res = VecU8_new_reserved(k);
va_start(args, fmt);
for (const char *ch = fmt; *ch;) {
if (*ch == '%') {
ch++;
if (*ch == '%') {
VecU8_append(&res, '%');
} else if (*ch == 's') {
SpanU8 s = va_arg(args, SpanU8);
VecU8_append_span(&res, s);
} else
assert(false);
} else {
VecU8_append(&res, *ch);
}
ch++;
}
va_end(args);
return res;
}
#endif
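The two formatters deliberately differ: VecU8_format forwards to vsnprintf, so it has full printf semantics and %s expects a NUL-terminated char*, whereas the new VecU8_fmt understands only %s (which consumes a SpanU8 argument) and %%, so it can splice non-NUL-terminated spans directly. A minimal usage sketch, assuming the bootstrap define so chicken_VecU8.h is pulled in (the build normally passes it, as CMakeLists.txt does for codegen_l1):

#define PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8  /* normally a build flag */
#include "VecU8_as_str.h"

int main(void) {
    /* VecU8_fmt: %s consumes a SpanU8, %% emits a literal '%' */
    VecU8 a = VecU8_fmt("hello, %s! progress: 50%%\n", cstr("world"));
    SpanU8_print(VecU8_to_SpanU8(&a));

    /* VecU8_format: classic printf semantics via vsnprintf */
    VecU8 b = VecU8_format("formatted %zu bytes\n", a.len);
    SpanU8_print(VecU8_to_SpanU8(&b));

    VecU8_drop(b);
    VecU8_drop(a);
    return 0;
}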

View File

@ -1,34 +0,0 @@
#ifndef PROTOTYPE1_SRC_L1_CORE_VECU8_PRINT_H
#define PROTOTYPE1_SRC_L1_CORE_VECU8_PRINT_H
#include <stdarg.h>
#include "./VecSpan_int_primitives.h"
VecU8 VecU8_format(const char *fmt, ...)
{
if (!fmt)
abortf("NULL passed as format\n");
/* first pass: figure out required length */
va_list ap;
va_start(ap, fmt);
va_list ap_copy;
va_copy(ap_copy, ap);
/* bytes *without* NUL */
const int needed = vsnprintf(NULL, 0, fmt, ap);
va_end(ap);
if (needed < 0)
abortf("Formatting error\n");
const size_t len = (size_t)needed;
/* allocate buffer (+1 so vsnprintf can add its NUL) */
U8 *buf = safe_malloc(len + 1);
/* second pass: actually format */
vsnprintf((char *)buf, len + 1, fmt, ap_copy);
va_end(ap_copy);
return (VecU8){ .buf = buf, .len = len, .capacity = len + 1 };
}
#endif

100
src/l1/core/chicken_VecU8.h Normal file
View File

@ -0,0 +1,100 @@
#ifndef PROTOTYPE1_SRC_L1_CORE_CHICKEN_VECU8_H
#define PROTOTYPE1_SRC_L1_CORE_CHICKEN_VECU8_H
#ifndef PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8
#error "Use chicken_VecU8.h only during the very first l1 bootstrap stage"
#endif
#include "util.h"
typedef struct {
U8* buf;
size_t len;
size_t capacity;
} VecU8;
NODISCARD VecU8 VecU8_new() {
return (VecU8){NULL, 0, 0};
}
void VecU8_drop(VecU8 obj) {
free(obj.buf);
}
VecU8 VecU8_new_reserved(size_t n) {
return (VecU8){ .buf = safe_calloc(n, sizeof(U8)), .len = 0, .capacity = n };
}
void VecU8_append(VecU8* self, U8 el) {
size_t new_length = self->len + 1;
if (new_length > self->capacity) {
size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length);
self->buf = safe_realloc(self->buf, new_capacity * sizeof(U8));
self->capacity = new_capacity;
}
self->buf[self->len] = el;
self->len = new_length;
}
U8* VecU8_mat(VecU8* self, size_t i) {
assert(i < self->len);
return self->buf + i;
}
const U8* VecU8_at(const VecU8* self, size_t i) {
assert(i < self->len);
return self->buf + i;
}
NODISCARD VecU8 VecU8_clone(const VecU8* self) {
VecU8 res = (VecU8){.buf = safe_calloc(self->len, sizeof(U8)), .len = self->len, .capacity = self->len};
memcpy(res.buf, self->buf, self->len);
return res;
}
void VecU8_append_vec(VecU8* self, VecU8 b) {
size_t new_length = self->len + b.len;
if (new_length > self->capacity) {
size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length);
self->buf = safe_realloc(self->buf, new_capacity * sizeof(U8));
self->capacity = new_capacity;
}
for (size_t i = 0; i < b.len; i++) {
self->buf[self->len + i] = b.buf[i];
}
self->len = new_length;
free(b.buf);
}
typedef struct {
const U8* data;
size_t len;
} SpanU8;
const U8* SpanU8_at(SpanU8 self, size_t i) {
assert(i < self.len);
return self.data + i;
}
NODISCARD VecU8 VecU8_from_span(SpanU8 src) {
VecU8 res = (VecU8){ .buf = safe_calloc(src.len, sizeof(U8)), .len = src.len, .capacity = src.len };
memcpy(res.buf, src.data, src.len);
return res;
}
SpanU8 VecU8_to_SpanU8(const VecU8* vec) {
return (SpanU8){vec->buf, vec->len};
}
void VecU8_append_span(VecU8* self, SpanU8 b) {
size_t new_length = self->len + b.len;
if (new_length > self->capacity) {
size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length);
self->buf = safe_realloc(self->buf, new_capacity * sizeof(U8));
self->capacity = new_capacity;
}
memcpy(self->buf + self->len, b.data, b.len);
self->len = new_length;
}
#endif
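chicken_VecU8.h breaks the bootstrap cycle: codegen_l1 needs a byte vector before any generated header exists, so it is built with PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8 and VecU8_as_str.h falls back to this handwritten copy; once gen/l1/VecAndSpan_int_primitives.h has been generated, the same code compiles against the generated VecU8 instead. A minimal sketch of the bootstrap surface on its own (names taken from this header; util.h is assumed to provide U8, NODISCARD and the safe_* allocators):

#define PROTOTYPE1_L1_CODEGEN_BOOTSTRAP_USE_CHICKEN_VECU8  /* normally a build flag */
#include "chicken_VecU8.h"
#include <stdio.h>

int main(void) {
    VecU8 v = VecU8_new_reserved(8);
    VecU8_append(&v, 'h');
    VecU8_append_span(&v, (SpanU8){ .data = (const U8*)"i!\n", .len = 3 });
    SpanU8 s = VecU8_to_SpanU8(&v);
    for (size_t i = 0; i < s.len; i++)
        putchar(*SpanU8_at(s, i));
    VecU8_drop(v);
    return 0;
}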

View File

@ -14,33 +14,6 @@ typedef int16_t S16;
typedef int32_t S32;
typedef int64_t S64;
#define U8_drop(x) {}
#define U16_drop(x) {}
#define U32_drop(x) {}
#define U64_drop(x) {}
#define S8_drop(x) {}
#define S16_drop(x) {}
#define S32_drop(x) {}
#define S64_drop(x) {}
#define U8_clone(vp) (*(vp))
#define U16_clone(vp) (*(vp))
#define U32_clone(vp) (*(vp))
#define U64_clone(vp) (*(vp))
#define S8_clone(vp) (*(vp))
#define S16_clone(vp) (*(vp))
#define S32_clone(vp) (*(vp))
#define S64_clone(vp) (*(vp))
#define U8_equal_U8(ap, bp) (*(ap) == *(bp))
#define U16_equal_U16(ap, bp) (*(ap) == *(bp))
#define U32_equal_U32(ap, bp) (*(ap) == *(bp))
#define U64_equal_U64(ap, bp) (*(ap) == *(bp))
#define S8_equal_S8(ap, bp) (*(ap) == *(bp))
#define S16_equal_S16(ap, bp) (*(ap) == *(bp))
#define S32_equal_S32(ap, bp) (*(ap) == *(bp))
#define S64_equal_S64(ap, bp) (*(ap) == *(bp))
#define U8_less_U8(ap, bp) (*(ap) < *(bp))
#define U16_less_U16(ap, bp) (*(ap) < *(bp))
#define U32_less_U32(ap, bp) (*(ap) < *(bp))
@ -50,6 +23,7 @@ typedef int64_t S64;
#define S32_less_S32(ap, bp) (*(ap) < *(bp))
#define S64_less_S64(ap, bp) (*(ap) < *(bp))
// todo: move these to util (l1) template instantiation too
#define int_minmax_function_Definition(T) \
T MIN_##T (T a, T b){ return a < b ? a : b; } \
T MAX_##T (T a, T b){ return a < b ? b : a; }

View File

@ -53,6 +53,7 @@ void* safe_realloc(void* ptr, size_t n) {
return res;
}
// todo: rewrite it to make it faster
#define unsigned_safe_mul_Definition(TSZ) \
U##TSZ safe_mul_U##TSZ(U##TSZ a, U##TSZ b) { \
if (b > 0 && a > UINT##TSZ##_MAX / b) \
@ -62,10 +63,9 @@ U##TSZ safe_mul_U##TSZ(U##TSZ a, U##TSZ b) { \
unsigned_safe_mul_Definition(64)
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
// I assume that new_length > old_capacity
/* I assume that new_length > old_capacity */
size_t Vec_get_new_capacity(size_t old_capacity, size_t new_length) {
if (!old_capacity)
return new_length;
@ -74,244 +74,12 @@ size_t Vec_get_new_capacity(size_t old_capacity, size_t new_length) {
return old_capacity;
}
#define VecT_trivmove_struct_Definition_custom_name(T, VecT) \
typedef struct {T* buf; size_t len; size_t capacity;} VecT;
#define VecT_trivmove_method_Definition_custom_name(T, VecT) \
NODISCARD VecT VecT##_new() { \
return (VecT){NULL, 0, 0}; \
} \
void VecT##_drop(VecT obj) { \
for (size_t i = 0; i < obj.len; i++) \
T ## _drop(obj.buf[i]); \
free(obj.buf); \
} \
VecT VecT##_new_reserved(size_t n) {\
return (VecT){ .buf = safe_calloc(n, sizeof(T)), .len = 0, .capacity = n };\
} \
NODISCARD VecT VecT##_new_filled(size_t len, const T* el) { \
VecT res = (VecT){.buf = safe_calloc(len, sizeof(T)), .len = len, .capacity = len}; \
for (size_t i = 0; i < len; i++) \
res.buf[i] = T##_clone(el); \
return res; \
} \
void VecT##_append(VecT* self, T el) { \
size_t new_length = self->len + 1; \
if (new_length > self->capacity) { \
size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length); \
self->buf = safe_realloc(self->buf, new_capacity * sizeof(T)); \
self->capacity = new_capacity; \
} \
self->buf[self->len] = el; \
self->len = new_length; \
} \
NODISCARD T VecT##_pop(VecT* self) { \
assert(self->len > 0); \
self->len--; \
return self->buf[self->len]; \
} \
void VecT##_pop_and_drop(VecT* self) { \
T##_drop(VecT##_pop(self)); \
} \
NODISCARD VecT VecT##_swap_with_empty(VecT* cell) { \
VecT val = *cell; *cell = (VecT){NULL, 0, 0}; return val; \
} \
T* VecT##_at(VecT* self, size_t i) { \
assert(i < self->len); \
return self->buf + i; \
} \
const T* VecT##_cat(const VecT* self, size_t i) { \
assert(i < self->len); \
return self->buf + i; \
} \
NODISCARD VecT VecT##_clone(const VecT* self){ \
VecT res = (VecT){.buf = safe_calloc(self->len, sizeof(T)), .len = self->len, .capacity = self->len}; \
for (size_t i = 0; i < self->len; i++) \
res.buf[i] = T##_clone(&self->buf[i]); \
return res; \
} \
void VecT##_append_vec(VecT* self, VecT b) { \
size_t new_length = self->len + b.len; \
if (new_length > self->capacity) { \
size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length); \
self->buf = safe_realloc(self->buf, new_capacity * sizeof(T)); \
self->capacity = new_capacity; \
} \
for (size_t i = 0; i < b.len; i++){ \
self->buf[self->len + i] = b.buf[i]; \
} \
self->len = new_length; \
free(b.buf); \
}
#define VecT_trivmove_equal_method_Definition_custom_name(T, VecT) \
bool VecT##_equal_##VecT(const VecT* A, const VecT* B) { \
if (A->len != B->len) \
return false; \
for (size_t i = 0; i < A->len; i++) { \
if (!T##_equal_##T(A->buf + i, B->buf + i)) \
return false; \
} \
return true; \
}
#define VecT_primitive_zeroinit_method_Definition_custom_name(T, VecT) \
VecT VecT##_new_zeroinit(size_t len) { \
return (VecT){.buf = safe_calloc(len, sizeof(T)), .len = len, .capacity = len}; \
}
#define VecT_with_default_method_Definition_custom_name(T, VecT) \
VecT VecT##_new_of_size(size_t len) { \
VecT res = (VecT){.buf = safe_calloc(len, sizeof(T)), .len = len, .capacity = len}; \
for (size_t i = 0; i < len; i++) \
res.buf[i] = T##_new(); \
return res; \
}
#define VecT_trivmove_struct_Definition(T) VecT_trivmove_struct_Definition_custom_name(T, Vec##T)
#define VecT_trivmove_method_Definition(T) VecT_trivmove_method_Definition_custom_name(T, Vec##T)
#define VecT_trivmove_equal_method_Definition(T) VecT_trivmove_equal_method_Definition_custom_name(T, Vec##T)
#define VecT_primitive_zeroinit_method_Definition(T) VecT_primitive_zeroinit_method_Definition_custom_name(T, Vec##T)
#define VecT_with_default_method_Definition(T) VecT_with_default_method_Definition_custom_name(T, Vec##T)
#define SpanT_struct_Definition_custom_name(T, SpanT, ConstSpanT) \
typedef struct {const T* data; size_t len;} ConstSpanT; \
typedef struct {T* data; size_t len;} SpanT; \
// todo: rename span to MutSpan and ConstSpan to Span
#define SpanT_method_Definition_custom_name(T, SpanT, ConstSpanT) \
void ConstSpanT##_drop(ConstSpanT) {} \
void SpanT##_drop(SpanT) {} \
ConstSpanT ConstSpanT##_clone(ConstSpanT self) { \
return (ConstSpanT){.data = self.data, .len = self.len}; \
} \
SpanT SpanT##_clone(SpanT self) { \
return (SpanT){.data = self.data, .len = self.len}; \
} \
bool ConstSpanT##_equal_##ConstSpanT(ConstSpanT a, ConstSpanT b) { \
return a.data == b.data && a.len == b.len; \
} \
bool SpanT##_equal_##SpanT(SpanT a, SpanT b) { \
return a.data == b.data && a.len == b.len; \
} \
ConstSpanT SpanT##_to_##ConstSpanT(SpanT self) { \
return (ConstSpanT){.data = self.data, .len = self.len}; \
} \
SpanT SpanT##_span(SpanT span, size_t start, size_t len){ \
assert(start < SIZE_MAX - len && start + len <= span.len); \
return (SpanT){.data = span.data + start, .len = len}; \
}; \
ConstSpanT ConstSpanT##_cspan(ConstSpanT span, size_t start, size_t len){ \
assert(start < SIZE_MAX - len && start + len <= span.len); \
return (ConstSpanT){.data = span.data + start, .len = len}; \
}; \
T* SpanT##_at(SpanT self, size_t i) { \
assert(i < self.len); \
return self.data + i; \
} \
const T* SpanT##_cat(const SpanT self, size_t i) { \
assert(i < self.len); \
return self.data + i; \
} \
const T* ConstSpanT##_at(ConstSpanT self, size_t i) { \
assert(i < self.len); \
return self.data + i; \
}
#define SpanT_VecT_method_Definition_custom_name(T, SpanT, ConstSpanT, VecT) \
NODISCARD VecT VecT##_from_##span(ConstSpanT src){ \
VecT res = (VecT){ .buf = safe_calloc(src.len, sizeof(T)), .len = src.len, .capacity = src.len }; \
for (size_t i = 0; i < src.len; i++) \
res.buf[i] = T##_clone(&src.data[i]); \
return res; \
} \
ConstSpanT VecT##_to_##ConstSpanT(const VecT* vec){ \
return (ConstSpanT){vec->buf, vec->len}; \
} \
SpanT VecT##_to_##SpanT(VecT* vec){ \
return (SpanT){vec->buf, vec->len}; \
} \
SpanT VecT##_span(VecT* vec, size_t start, size_t len){ \
assert(start < SIZE_MAX - len && start + len <= vec->len); \
return (SpanT){.data = vec->buf + start, .len = len}; \
} \
ConstSpanT VecT##_cspan(const VecT* vec, size_t start, size_t len){ \
assert(start < SIZE_MAX - len && start + len <= vec->len); \
return (ConstSpanT){.data = vec->buf + start, .len = len}; \
} \
void VecT##_append_span(VecT* self, ConstSpanT b) { \
size_t new_length = self->len + b.len; \
if (new_length > self->capacity) { \
size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length); \
self->buf = safe_realloc(self->buf, new_capacity * sizeof(T)); \
self->capacity = new_capacity; \
} \
for (size_t i = 0; i < b.len; i++){ \
self->buf[self->len + i] = T##_clone(&b.data[i]); \
} \
self->len = new_length; \
} \
// Requires normal less method and adds qcompare method for T to use in qsort
#define SpanT_comparable_method_Definition_custom_name(T, SpanT, ConstSpanT) \
int T##_qcompare(const void* a, const void* b) { \
const T* A = (const T*)a; \
const T* B = (const T*)b; \
return (int)T##_less_##T(B, A) - (int)T##_less_##T(A, B); \
} \
void SpanT##_sort(SpanT self) { \
qsort(self.data, self.len, sizeof(T), T##_qcompare); \
}
#define SpanT_struct_Definition(T) SpanT_struct_Definition_custom_name(T, Span##T, ConstSpan##T)
#define SpanT_method_Definition(T) SpanT_method_Definition_custom_name(T, Span##T, ConstSpan##T)
#define SpanT_VecT_method_Definition(T) SpanT_VecT_method_Definition_custom_name(T, Span##T, ConstSpan##T, Vec##T)
#define SpanT_comparable_method_Definition(T) SpanT_comparable_method_Definition_custom_name(T, Span##T, ConstSpan##T)
#define SpanT_VecT_trivmove_COMPLETE_Definition(T) \
VecT_trivmove_struct_Definition(T) VecT_trivmove_method_Definition(T) \
SpanT_struct_Definition(T) SpanT_method_Definition(T) SpanT_VecT_method_Definition(T) \
VecT_trivmove_equal_method_Definition(T)
#define OptionT_struct_Definition_custom_name(T, OptionT) \
typedef struct { Option_variant variant; T some; } OptionT;
#define OptionT_method_Definition_custom_name(T, OptionT) \
OptionT None_##T(){\
return (OptionT){ .variant = Option_None }; \
}; \
OptionT Some_##T(T obj){ \
return (OptionT){ .variant = Option_Some, .some = obj }; \
} \
bool OptionT##_is_some(const OptionT* self) { \
return self->variant == Option_Some; \
}\
bool OptionT##_is_none(const OptionT* self) { \
return self->variant == Option_None; \
}\
const T* OptionT##_expect_const_ptr(const OptionT* self){ \
if (self->variant == Option_None) \
abortf("Expected something in const " #OptionT "* got None\n"); \
return &self->some; \
} \
T* OptionT##_expect_ptr(OptionT* self){ \
if (self->variant == Option_None) \
abortf("Expected something in " #OptionT "* got None\n"); \
return &self->some; \
} \
T OptionT##_expect(OptionT self){ \
if (self.variant == Option_None) \
abortf("Expected something in " #OptionT " got None\n"); \
return self.some; \
}
// todo: add clone and drop methods
#define OptionT_struct_Definition(T) OptionT_struct_Definition_custom_name(T, Option##T)
#define OptionT_method_Definition(T) OptionT_method_Definition_custom_name(T, Option##T)
// #define SpanT_VecT_trivmove_COMPLETE_Definition(T) \
// VecT_trivmove_struct_Definition(T) VecT_trivmove_method_Definition(T) \
// SpanT_struct_Definition(T) SpanT_method_Definition(T) SpanT_VecT_method_Definition(T) \
// VecT_trivmove_equal_method_Definition(T)
float pow2f(float x) {
return x * x;

View File

@ -1,7 +1,7 @@
#ifndef PROTOTYPE1_SRC_SYSTEM_FILEIO_H
#define PROTOTYPE1_SRC_SYSTEM_FILEIO_H
#include "../core/VecSpan_int_primitives.h"
#include "../core/VecU8_as_str.h"
#include <stdio.h>
#include <stdlib.h>
@ -43,7 +43,7 @@ NODISCARD VecU8 read_whole_file_or_abort(const char* filename) {
if (fseek(fp, 0, SEEK_SET) != 0) {
abortf("fseek: %s\n", strerror(errno));
}
VecU8 result = VecU8_new_zeroinit(file_size);
VecU8 result = (VecU8){.buf = safe_malloc(file_size), .len = file_size, .capacity = file_size};
size_t nread = fread(result.buf, 1, (size_t)file_size, fp);
if (nread < file_size) {
abortf("fread\n");
@ -52,7 +52,7 @@ NODISCARD VecU8 read_whole_file_or_abort(const char* filename) {
return result;
}
void write_whole_file_or_abort(const char* filename, ConstSpanU8 content) {
void write_whole_file_or_abort(const char* filename, SpanU8 content) {
FILE* fd = fopen(filename, "wb");
if (!fd) {
abortf("Can't open file %s: %s\n", filename, strerror(errno));

View File

@ -262,7 +262,7 @@ void generate_func_clip_triang_on_triang_case_where_some_vertex_stuck(VecU8* res
{
/* Case where TA and TB are in different 'thirds of surface' and the vertex of tC that defines
* border is outside tT. Result is a pentagon */
VecU8_append_span(res, cstr(SPACE12 "if (\n"));
VecU8_append_span(res, cstr(SPACE12 "if ("));
append_on_the_right_stmt(res, tT, TA, tT, TB, tC, mod3_inc(rc));
VecU8_append_span(res, cstr(") {\n"));
{

View File

@ -1,7 +1,7 @@
#include "codegen/geom.h"
#include "codegen/pixel_masses.h"
#include "codegen/clipping.h"
#include "../l1/system/fsmanip.h"
#include "geom.h"
#include "pixel_masses.h"
#include "clipping.h"
#include "../../l1/system/fsmanip.h"
int main() {
make_dir_nofail("l2");

View File

@ -278,6 +278,7 @@ ResultMargaretChosenQueueFamiliesOrConstSpanU8 margaret_choose_good_queue_famili
index_for_presentation = Some_U32(i);
}
VecVkQueueFamilyProperties_drop(queue_families);
// todo: method _is_none will soon be gone
if (OptionU32_is_none(&index_for_graphics))
return (ResultMargaretChosenQueueFamiliesOrConstSpanU8){ .variant = Result_Err, .err = cstr("No graphics queue family") };
if (OptionU32_is_none(&index_for_presentation))
@ -305,6 +306,7 @@ VecVecU8 margaret_get_extensions_of_physical_device(VkPhysicalDevice physical_de
abortf("vkEnumerateDeviceExtensionProperties");
VecVecU8 res = VecVecU8_new_of_size(extensions_count);
for (size_t i = 0; i < extensions_count; i++) {
// todo: swap with empty
// Previous value here was default (_new). It can be safely discarded
// ->extensionName is some null-terminated string, we need to acquire a copy
*VecVecU8_at(&res, i) = vcstr(VecVkExtensionProperties_cat(&extensions, i)->extensionName);

32
src/l2/marie/shape_geom.h Normal file
View File

@ -0,0 +1,32 @@
#ifndef PROTOTYPE1_SRC_L2_MARIE_SHAPE_GEOM_H
#define PROTOTYPE1_SRC_L2_MARIE_SHAPE_GEOM_H
#include "../../../gen/l2/marie/clipping.h"
// // todo: move to autogenerated files (and autogenerate span and vector definitions)
// SpanT_struct_Definition(vec2)
// SpanT_method_Definition(vec2)
void marie_clip_triang_with_triang_append_to_Vec(MarieTriangle C, MarieTriangle T, VecMarieTriangle* pile) {
float SC = marie_surface(C.v0, C.v1, C.v2);
if (SC < 0) {
vec2 t = C.v0;
C.v0 = C.v1;
C.v1 = t;
}
float ST = marie_surface(T.v0, T.v1, T.v2);
if (ST < 0) {
vec2 t = T.v0;
T.v0 = T.v1;
T.v1 = t;
}
marie_clip_ccw_triang_with_ccw_triang_append_to_Vec(C, T, pile);
}
// /* Better allocate 2n elements in pile */
// void marie_closed_path_to_polygon_outline_tangy_append_to_Vec(ConstSpanvec2 path, float thickness, VecMarieTriangle* pile) {
// size_t n = path.len;
// // for (size_t )
// }
#endif
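As a usage sketch only: the wrapper above normalizes winding (marie_surface < 0 means clockwise, so two vertices are swapped) before delegating to the ccw clipper, so callers may pass triangles in either orientation. The snippet below assumes, beyond what this commit shows, that vec2 can be brace-initialized and that the generated clipping.h provides VecMarieTriangle_new/_drop; treat those names as hypothetical.

#include "shape_geom.h"

int main(void) {
    /* vertex order of both triangles is irrelevant: winding is fixed up inside */
    MarieTriangle clip    = { .v0 = {0.0f, 0.0f}, .v1 = {1.0f, 0.0f}, .v2 = {0.0f, 1.0f} };
    MarieTriangle subject = { .v0 = {0.2f, 0.2f}, .v1 = {2.0f, 0.2f}, .v2 = {0.2f, 2.0f} };
    VecMarieTriangle pile = VecMarieTriangle_new();       /* assumed generated ctor */
    marie_clip_triang_with_triang_append_to_Vec(clip, subject, &pile);
    /* pile now holds the triangulated intersection */
    VecMarieTriangle_drop(pile);
    return 0;
}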