commit e05eb37c79e62026fdac9d17d74f54df63482b61 Author: Andreev Gregory Date: Sun Jun 8 04:55:08 2025 +0300 working triangles + uniform transfer diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9985800 --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +*.spv +cmake-build-debug/ +.idea/ +vgcore.* +gen/ \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..02d6daa --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,21 @@ +cmake_minimum_required(VERSION 3.30) +project(splitter_draft C) + +#include_directories(${CMAKE_SOURCE_DIR}) +set(CMAKE_C_FLAGS "-Wall -Wextra -Werror=implicit-function-declaration -Werror=return-type --std=c99 -g -ggdb -O0") + +add_compile_definitions(_POSIX_C_SOURCE=200112L ) + +add_executable(main src/l1/main.c) + +add_executable(0_test src/l1/tests/t0.c) +add_executable(1_test src/l1/tests/t1.c) + +add_executable(0_render_test src/l2/tests/r0.c) +target_link_libraries(0_render_test -lvulkan -lX11 -lm) + +# Recursively collect all .h files in the src directory. 
+file(GLOB_RECURSE HEADER_FILES "${CMAKE_SOURCE_DIR}/src/*.h") + +# Do not build utku +add_executable(utka src/l1/main.c ${HEADER_FILES}) diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..1728ba2 --- /dev/null +++ b/Makefile @@ -0,0 +1,12 @@ +HEADERS := $(shell find src -type f -name '*.h') + +all: prototype1 + +prototype1: src/l1/main.c $(HEADERS) + @gcc --std c99 -o $@ src/l1/main.c + +clean: + @rm -f prototype1 + +.PHONY: all clean + diff --git a/src/l1/core/Option_int_primitives.h b/src/l1/core/Option_int_primitives.h new file mode 100644 index 0000000..b3d4afb --- /dev/null +++ b/src/l1/core/Option_int_primitives.h @@ -0,0 +1,9 @@ +#ifndef PROTOTYPE1_SRC_L1_CORE_OPTION_INT_PRIMITIVES_H +#define PROTOTYPE1_SRC_L1_CORE_OPTION_INT_PRIMITIVES_H + +#include "util.h" + +OptionT_struct_Definition(U32) +OptionT_method_Definition(U32) + +#endif diff --git a/src/l1/core/VecSpan_Vec_int_primitives.h b/src/l1/core/VecSpan_Vec_int_primitives.h new file mode 100644 index 0000000..8a4ac6b --- /dev/null +++ b/src/l1/core/VecSpan_Vec_int_primitives.h @@ -0,0 +1,13 @@ +#ifndef PROTOTYPE1_SRC_CORE_VECSPAN_VECSPAN_INT_PRIMITIVES_H +#define PROTOTYPE1_SRC_CORE_VECSPAN_VECSPAN_INT_PRIMITIVES_H + +#include "VecSpan_int_primitives.h" + +SpanT_VecT_trivmove_COMPLETE_Definition(VecU8) +VecT_with_default_method_Definition(VecU8) +SpanT_VecT_trivmove_COMPLETE_Definition(VecU32) +VecT_with_default_method_Definition(VecU32) +SpanT_VecT_trivmove_COMPLETE_Definition(VecU64) +VecT_with_default_method_Definition(VecU64) + +#endif diff --git a/src/l1/core/VecSpan_int_primitives.h b/src/l1/core/VecSpan_int_primitives.h new file mode 100644 index 0000000..65b9a1f --- /dev/null +++ b/src/l1/core/VecSpan_int_primitives.h @@ -0,0 +1,38 @@ +#ifndef PROTOTYPE1_SRC_CORE_VECU8_H +#define PROTOTYPE1_SRC_CORE_VECU8_H + +#include "util.h" + + +SpanT_VecT_trivmove_COMPLETE_Definition(U8) +VecT_primitive_zeroinit_method_Definition(U8) +SpanT_comparable_method_Definition(U8) + +VecU8 
VecU8_from_cstr(const char* dc) { + size_t n = strlen(dc); + VecU8 res = (VecU8){ .buf = safe_calloc(n, 1), .len = n, .capacity = n }; + memcpy(res.buf, dc, n); + return res; +} +#define vcstr(dc) VecU8_from_cstr(dc) + +ConstSpanU8 ConstSpanU8_from_cstr(const char* dc) { + return (ConstSpanU8){.data = (U8*)dc, .len = strlen(dc)}; +} +#define cstr(dc) ConstSpanU8_from_cstr(dc) + +void ConstSpanU8_print(ConstSpanU8 str) { + for (size_t i = 0; i < str.len; i++) + putchar((int)*ConstSpanU8_at(str, i)); +} + +SpanT_VecT_trivmove_COMPLETE_Definition(U32) +VecT_primitive_zeroinit_method_Definition(U32) +SpanT_comparable_method_Definition(U32) + +SpanT_VecT_trivmove_COMPLETE_Definition(U64) +VecT_primitive_zeroinit_method_Definition(U64) +SpanT_comparable_method_Definition(U64) + + +#endif diff --git a/src/l1/core/int_primitives.h b/src/l1/core/int_primitives.h new file mode 100644 index 0000000..3e4e7ca --- /dev/null +++ b/src/l1/core/int_primitives.h @@ -0,0 +1,61 @@ +#ifndef PROTOTYPE1_SRC_CORE_INT_PRIMITIVES_H +#define PROTOTYPE1_SRC_CORE_INT_PRIMITIVES_H + +#include + +// *Crosses-fingers* Please optimize this all out, please optimize this all out + +typedef uint8_t U8; +typedef uint16_t U16; +typedef uint32_t U32; +typedef uint64_t U64; +typedef int8_t S8; +typedef int16_t S16; +typedef int32_t S32; +typedef int64_t S64; + +#define U8_drop(x) {} +#define U16_drop(x) {} +#define U32_drop(x) {} +#define U64_drop(x) {} +#define S8_drop(x) {} +#define S16_drop(x) {} +#define S32_drop(x) {} +#define S64_drop(x) {} + +#define U8_clone(vp) (*(vp)) +#define U16_clone(vp) (*(vp)) +#define U32_clone(vp) (*(vp)) +#define U64_clone(vp) (*(vp)) +#define S8_clone(vp) (*(vp)) +#define S16_clone(vp) (*(vp)) +#define S32_clone(vp) (*(vp)) +#define S64_clone(vp) (*(vp)) + +#define U8_equal_U8(ap, bp) (*(ap) == *(bp)) +#define U16_equal_U16(ap, bp) (*(ap) == *(bp)) +#define U32_equal_U32(ap, bp) (*(ap) == *(bp)) +#define U64_equal_U64(ap, bp) (*(ap) == *(bp)) +#define S8_equal_S8(ap, 
bp) (*(ap) == *(bp)) +#define S16_equal_S16(ap, bp) (*(ap) == *(bp)) +#define S32_equal_S32(ap, bp) (*(ap) == *(bp)) +#define S64_equal_S64(ap, bp) (*(ap) == *(bp)) + +#define U8_less_U8(ap, bp) (*(ap) < *(bp)) +#define U16_less_U16(ap, bp) (*(ap) < *(bp)) +#define U32_less_U32(ap, bp) (*(ap) < *(bp)) +#define U64_less_U64(ap, bp) (*(ap) < *(bp)) +#define S8_less_S8(ap, bp) (*(ap) < *(bp)) +#define S16_less_S16(ap, bp) (*(ap) < *(bp)) +#define S32_less_S32(ap, bp) (*(ap) < *(bp)) +#define S64_less_S64(ap, bp) (*(ap) < *(bp)) + +#define int_minmax_function_Definition(T) \ +T MIN_##T (T a, T b){ return a < b ? a : b; } \ +T MAX_##T (T a, T b){ return a < b ? b : a; } + +int_minmax_function_Definition(U8) +int_minmax_function_Definition(U32) +int_minmax_function_Definition(U64) + +#endif diff --git a/src/l1/core/util.h b/src/l1/core/util.h new file mode 100644 index 0000000..19e85c1 --- /dev/null +++ b/src/l1/core/util.h @@ -0,0 +1,312 @@ +#ifndef PROTOTYPE1_SRC_CORE_UTIL_H +#define PROTOTYPE1_SRC_CORE_UTIL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "int_primitives.h" + +// Gonna change it when I face pedantic compilers +#define NORETURN [[noreturn]] +#define NODISCARD [[nodiscard]] + +typedef enum { + Option_Some, Option_None +} Option_variant; + +typedef enum { + Result_Ok, Result_Err +} Result_variant; + +NORETURN +void abortf(const char* format, ...) 
{ + va_list args; + va_start(args, format); + vfprintf(stderr, format, args); + va_end(args); + abort(); +} + +void* safe_malloc(size_t n) { + void* res = malloc(n); + if (!res) + abortf("allocation failure"); + return res; +} + +void* safe_calloc(size_t nmemb, size_t size) { + void* res = calloc(nmemb, size); + if (!res) + abortf("allocation failure"); + return res; +} + +void* safe_realloc(void* ptr, size_t n) { + void* res = realloc(ptr, n); + if (!res && n) + abortf("allocation failure"); + return res; +} + +#define unsigned_safe_mul_Definition(TSZ) \ +U##TSZ safe_mul_U##TSZ(U##TSZ a, U##TSZ b) { \ + if (b > 0 && a > UINT##TSZ##_MAX / b) \ + abortf("Overflow in multiplication: %" PRIu##TSZ " * %" PRIu##TSZ "\n", a, b); \ + return a * b; \ +} + +unsigned_safe_mul_Definition(64) + + +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +// I assume that new_length > old_capacity +size_t Vec_get_new_capacity(size_t old_capacity, size_t new_length) { + if (!old_capacity) + return new_length; + while (old_capacity < new_length) + old_capacity *= 2; + return old_capacity; +} + +#define VecT_trivmove_struct_Definition_custom_name(T, VecT) \ +typedef struct {T* buf; size_t len; size_t capacity;} VecT; + +#define VecT_trivmove_method_Definition_custom_name(T, VecT) \ +NODISCARD VecT VecT##_new() { \ + return (VecT){NULL, 0, 0}; \ +} \ +void VecT##_drop(VecT obj) { \ + for (size_t i = 0; i < obj.len; i++) \ + T ## _drop(obj.buf[i]); \ + free(obj.buf); \ +} \ +NODISCARD VecT VecT##_new_filled(size_t len, const T* el) { \ + VecT res = (VecT){.buf = safe_calloc(len, sizeof(T)), .len = len, .capacity = len}; \ + for (size_t i = 0; i < len; i++) \ + res.buf[i] = T##_clone(el); \ + return res; \ +} \ +void VecT##_append(VecT* self, T el) { \ + size_t new_length = self->len + 1; \ + if (new_length > self->capacity) { \ + size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length); \ + self->buf = safe_realloc(self->buf, new_capacity * sizeof(T)); \ + 
self->capacity = new_capacity; \ + } \ + self->buf[self->len] = el; \ + self->len = new_length; \ +} \ +NODISCARD T VecT##_pop(VecT* self) { \ + assert(self->len > 0); \ + self->len--; \ + return self->buf[self->len]; \ +} \ +void VecT##_pop_and_drop(VecT* self) { \ + T##_drop(VecT##_pop(self)); \ +} \ +NODISCARD VecT VecT##_swap_with_empty(VecT* cell) { \ + VecT val = *cell; *cell = (VecT){NULL, 0, 0}; return val; \ +} \ +T* VecT##_at(VecT* self, size_t i) { \ + assert(i < self->len); \ + return self->buf + i; \ +} \ +const T* VecT##_cat(const VecT* self, size_t i) { \ + assert(i < self->len); \ + return self->buf + i; \ +} \ +NODISCARD VecT VecT##_clone(const VecT* self){ \ + VecT res = (VecT){.buf = safe_calloc(self->len, sizeof(T)), .len = self->len, .capacity = self->len}; \ + for (size_t i = 0; i < self->len; i++) \ + res.buf[i] = T##_clone(&self->buf[i]); \ + return res; \ +} \ +void VecT##_append_vec(VecT* self, VecT b) { \ + size_t new_length = self->len + b.len; \ + if (new_length > self->capacity) { \ + size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length); \ + self->buf = safe_realloc(self->buf, new_capacity * sizeof(T)); \ + self->capacity = new_capacity; \ + } \ + for (size_t i = 0; i < b.len; i++){ \ + self->buf[self->len + i] = b.buf[i]; \ + } \ + self->len = new_length; \ + free(b.buf); \ +} + + +#define VecT_trivmove_equal_method_Definition_custom_name(T, VecT) \ +bool VecT##_equal_##VecT(const VecT* A, const VecT* B) { \ + if (A->len != B->len) \ + return false; \ + for (size_t i = 0; i < A->len; i++) { \ + if (!T##_equal_##T(A->buf + i, B->buf + i)) \ + return false; \ + } \ + return true; \ +} + +#define VecT_primitive_zeroinit_method_Definition_custom_name(T, VecT) \ +VecT VecT##_new_zeroinit(size_t len) { \ + return (VecT){.buf = safe_calloc(len, sizeof(T)), .len = len, .capacity = len}; \ +} + +#define VecT_with_default_method_Definition_custom_name(T, VecT) \ +VecT VecT##_new_of_size(size_t len) { \ + VecT res = (VecT){.buf 
= safe_calloc(len, sizeof(T)), .len = len, .capacity = len}; \ + for (size_t i = 0; i < len; i++) \ + res.buf[i] = T##_new(); \ + return res; \ +} + +#define VecT_trivmove_struct_Definition(T) VecT_trivmove_struct_Definition_custom_name(T, Vec##T) +#define VecT_trivmove_method_Definition(T) VecT_trivmove_method_Definition_custom_name(T, Vec##T) +#define VecT_trivmove_equal_method_Definition(T) VecT_trivmove_equal_method_Definition_custom_name(T, Vec##T) +#define VecT_primitive_zeroinit_method_Definition(T) VecT_primitive_zeroinit_method_Definition_custom_name(T, Vec##T) +#define VecT_with_default_method_Definition(T) VecT_with_default_method_Definition_custom_name(T, Vec##T) + +#define SpanT_struct_Definition_custom_name(T, SpanT, ConstSpanT) \ +typedef struct {const T* data; size_t len;} ConstSpanT; \ +typedef struct {T* data; size_t len;} SpanT; \ + +// todo: rename span to MutSpan and ConstSpan to Span +#define SpanT_method_Definition_custom_name(T, SpanT, ConstSpanT) \ +void ConstSpanT##_drop(ConstSpanT) {} \ +void SpanT##_drop(SpanT) {} \ +ConstSpanT ConstSpanT##_clone(ConstSpanT self) { \ + return (ConstSpanT){.data = self.data, .len = self.len}; \ +} \ +SpanT SpanT##_clone(SpanT self) { \ + return (SpanT){.data = self.data, .len = self.len}; \ +} \ +bool ConstSpanT##_equal_##ConstSpanT(ConstSpanT a, ConstSpanT b) { \ + return a.data == b.data && a.len == b.len; \ +} \ +bool SpanT##_equal_##SpanT(SpanT a, SpanT b) { \ + return a.data == b.data && a.len == b.len; \ +} \ +ConstSpanT SpanT##_to_##ConstSpanT(SpanT self) { \ + return (ConstSpanT){.data = self.data, .len = self.len}; \ +} \ +SpanT SpanT##_span(SpanT span, size_t start, size_t len){ \ + assert(start < SIZE_MAX - len && start + len <= span.len); \ + return (SpanT){.data = span.data + start, .len = len}; \ +}; \ +ConstSpanT ConstSpanT##_cspan(ConstSpanT span, size_t start, size_t len){ \ + assert(start < SIZE_MAX - len && start + len <= span.len); \ + return (ConstSpanT){.data = span.data + start, 
.len = len}; \ +}; \ +T* SpanT##_at(SpanT self, size_t i) { \ + assert(i < self.len); \ + return self.data + i; \ +} \ +const T* SpanT##_cat(const SpanT self, size_t i) { \ + assert(i < self.len); \ + return self.data + i; \ +} \ +const T* ConstSpanT##_at(ConstSpanT self, size_t i) { \ + assert(i < self.len); \ + return self.data + i; \ +} + +#define SpanT_VecT_method_Definition_custom_name(T, SpanT, ConstSpanT, VecT) \ +NODISCARD VecT VecT##_from_##span(ConstSpanT src){ \ + VecT res = (VecT){ .buf = safe_calloc(src.len, sizeof(T)), .len = src.len, .capacity = src.len }; \ + for (size_t i = 0; i < src.len; i++) \ + res.buf[i] = T##_clone(&src.data[i]); \ + return res; \ +} \ +ConstSpanT VecT##_to_##ConstSpanT(const VecT* vec){ \ + return (ConstSpanT){vec->buf, vec->len}; \ +} \ +SpanT VecT##_to_##SpanT(VecT* vec){ \ + return (SpanT){vec->buf, vec->len}; \ +} \ +SpanT VecT##_span(VecT* vec, size_t start, size_t len){ \ + assert(start < SIZE_MAX - len && start + len <= vec->len); \ + return (SpanT){.data = vec->buf + start, .len = len}; \ +} \ +ConstSpanT VecT##_cspan(const VecT* vec, size_t start, size_t len){ \ + assert(start < SIZE_MAX - len && start + len <= vec->len); \ + return (ConstSpanT){.data = vec->buf + start, .len = len}; \ +} \ +void VecT##_append_span(VecT* self, ConstSpanT b) { \ + size_t new_length = self->len + b.len; \ + if (new_length > self->capacity) { \ + size_t new_capacity = Vec_get_new_capacity(self->capacity, new_length); \ + self->buf = safe_realloc(self->buf, new_capacity * sizeof(T)); \ + self->capacity = new_capacity; \ + } \ + for (size_t i = 0; i < b.len; i++){ \ + self->buf[self->len + i] = T##_clone(&b.data[i]); \ + } \ + self->len = new_length; \ +} \ + + +// Requires normal less method and adds qcompare method for T to use in qsort +#define SpanT_comparable_method_Definition_custom_name(T, SpanT, ConstSpanT) \ +int T##_qcompare(const void* a, const void* b) { \ + const T* A = (const T*)a; \ + const T* B = (const T*)b; \ + return 
(int)T##_less_##T(B, A) - (int)T##_less_##T(A, B); \ +} \ +void SpanT##_sort(SpanT self) { \ + qsort(self.data, self.len, sizeof(T), T##_qcompare); \ +} + +#define SpanT_struct_Definition(T) SpanT_struct_Definition_custom_name(T, Span##T, ConstSpan##T) +#define SpanT_method_Definition(T) SpanT_method_Definition_custom_name(T, Span##T, ConstSpan##T) +#define SpanT_VecT_method_Definition(T) SpanT_VecT_method_Definition_custom_name(T, Span##T, ConstSpan##T, Vec##T) +#define SpanT_comparable_method_Definition(T) SpanT_comparable_method_Definition_custom_name(T, Span##T, ConstSpan##T) + +#define SpanT_VecT_trivmove_COMPLETE_Definition(T) \ + VecT_trivmove_struct_Definition(T) VecT_trivmove_method_Definition(T) \ + SpanT_struct_Definition(T) SpanT_method_Definition(T) SpanT_VecT_method_Definition(T) + + + +#define OptionT_struct_Definition_custom_name(T, OptionT) \ +typedef struct { Option_variant variant; T some; } OptionT; + +#define OptionT_method_Definition_custom_name(T, OptionT) \ +OptionT None_##T(){\ + return (OptionT){ .variant = Option_None }; \ +}; \ +OptionT Some_##T(T obj){ \ + return (OptionT){ .variant = Option_Some, .some = obj }; \ +} \ +bool OptionT##_is_some(const OptionT* self) { \ + return self->variant == Option_Some; \ +}\ +bool OptionT##_is_none(const OptionT* self) { \ + return self->variant == Option_None; \ +}\ +const T* OptionT##_expect_const_ptr(const OptionT* self){ \ + if (self->variant == Option_None) \ + abortf("Expected something in const " #OptionT "* got None\n"); \ + return &self->some; \ +} \ +T* OptionT##_expect_ptr(OptionT* self){ \ + if (self->variant == Option_None) \ + abortf("Expected something in " #OptionT "* got None\n"); \ + return &self->some; \ +} \ +T OptionT##_expect(OptionT self){ \ + if (self.variant == Option_None) \ + abortf("Expected something in " #OptionT " got None\n"); \ + return self.some; \ +} +// todo: add clone and drop methods + +#define OptionT_struct_Definition(T) OptionT_struct_Definition_custom_name(T, 
Option##T) +#define OptionT_method_Definition(T) OptionT_method_Definition_custom_name(T, Option##T) + +#endif diff --git a/src/l1/main.c b/src/l1/main.c new file mode 100644 index 0000000..7768002 --- /dev/null +++ b/src/l1/main.c @@ -0,0 +1,76 @@ +#include + +#include "system/fileio.h" + +NODISCARD VecU8 begin_header(ConstSpanU8 guard) { + VecU8 res = VecU8_new(); + VecU8_append_span(&res, cstr("#ifndef ")); + VecU8_append_span(&res, guard); + VecU8_append_span(&res, cstr("\n#define ")); + VecU8_append_span(&res, guard); + VecU8_append_span(&res, cstr("\n/* Automatically generated file. Do not edit it. */\n")); + return res; +} + +void finish_header(VecU8 text_before_endif) { + VecU8_append_span(&text_before_endif, cstr("#endif\n")); + write_whole_file_or_abort("geom.h", VecU8_to_ConstSpanU8(&text_before_endif)); + VecU8_drop(text_before_endif); +} + +void string_append_spaces(VecU8* str, int sc) { + for (int i = 0; i < sc; i++) + VecU8_append(str, ' '); +} + +NODISCARD VecU8 generate_xvecy_struct_definition(ConstSpanU8 xvec, ConstSpanU8 member, int cc) { + assert(2 <= cc && cc <= 4); + VecU8 res = VecU8_new(); + VecU8_append_span(&res, cstr("typedef struct {\n")); + + string_append_spaces(&res, 4); + VecU8_append_span(&res, member); + VecU8_append_span(&res, cstr(" x;\n")); + + string_append_spaces(&res, 4); + VecU8_append_span(&res, member); + VecU8_append_span(&res, cstr(" y;\n")); + + if (cc >= 3) { + string_append_spaces(&res, 4); + VecU8_append_span(&res, member); + VecU8_append_span(&res, cstr(" z;\n")); + } + if (cc >= 4) { + string_append_spaces(&res, 4); + VecU8_append_span(&res, member); + VecU8_append_span(&res, cstr(" w;\n")); + } + + VecU8_append_span(&res, cstr("} ")); + VecU8_append_span(&res, xvec); + VecU8_append(&res, '0' + cc); + VecU8_append_span(&res, cstr(";\n\n")); + return res; +} + +NODISCARD VecU8 generate_xvec234_struct_definition(ConstSpanU8 xvec, ConstSpanU8 member) { + VecU8 res = VecU8_new(); + VecU8_append_vec(&res, 
generate_xvecy_struct_definition(xvec, member, 2)); + VecU8_append_vec(&res, generate_xvecy_struct_definition(xvec, member, 3)); + VecU8_append_vec(&res, generate_xvecy_struct_definition(xvec, member, 4)); + return res; +} + +void generate_geometry_header() { + VecU8 res = begin_header(cstr("PROTOTYPE1_GEN_GEOM")); + VecU8_append_vec(&res, generate_xvec234_struct_definition(cstr("ivec"), cstr("int32_t"))); + VecU8_append_vec(&res, generate_xvec234_struct_definition(cstr("uvec"), cstr("uint32_t"))); + VecU8_append_vec(&res, generate_xvec234_struct_definition(cstr("vec"), cstr("float"))); + VecU8_append_vec(&res, generate_xvec234_struct_definition(cstr("dvec"), cstr("double"))); + finish_header(res); +} + +int main() { + generate_geometry_header(); +} \ No newline at end of file diff --git a/src/l1/system/fileio.h b/src/l1/system/fileio.h new file mode 100644 index 0000000..ede01bf --- /dev/null +++ b/src/l1/system/fileio.h @@ -0,0 +1,67 @@ +#ifndef PROTOTYPE1_SRC_SYSTEM_FILEIO_H +#define PROTOTYPE1_SRC_SYSTEM_FILEIO_H + +#include "../core/VecSpan_int_primitives.h" + +#include +#include +#include +#include + +typedef struct { + Result_variant variant; + union { + VecU8 Ok; + int Err; + }; +} Result_VecU8_or_int; + +void Result_VecU8_or_int_drop(Result_VecU8_or_int obj) { + if (obj.variant == Result_Ok) + VecU8_drop(obj.Ok); +} + +typedef struct { + Result_variant variant; + int Err; +} Result_ok_or_int; + +void Result_ok_or_int_drop(Result_ok_or_int obj) {} + +NODISCARD VecU8 read_whole_file_or_abort(const char* filename) { + FILE* fp = fopen(filename, "rb"); + if (!fp) { + abortf("Can't open file %s: %s\n", filename, strerror(errno)); + } + if (fseek(fp, 0, SEEK_END) != 0) { + abortf("fseek: %s\n", strerror(errno)); + } + long file_size = ftell(fp); + if (file_size < 0) { + abortf("ftell: %s\n", strerror(errno)); + } + if (fseek(fp, 0, SEEK_SET) != 0) { + abortf("fseek: %s\n", strerror(errno)); + } + VecU8 result = VecU8_new_zeroinit(file_size); + size_t nread = 
fread(result.buf, 1, (size_t)file_size, fp); + if (nread < file_size) { + abortf("fread\n"); + } + fclose(fp); + return result; +} + +void write_whole_file_or_abort(const char* filename, ConstSpanU8 content) { + FILE* fd = fopen(filename, "wb"); + if (!fd) { + abortf("Can't open file %s: %s\n", filename, strerror(errno)); + } + if (fwrite(content.data, 1, content.len, fd) < content.len) { + abortf("fwrite\n"); + } + fclose(fd); +} + + +#endif diff --git a/src/l1/tests/t0.c b/src/l1/tests/t0.c new file mode 100644 index 0000000..95f3e30 --- /dev/null +++ b/src/l1/tests/t0.c @@ -0,0 +1,87 @@ +#include "../core/VecSpan_int_primitives.h" + +int main() { + VecU8 a = VecU8_new(); + VecU8_append(&a, 116); + assert(a.len == 1); + assert(a.capacity == 1); + VecU8_append(&a, 18); + assert(a.len == 2); + assert(a.capacity == 2); + VecU8_append(&a, 180); + assert(a.len == 3); + assert(a.capacity == 4); + VecU8_append(&a, 9); + assert(a.len == 4); + assert(a.capacity == 4); + VecU8_append(&a, 90); + assert(a.len == 5); + assert(a.capacity == 8); + assert(*VecU8_cat(&a, 0) == 116); + assert(*VecU8_at(&a, 1) == 18); + assert(*VecU8_cat(&a, 2) == 180); + assert(*VecU8_at(&a, 3) == 9); + assert(*VecU8_cat(&a, 4) == 90); + VecU8_drop(a); + + U32 five = 5; + VecU32 b = VecU32_new_filled(3, &five); + assert(b.len == 3); + assert(b.capacity == 3); + VecU32_append(&b, 41); + assert(b.len == 4); + assert(b.capacity == 6); + + VecU32 c = VecU32_new(); + VecU32_append(&c, 5); + VecU32_append(&c, 5); + VecU32_append(&c, 5); + VecU32_append(&c, 41); + assert(VecU32_equal_VecU32(&b, &c)); + VecU32_append(&c, 7); + assert(!VecU32_equal_VecU32(&b, &c)); + U32 x = VecU32_pop(&c); + assert(x == 7); + assert(VecU32_equal_VecU32(&b, &c)); + // Now b = c = [5, 5, 5, 7] + for (int i = 0; i < 4; i++) { + printf("%u %u\n", *VecU32_at(&b, i), *VecU32_at(&c, i)); + } + + *VecU32_at(&b, 3) = 17; + assert(!VecU32_equal_VecU32(&b, &c)); + *VecU32_at(&c, 3) = 17; + assert(VecU32_equal_VecU32(&b, &c)); + + 
SpanU32 s0 = VecU32_span(&c, 4, 0); + SpanU32 s1 = VecU32_span(&c, 3, 1); + SpanU32 s2 = VecU32_span(&c, 2, 2); + SpanU32 s3 = VecU32_span(&c, 1, 3); + SpanU32 s4 = VecU32_span(&c, 0, 4); + assert(*SpanU32_cat(s4, 3) == 17); + for (size_t i = 0; i < 3; i++) { + *VecU32_at(&c, i) = i; + } + *SpanU32_at(s3, 2) = 3; + assert(*SpanU32_at(s1, 0) == 3); + assert(*SpanU32_at(s2, 0) == 2); + assert(*SpanU32_at(s3, 0) == 1); + assert(*SpanU32_at(s4, 0) == 0); + VecU32 d = VecU32_clone(&c); + VecU32 e = VecU32_clone(&c); + VecU32_append(&c, 50); + VecU32_append(&e, 500); + assert(d.len == 4); + assert(d.capacity == 4); + assert(c.len == 5); + for (int i = 0; i < 4; i++) { + VecU32_pop_and_drop(&d); + } + assert(*VecU32_cat(&e, 4) == 500); + + VecU32_drop(e); + VecU32_drop(d); + VecU32_drop(c); + VecU32_drop(b); + return 0; +} diff --git a/src/l1/tests/t1.c b/src/l1/tests/t1.c new file mode 100644 index 0000000..3ffe897 --- /dev/null +++ b/src/l1/tests/t1.c @@ -0,0 +1,36 @@ +#include "../core/VecSpan_Vec_int_primitives.h" + +int main() { + VecU64 v = VecU64_new(); + for (int i = 0; i < 3; i++) { + VecU64_append(&v, i); + *VecU64_at(&v, i) = *VecU64_cat(&v, i) + 10; + } + VecVecU64 a = VecVecU64_new_filled(2, &v); + VecU64_drop(v); + assert(VecU64_equal_VecU64(VecVecU64_at(&a, 0), VecVecU64_at(&a, 1))); + VecU64_pop_and_drop(VecVecU64_at(&a, 1)); + assert(!VecU64_equal_VecU64(VecVecU64_at(&a, 0), VecVecU64_at(&a, 1))); + VecVecU64 b = VecVecU64_new(); + VecVecU64_append(&b, VecU64_new()); + assert(!VecVecU64_equal_VecVecU64(&a, &b)); + VecVecU64_append(&b, VecU64_new()); + assert(!VecVecU64_equal_VecVecU64(&a, &b)); + for (int i = 0; i < 3; i++) { + VecU64_append(VecVecU64_at(&b, 0), 10 + i); + } + assert(!VecVecU64_equal_VecVecU64(&a, &b)); + for (int i = 0; i < 2; i++) { + VecU64_append(VecVecU64_at(&b, 1), 10 + i); + } + assert(VecVecU64_equal_VecVecU64(&a, &b)); + VecVecU64 c = VecVecU64_clone(&a); + VecVecU64 d = VecVecU64_clone(&b); + VecVecU64_pop_and_drop(&a); + 
assert(VecVecU64_equal_VecVecU64(&c, &b)); + assert(VecVecU64_equal_VecVecU64(&c, &d)); + VecVecU64_drop(d); + VecVecU64_drop(c); + VecVecU64_drop(b); + VecVecU64_drop(a); +} \ No newline at end of file diff --git a/src/l2/margaret/margaret.h b/src/l2/margaret/margaret.h new file mode 100644 index 0000000..f7a935b --- /dev/null +++ b/src/l2/margaret/margaret.h @@ -0,0 +1,1365 @@ +#ifndef PROTOTYPE1_SRC_L2_MARGARET_MARGARET_H +#define PROTOTYPE1_SRC_L2_MARGARET_MARGARET_H + +#include "../../l1/core/util.h" +#include "../../l1/core/Option_int_primitives.h" +#include "../../l1/core/VecSpan_Vec_int_primitives.h" +#include +#include +#include +#include +#include "stringop.h" +// #include +#include +// #include +#include + +typedef struct timespec margaret_ns_time; + +margaret_ns_time margaret_clock_gettime_monotonic_raw() { + margaret_ns_time res; + int ret = clock_gettime(CLOCK_MONOTONIC_RAW, &res); + if (ret < 0) + abortf("clock_gettime"); + return res; +} + +int64_t margaret_ns_time_ns_diff(margaret_ns_time from, margaret_ns_time to) { + return (to.tv_sec - from.tv_sec) * 1000000000 + (to.tv_nsec - from.tv_nsec); +} + +float margaret_ns_time_sec_diff(margaret_ns_time from, margaret_ns_time to) { + return (float)margaret_ns_time_ns_diff(from, to) / 1e9; +} + +float margaret_clock_monotonic_raw_diff(margaret_ns_time start) { + return (float)margaret_ns_time_ns_diff(start, margaret_clock_gettime_monotonic_raw()) / 1e9; +} + + +typedef XEvent Xlib_Event; + +void Xlib_Event_drop(){} +Xlib_Event Xlib_Event_clone(Xlib_Event* a) {return *a; } + +VecT_trivmove_struct_Definition(Xlib_Event) +VecT_trivmove_method_Definition(Xlib_Event) +VecT_primitive_zeroinit_method_Definition(Xlib_Event) + +typedef Display Xlib_Display; +typedef Window Xlib_Window; +typedef Atom Xlib_Atom; + +VecXlib_Event margaret_read_x_events(Xlib_Display* dpy) { + int evh = XEventsQueued(dpy, QueuedAfterReading); + VecXlib_Event result = VecXlib_Event_new_zeroinit(evh); + for (int i = 0; i < evh; i++) { + 
XNextEvent(dpy, VecXlib_Event_at(&result, i)); + } + return result; +} + +void margaret_win_init_set_properties(Xlib_Display* dpy, Xlib_Window win) { + const char* strings[] = { + "WM_PROTOCOLS", + "ATOM", + "WM_DELETE_WINDOW" + }; + Atom atoms[3]; + int status; + status = XInternAtoms(dpy, (strings), 3, False, atoms); + if (status == 0) + abortf("XInternAtoms"); + status = XChangeProperty(dpy, win, atoms[0], atoms[1], 32, PropModeReplace, (unsigned char *)&atoms[2], 1); + if (status == 0) + abortf("XChangeProperty"); +} + +typedef struct { + Xlib_Atom A_WM_windel; + Xlib_Atom A_WM_protocols; + int width; + int height; + bool should_stop; + Xlib_Window win; +} Margaret_WEP; + +Margaret_WEP Margaret_WEP_new(Xlib_Display* dpy, Xlib_Window win) { + return (Margaret_WEP){ + .A_WM_protocols = XInternAtom(dpy, "WM_PROTOCOLS", False), + .A_WM_windel = XInternAtom(dpy, "WM_DELETE_WINDOW", False), + .should_stop = false, + .win = win, + }; +} + +void Margaret_WEP_update_with_new_event(Margaret_WEP* self, const Xlib_Event* ev) { + if (ev->xany.window != self->win) + return; + if (ev->type == ConfigureNotify) { + printf("That was ConfigureNotify\n"); + self->width = ev->xconfigure.width; + self->height = ev->xconfigure.height; + } else if (ev->type == ClientMessage) { + printf("That was ClientMessage\n"); + if (ev->xclient.message_type == self->A_WM_protocols && + ev->xclient.format == 32 && ev->xclient.data.l[0] == self->A_WM_windel + ){ + printf("WM_DELETE_WINDOW\n"); + self->should_stop = true; + } + } else if (ev->type == DestroyNotify) { + printf("That was DestroyNotify\n"); + self->should_stop = true; + } +} + +void margaret_create_debug_utils_messenger_EXT( + VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pDebugMessenger + ) { + PFN_vkCreateDebugUtilsMessengerEXT func = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT"); + 
if (func != NULL) { + if (func(instance, pCreateInfo, pAllocator, pDebugMessenger) != VK_SUCCESS) { + abortf("vkCreateDebugUtilsMessengerEXT"); + } + } else { + abortf("VK_ERROR_EXTENSION_NOT_PRESENT"); + } +} + +void margaret_destroy_debug_utils_messenger_EXT( + VkInstance instance, VkDebugUtilsMessengerEXT debugMessenger, const VkAllocationCallbacks* pAllocator + ) { + PFN_vkDestroyDebugUtilsMessengerEXT func = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(instance, "vkDestroyDebugUtilsMessengerEXT"); + if (func != NULL) + func(instance, debugMessenger, pAllocator); + else + abortf("VK_ERROR_EXTENSION_NOT_PRESENT"); +} + +// This is the function margaret will provide to vulkan for debugging +static VKAPI_ATTR VkBool32 VKAPI_CALL margaret_static_debug_callback( + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageType, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, void* pUserData + ) { + fprintf(stderr, "Vk Validation layer: %s\n", pCallbackData->pMessage); + return VK_FALSE; +} + +typedef const char* CSTR; + +#define CSTR_drop(x) {} +#define CSTR_clone(vp) (*(vp)) + +VecT_trivmove_struct_Definition(CSTR) +VecT_trivmove_method_Definition(CSTR) +VecT_primitive_zeroinit_method_Definition(CSTR) + +typedef struct { + VkInstance instance; + VkDebugUtilsMessengerEXT debug_messenger; +} MargaretInstanceAndItsDebug; + +MargaretInstanceAndItsDebug MargaretInstanceAndItsDebug_new(bool enable_validation_layers) { + // InstanceAndDebugHands res{}; + VkApplicationInfo app_info = { + .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, + .pApplicationName = "Kto prochital tot zdohnet", + .applicationVersion = VK_MAKE_VERSION(1, 0, 0), + .pEngineName = "Margaret", + .engineVersion = VK_MAKE_VERSION(1, 0, 0), + .apiVersion = VK_API_VERSION_1_2, + }; + + VecCSTR needed_extensions = VecCSTR_new(); + VecCSTR_append(&needed_extensions, "VK_KHR_xlib_surface"); + VecCSTR_append(&needed_extensions, "VK_KHR_surface"); + 
VecCSTR needed_layers = VecCSTR_new(); + if (enable_validation_layers) { + VecCSTR_append(&needed_extensions, "VK_EXT_debug_utils"); + VecCSTR_append(&needed_layers, "VK_LAYER_KHRONOS_validation"); + } + + VkInstanceCreateInfo instance_crinfo = { + .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, + // .pNext may be set to `for-instance-creation-only` Debug Messanger crinfo later + .pApplicationInfo = &app_info, + .enabledLayerCount = needed_layers.len, + .ppEnabledLayerNames = needed_layers.buf, + .enabledExtensionCount = needed_extensions.len, + .ppEnabledExtensionNames = needed_extensions.buf, + }; + + if (enable_validation_layers) { + VkDebugUtilsMessengerCreateInfoEXT debug_messenger_2_crinfo = { + .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, + .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT, + .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, + .pfnUserCallback = margaret_static_debug_callback, + .pUserData = NULL, + }; + instance_crinfo.pNext = &debug_messenger_2_crinfo; + } + VkInstance instance; + if (vkCreateInstance(&instance_crinfo, NULL, &instance) != VK_SUCCESS) + abortf("Failed to create Vulkan instance"); + VkDebugUtilsMessengerEXT debug_messenger = NULL; + if (enable_validation_layers) { + VkDebugUtilsMessengerCreateInfoEXT debug_messenger_crinfo = { + .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, + .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT, + .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, + .pfnUserCallback = margaret_static_debug_callback, 
+ .pUserData = NULL, + }; + margaret_create_debug_utils_messenger_EXT(instance, &debug_messenger_crinfo, NULL, &debug_messenger); + } + + return (MargaretInstanceAndItsDebug){.instance = instance, .debug_messenger = debug_messenger}; +} + +void MargaretInstanceAndItsDebug_drop(MargaretInstanceAndItsDebug instance) { + if (instance.debug_messenger) { + margaret_destroy_debug_utils_messenger_EXT(instance.instance, instance.debug_messenger, NULL); + } + vkDestroyInstance(instance.instance, NULL); +} + +typedef struct { + U32 for_graphics; + U32 for_presentation; +} MargaretChosenQueueFamilies; + +// MargaretChosenQueueFamilies MargaretChosenQueueFamilies_new() { +// return (MargaretChosenQueueFamilies){ .for_graphics = None_U32(), .for_presentation = None_U32() }; +// } + +// bool MargaretChosenQueueFamilies_is_complete(const MargaretChosenQueueFamilies* chosen_queue_families) { +// return OptionU32_is_some(&chosen_queue_families->for_graphics) && OptionU32_is_some(&chosen_queue_families->for_presentation); +// } + +#define VkQueueFamilyProperties_drop(x) {} +#define VkQueueFamilyProperties_clone(vp) (*(vp)) + +VecT_trivmove_struct_Definition(VkQueueFamilyProperties) +VecT_trivmove_method_Definition(VkQueueFamilyProperties) +VecT_primitive_zeroinit_method_Definition(VkQueueFamilyProperties) + +/* MargaretChosenQueueFamilies or a static string, describing which part could not be found + * (may replace with VecU8 later) */ +typedef struct { + Result_variant variant; + union { + MargaretChosenQueueFamilies ok; + ConstSpanU8 err; + }; +} ResultMargaretChosenQueueFamiliesOrConstSpanU8; + +ResultMargaretChosenQueueFamiliesOrConstSpanU8 margaret_choose_good_queue_families(VkPhysicalDevice dev, VkSurfaceKHR surface) { + uint32_t queue_family_count = 0; + vkGetPhysicalDeviceQueueFamilyProperties(dev, &queue_family_count, NULL); + VecVkQueueFamilyProperties queue_families = VecVkQueueFamilyProperties_new_zeroinit(queue_family_count); + 
vkGetPhysicalDeviceQueueFamilyProperties(dev, &queue_family_count, queue_families.buf); + + OptionU32 index_for_graphics = None_U32(); + OptionU32 index_for_presentation = None_U32(); + + for (uint32_t i = 0; i < queue_family_count; i++) { + const VkQueueFamilyProperties* props = VecVkQueueFamilyProperties_cat(&queue_families, i); + if (props->queueFlags & VK_QUEUE_GRAPHICS_BIT && props->queueCount >= 1) { + index_for_graphics = Some_U32(i); + } + VkBool32 isPres = false; + if (vkGetPhysicalDeviceSurfaceSupportKHR(dev, i, surface, &isPres) != VK_SUCCESS) + abortf("vkGetPhysicalDeviceSurfaceSupportKHR"); + if (isPres) + index_for_presentation = Some_U32(i); + } + VecVkQueueFamilyProperties_drop(queue_families); + if (OptionU32_is_none(&index_for_graphics)) + return (ResultMargaretChosenQueueFamiliesOrConstSpanU8){ .variant = Result_Err, .err = cstr("No graphics queue family") }; + if (OptionU32_is_none(&index_for_presentation)) + return (ResultMargaretChosenQueueFamiliesOrConstSpanU8){ .variant = Result_Err, .err = cstr("No presentation queue family") }; + return (ResultMargaretChosenQueueFamiliesOrConstSpanU8){ .variant = Result_Ok, .ok = (MargaretChosenQueueFamilies){ + .for_graphics = index_for_graphics.some, .for_presentation = index_for_presentation.some + } }; +} + +#define VkExtensionProperties_drop(x) {} +#define VkExtensionProperties_clone(vp) (*(vp)) + +VecT_trivmove_struct_Definition(VkExtensionProperties) +VecT_trivmove_method_Definition(VkExtensionProperties) +VecT_primitive_zeroinit_method_Definition(VkExtensionProperties) + + +// These are not the same as instance extensions +VecVecU8 margaret_get_extensions_of_physical_device(VkPhysicalDevice physical_device) { + uint32_t extensions_count = 0; + if (vkEnumerateDeviceExtensionProperties(physical_device, NULL, &extensions_count, NULL) != VK_SUCCESS) + abortf("vkEnumerateDeviceExtensionProperties"); + VecVkExtensionProperties extensions = VecVkExtensionProperties_new_zeroinit(extensions_count); + if 
(vkEnumerateDeviceExtensionProperties(physical_device, NULL, &extensions_count, extensions.buf) != VK_SUCCESS) + abortf("vkEnumerateDeviceExtensionProperties"); + VecVecU8 res = VecVecU8_new_of_size(extensions_count); + for (size_t i = 0; i < extensions_count; i++) { + // Previous value here was default (_new). It can be safely discarded + // ->extensionName is some null-terminated string, we need to acquire a copy + *VecVecU8_at(&res, i) = vcstr(VecVkExtensionProperties_cat(&extensions, i)->extensionName); + } + VecVkExtensionProperties_drop(extensions); + return res; +} + +VkDevice margaret_create_logical_device(VkPhysicalDevice physical_device, MargaretChosenQueueFamilies queue_fam) { + VkPhysicalDeviceFeatures physical_features; + vkGetPhysicalDeviceFeatures(physical_device, &physical_features); + // todo: handle case of `two in one` + float qfam_instance_priorities[1] = {1.f}; + VkDeviceQueueCreateInfo logical_device_queue_crinfo[2] = { 0 }; + for (int i = 0; i < 2; i++) { + logical_device_queue_crinfo[i].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO; + logical_device_queue_crinfo[i].queueCount = 1; + logical_device_queue_crinfo[i].pQueuePriorities = qfam_instance_priorities; + } + logical_device_queue_crinfo[0].queueFamilyIndex = queue_fam.for_graphics; + logical_device_queue_crinfo[1].queueFamilyIndex = queue_fam.for_presentation; + + // We DEMAND synchronization2 + VkPhysicalDeviceSynchronization2Features used_synchronization2_features = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES, + .synchronization2 = VK_TRUE, + }; + VkPhysicalDeviceFeatures2 used_features2 = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + .pNext = (void*)&used_synchronization2_features, + .features = (VkPhysicalDeviceFeatures) { + .samplerAnisotropy = physical_features.samplerAnisotropy, + }, + }; + + const char* needed_extensions[2] = {"VK_KHR_swapchain", "VK_KHR_synchronization2"}; + + VkDeviceCreateInfo device_crinfo = { + .sType = 
VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, + .pNext = (const void*)&used_features2, + .queueCreateInfoCount = ARRAY_SIZE(logical_device_queue_crinfo), + .pQueueCreateInfos = logical_device_queue_crinfo, + .enabledExtensionCount = ARRAY_SIZE(needed_extensions), + .ppEnabledExtensionNames = needed_extensions, + // We leave that filed because we have specified features2 in `.pNext` + .pEnabledFeatures = NULL, + }; + + VkDevice device; + if (vkCreateDevice(physical_device, &device_crinfo, NULL, &device) != VK_SUCCESS) + abortf("Could not create logical vulkan device"); + return device; +} + +OptionT_struct_Definition(VkSurfaceFormatKHR) +OptionT_method_Definition(VkSurfaceFormatKHR) + +OptionT_struct_Definition(VkPresentModeKHR) +OptionT_method_Definition(VkPresentModeKHR) + +/* These variables are in some way enforced by VkSurfaceCapabilitiesKHR (but not completely determined) */ +typedef struct { + VkSurfaceFormatKHR surface_format; + VkPresentModeKHR presentation_mode; + VkExtent2D image_extent; + uint32_t image_count; + VkSurfaceTransformFlagBitsKHR surface_pre_transform; +} MargaretChosenSwapchainDetails; + +#define VkSurfaceFormatKHR_drop(x) {} +#define VkSurfaceFormatKHR_clone(vp) (*(vp)) + +VecT_trivmove_struct_Definition(VkSurfaceFormatKHR) +VecT_trivmove_method_Definition(VkSurfaceFormatKHR) +VecT_primitive_zeroinit_method_Definition(VkSurfaceFormatKHR) + +#define VkPresentModeKHR_drop(x) {} +#define VkPresentModeKHR_clone(vp) (*(vp)) + +VecT_trivmove_struct_Definition(VkPresentModeKHR) +VecT_trivmove_method_Definition(VkPresentModeKHR) +VecT_primitive_zeroinit_method_Definition(VkPresentModeKHR) + +OptionVkSurfaceFormatKHR margaret_choose_surface_format(const VecVkSurfaceFormatKHR* surface_formats) { + for (size_t i = 0; i < surface_formats->len; i++) { + VkSurfaceFormatKHR f = *VecVkSurfaceFormatKHR_cat(surface_formats, i); + if ((f.format == VK_FORMAT_R8G8B8A8_SRGB || f.format == VK_FORMAT_B8G8R8A8_SRGB) && + f.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + 
) { + return Some_VkSurfaceFormatKHR(f); + } + } + return None_VkSurfaceFormatKHR(); +} + +OptionVkPresentModeKHR margaret_choose_presentation_mode(const VecVkPresentModeKHR* pres_modes) { + OptionVkPresentModeKHR res = None_VkPresentModeKHR(); + for (size_t i = 0; i < pres_modes->len; i++) { + VkPresentModeKHR mode = *VecVkPresentModeKHR_cat(pres_modes, i); + if (mode == VK_PRESENT_MODE_MAILBOX_KHR) + return Some_VkPresentModeKHR(VK_PRESENT_MODE_MAILBOX_KHR); + else if (mode == VK_PRESENT_MODE_FIFO_KHR) + res = Some_VkPresentModeKHR(mode); + } + return res; +} + +VkExtent2D margaret_choose_image_extent(const VkSurfaceCapabilitiesKHR* capabilities) { + if (capabilities->currentExtent.width == UINT32_MAX) { + return (VkExtent2D){ capabilities->maxImageExtent.width, capabilities->maxImageExtent.height,}; + } else + return capabilities->currentExtent; +} + +uint32_t margaret_choose_swapchain_image_count(const VkSurfaceCapabilitiesKHR* capabilities) { + return MIN_U32(capabilities->minImageCount + 1, + capabilities->maxImageCount == 0 ? UINT32_MAX : capabilities->maxImageCount); +} + +/* MargaretChosenSwapchainDetails or static string span, describing what is missing */ +typedef struct { + Result_variant variant; + union { + MargaretChosenSwapchainDetails ok; + ConstSpanU8 err; + }; +} ResultMargaretChosenSwapchainDetailsOrConstSpanU8; + +// Both queries swapchain support details and selects needed formats and presentation modes +ResultMargaretChosenSwapchainDetailsOrConstSpanU8 margaret_choose_swapchain_details(VkPhysicalDevice physical_device, VkSurfaceKHR surface) { + /* 1. 
Getting surface capabilities + formats + presentation modes */ + VkSurfaceCapabilitiesKHR surface_capabilities; + if (vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, surface, &surface_capabilities) != VK_SUCCESS) + abortf("vkGetPhysicalDeviceSurfaceCapabilitiesKHR"); + + uint32_t format_count = 0; + if (vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &format_count, NULL) != VK_SUCCESS) + abortf("vkGetPhysicalDeviceSurfaceFormatsKHR"); + VecVkSurfaceFormatKHR surface_formats = VecVkSurfaceFormatKHR_new_zeroinit(format_count); + if (vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &format_count, surface_formats.buf) != VK_SUCCESS) + abortf("vkGetPhysicalDeviceSurfaceFormatsKHR"); + + uint32_t pres_mode_count = 0; + if (vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &pres_mode_count, NULL) != VK_SUCCESS) + abortf("vkGetPhysicalDeviceSurfacePresentModesKHR"); + VecVkPresentModeKHR pres_modes = VecVkPresentModeKHR_new_zeroinit(pres_mode_count); + if (vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &pres_mode_count, pres_modes.buf) != VK_SUCCESS) + abortf("vkGetPhysicalDeviceSurfacePresentModesKHR"); + + /* 2. 
Choosing surface format and presentation mode */ + OptionVkSurfaceFormatKHR chosen_surface_format = margaret_choose_surface_format(&surface_formats); + if (OptionVkSurfaceFormatKHR_is_none(&chosen_surface_format)) + return (ResultMargaretChosenSwapchainDetailsOrConstSpanU8){ .variant = Result_Err, + .err = cstr("No suitable surface format") }; + OptionVkPresentModeKHR chosen_present_mode = margaret_choose_presentation_mode(&pres_modes); + if (OptionVkPresentModeKHR_is_none(&chosen_present_mode)) + return (ResultMargaretChosenSwapchainDetailsOrConstSpanU8){ .variant = Result_Err, + .err = cstr("No suitable swapchain presentation mode") }; + + VkExtent2D image_extent = margaret_choose_image_extent(&surface_capabilities); + uint32_t image_count = margaret_choose_swapchain_image_count(&surface_capabilities); + + VecVkSurfaceFormatKHR_drop(surface_formats); + VecVkPresentModeKHR_drop(pres_modes); + + return (ResultMargaretChosenSwapchainDetailsOrConstSpanU8){ .variant = Result_Ok,\ + .ok = (MargaretChosenSwapchainDetails){ + .surface_format = chosen_surface_format.some, .presentation_mode = chosen_present_mode.some, + .image_extent = image_extent, .image_count = image_count, + .surface_pre_transform = surface_capabilities.currentTransform + } + }; +} + +typedef struct { + VkPhysicalDevice physical_device; + S64 score; + // static string + ConstSpanU8 disqualification_reason; +} MargaretScoredPhysicalDevice; + +MargaretScoredPhysicalDevice margaret_score_physical_device( + VkPhysicalDevice dev, VkSurfaceKHR surface, VecU8 favourite_word, VecU8 forbidden_word + ) { + VkPhysicalDeviceProperties properties; + vkGetPhysicalDeviceProperties(dev, &properties); + if (string_contains_string_ignorecase(vcstr(properties.deviceName), forbidden_word)) + return (MargaretScoredPhysicalDevice){ dev, -1, cstr("Bugged gpu") }; + VkPhysicalDeviceSynchronization2Features synchronization2_features = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES, + }; + 
VkPhysicalDeviceFeatures2 features2 = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + .pNext = (void*)&synchronization2_features, + }; + vkGetPhysicalDeviceFeatures2(dev, &features2); + // printf("Device %s\nmaxBoundDescriptorSets: %" PRIu32 " \nmaxPerStageDescriptorUniformBuffers: %" PRIu32 "\n" + // , properties.deviceName, properties.limits.maxBoundDescriptorSets, properties.limits.maxPerStageDescriptorUniformBuffers); + + S64 score = 0; + if (string_contains_string_ignorecase(vcstr(properties.deviceName), favourite_word)) + score += 100000; + if (properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) + score += 1000; + else if (properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU) + score += 100; + if (!features2.features.geometryShader) + return (MargaretScoredPhysicalDevice){dev, -1, cstr("No geometry shader")}; + if (!synchronization2_features.synchronization2) + return (MargaretScoredPhysicalDevice){dev, -1, cstr("No synchronization2")}; + if (features2.features.samplerAnisotropy) + score += 2; + ResultMargaretChosenQueueFamiliesOrConstSpanU8 queue_families = margaret_choose_good_queue_families(dev, surface); + if (queue_families.variant == Result_Err) + return (MargaretScoredPhysicalDevice){dev, -1, queue_families.err}; + // Checking device specific extensions (VK_KHR_swapchain required to check swap_chain support details + VecVecU8 dev_extensions = margaret_get_extensions_of_physical_device(dev); + ConstSpanU8 required_dev_extensions[] = {cstr("VK_KHR_swapchain"), cstr("VK_KHR_synchronization2")}; + for (size_t ei = 0; ei < ARRAY_SIZE(required_dev_extensions); ei++) { + if (!string_in_string_vec(required_dev_extensions[ei], &dev_extensions)) + return (MargaretScoredPhysicalDevice){dev, -1, cstr("Missing some device extensions")}; + } + VecVecU8_drop(dev_extensions); + // Extension VK_KHR_swapchain is present, now we can call query_swap_chain_support + ResultMargaretChosenSwapchainDetailsOrConstSpanU8 swapchain_details = 
margaret_choose_swapchain_details(dev, surface); + if (swapchain_details.variant == Result_Err) { + return (MargaretScoredPhysicalDevice){dev, -1, cstr("Physical device lacks nice swapchain support")}; + } + return (MargaretScoredPhysicalDevice){dev, score, ""}; +} + +#define MargaretScoredPhysicalDevice_drop(x) {} +#define MargaretScoredPhysicalDevice_clone(vp) (*(vp)) +#define MargaretScoredPhysicalDevice_less_MargaretScoredPhysicalDevice(cap, cbp) ((cap)->score < (cbp)->score) + +SpanT_VecT_trivmove_COMPLETE_Definition(MargaretScoredPhysicalDevice) +VecT_primitive_zeroinit_method_Definition(MargaretScoredPhysicalDevice) +SpanT_comparable_method_Definition(MargaretScoredPhysicalDevice) + +// VecT_trivmove_struct_Definition(MargaretScoredPhysicalDevice) +// VecT_trivmove_method_Definition(MargaretScoredPhysicalDevice) +// SpanT_struct_Definition(MargaretScoredPhysicalDevice) +// SpanT_method_Definition(MargaretScoredPhysicalDevice) +// SpanT_VecT_method_Definition(MargaretScoredPhysicalDevice) + +#define VkPhysicalDevice_drop(x) {} +#define VkPhysicalDevice_clone(vp) (*(vp)) + +VecT_trivmove_struct_Definition(VkPhysicalDevice) +VecT_trivmove_method_Definition(VkPhysicalDevice) +VecT_primitive_zeroinit_method_Definition(VkPhysicalDevice) + +VecMargaretScoredPhysicalDevice margaret_get_physical_devices_scored( + VkInstance instance, VkSurfaceKHR surface, + ConstSpanU8 favourite_word, ConstSpanU8 forbidden_word + ) { + uint32_t physical_device_count = 0; + if (vkEnumeratePhysicalDevices(instance, &physical_device_count, NULL) != VK_SUCCESS) + abortf("vkEnumeratePhysicalDevices"); + VecVkPhysicalDevice physical_devices = VecVkPhysicalDevice_new_zeroinit(physical_device_count); + if (vkEnumeratePhysicalDevices(instance, &physical_device_count, physical_devices.buf) != VK_SUCCESS) + abortf("vkEnumeratePhysicalDevices"); + VecMargaretScoredPhysicalDevice scored_devices = VecMargaretScoredPhysicalDevice_new_zeroinit(physical_device_count); + for (uint32_t i = 0; i < 
physical_device_count; i++) { + *VecMargaretScoredPhysicalDevice_at(&scored_devices, i) = margaret_score_physical_device( + *VecVkPhysicalDevice_cat(&physical_devices, i), surface, + VecU8_from_span(favourite_word), VecU8_from_span(forbidden_word) + ); + } + SpanMargaretScoredPhysicalDevice_sort(VecMargaretScoredPhysicalDevice_to_SpanMargaretScoredPhysicalDevice(&scored_devices)); + return scored_devices; +} + +VkPhysicalDevice margaret_select_one_physical_device( + VkInstance instance, VkSurfaceKHR surface, + ConstSpanU8 favourite_word, ConstSpanU8 forbidden_word + ) { + VecMargaretScoredPhysicalDevice scored_devices = margaret_get_physical_devices_scored(instance, surface, favourite_word, forbidden_word); + printf("Physical devices (with scores):\n"); + for (size_t i = 0; i < scored_devices.len; i++) { + const MargaretScoredPhysicalDevice* dev = VecMargaretScoredPhysicalDevice_cat(&scored_devices, i); + VkPhysicalDeviceProperties dev_props; + vkGetPhysicalDeviceProperties(dev->physical_device, &dev_props); + if (dev->score >= 0) { + printf("%s: %ld\n", dev_props.deviceName, dev->score); + } else { + printf("%s: DISQUALIFIED: ", dev_props.deviceName); + ConstSpanU8_print(dev->disqualification_reason); + putchar('\n'); + } + } + if (scored_devices.len == 0) + abortf("No vulkan devices"); + const MargaretScoredPhysicalDevice* best_dev = VecMargaretScoredPhysicalDevice_cat(&scored_devices, scored_devices.len - 1); + if (best_dev->score < 0) + abortf("No suitable vulkan devices"); + return best_dev->physical_device; +} + +VkSwapchainKHR margaret_create_swapchain ( + VkDevice device, + MargaretChosenQueueFamilies queue_families, + MargaretChosenSwapchainDetails swapchain_details, + VkSurfaceKHR surface, VkSwapchainKHR old_swapchain + ) { + VkSwapchainCreateInfoKHR swapchain_crinfo = { + .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, + .surface = surface, + .minImageCount = swapchain_details.image_count, + .imageFormat = swapchain_details.surface_format.format, + 
.imageColorSpace = swapchain_details.surface_format.colorSpace, + .imageExtent = swapchain_details.image_extent, + // when trying VR update this according to VkSurfaceCapabilitiesKHR::maxImageArrayLayers + .imageArrayLayers = 1, + .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, + // Filling imageSharingMode and queueFamilyIndexes later + .preTransform = swapchain_details.surface_pre_transform, + .compositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR, + .presentMode = swapchain_details.presentation_mode, + .clipped = VK_TRUE, + .oldSwapchain = old_swapchain, + + }; + uint32_t sharing_between_qfams[2] = {queue_families.for_graphics, queue_families.for_presentation}; + if (queue_families.for_graphics != queue_families.for_presentation) { + swapchain_crinfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT; + swapchain_crinfo.queueFamilyIndexCount = ARRAY_SIZE(sharing_between_qfams); + swapchain_crinfo.pQueueFamilyIndices = sharing_between_qfams; + } else { + swapchain_crinfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; + } + + VkSwapchainKHR swapchain; + if (vkCreateSwapchainKHR(device, &swapchain_crinfo, NULL, &swapchain) != VK_SUCCESS) + abortf("vkCreateSwapchainKHR"); + return swapchain; +} + +#define VkImage_drop(v) {} +#define VkImage_clone(vp) (*(vp)) + +VecT_trivmove_struct_Definition(VkImage) +VecT_trivmove_method_Definition(VkImage) +VecT_primitive_zeroinit_method_Definition(VkImage) + +// We don't do it automatically (we have to manually destroy image views when needed) +#define VkImageView_drop(d) {} +#define VkImageView_clone(p) (*(p)) + +VecT_trivmove_struct_Definition(VkImageView) +VecT_trivmove_method_Definition(VkImageView) +VecT_primitive_zeroinit_method_Definition(VkImageView) + +// I can collect my garbage myself +#define VkFramebuffer_drop(d) {} +#define VkFramebuffer_clone(p) (*(p)) + +VecT_trivmove_struct_Definition(VkFramebuffer) +VecT_trivmove_method_Definition(VkFramebuffer) +VecT_primitive_zeroinit_method_Definition(VkFramebuffer) + 
+VecVkImageView margaret_create_swapchain_image_views( + VkDevice device, VkSwapchainKHR swapchain, + VkFormat image_format + ) { + uint32_t swapchain_image_count = 0; + if (vkGetSwapchainImagesKHR(device, swapchain, &swapchain_image_count, NULL) != VK_SUCCESS) + abortf("vkGetSwapchainImagesKHR"); + VecVkImage swapchain_images = VecVkImage_new_zeroinit(swapchain_image_count); + if (vkGetSwapchainImagesKHR(device, swapchain, &swapchain_image_count, swapchain_images.buf) != VK_SUCCESS) + abortf("vkGetSwapchainImagesKHR"); + + VecVkImageView swapchain_image_views = VecVkImageView_new_zeroinit(swapchain_image_count); + for (uint32_t i = 0; i < swapchain_image_count; i++) { + VkImageViewCreateInfo imageview_crinfo = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = *VecVkImage_cat(&swapchain_images, i), + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = image_format, + .components.r = VK_COMPONENT_SWIZZLE_IDENTITY, + .components.g = VK_COMPONENT_SWIZZLE_IDENTITY, + .components.b = VK_COMPONENT_SWIZZLE_IDENTITY, + .components.a = VK_COMPONENT_SWIZZLE_IDENTITY, + .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .subresourceRange.baseMipLevel = 0, + .subresourceRange.levelCount = 1, + .subresourceRange.baseArrayLayer = 0, + .subresourceRange.layerCount = 1, + }; + if (vkCreateImageView(device, &imageview_crinfo, NULL, VecVkImageView_at(&swapchain_image_views, i)) != VK_SUCCESS) + abortf("vkCreateImageView"); + } + return swapchain_image_views; +} + +// Okay, this part is already somewhat pipeline-specific +VecVkFramebuffer margaret_create_swapchain_framebuffers( + VkDevice device, const VecVkImageView* swapchain_image_views, VkRenderPass render_pass, VkExtent2D image_extent + ) { + VecVkFramebuffer swapchain_framebuffers = VecVkFramebuffer_new_zeroinit(swapchain_image_views->len); + for (uint32_t i = 0; i < swapchain_image_views->len; i++) { + VkImageView attachments[1] = {*VecVkImageView_cat(swapchain_image_views, i)}; + VkFramebufferCreateInfo 
framebuffer_crinfo = { + .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, + .renderPass = render_pass, + .attachmentCount = ARRAY_SIZE(attachments), + .pAttachments = attachments, + .width = image_extent.width, + .height = image_extent.height, + .layers = 1, + }; + + if (vkCreateFramebuffer(device, &framebuffer_crinfo, NULL, VecVkFramebuffer_at(& swapchain_framebuffers, i)) != VK_SUCCESS) + abortf("vkCreateFramebuffer"); + } + return swapchain_framebuffers; +} + +VkSemaphore margaret_create_semaphore(VkDevice device) { + VkSemaphoreCreateInfo crinfo = { .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO }; + VkSemaphore res; + if (vkCreateSemaphore(device, &crinfo, NULL, &res) != VK_SUCCESS) + abortf("vkCreateSemaphore"); + return res; +} + +VkFence margaret_create_fence(VkDevice device, bool create_signaled) { + VkFenceCreateInfo crinfo = { + .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + .flags = create_signaled ? VK_FENCE_CREATE_SIGNALED_BIT : 0 + }; + VkFence res; + if (vkCreateFence(device, &crinfo, NULL, &res) != VK_SUCCESS) + abortf("vkCreateFence"); + return res; +} + +typedef struct { + VkSwapchainKHR swapchain; + VecVkImageView image_views; + VecVkFramebuffer framebuffers; + VkExtent2D extent; + + VkSemaphore in_frame_transfer_complete; + VkSemaphore image_available_semaphore; + VkSemaphore render_finished_semaphore; + VkFence in_flight_fence; +} MargaretSwapchainBundle; + +MargaretSwapchainBundle MargaretSwapchainBundle_new( + VkDevice device, MargaretChosenQueueFamilies queue_families, MargaretChosenSwapchainDetails swapchain_details, + VkSurfaceKHR surface, VkRenderPass render_pass, VkSwapchainKHR old_swapchain + ) { + VkSwapchainKHR swapchain = margaret_create_swapchain(device, queue_families, swapchain_details, surface, old_swapchain); + VecVkImageView image_views = margaret_create_swapchain_image_views(device, swapchain, swapchain_details.surface_format.format); + VecVkFramebuffer framebuffers = margaret_create_swapchain_framebuffers(device, 
&image_views, render_pass, swapchain_details.image_extent); + return (MargaretSwapchainBundle){ .swapchain = swapchain, .image_views = image_views, + .framebuffers = framebuffers, .extent = swapchain_details.image_extent, + .in_frame_transfer_complete = margaret_create_semaphore(device), + .image_available_semaphore = margaret_create_semaphore(device), + .render_finished_semaphore = margaret_create_semaphore(device), + .in_flight_fence = margaret_create_fence(device, true), + }; +} + +VkSwapchainKHR MargaretSwapchainBundle_pop_swapchain_drop_rest(VkDevice device, MargaretSwapchainBundle swfb) { + for (size_t i = 0; i < swfb.framebuffers.len; i++) { + vkDestroyFramebuffer(device, *VecVkFramebuffer_cat(&swfb.framebuffers, i), NULL); + } + for (size_t i = 0; i < swfb.image_views.len; i++) { + vkDestroyImageView(device, *VecVkImageView_cat(&swfb.image_views, i), NULL); + } + vkDestroyFence(device, swfb.in_flight_fence, NULL); + vkDestroySemaphore(device, swfb.render_finished_semaphore, NULL); + vkDestroySemaphore(device, swfb.image_available_semaphore, NULL); + vkDestroySemaphore(device, swfb.in_frame_transfer_complete, NULL); + // Old swapchain bundle is 83% dropped + return swfb.swapchain; +} + +// Not a regular _drop method, because it requires a bundled VkDevice +void MargaretSwapchainBundle_drop_with_device(VkDevice device, MargaretSwapchainBundle swfb) { + VkSwapchainKHR swapchain = MargaretSwapchainBundle_pop_swapchain_drop_rest(device, swfb); + vkDestroySwapchainKHR(device, swfb.swapchain, NULL); + // Now swapchain bundle is 100% dropped +} + +VkShaderModule margaret_VkShaderModule_new(VkDevice device, VecU8 code) { + VkShaderModuleCreateInfo shad_mod_crinfo = { + .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, + .codeSize = code.len, + // Now this is funny, we can't put arbitrary byte-string here, it should be 4-byte aligned + // Thanks goodness all the strings in VecU8 are allocated with calloc, which gives high aligning to virtually everything + .pCode 
= (const uint32_t*)code.buf + }; + VkShaderModule shad_module; + if (vkCreateShaderModule(device, &shad_mod_crinfo, NULL, &shad_module) != VK_SUCCESS) + abortf("vkCreateShaderModule"); + return shad_module; +} + +VkPipelineShaderStageCreateInfo margaret_shader_stage_vertex_crinfo(VkShaderModule module) { + return (VkPipelineShaderStageCreateInfo){ + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .module = module, + .stage = VK_SHADER_STAGE_VERTEX_BIT, .pName = "main", + }; +} + +VkPipelineShaderStageCreateInfo margaret_shader_stage_fragment_crinfo(VkShaderModule module) { + return (VkPipelineShaderStageCreateInfo){ + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, .module = module, + .stage = VK_SHADER_STAGE_FRAGMENT_BIT, .pName = "main", + }; +} + +VkCommandPool margaret_create_resettable_command_pool(VkDevice device, uint32_t wanted_queue_family) { + VkCommandPoolCreateInfo crinfo = { + .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, + .queueFamilyIndex = wanted_queue_family, + }; + VkCommandPool res; + if (vkCreateCommandPool(device, &crinfo, NULL, &res) != VK_SUCCESS) + abortf("vkCreateCommandPool"); + return res; +} + +VkCommandBuffer margaret_allocate_command_buffer(VkDevice device, VkCommandPool pool) { + VkCommandBufferAllocateInfo alloc_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .commandPool = pool, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandBufferCount = 1, + }; + VkCommandBuffer res; + if (vkAllocateCommandBuffers(device, &alloc_info, &res) != VK_SUCCESS) + abortf("vkAllocateCommandBuffers"); + return res; +} + +typedef struct { + Xlib_Display* dpy; + Xlib_Window win; +} MargaretSingleWindowSetup; + +MargaretSingleWindowSetup MargaretSingleWindowSetup_new() { + Display *dpy = XOpenDisplay(NULL); + if (!dpy) + abortf("Unable to open X display"); + + int screen = DefaultScreen(dpy); + Window root = RootWindow(dpy, screen); + + 
unsigned long black = BlackPixel(dpy, screen); + unsigned long white = WhitePixel(dpy, screen); + int win_x = 50, win_y = 50; + unsigned int win_w = 400, win_h = 300; + Window win = XCreateSimpleWindow( + dpy, root, + win_x, win_y, win_w, win_h, + 1, black, white + ); + margaret_win_init_set_properties(dpy, win); + + /* 3) Select for ConfigureNotify and Expose events */ + XSelectInput(dpy, win, StructureNotifyMask | ExposureMask); + return (MargaretSingleWindowSetup){ .dpy = dpy, .win = win }; +} + +void MargaretSingleWindowSetup_drop(MargaretSingleWindowSetup x) { + XDestroyWindow(x.dpy, x.win); + XCloseDisplay(x.dpy); +} + +VkSurfaceKHR margaret_create_surface(VkInstance instance, const MargaretSingleWindowSetup* x) { + VkXlibSurfaceCreateInfoKHR surface_crinfo = { + .sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, .dpy = x->dpy, .window = x->win, + }; + VkSurfaceKHR surface; + if (vkCreateXlibSurfaceKHR(instance, &surface_crinfo, NULL, &surface) != VK_SUCCESS) + abortf("Failed to create Vulkan surface"); + return surface; +} + +#define VkCommandBuffer_drop(vp) {} +#define VkCommandBuffer_clone(vp) (*(vp)) + +VecT_trivmove_struct_Definition(VkCommandBuffer) +VecT_trivmove_method_Definition(VkCommandBuffer) +VecT_primitive_zeroinit_method_Definition(VkCommandBuffer) + +// type_filter is a set of memory types (bit set) and we return one of its elements. 
+// Result must satisfy `properties` +// Bit index in `type_filter` is an index in VkPhysicalDeviceMemoryProperties::memoryTypes for that physical_device +uint32_t margaret_find_memory_type( + VkPhysicalDevice physical_device, uint32_t type_filter, + VkMemoryPropertyFlags properties + ) { + VkPhysicalDeviceMemoryProperties mem_properties; + vkGetPhysicalDeviceMemoryProperties(physical_device, &mem_properties); + assert(mem_properties.memoryTypeCount < 32); + for (uint32_t i = 0; i < mem_properties.memoryTypeCount; i++) { + if ((type_filter & (1 << i)) && ((mem_properties.memoryTypes[i].propertyFlags & properties) == properties)) { + return i; + } + } + abortf("Could not find a good memory type"); +} + +// Suppose the previous buffer ended at pos was. It may just so happen that `was` is aligned with the next buffer, +// but if it is not, for some reason, we skip a couple of bytes after was and return the smallest next aligned pos +VkDeviceSize margaret_align_start_of_buffer(VkDeviceSize was, VkDeviceSize alignment) { + return was % alignment ? 
(was + alignment - was % alignment) : was; +} + +// We first specify the necessary fields `sz`, `usage` and then Snow White creation function fills ` +// Used in autogenerated code +typedef struct { + // necessary + VkDeviceSize sz; + VkBufferUsageFlags usage; + // filled + VkDeviceSize offset; + VkBuffer buffer; +} MargaretBufferInMemoryInfo; + +SpanT_struct_Definition(MargaretBufferInMemoryInfo) +SpanT_method_Definition(MargaretBufferInMemoryInfo) + +// Used in autogenerated code +typedef struct { + // necessary + uint32_t width; + uint32_t height; + VkFormat format; + VkImageUsageFlags usage; + // filled + VkDeviceSize offset; + VkImage image; +} MargaretImageInMemoryInfo; + +SpanT_struct_Definition(MargaretImageInMemoryInfo) +SpanT_method_Definition(MargaretImageInMemoryInfo) + +// A handy function to initialize buffers and images (attaching them to allocated memory) +VkDeviceMemory margaret_initialize_buffers_and_images( + VkPhysicalDevice physical_device, VkDevice device, + SpanMargaretBufferInMemoryInfo buffer_hands, SpanMargaretImageInMemoryInfo image_hands, + VkMemoryPropertyFlags properties + ) { + uint32_t memory_types_allowed = -1; + VkDeviceSize offset = 0; + for (size_t i = 0; i < buffer_hands.len; i++) { + MargaretBufferInMemoryInfo* buf_hand = SpanMargaretBufferInMemoryInfo_at(buffer_hands, i); + VkBufferCreateInfo create_info = { + .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + .size = buf_hand->sz, + .usage = buf_hand->usage, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + }; + if (vkCreateBuffer(device, &create_info, NULL, &buf_hand->buffer) != VK_SUCCESS) { + abortf("vkCreateBuffer"); + } + VkMemoryRequirements memory_requirements; + vkGetBufferMemoryRequirements(device, buf_hand->buffer, &memory_requirements); + + memory_types_allowed |= memory_requirements.memoryTypeBits; + offset = margaret_align_start_of_buffer(offset, memory_requirements.alignment); + buf_hand->offset = offset; + offset = offset + memory_requirements.size; + } + + for 
(size_t i = 0; i < image_hands.len; i++) { + MargaretImageInMemoryInfo* img_hand = SpanMargaretImageInMemoryInfo_at(image_hands, i); + VkImageCreateInfo crinfo = { + .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + .imageType = VK_IMAGE_TYPE_2D, + .format = img_hand->format, + .extent = (VkExtent3D){ + .width = img_hand->width, + .height = img_hand->height, + .depth = 1, + }, + .mipLevels = 1, + .arrayLayers = 1, + .samples = VK_SAMPLE_COUNT_1_BIT, + .tiling = VK_IMAGE_TILING_OPTIMAL, + .usage = img_hand->usage, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + }; + if (vkCreateImage(device, &crinfo, NULL, &img_hand->image) != VK_SUCCESS) + abortf("vkCreateImage"); + VkMemoryRequirements memory_requirements; + vkGetImageMemoryRequirements(device, img_hand->image, &memory_requirements); + + memory_types_allowed |= memory_requirements.memoryTypeBits; + offset = margaret_align_start_of_buffer(offset, memory_requirements.alignment); + img_hand->offset = offset; + offset = offset + memory_requirements.size; + } + + VkMemoryAllocateInfo alloc_info = { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .allocationSize = offset, + .memoryTypeIndex = margaret_find_memory_type (physical_device, memory_types_allowed, properties), + }; + + VkDeviceMemory memory; + if (vkAllocateMemory(device, &alloc_info, NULL, &memory) != VK_SUCCESS) { + abortf("Having trouble allocating %lu bytes with memory type %u\n", alloc_info.allocationSize, alloc_info.memoryTypeIndex); + } + + for (size_t i = 0; i < buffer_hands.len; i++) { + MargaretBufferInMemoryInfo* buf_hand = SpanMargaretBufferInMemoryInfo_at(buffer_hands, i); + if (vkBindBufferMemory(device, buf_hand->buffer, memory, buf_hand->offset) != VK_SUCCESS) + abortf("vkBindBufferMemory"); + } + + for (size_t i = 0; i < image_hands.len; i++) { + MargaretImageInMemoryInfo* img_hand = SpanMargaretImageInMemoryInfo_at(image_hands, i); + if (vkBindImageMemory(device, img_hand->image, memory, 
img_hand->offset) != VK_SUCCESS) + abortf("vkBindImageMemory"); + } + return memory; +} + +// Don't need that, we create staging (transfer_src_bit) the other way +// template +// BufferInMemoryInfo buffer_crinfo_of_staging_vbo(size_t n) { + // return BufferInMemoryInfo{.sz = sizeof(TV) * n, .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT }; +// } +// BufferInMemoryInfo buffer_crinfo_of_staging_ebo(size_t n) { +// return BufferInMemoryInfo{.sz = sizeof(uint32_t) * n, .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT }; +// } +// +// BufferInMemoryInfo buffer_crinfo_of_staging_texture_rgba(uint32_t w, uint32_t h) { +// return BufferInMemoryInfo{.sz = 4ull * w * h, .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT}; +// } + +#define margaret_prep_buffer_mem_info_of_gpu_vbo_Definition(TV) \ +MargaretBufferInMemoryInfo TV##_buffer_crinfo_of_gpu_vbo(size_t n) { \ + return (MargaretBufferInMemoryInfo){ \ + .sz = safe_mul_U64(sizeof(TV), n), \ + .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT \ + }; \ +} + +MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_gpu_ebo(size_t n) { + return (MargaretBufferInMemoryInfo){ .sz = sizeof(uint32_t) * n, + .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT }; +} + +// Not very useful (but I used it anyway) +MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_small_local_ubo(size_t struct_sz) { + return (MargaretBufferInMemoryInfo){ .sz = struct_sz, .usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT }; +} + +MargaretImageInMemoryInfo image_crinfo_of_gpu_texture_rgba(uint32_t w, uint32_t h) { + return (MargaretImageInMemoryInfo){ .width = w, .height = h, .format = VK_FORMAT_R8G8B8A8_SRGB, + .usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT }; +} + +MargaretImageInMemoryInfo image_crinfo_of_zbuffer(uint32_t max_width, uint32_t max_height, VkFormat zbuf_format) { + return (MargaretImageInMemoryInfo){ .width = max_width, .height = max_height, .format = zbuf_format, + .usage = 
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT }; +} + +MargaretBufferInMemoryInfo margaret_prep_buffer_mem_info_of_gpu_ubo(size_t struct_sz) { + return (MargaretBufferInMemoryInfo){ .sz = struct_sz, + .usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT }; +} + +// Crutch for vulkan +VkCommandBuffer margaret_alloc_and_begin_single_use_command_buffer(VkDevice device, VkCommandPool command_pool) { + VkCommandBuffer command_buffers[1]; + VkCommandBufferAllocateInfo alloc_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandPool = command_pool, + .commandBufferCount = ARRAY_SIZE(command_buffers), + }; + + if (vkAllocateCommandBuffers(device, &alloc_info, command_buffers) != VK_SUCCESS) + abortf("vkAllocateCommandBuffers"); + VkCommandBuffer copying_command_buffer = command_buffers[0]; + VkCommandBufferBeginInfo beginfo = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + + if (vkBeginCommandBuffer(copying_command_buffer, &beginfo) != VK_SUCCESS) + abortf("vkBeginCommandBuffer"); + return command_buffers[0]; +} + +void margaret_end_and_submit_and_free_command_buffer( + VkDevice device, VkCommandPool command_pool, VkQueue graphics_queue, + VkCommandBuffer cmd_buffer + ) { + if (vkEndCommandBuffer(cmd_buffer) != VK_SUCCESS) + abortf("vkEndCommandBuffer"); + + VkSubmitInfo submits_info[1] = {(VkSubmitInfo){ + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .commandBufferCount = 1, + .pCommandBuffers = &cmd_buffer, + }}; + if (vkQueueSubmit(graphics_queue, ARRAY_SIZE(submits_info), submits_info, VK_NULL_HANDLE) != VK_SUCCESS) + abortf("vkQueueSubmit"); + if (vkQueueWaitIdle(graphics_queue) != VK_SUCCESS) + abortf("vkQueueWaitIdle"); + vkFreeCommandBuffers(device, command_pool, 1, &cmd_buffer); +} + +// For application initialization purposes only +void margaret_copy_buffer_imm ( + VkDevice device, VkCommandPool 
command_pool, VkQueue graphics_queue, + VkBuffer dest_buffer, VkBuffer src_buffer, VkDeviceSize buffer_size + ) { + VkCommandBuffer cmd_buffer = margaret_alloc_and_begin_single_use_command_buffer(device, command_pool); + + VkBufferCopy regions_to_copy[1] = {(VkBufferCopy){.srcOffset = 0, .dstOffset = 0, .size = buffer_size}}; + vkCmdCopyBuffer(cmd_buffer, src_buffer, dest_buffer, ARRAY_SIZE(regions_to_copy), regions_to_copy); + + margaret_end_and_submit_and_free_command_buffer(device, command_pool, graphics_queue, cmd_buffer); +} + +// For application initialization purposes only +void transition_image_layout ( + VkDevice device, VkCommandPool command_pool, VkQueue graphics_queue, + VkImage image, VkImageLayout old_layout, VkImageLayout new_layout, + VkPipelineStageFlags src_stage_mask, VkAccessFlags src_access_mask, + VkPipelineStageFlags dst_stage_mask, VkAccessFlags dst_access_mask + ) { + VkCommandBuffer cmd_buffer = margaret_alloc_and_begin_single_use_command_buffer(device, command_pool); + VkImageMemoryBarrier barrier = { + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .srcAccessMask = src_access_mask, + .dstAccessMask = dst_access_mask, + .oldLayout = old_layout, + .newLayout = new_layout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = image, + .subresourceRange = (VkImageSubresourceRange){ + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + }; + vkCmdPipelineBarrier(cmd_buffer, src_stage_mask, dst_stage_mask, + // Flags + 0, + 0, NULL, + 0, NULL, + 1, &barrier + ); + margaret_end_and_submit_and_free_command_buffer(device, command_pool, graphics_queue, cmd_buffer); +} + +// For application initialization purposes only +void margaret_copy_buffer_to_trans_dst_optimal_image ( + VkDevice device, VkCommandPool command_pool, VkQueue graphics_queue, + const MargaretImageInMemoryInfo* dst_image, VkBuffer src_buffer + ) 
{ + VkCommandBuffer cmd_buffer = margaret_alloc_and_begin_single_use_command_buffer(device, command_pool); + VkBufferImageCopy region = { + .bufferOffset = 0, + .bufferRowLength = 0, + .bufferImageHeight = 0, + .imageSubresource = (VkImageSubresourceLayers){ + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = 0, + .baseArrayLayer = 0, + .layerCount = 1, + }, + .imageOffset = {0, 0, 0}, + .imageExtent = { + .width = dst_image->width, + .height = dst_image->height, + .depth = 1 + }, + }; + vkCmdCopyBufferToImage(cmd_buffer, src_buffer, dst_image->image, + // We assume that image was already transitioned to optimal layout transition_image_layout + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion); + + margaret_end_and_submit_and_free_command_buffer(device, command_pool, graphics_queue, cmd_buffer); +} + +// For application initialization purposes only +void margaret_copy_buffer_to_texture_for_frag_shader_imm( + VkDevice device, VkCommandPool command_pool, VkQueue graphics_queue, + const MargaretImageInMemoryInfo* dst_image, VkBuffer src_buffer + ) { + transition_image_layout(device, command_pool, graphics_queue, dst_image->image, + // previous and new layouts + VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + // src stage and access + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, + // destination stage and access + VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT + ); + margaret_copy_buffer_to_trans_dst_optimal_image(device, command_pool, graphics_queue, dst_image, src_buffer); + transition_image_layout(device, command_pool, graphics_queue, dst_image->image, + // previous and new layouts + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL, + // src stage and access + VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, + // destination stage and access + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_SHADER_READ_BIT + ); +} + +// For texture +VkImageView margaret_create_view_for_image ( + VkDevice device, const 
MargaretImageInMemoryInfo* image, VkImageAspectFlags aspect_flags + ) { + VkImageViewCreateInfo crinfo = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = image->image, + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = image->format, + .subresourceRange = (VkImageSubresourceRange){ + .aspectMask = aspect_flags, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + }; + VkImageView view; + if (vkCreateImageView(device, &crinfo, NULL, &view) != VK_SUCCESS) + abortf("vkCreateImageView"); + return view; +} + +// For texture +VkSampler margaret_create_sampler(VkPhysicalDevice physical_device, VkDevice device) { + VkPhysicalDeviceProperties physical_device_properties; + vkGetPhysicalDeviceProperties(physical_device, &physical_device_properties); + VkPhysicalDeviceFeatures physical_device_features; + vkGetPhysicalDeviceFeatures(physical_device, &physical_device_features); + VkSamplerCreateInfo crinfo = { + .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, + .magFilter = VK_FILTER_LINEAR, + .minFilter = VK_FILTER_LINEAR, + .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR, + .addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT, + .addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT, + .addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT, + .mipLodBias = 0.f, // will understand that when I generate mipmaps + .anisotropyEnable = physical_device_features.samplerAnisotropy, + // only if feature device_used_features.samplerAnisotropy was enabled + .maxAnisotropy = physical_device_properties.limits.maxSamplerAnisotropy, + .compareEnable = VK_FALSE, + .compareOp = VK_COMPARE_OP_ALWAYS, + .minLod = 0.f, + .maxLod = 0.f, + .borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK, + .unnormalizedCoordinates = VK_FALSE, + }; + VkSampler sampler; + if (vkCreateSampler(device, &crinfo, NULL, &sampler) != VK_SUCCESS) + abortf("vkCreateSampler"); + return sampler; +} + +SpanT_struct_Definition(VkFormat) +SpanT_method_Definition(VkFormat) + 
+OptionT_struct_Definition(VkFormat) +OptionT_method_Definition(VkFormat) + +OptionVkFormat margaret_find_supported_format_for_linear_tiling( + VkPhysicalDevice physical_device, ConstSpanVkFormat candidates, VkFormatFeatureFlags required_features + ) { + for (size_t i = 0; i < candidates.len; i++) { + VkFormat format = *ConstSpanVkFormat_at(candidates, i); + VkFormatProperties properties; + vkGetPhysicalDeviceFormatProperties(physical_device, format, &properties); + if ((properties.linearTilingFeatures & required_features) == required_features) + return Some_VkFormat(format); + } + return None_VkFormat(); +} + +OptionVkFormat margaret_find_supported_format_for_optimal_tiling( + VkPhysicalDevice physical_device, ConstSpanVkFormat candidates, VkFormatFeatureFlags required_features + ) { + for (size_t i = 0; i < candidates.len; i++) { + VkFormat format = *ConstSpanVkFormat_at(candidates, i); + VkFormatProperties properties; + vkGetPhysicalDeviceFormatProperties(physical_device, format, &properties); + if ((properties.optimalTilingFeatures & required_features) == required_features) + return Some_VkFormat(format); + } + return None_VkFormat(); +} + +OptionVkFormat margaret_find_supported_zbuffer_format(VkPhysicalDevice physical_device) { + VkFormat candidates[3] = { VK_FORMAT_D32_SFLOAT, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT }; + return margaret_find_supported_format_for_optimal_tiling(physical_device, + (ConstSpanVkFormat){.data = candidates, .len = ARRAY_SIZE(candidates)}, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT); +} + +VkDescriptorPool margaret_create_descriptor_set_pool(VkDevice device, + uint32_t ubo_descriptor_count, uint32_t image_sampler_descriptor_count, uint32_t max_sets + ) { + VkDescriptorPoolSize sizes[] = { + (VkDescriptorPoolSize){ + .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = ubo_descriptor_count + }, + // todo: fix it + // (VkDescriptorPoolSize){ + // .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + // 
.descriptorCount = image_sampler_descriptor_count + // }, + }; + // todo: check for a case when image_sampler_descriptor_count or ubo_descriptor_count is zero + VkDescriptorPoolCreateInfo crinfo = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + .maxSets = max_sets, + .poolSizeCount = ARRAY_SIZE(sizes), + .pPoolSizes = sizes, + }; + VkDescriptorPool descriptor_pool; + if (vkCreateDescriptorPool(device, &crinfo, NULL, &descriptor_pool) != VK_SUCCESS) + abortf("vkCreateDescriptorPool"); + return descriptor_pool; +} + +void margaret_record_buf_copying_command_buf( + VkDevice device, VkCommandBuffer command_buffer, + VkBuffer dest_buffer, VkBuffer src_buffer, VkDeviceSize buffer_size + ) { + VkCommandBufferBeginInfo beginfo = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, }; + if (vkBeginCommandBuffer(command_buffer, &beginfo) != VK_SUCCESS) + abortf("vkBeginCommandBuffer"); + VkBufferCopy regions_to_copy[1] = {(VkBufferCopy){.srcOffset = 0, .dstOffset = 0, .size = buffer_size}}; + vkCmdCopyBuffer(command_buffer, src_buffer, dest_buffer, ARRAY_SIZE(regions_to_copy), regions_to_copy); + if (vkEndCommandBuffer(command_buffer) != VK_SUCCESS) + abortf("vkEndCommandBuffer"); +} + + +#endif diff --git a/src/l2/margaret/stringop.h b/src/l2/margaret/stringop.h new file mode 100644 index 0000000..6f42ca9 --- /dev/null +++ b/src/l2/margaret/stringop.h @@ -0,0 +1,53 @@ +#ifndef PROTOTYPE1_SRC_L2_MARGARET_STRINGOP_H +#define PROTOTYPE1_SRC_L2_MARGARET_STRINGOP_H + +#include "../../l1/core/VecSpan_int_primitives.h" + +U8 U8_to_lowercase(U8 ch) { + if ('A' <= ch && ch <= 'Z') + return ch - 'A' + 'a'; + return ch; +} + +void string_to_lowercase(VecU8* str) { + for (size_t i = 0; i < str->len; i++) { + *VecU8_at(str, i) = U8_to_lowercase(*VecU8_cat(str, i)); + } +} + +// Worst case time complexity: O(nm) +bool string_contains_string_ignorecase(VecU8 str1, VecU8 str2) { + string_to_lowercase(&str1); + string_to_lowercase(&str2); + size_t L = str2.len; + for 
(size_t i = 0; i + L <= str1.len; i++) { + for (size_t j = 0; j < L; j++) { + if (*VecU8_at(&str1, i + j) != *VecU8_at(&str2, j)) + goto e; + } + return true; + e: + } + return false; +} + +bool strings_in_spans_equal(ConstSpanU8 a, ConstSpanU8 b) { + if (a.len != b.len) + return false; + for (size_t i = 0; i < a.len; i++) { + if (*ConstSpanU8_at(a, i) != *ConstSpanU8_at(b, i)) + return false; + } + return true; +} + +bool string_in_string_vec(ConstSpanU8 a, const VecVecU8* B) { + for (size_t i = 0; i < B->len; i++) { + const VecU8* b = VecVecU8_cat(B, i); + if (strings_in_spans_equal(a, VecU8_to_ConstSpanU8(b))) + return true; + } + return false; +} + +#endif diff --git a/src/l2/tests/r0.c b/src/l2/tests/r0.c new file mode 100644 index 0000000..b9cce49 --- /dev/null +++ b/src/l2/tests/r0.c @@ -0,0 +1,689 @@ +#include "../margaret/margaret.h" +#include "../../../gen/geom.h" +#include +#include "../../l1/system/fileio.h" +#include +// Only for linux +#include + +typedef struct { + vec3 pos; + vec3 color; +} OA_Vertex; + +typedef struct { + vec3 s; + float _0; +} MyUbo; + +typedef struct { + VkBuffer vbo; + VkBuffer ebo; + size_t vert_count; +} OA_ObjectOnScene; + +#define OA_ObjectOnScene_drop(vp) {} +#define OA_ObjectOnScene_clone(vp) (*(vp)) + +VecT_trivmove_struct_Definition(OA_ObjectOnScene) +VecT_trivmove_method_Definition(OA_ObjectOnScene) +VecT_primitive_zeroinit_method_Definition(OA_ObjectOnScene) + +typedef struct { + VecOA_ObjectOnScene oa_objects; + VkClearColorValue color; +} Scene; + +// todo: generate this function in l2 +VkRenderPass create_render_pass(VkDevice logical_device, VkFormat image_format) { + // Color attachments array for our render pass + VkAttachmentDescription all_attachments[1] = { (VkAttachmentDescription){ + .format = image_format, + .samples = VK_SAMPLE_COUNT_1_BIT, + .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, + .storeOp = VK_ATTACHMENT_STORE_OP_STORE, + .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE, + .stencilStoreOp = 
VK_ATTACHMENT_STORE_OP_DONT_CARE, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + } }; + + // For our one single render subpass + VkAttachmentReference color_attachment_refs[1] = { (VkAttachmentReference){ + .attachment = 0, + .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + } }; + + VkSubpassDescription subpasses_descr[1] = { (VkSubpassDescription){ + .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS, + .colorAttachmentCount = ARRAY_SIZE(color_attachment_refs), + .pColorAttachments = color_attachment_refs, + + } }; + + VkSubpassDependency subpass_dependencies[1] = { + // subpass_0_external + (VkSubpassDependency) { + .srcSubpass = VK_SUBPASS_EXTERNAL, + .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + .srcAccessMask = 0, + .dstSubpass = 0, + .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + }}; + + VkRenderPassCreateInfo render_pass_crinfo = { + .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, + .attachmentCount = ARRAY_SIZE(all_attachments), + .pAttachments = all_attachments, + .subpassCount = ARRAY_SIZE(subpasses_descr), + .pSubpasses = subpasses_descr, + .dependencyCount = ARRAY_SIZE(subpass_dependencies), + .pDependencies = subpass_dependencies, + }; + VkRenderPass render_pass; + if (vkCreateRenderPass(logical_device, &render_pass_crinfo, NULL, &render_pass) != VK_SUCCESS) + abortf("vkCreateRenderPass"); + return render_pass; +} + +// todo: generate this class in l2 +typedef struct { + VkPipelineLayout pipeline_layout; + VkPipeline pipeline; + VkDescriptorSetLayout descriptor_set_layout; +} PipelineHands; + +void destroy_graphics_pipeline_hands(VkDevice device, PipelineHands hands) { + vkDestroyPipeline(device, hands.pipeline, NULL); + vkDestroyPipelineLayout(device, hands.pipeline_layout, NULL); + vkDestroyDescriptorSetLayout(device, hands.descriptor_set_layout, NULL); +} + +// todo: generate this function in l2 
+PipelineHands create_graphics_pipeline( + VkDevice device, VkRenderPass render_pass, uint32_t subpass + ) { + VecU8 vert_bin_code = read_whole_file_or_abort("test_shaders/spv/0/vert.spv"); + VecU8 frag_bin_code = read_whole_file_or_abort("test_shaders/spv/0/frag.spv"); + VkShaderModule vert_module = margaret_VkShaderModule_new(device, vert_bin_code); + VkShaderModule frag_module = margaret_VkShaderModule_new(device, frag_bin_code); + VecU8_drop(vert_bin_code); + VecU8_drop(frag_bin_code); + + VkPipelineShaderStageCreateInfo shader_stages_crinfo[2] = { + margaret_shader_stage_vertex_crinfo(vert_module), + margaret_shader_stage_fragment_crinfo(frag_module) + }; + + VkVertexInputBindingDescription vertex_bindings[1] = { { + .binding = 0, + .stride = sizeof(OA_Vertex), + .inputRate = VK_VERTEX_INPUT_RATE_VERTEX, + } }; + VkVertexInputAttributeDescription vertex_attributes[2] = { + { + .location = 0, + .binding = 0, + .format = VK_FORMAT_R32G32B32_SFLOAT, + .offset = offsetof(OA_Vertex, pos), + }, + { + .location = 1, + .binding = 0, + .format = VK_FORMAT_R32G32B32_SFLOAT, + .offset = offsetof(OA_Vertex, color), + }, + }; + + VkPipelineVertexInputStateCreateInfo vertex_input_crinfo = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, + .vertexBindingDescriptionCount = ARRAY_SIZE(vertex_bindings), + .pVertexBindingDescriptions = vertex_bindings, + .vertexAttributeDescriptionCount = ARRAY_SIZE(vertex_attributes), + .pVertexAttributeDescriptions = vertex_attributes, + }; + + VkPipelineInputAssemblyStateCreateInfo input_assembly_crinfo = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, + .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, + .primitiveRestartEnable = VK_FALSE, + }; + + VkPipelineViewportStateCreateInfo viewport_state = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, + // We are using dynamic viewport and scissors, that is why we do not attach viewport/scissor values + // when creating a 
rendering pipeline. We will do that later + .viewportCount = 1, + .scissorCount = 1, + }; + + VkPipelineRasterizationStateCreateInfo rasterizer_crinfo = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, + .depthClampEnable = VK_FALSE, + .polygonMode = VK_POLYGON_MODE_FILL, + .cullMode = VK_CULL_MODE_BACK_BIT, + .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE, + .depthBiasEnable = VK_FALSE, + .depthBiasConstantFactor = 0.0f, + .depthBiasClamp = 0.0f, + .depthBiasSlopeFactor = 0.0f, + .lineWidth = 1.0f, + }; + + VkPipelineMultisampleStateCreateInfo multisampling_crinfo = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + .sampleShadingEnable = VK_FALSE, + .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT, + .minSampleShading = 1.0f, + .pSampleMask = NULL, + .alphaToCoverageEnable = VK_FALSE, + .alphaToOneEnable = VK_FALSE, + }; + + // For one framebuffer + VkPipelineColorBlendAttachmentState color_blend_attachments[1] = {(VkPipelineColorBlendAttachmentState){ + .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT, + .blendEnable = VK_FALSE, + }}; + + // For the entire pipeline + VkPipelineColorBlendStateCreateInfo color_blending_crinfo = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, + .logicOpEnable = VK_FALSE, + .logicOp = VK_LOGIC_OP_COPY, + .attachmentCount = ARRAY_SIZE(color_blend_attachments), + .pAttachments = color_blend_attachments, + // Blend constants specified heres + }; + + VkDynamicState dynamic_states[2] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; + VkPipelineDynamicStateCreateInfo dynamic_state_crinfo = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, + .dynamicStateCount = ARRAY_SIZE(dynamic_states), + .pDynamicStates = dynamic_states, + }; + + VkDescriptorSetLayoutBinding bindings_for_my_descr_set_layout[] = { + // some random binding + { + // Binding in shader + .binding = 0, + 
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + // our shader variable is not an array of descriptors, so this stays 1 + .descriptorCount = 1, + .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT, + }, + // VkDescriptorSetLayoutBinding { + // .binding = 1, + // .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + // .descriptorCount = 1, + // .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT, + // }, + }; + VkDescriptorSetLayoutCreateInfo descriptor_set_layout_crinfo = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + .bindingCount = ARRAY_SIZE(bindings_for_my_descr_set_layout), + .pBindings = bindings_for_my_descr_set_layout, + }; + VkDescriptorSetLayout my_descriptor_set_layout; + if (vkCreateDescriptorSetLayout(device, &descriptor_set_layout_crinfo, NULL, &my_descriptor_set_layout) != VK_SUCCESS) + abortf("vkCreateDescriptorSetLayout"); + + VkPipelineLayoutCreateInfo layout_crinfo = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, + .setLayoutCount = 1, + .pSetLayouts = &my_descriptor_set_layout, + .pushConstantRangeCount = 0, + .pPushConstantRanges = NULL, + }; + VkPipelineLayout pipeline_layout; + if (vkCreatePipelineLayout(device, &layout_crinfo, NULL, &pipeline_layout) != VK_SUCCESS) + abortf("vkCreatePipelineLayout"); + // todo: kill myself (update: still todo (update: still not done)) + VkGraphicsPipelineCreateInfo pipeline_crinfo = { + .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, + .stageCount = ARRAY_SIZE(shader_stages_crinfo), + .pStages = shader_stages_crinfo, + .pVertexInputState = &vertex_input_crinfo, + .pInputAssemblyState = &input_assembly_crinfo, + .pViewportState = &viewport_state, + .pRasterizationState = &rasterizer_crinfo, + .pMultisampleState = &multisampling_crinfo, + .pDepthStencilState = NULL, + .pColorBlendState = &color_blending_crinfo, + .pDynamicState = &dynamic_state_crinfo, + .layout = pipeline_layout, + .renderPass = render_pass, + .subpass = subpass, + .basePipelineHandle = 
VK_NULL_HANDLE,
    };

    VkPipeline pipeline;
    if (vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipeline_crinfo, NULL, &pipeline) != VK_SUCCESS)
        abortf("vkCreateGraphicsPipelines");

    // The shader modules are only needed during pipeline creation; the pipeline
    // keeps its own compiled copy of the stages, so they can be destroyed now.
    vkDestroyShaderModule(device, frag_module, NULL);
    vkDestroyShaderModule(device, vert_module, NULL);
    return (PipelineHands){.pipeline_layout = pipeline_layout, .pipeline = pipeline ,.descriptor_set_layout = my_descriptor_set_layout};
}

/*
 * Re-records `command_buffer` from scratch for one frame:
 * resets the buffer, begins `render_pass` on `swapchain_image_framebuffer`
 * (clearing with scene->color), sets the dynamic viewport and scissor to
 * `image_extent`, then issues one indexed draw per object in
 * scene->oa_objects using that object's VBO/EBO and the single uniform
 * descriptor set. Any Vulkan failure aborts via abortf().
 */
void reset_and_record_command_buffer(
    VkCommandBuffer command_buffer, VkRenderPass render_pass,
    const PipelineHands* pipeline_and_layout,
    VkFramebuffer swapchain_image_framebuffer, VkExtent2D image_extent,
    const Scene* scene, VkDescriptorSet my_descriptor_set
    ) {
    if (vkResetCommandBuffer(command_buffer, 0) != VK_SUCCESS)
        abortf("vkResetCommandBuffer");
    VkCommandBufferBeginInfo info_begin = { .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
    if (vkBeginCommandBuffer(command_buffer, &info_begin) != VK_SUCCESS)
        abortf("vkBeginCommandBuffer");

    // One clear value: the color attachment. scene->color changes every frame.
    VkClearValue clear_color[1] = {{.color = scene->color}};
    VkRenderPassBeginInfo renderpass_begin = {
        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
        .renderPass = render_pass,
        .framebuffer = swapchain_image_framebuffer,
        .renderArea.offset = (VkOffset2D){0, 0},
        .renderArea.extent = image_extent,
        .clearValueCount = ARRAY_SIZE(clear_color),
        .pClearValues = clear_color,
    };

    vkCmdBeginRenderPass(command_buffer, &renderpass_begin, VK_SUBPASS_CONTENTS_INLINE);
    vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_and_layout->pipeline);

    // Viewport and scissor are dynamic state in this pipeline, so they must be
    // set on every recording (the pipeline itself carries no fixed values).
    // We forgot that viewport is not built into our pipeline
    // printf("image_extent = {%u, %u}\n", image_extent.width, image_extent.height);
    VkViewport viewport = {
        .x = 0.0f,
        .y = 0.0f,
        .width = (float)(image_extent.width),
        .height = (float)(image_extent.height),
        .minDepth = 0.0f,
        .maxDepth = 1.0f,
    };
    vkCmdSetViewport(command_buffer, 0, 1, &viewport);
    // We forgot that scissors are not built into out pipeline
    VkRect2D scissor = {
        .offset = (VkOffset2D){0, 0},
        .extent = image_extent,
    };
    vkCmdSetScissor(command_buffer, 0, 1, &scissor);

    for (size_t i = 0; i < scene->oa_objects.len; i++) {
        const OA_ObjectOnScene* obj = VecOA_ObjectOnScene_cat(&scene->oa_objects, i);
        VkBuffer attached_buffers[1] = { obj->vbo };
        // We use our whole buffer, no need for offset
        VkDeviceSize offsets_in_buffers[1] = {0};
        vkCmdBindVertexBuffers(command_buffer, 0, 1, attached_buffers, offsets_in_buffers);
        vkCmdBindIndexBuffer(command_buffer, obj->ebo, 0, VK_INDEX_TYPE_UINT32);
        // NOTE(review): the same descriptor set is re-bound on every iteration;
        // binding it once before the loop would be equivalent and cheaper.
        vkCmdBindDescriptorSets(
            command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_and_layout->pipeline_layout, 0,
            1, &my_descriptor_set, 0, NULL);
        // NOTE(review): the first argument of vkCmdDrawIndexed is indexCount,
        // but vert_count is passed. This only works while each object has
        // exactly as many indices as vertices (true for these triangles) —
        // confirm before adding meshes with shared vertices.
        vkCmdDrawIndexed(command_buffer, obj->vert_count, 1, 0, 0, 0);
    }

    vkCmdEndRenderPass(command_buffer);
    if (vkEndCommandBuffer(command_buffer) != VK_SUCCESS)
        abortf("vkEndCommandBuffer");
}

/*
 * Destroys the per-swapchain resources in *swfb and rebuilds them for the
 * surface's current size. Blocks the whole device (vkDeviceWaitIdle) first,
 * because the bundle's semaphores/fence are torn down and recreated.
 * On return, *swfb holds the fresh bundle.
 */
void recreate_swapchain(
    VkPhysicalDevice physical_device, VkDevice device, MargaretChosenQueueFamilies queue_fam, VkSurfaceKHR surface,
    VkRenderPass render_pass, MargaretSwapchainBundle* swfb) {
    // We are about stop program and rebuild our sem+sem+fence synchronization mechanism
    vkDeviceWaitIdle(device);
    VkSwapchainKHR old_swapchain = MargaretSwapchainBundle_pop_swapchain_drop_rest(device, *swfb);
    // old swfb is 83% dropped
    ResultMargaretChosenSwapchainDetailsOrConstSpanU8 swapchain_details_res = margaret_choose_swapchain_details(physical_device, surface);
    assert(swapchain_details_res.variant == Result_Ok);
    MargaretChosenSwapchainDetails swapchain_details = swapchain_details_res.ok;
    // NOTE(review): swfb->swapchain is passed as the oldSwapchain for resource
    // reuse — this relies on pop_swapchain_drop_rest NOT clearing the field
    // in *swfb; confirm that invariant.
    MargaretSwapchainBundle new_swfb = MargaretSwapchainBundle_new(device, queue_fam, swapchain_details, surface,
        render_pass, swfb->swapchain);
    vkDestroySwapchainKHR(device, old_swapchain, NULL);
    // Now old swfb is 100% dropped
    *swfb = new_swfb;
}
margaret_prep_buffer_mem_info_of_gpu_vbo_Definition(OA_Vertex)

/*
 * Compiles the test shaders by running ./test_shader_compile.sh.
 * Exits the process on any failure: system() error, non-zero script exit
 * status, or abnormal termination (signal).
 * Note: system() goes through the shell, which is acceptable here only
 * because the command is a fixed string literal, never user input.
 */
void prepare_shaders() {
    int ret = system("./test_shader_compile.sh");
    if (ret == -1) {
        perror("system() failed");
        exit(EXIT_FAILURE);
    } else if (WIFEXITED(ret) && WEXITSTATUS(ret) != 0) {
        fprintf(stderr, "Error: script exited with code %d\n", WEXITSTATUS(ret));
        exit(EXIT_FAILURE);
    } else if (!WIFEXITED(ret)) {
        fprintf(stderr, "Error: script terminated abnormally\n");
        exit(EXIT_FAILURE);
    }
}

/*
 * Test entry point: sets up an X11 window + Vulkan instance/device/swapchain,
 * uploads two triangles (VBO+EBO each) and one uniform buffer to device-local
 * memory through a single reusable host-visible staging buffer, then runs a
 * render loop that re-uploads the UBO and re-records the command buffer every
 * frame. Tears everything down in reverse order on exit.
 */
int main() {
    prepare_shaders();

    ConstSpanU8 GPU = cstr("amd");
    ConstSpanU8 bugged_GPU = cstr("nvidia");
    bool ENABLE_VALIDATION_LAYERS = true;
    // U32 MAX_WIN_WIDTH = 1900;
    // U32 MAX_WIN_HEIGHT = 800;

    // int MAX_FRAMES_IN_FLIGHT = 2;

    MargaretSingleWindowSetup x = MargaretSingleWindowSetup_new();
    Margaret_WEP wep = Margaret_WEP_new(x.dpy, x.win);
    XMapWindow(x.dpy, x.win);

    MargaretInstanceAndItsDebug inst_hands = MargaretInstanceAndItsDebug_new(ENABLE_VALIDATION_LAYERS);
    VkInstance instance = inst_hands.instance;

    // print_instance_available_extensions(instance);
    // print_instance_available_layers(instance);

    VkSurfaceKHR surface = margaret_create_surface(instance, &x);

    VkPhysicalDevice physical_device = margaret_select_one_physical_device(instance, surface, GPU, bugged_GPU);

    // print_physical_device_available_extensions(physical_device);

    ResultMargaretChosenQueueFamiliesOrConstSpanU8 queue_fam_res = margaret_choose_good_queue_families(physical_device, surface);
    assert(queue_fam_res.variant == Result_Ok);
    MargaretChosenQueueFamilies queue_fam = queue_fam_res.ok;

    VkDevice device = margaret_create_logical_device(physical_device, queue_fam);

    VkQueue graphics_queue;
    vkGetDeviceQueue(device, queue_fam.for_graphics, 0, &graphics_queue);
    VkQueue presentation_queue;
    // NOTE(review): the presentation queue is fetched with the GRAPHICS family
    // index. If margaret_choose_good_queue_families can pick a distinct
    // presentation family, this should use that index instead — confirm.
    vkGetDeviceQueue(device, queue_fam.for_graphics, 0, &presentation_queue);

    ResultMargaretChosenSwapchainDetailsOrConstSpanU8 swapchain_details_res = margaret_choose_swapchain_details(physical_device, surface);
    assert(swapchain_details_res.variant == Result_Ok);
    MargaretChosenSwapchainDetails swapchain_details = swapchain_details_res.ok;

    // We hope that the image format won't be changed even when window gets resized
    // VkSurfaceFormatKHR image_format = choose_surface_format_i_want(swap_chain_support).value();

    VkRenderPass render_pass = create_render_pass(device, swapchain_details.surface_format.format);
    PipelineHands pipeline_hands = create_graphics_pipeline(device, render_pass, 0);

    MargaretSwapchainBundle swfb = MargaretSwapchainBundle_new(device, queue_fam, swapchain_details, surface, render_pass, NULL);

    // Filling scene info
    OA_Vertex obj1_vertexes[] = {
        (OA_Vertex){ .pos = {-0.6f, -1.0f, 0}, .color = {1.f, 0, 0} },
        (OA_Vertex){ .pos = {-0.8f, -0.8f, 0}, .color = {0, 1.f, 0} },
        (OA_Vertex){ .pos = {-0.8f, -0.6f, 0}, .color = {0, 0, 1.f} },
    };
    uint32_t obj1_indices[] = { 0, 1, 2 };
    OA_Vertex obj2_vertexes[] = {
        (OA_Vertex){ .pos = {0.9f, 0.9f}, .color = {1.f, 0, 0} },
        (OA_Vertex){ .pos = {0.4f, -0.9f, 0}, .color = {0, 1.f, 0} },
        (OA_Vertex){ .pos = {-0.2f, 1.f}, .color = {0, 0, 1.f} },
    };
    uint32_t obj2_indices[] = {0, 1, 2};

    // todo: add a texture into the mix
    // We have only one staging buffer in host memory (because we don't really need more)
    // Its size is the max of everything that will ever be staged through it.
    MargaretBufferInMemoryInfo host_mem_buffer = (MargaretBufferInMemoryInfo){ .sz =
        MAX_U64(sizeof(obj1_vertexes),
        MAX_U64(sizeof(obj1_indices),
        MAX_U64(sizeof(obj2_vertexes),
        MAX_U64(sizeof(obj2_indices),
        MAX_U64(sizeof(MyUbo), 0)))))
        , .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT };
    VkDeviceMemory host_mem = margaret_initialize_buffers_and_images(physical_device, device,
        (SpanMargaretBufferInMemoryInfo){.data = &host_mem_buffer, .len = 1}, (SpanMargaretImageInMemoryInfo){ 0 },
        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);

    // All device-local buffers share one VkDeviceMemory allocation:
    // [0]=vbo1, [1]=ebo1, [2]=vbo2, [3]=ebo2, [4]=ubo.
    MargaretBufferInMemoryInfo device_mem_buffers[] = {
        OA_Vertex_buffer_crinfo_of_gpu_vbo(ARRAY_SIZE(obj1_vertexes)),
        margaret_prep_buffer_mem_info_of_gpu_ebo(ARRAY_SIZE(obj1_indices)),
        OA_Vertex_buffer_crinfo_of_gpu_vbo(ARRAY_SIZE(obj2_vertexes)),
        margaret_prep_buffer_mem_info_of_gpu_ebo(ARRAY_SIZE(obj2_indices)),
        margaret_prep_buffer_mem_info_of_gpu_ubo(sizeof(MyUbo)),
    };
    VkDeviceMemory device_mem = margaret_initialize_buffers_and_images(physical_device, device,
        (SpanMargaretBufferInMemoryInfo){ .data = device_mem_buffers, .len = ARRAY_SIZE(device_mem_buffers)},
        (SpanMargaretImageInMemoryInfo){ 0 }, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    MargaretBufferInMemoryInfo device_vbo_1_buffer = device_mem_buffers[0];
    MargaretBufferInMemoryInfo device_ebo_1_buffer = device_mem_buffers[1];
    MargaretBufferInMemoryInfo device_vbo_2_buffer = device_mem_buffers[2];
    MargaretBufferInMemoryInfo device_ebo_2_buffer = device_mem_buffers[3];
    MargaretBufferInMemoryInfo device_ubo_my_buffer = device_mem_buffers[4];
    // device_mem_buffers may be considered invalidated, forgotten you might say

    VkCommandPool command_pool = margaret_create_resettable_command_pool(device, queue_fam.for_graphics);
    VkCommandBuffer rendering_command_buffer = margaret_allocate_command_buffer(device, command_pool);

    // Pre-recorded once; re-submitted every frame to copy the freshly written
    // staging UBO into the device-local UBO.
    VkCommandBuffer uniform_transfer_command_buffer = margaret_allocate_command_buffer(device, command_pool);
    margaret_record_buf_copying_command_buf(device, uniform_transfer_command_buffer,
        device_ubo_my_buffer.buffer, host_mem_buffer.buffer, sizeof(MyUbo));

    // Mapping stays alive for the whole program; HOST_COHERENT memory means no
    // explicit flushes are needed after the memcpys below.
    void* host_mem_buffer_mem;
    if (vkMapMemory(device, host_mem, 0, VK_WHOLE_SIZE, 0, &host_mem_buffer_mem) != VK_SUCCESS)
        abortf("vkMapMemory");
    // Now this is what we will do for each buffer: we first memcpy it into mapped region, then we submit a copying command
    memcpy(host_mem_buffer_mem, obj1_vertexes, sizeof(obj1_vertexes));
    margaret_copy_buffer_imm(device, command_pool, graphics_queue,
        device_vbo_1_buffer.buffer, host_mem_buffer.buffer, sizeof(obj1_vertexes));
    memcpy(host_mem_buffer_mem, obj1_indices, sizeof(obj1_indices));
    margaret_copy_buffer_imm(device, command_pool, graphics_queue,
        device_ebo_1_buffer.buffer, host_mem_buffer.buffer, sizeof(obj1_indices));
    memcpy(host_mem_buffer_mem, obj2_vertexes, sizeof(obj2_vertexes));
    margaret_copy_buffer_imm(device, command_pool, graphics_queue,
        device_vbo_2_buffer.buffer, host_mem_buffer.buffer, sizeof(obj2_vertexes));
    memcpy(host_mem_buffer_mem, obj2_indices, sizeof(obj2_indices));
    margaret_copy_buffer_imm(device, command_pool, graphics_queue,
        device_ebo_2_buffer.buffer, host_mem_buffer.buffer, sizeof(obj2_indices));
    // We sent everything we needed. but host_mem_buffer_mem may be used later

    Scene scene;
    scene.oa_objects = VecOA_ObjectOnScene_new_zeroinit(2);
    *VecOA_ObjectOnScene_at(&scene.oa_objects, 0) = (OA_ObjectOnScene){
        .vbo = device_vbo_1_buffer.buffer, .ebo = device_ebo_1_buffer.buffer, .vert_count = ARRAY_SIZE(obj1_vertexes) };
    *VecOA_ObjectOnScene_at(&scene.oa_objects, 1) = (OA_ObjectOnScene){
        .vbo = device_vbo_2_buffer.buffer, .ebo = device_ebo_2_buffer.buffer, .vert_count = ARRAY_SIZE(obj2_vertexes) };
    // device_vob/ebo_1/2_buffer won't be used anymore

    VkDescriptorPool descriptor_pool = margaret_create_descriptor_set_pool(device, 1, 0, 1);
    VkDescriptorSetAllocateInfo descriptor_sets_alloc_info = {
        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
        .descriptorPool = descriptor_pool,
        .descriptorSetCount = 1,
        .pSetLayouts = &pipeline_hands.descriptor_set_layout,
    };
    VkDescriptorSet my_descriptor_set;
    if (vkAllocateDescriptorSets(device, &descriptor_sets_alloc_info, &my_descriptor_set) != VK_SUCCESS)
        abortf("vkAllocateDescriptorSets");

    // Configuring my descriptor set (of one single descriptor set layout from my pipeline)
    VkDescriptorBufferInfo buffer_info_for_descriptor_0 = {
        .buffer = device_ubo_my_buffer.buffer,
        .offset = 0,
        .range = sizeof(MyUbo),
    };
    // VkDescriptorImageInfo image_info_for_descriptor_1 {
    //     .sampler = my_texture_sampler,
    //     .imageView = my_texture_image_view,
    //     .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
    // };
    VkWriteDescriptorSet descriptor_writes[] = {
        (VkWriteDescriptorSet){
            .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
            .dstSet = my_descriptor_set,
            .dstBinding = 0,
            .dstArrayElement = 0,
            .descriptorCount = 1,
            .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
            .pBufferInfo = &buffer_info_for_descriptor_0,
        },
        // VkWriteDescriptorSet{
        //     .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
        //     .dstSet = descriptor_sets_for_ubo[i],
        //     .dstBinding = 1,
        //     .dstArrayElement = 0,
        //     .descriptorCount = 1,
        //     .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
        //     .pImageInfo = &image_info_for_descriptor_1,
        // },
    };
    vkUpdateDescriptorSets(device, ARRAY_SIZE(descriptor_writes), descriptor_writes, 0, NULL);

    // Mainloop
    margaret_ns_time start = margaret_clock_gettime_monotonic_raw();
    margaret_ns_time prev_key_frame_time = start;
    int frame_count_since_key = 0;

    // margaret_ns_time prev_timestamp = margaret_clock_gettime_monotonic_raw();
    while (true) {
        margaret_ns_time frame_A0 = margaret_clock_gettime_monotonic_raw();
        VecXlib_Event events = margaret_read_x_events(x.dpy);
        for (size_t i = 0; i < events.len; i++)
            Margaret_WEP_update_with_new_event(&wep, VecXlib_Event_cat(&events, i));
        if (wep.should_stop)
            break;
        // Rendering
        // Waiting on in_flight_fence guarantees the previous frame's render
        // submit (which itself waited on the UBO transfer semaphore) finished,
        // so the staging region may be safely overwritten below.
        vkWaitForFences(device, 1, &swfb.in_flight_fence, VK_TRUE, UINT64_MAX);
        uint32_t ij;
        VkResult aq_ret = vkAcquireNextImageKHR(
            device, swfb.swapchain,
            UINT64_MAX, swfb.image_available_semaphore, VK_NULL_HANDLE, &ij
        );
        if (aq_ret == VK_ERROR_OUT_OF_DATE_KHR) {
            fprintf(stderr, "vkAcquireNextImageKHR: VK_ERROR_OUT_OF_DATE_KHR\n");
            recreate_swapchain(physical_device, device, queue_fam, surface, render_pass, &swfb);
            continue;
        } else if (aq_ret == VK_SUBOPTIMAL_KHR) {
            // NOTE(review): on VK_SUBOPTIMAL_KHR an image WAS successfully
            // acquired and image_available_semaphore was signaled; skipping
            // the frame and rebuilding may leave that semaphore signaled —
            // verify the bundle rebuild tolerates this.
            fprintf(stderr, "vkAcquireNextImageKHR: VK_SUBOPTIMAL_KHR\n");
            recreate_swapchain(physical_device, device, queue_fam, surface, render_pass, &swfb);
            continue;
        } else if (aq_ret != VK_SUCCESS) {
            abortf("vkAcquireNextImageKHR");
        }

        vkResetFences(device, 1, &swfb.in_flight_fence);

        // Animated parameters: clear color and the UBO scale vector, both
        // driven by wall-clock time since startup.
        float ae = sinf(margaret_ns_time_sec_diff(start, frame_A0));
        scene.color = (VkClearColorValue){{0.5f, fabsf(ae), .3f, 1.0f}};
        vec3 SS = {ae * ae, 0.5f + 0.5f * ae, 0};

        {
            *(MyUbo *)host_mem_buffer_mem = (MyUbo){ .s = SS };
            VkCommandBuffer command_buffers[1] = { uniform_transfer_command_buffer };
            VkSemaphore signaling_semaphores[1] = { swfb.in_frame_transfer_complete };
            VkSubmitInfo ubo_copying_cmd_buffer_submit = {
                .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
                .commandBufferCount = ARRAY_SIZE(command_buffers),
                .pCommandBuffers = command_buffers,
                .signalSemaphoreCount = ARRAY_SIZE(signaling_semaphores),
                .pSignalSemaphores = signaling_semaphores,
            };
            // NOTE(review): this vkQueueSubmit's result is ignored (the render
            // submit below is checked); also NULL is passed where the fence
            // parameter conventionally takes VK_NULL_HANDLE.
            vkQueueSubmit(graphics_queue, 1, &ubo_copying_cmd_buffer_submit, NULL);
        }

        reset_and_record_command_buffer(rendering_command_buffer, render_pass, &pipeline_hands,
            *VecVkFramebuffer_cat(&swfb.framebuffers, ij), swfb.extent, &scene, my_descriptor_set);

        {
            VkSemaphore waiting_for_semaphores[2] = {
                swfb.image_available_semaphore, swfb.in_frame_transfer_complete
            };
            // NOTE(review): in_frame_transfer_complete is waited at
            // COLOR_ATTACHMENT_OUTPUT, but the UBO it protects is read by the
            // FRAGMENT shader, an earlier pipeline stage. The intended wait
            // stage for it looks like VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT —
            // confirm against the spec's stage-ordering rules.
            VkPipelineStageFlags waiting_stages[2] = {
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
            };
            assert(ARRAY_SIZE(waiting_for_semaphores) == ARRAY_SIZE(waiting_stages));
            // VkCommandBuffer command_buffers[1] = {*VecVkCommandBuffer_cat(&rendering_command_buffers, ij)};
            VkCommandBuffer command_buffers[1] = {rendering_command_buffer};
            VkSemaphore signaling_semaphores[1] = { swfb.render_finished_semaphore };

            VkSubmitInfo cmd_submit_info = {
                .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,

                // We wait for `waiting_for_semaphores` before THESE stages
                // waitSemaphoreCount specifies size for both pWaitSemaphores and pWaitDstStageMask
                .waitSemaphoreCount = ARRAY_SIZE(waiting_for_semaphores),
                .pWaitSemaphores = waiting_for_semaphores,
                .pWaitDstStageMask = waiting_stages,

                .commandBufferCount = ARRAY_SIZE(command_buffers),
                .pCommandBuffers = command_buffers,

                .signalSemaphoreCount = ARRAY_SIZE(signaling_semaphores),
                .pSignalSemaphores = signaling_semaphores,
            };
            if (vkQueueSubmit(graphics_queue, 1, &cmd_submit_info, swfb.in_flight_fence) != VK_SUCCESS)
                abortf("vkQueueSubmit");
        }

        {
            VkSemaphore waiting_for_semaphores[] = { swfb.render_finished_semaphore };
            VkSwapchainKHR swapchains[] = { swfb.swapchain };
            uint32_t image_indices[] = { ij };
            assert( ARRAY_SIZE(swapchains) == ARRAY_SIZE(image_indices) );

            VkPresentInfoKHR present_info = {
                .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
                .waitSemaphoreCount = ARRAY_SIZE(waiting_for_semaphores),
                .pWaitSemaphores = waiting_for_semaphores,

                .swapchainCount = ARRAY_SIZE(swapchains),
                .pSwapchains = swapchains,
                .pImageIndices = image_indices,
                .pResults = NULL,
            };

            VkResult pres_ret = vkQueuePresentKHR(presentation_queue, &present_info);
            if (pres_ret == VK_ERROR_OUT_OF_DATE_KHR) {
                fprintf(stderr, "vkQueuePresentKHR: VK_ERROR_OUT_OF_DATE_KHR\n");
                recreate_swapchain(physical_device, device, queue_fam, surface, render_pass, &swfb);
                continue;
            } else if (pres_ret == VK_SUBOPTIMAL_KHR) {
                fprintf(stderr, "vkQueuePresentKHR: VK_SUBOPTIMAL_KHR\n");
                recreate_swapchain(physical_device, device, queue_fam, surface, render_pass, &swfb);
                continue;
            } else if (pres_ret != VK_SUCCESS) {
                abortf("vkQueuePresentKHR");
            }
        }
        margaret_ns_time frame_B0 = margaret_clock_gettime_monotonic_raw();
        // Log only suspiciously long frames (> 300 ms of CPU-side scheduling).
        if (margaret_ns_time_sec_diff(frame_A0, frame_B0) > 0.3) {
            fprintf(stderr, "]]] Profiling frame scheduling:\n"
                "total: %.6lf\n"
                "]]]",
                margaret_ns_time_sec_diff(frame_A0, frame_B0));
        }
        frame_count_since_key++;
        // Report FPS roughly once per second.
        if (margaret_ns_time_sec_diff(prev_key_frame_time, frame_B0) > 1.0) {
            float fps = (float)frame_count_since_key / margaret_ns_time_sec_diff(prev_key_frame_time, frame_B0);
            printf("FPS: %0.1lf\n", fps);
            frame_count_since_key = 0;
            prev_key_frame_time = frame_B0;
        }
    }
    // Nothing may still be in flight while we destroy objects below.
    vkDeviceWaitIdle(device);
    // The End
    // dropping scene
    VecOA_ObjectOnScene_drop(scene.oa_objects);
    // destroying vulkan objects
    vkDestroyDescriptorPool(device, descriptor_pool, NULL);
    for (size_t i = 0; i < ARRAY_SIZE(device_mem_buffers); i++)
        vkDestroyBuffer(device, device_mem_buffers[i].buffer, NULL);
    vkDestroyBuffer(device, host_mem_buffer.buffer, NULL);
    vkFreeMemory(device, device_mem, NULL);
    vkUnmapMemory(device, host_mem);
    vkFreeMemory(device, host_mem, NULL);
    vkDestroyCommandPool(device, command_pool, NULL);
    MargaretSwapchainBundle_drop_with_device(device, swfb);
    destroy_graphics_pipeline_hands(device, pipeline_hands);
    vkDestroyRenderPass(device, render_pass, NULL);
    vkDestroyDevice(device, NULL);
    vkDestroySurfaceKHR(instance, surface, NULL);
    MargaretInstanceAndItsDebug_drop(inst_hands);
    MargaretSingleWindowSetup_drop(x);
}
diff --git a/src/l2/tests/test_shader_compile.sh b/src/l2/tests/test_shader_compile.sh
new file mode 100755
index 0000000..a8ef12a
--- /dev/null
+++ b/src/l2/tests/test_shader_compile.sh
@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Compiles the test shaders to SPIR-V; prepare_shaders() in r0.c checks the
# exit status. NOTE(review): without `set -e`, only the last glslc's status is
# propagated — consider `set -e` so a failing vert compile is not masked.
cd test_shaders
glslc -o spv/0/vert.spv glsl/0/0.vert
glslc -o spv/0/frag.spv glsl/0/0.frag
diff --git a/src/l2/tests/test_shaders/glsl/0/0.frag b/src/l2/tests/test_shaders/glsl/0/0.frag
new file mode 100644
index 0000000..d80ed74
--- /dev/null
+++ b/src/l2/tests/test_shaders/glsl/0/0.frag
@@ -0,0 +1,13 @@
#version 450

layout(location = 0) in vec3 fsin_color;

layout(location = 0) out vec4 fin_color;

// Matches MyUbo on the C side (binding 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER).
layout(binding = 0) uniform my_ubo {
    vec3 s; // 0 + 12 + 4 — std140: a vec3 member occupies 16 bytes (12 used + 4 pad)
};

void main() {
    // Modulate the interpolated vertex color by the animated uniform scale.
    fin_color = vec4(fsin_color * s, 1.0);
}
diff --git a/src/l2/tests/test_shaders/glsl/0/0.vert
b/src/l2/tests/test_shaders/glsl/0/0.vert
new file mode 100644
index 0000000..c14220b
--- /dev/null
+++ b/src/l2/tests/test_shaders/glsl/0/0.vert
@@ -0,0 +1,23 @@
#version 450

// Per-vertex attributes supplied by the bound VBO
// (must match the OA_Vertex layout on the C side).
layout(location = 0) in vec3 pos;
layout(location = 1) in vec3 color;

layout(location = 0) out vec3 vsout_color;

// NOTE(review): positions[] and colors[] below are unused leftovers from the
// hard-coded-triangle tutorial stage — main() only reads the real vertex
// attributes now. Consider deleting them.
vec2 positions[3] = vec2[](
    vec2(0.0, -0.5),
    vec2(0.5, 0.5),
    vec2(-0.5, 0.5)
);

vec3 colors[3] = vec3[](
    vec3(1.0, 0.0, 0.0),
    vec3(0.0, 1.0, 0.0),
    vec3(0.0, 0.0, 1.0)
);

void main() {
    // Pass the position straight through (no MVP transform yet) and forward
    // the per-vertex color for interpolation into the fragment stage.
    gl_Position = vec4(pos, 1.0);
    vsout_color = color;
}