Saving progress. Rewrote rb_tree_set_map_template_inst.h ENTIRELY just to have Map<K, V> support. Nothing works right now
parent 49ee178eb6
commit e68a16d8fc
@@ -27,12 +27,15 @@ void generate_margaret_eve_for_vulkan_utils() {
    /* For l2/margaret/vulkan_memory_claire.h */
    generate_eve_span_company_for_primitive(l, ns, cstr("MargaretBufferKindDescription"), false, true);
    generate_eve_span_company_for_primitive(l, ns, cstr("MargaretOldBufferResizeRecord"), true, false);
    generate_Option_templ_inst_eve_header(l, ns, (option_template_instantiation_op){
        /* We never need to clone this type. It is actually clonable, but we deliberately
         * instantiate it as non-clonable */
        .T = cstr("BuffRBTreeByLenRespAlign_SetMargaretFreeMemSegment")
    });
    generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMemAllocatorOneBlock"), true, false);
    generate_util_templ_inst_eve_header(l, ns, (util_templates_instantiation_options){
        .T = cstr("MargaretMemAllocatorOneBlock"), .vec = true, .vec_extended = true,
    });
    generate_eve_span_company_for_primitive(l, ns, cstr("MargaretMemAllocatorOneMemType"), true, false);
    generate_eve_span_company_for_primitive(l, ns, cstr("MargaretBufferKindInfo"), true, false);
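For context, the `.vec = true` / `.vec_extended = true` instantiation above is presumably what emits the VecMargaretMemAllocatorOneBlock container used further down in this diff. A minimal usage sketch, assuming the generated Vec API matches the calls that appear later in this commit (_new_zeroinit, _pop):

    /* Sketch only: assumes the generated Vec API mirrors calls seen later in this diff. */
    VecMargaretMemAllocatorOneBlock blocks = VecMargaretMemAllocatorOneBlock_new_zeroinit(4);
    while (blocks.len > 0) {
        MargaretMemAllocatorOneBlock b = VecMargaretMemAllocatorOneBlock_pop(&blocks);
        (void)b;   /* a real caller would hand each block to MargaretMemAllocatorOneBlock_drop */
    }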
@@ -6,7 +6,6 @@
/* We assume that T is trivially movable */
typedef struct {
    SpanU8 T;
    bool t_ptr;
    bool t_integer;
    bool t_primitive;
    bool t_clonable;
@@ -16,20 +15,22 @@ typedef struct {

    /* GT. You probably want it to be a pointer or an integer parameter.
     * Leave empty if you don't need guest data (GT = void).
     * GT must be primitive */
     * GT must be primitive, or, even better, an integer */
    SpanU8 guest_data_T;

    /* If `unconditional_equality` is set, the methods that return a value T
     * or a reference to T are not generated.
     * Support for `unconditional_equality = false` was written, alas, but it should be noted
     * that `unconditional_equality = false` is absolutely useless */
    bool unconditional_equality;
} set_instantiation_op;

void set_instantiation_op_fix(set_instantiation_op* self){
    if (self->t_ptr)
        self->t_integer = true;
    if (self->t_integer)
        self->t_primitive = true;
    if (self->t_primitive)
        self->t_clonable = true;
    assert(self->T.len > 0);
    assert(!self->t_integer || self->alternative_equal.len == 0);
    assert(!self->t_integer || self->alternative_less.len == 0);
    assert((self->alternative_less.len == 0 && self->alternative_equal.len == 0
        && self->alternative_comp_set_name_embed.len == 0
        )||(
@@ -39,6 +40,53 @@ void set_instantiation_op_fix(set_instantiation_op* self){
        assert(self->alternative_comp_set_name_embed.len > 0);
        assert(self->alternative_equal.len > 0 && self->alternative_less.len > 0);
    }
    if (self->t_integer && self->alternative_comp_set_name_embed.len == 0)
        self->unconditional_equality = true;
}

/* We assume K and V are trivially movable */
typedef struct {
    SpanU8 K;
    bool k_integer;
    bool k_primitive;
    bool k_clonable;
    SpanU8 V;
    bool v_integer;
    bool v_primitive;
    bool v_clonable;

    SpanU8 alternative_equal;
    SpanU8 alternative_less;
    SpanU8 alternative_comp_map_name_embed;

    SpanU8 guest_data_T;

    /* If `unconditional_equality` is set, the methods that return a value (K, V)
     * or a reference to K and V are generated so that they return only the V part.
     * Support for `unconditional_equality = false` is not planned.
     */
    bool unconditional_equality;
} map_instantiation_op;

void map_instantiation_op_fix(map_instantiation_op* self){
    if (self->k_integer)
        self->k_primitive = true;
    if (self->k_primitive)
        self->k_clonable = true;
    assert(self->K.len > 0 && self->V.len > 0);
    assert((self->alternative_less.len == 0 && self->alternative_equal.len == 0
        && self->alternative_comp_map_name_embed.len == 0
        )||(
        self->alternative_comp_map_name_embed.len != 0 &&
        (self->alternative_less.len != 0 || self->alternative_equal.len != 0)));
    if (self->guest_data_T.len > 0) {
        assert(self->alternative_comp_map_name_embed.len > 0);
        assert(self->alternative_equal.len > 0 && self->alternative_less.len > 0);
    }
    if (self->k_integer && self->alternative_comp_map_name_embed.len == 0)
        self->unconditional_equality = true;
    if (!self->unconditional_equality)
        abortf("map_instantiation_op_fix::unconditional_equality = false isn't supported\n");
}

#endif
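As a reading aid, a minimal sketch of filling the new map options for an integer key; the concrete K/V names are hypothetical, only the fields and map_instantiation_op_fix come from the header above:

    map_instantiation_op op = {
        .K = cstr("U32"),                       /* hypothetical key type */
        .V = cstr("MargaretBufferKindInfo"),    /* hypothetical value type */
        .k_integer = true,                      /* _fix cascades this to k_primitive and k_clonable */
        .v_primitive = true,
    };
    map_instantiation_op_fix(&op);
    /* With an integer key and no alternative comparator, _fix forces unconditional_equality = true,
     * which is the only supported mode. */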
File diff suppressed because it is too large
@@ -254,19 +254,31 @@ typedef struct {
typedef struct MargaretMemAllocator MargaretMemAllocator;

MargaretMemAllocator MargaretMemAllocator_new(
    VkDevice device, VkPhysicalDevice physical_device, SpanMargaretBufferKindDescription buffer_types, double alpha);
    VkDevice device, VkPhysicalDevice physical_device, SpanMargaretBufferKindDescription buffer_types);

/* Vibe check */
bool MargaretMemAllocator_request_needs_silence(MargaretMemAllocator* self, MargaretMemAllocatorRequest req);

/* Demands + Warnings */
typedef struct {
    /* If defragmentation is performed for some memory types, MargaretMemAllocator warns us that
     * the position structures it filled for those memory types have been updated. If those values
     * (buffer/image handles + sub-buffer positions) are dependencies of other
     * objects, those objects need to be updated (or rebuilt) */
    U32 defragmented_mem_types;
    /* If some set of requests forces MargaretMemAllocator to record Vulkan copy commands,
     * it demands that you actually execute the command buffer you gave it. If this is `true`, it does
     * not necessarily mean that defragmentation is happening right now; defragmentation is indicated by
     * the `defragmented_mem_types` warning field. But if you are doing a DIRECT BUFFER (sub-buffer in terms of
     * Vulkan) RESIZE, that sub-buffer may be copied.
     * It won't affect other data structures in your memory,
     * of course (still, note that the position of your sub-buffer will be updated).
     */
    bool need_command_buffer;
} MargaretMemAllocatorDemands;

/* Appends copy commands into cmd_buff. It may append none. Defragmentation and device memory relocation
 * need copy commands, but a buffer resize may also require copying.
 * If silence is needed, the silence flag should be set, otherwise the method aborts. You can use
 * the _request_needs_silence method to check whether silence is needed, but if you know for sure that you
 * already have silence anyway, you can pass `silence=true`.
 * Return value: true if commands were appended to cmd_buff and need to be executed before any further
 * actions on memory managed by self would make sense */
bool MargaretMemAllocator_carry_out_request(MargaretMemAllocator* self, VkCommandBuffer cmd_buff, bool silence);
 * need copy commands, but a buffer resize may also require copying */
MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
    MargaretMemAllocator* self, VkCommandBuffer cmd_buff, MargaretMemAllocatorRequest request);

void MargaretMemAllocator_wipe_old(MargaretMemAllocator* self);
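A hedged caller-side sketch of the new carry_out_request contract, built only from the declarations and comments above; the variable names and the queue-submission step are assumptions, not part of this commit:

    MargaretMemAllocatorDemands d = MargaretMemAllocator_carry_out_request(&alloc, cmd_buff, request);
    if (d.defragmented_mem_types != 0) {
        /* positions filled by the allocator for these memory types were updated:
         * refresh anything that cached buffer/image handles or sub-buffer offsets */
    }
    if (d.need_command_buffer) {
        /* submit cmd_buff and wait for it, then the old blocks may be wiped */
        MargaretMemAllocator_wipe_old(&alloc);
    }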
@@ -371,6 +383,11 @@ typedef struct {
    void* mapped_memory;
} MargaretMemAllocatorOneBlock;

void MargaretMemAllocatorOneBlock_drop(MargaretMemAllocatorOneBlock self){
    BuffRBTree_SetMargaretMemoryOccupation_drop(self.occupied_memory);
    BuffRBTree_SetMargaretBufferOccupationSubBuffer_drop(self.occupied_buffers);
}

#include "../../../gen/l1/eve/margaret/VecMargaretMemAllocatorOneBlock.h"

/* Used to enumerate both free memory segments in VkDeviceMemory
@@ -432,8 +449,30 @@ typedef const MargaretFreeMemSegment* RefMargaretFreeMemSegment;
#include "../../../gen/l1_5/eve/margaret/BuffRBTreeByLenRespAlign_SetMargaretFreeMemSegment.h"
#include "../../../gen/l1/eve/margaret/OptionBuffRBTreeByLenRespAlign_SetMargaretFreeMemSegment.h"

typedef struct{
    U32 old_mem_block_id;
    U64 old_start;
    U64 old_len;
    U32 new_mem_block_id;
    U64 new_start;
    U64 new_len;
} MargaretOldBufferResizeRecord;

#include "../../../gen/l1/eve/margaret/VecMargaretOldBufferResizeRecord.h"

typedef struct {
    VecMargaretMemAllocatorOneBlock blocks;
    /* old_blocks is usually empty. BUT! When you generate a defragmentation command buffer with
     * MargaretMemAllocator_carry_out_request, this vector is filled with the old blocks, while
     * the `blocks` vector is filled with the newly created blocks.
     * All references in the answer structures are modified immediately so that they point
     * to the right block. After you execute the command buffer
     * that MargaretMemAllocator_carry_out_request generates, you can (and should) wipe out the old blocks
     */
    VecMargaretMemAllocatorOneBlock old_blocks;
    /* Even if your previous set of requests did not cause defragmentation, it could still relocate some data
     * in a sub-buffer that you wanted to resize */
    VecMargaretOldBufferResizeRecord old_buff_resize_record;
    OptionBuffRBTreeByLenRespAlign_SetMargaretFreeMemSegment free_space_in_memory[MARGARET_ALLOC_LIMIT_ALIGNMENT_EXP];
    VkMemoryPropertyFlags mem_properties;
} MargaretMemAllocatorOneMemType;
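A sketch of how a caller might consume old_buff_resize_record to patch a cached sub-buffer position; the MyCachedPos struct is hypothetical, only the record fields come from the header above:

    typedef struct { U32 mem_block_id; U64 start; U64 len; } MyCachedPos;  /* hypothetical caller-side cache */

    static void patch_cached_pos(MyCachedPos* pos, const MargaretOldBufferResizeRecord* r){
        if (pos->mem_block_id == r->old_mem_block_id && pos->start == r->old_start) {
            pos->mem_block_id = r->new_mem_block_id;
            pos->start = r->new_start;
            pos->len = r->new_len;
        }
    }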
@@ -459,23 +498,23 @@ typedef struct {

#include "../../../gen/l1/eve/margaret/VecMargaretBufferKindInfo.h"

#define MARGARET_ALLOC_MAX_ALLOWED_BUFFER_JUTTING 255

/* VkDevice and VkPhysicalDevice stay remembered here. Don't forget that, please */
struct MargaretMemAllocator {
    VecMargaretMemAllocatorOneMemType mem_types;
    VecMargaretBufferKindInfo buffer_types;
    double alpha;
    VkDevice device;
    VkPhysicalDevice physical_device;
};

MargaretMemAllocator MargaretMemAllocator_new(
    VkDevice device, VkPhysicalDevice physical_device, SpanMargaretBufferKindDescription buffer_types, double alpha
){
    VkDevice device, VkPhysicalDevice physical_device, SpanMargaretBufferKindDescription buffer_types){
    VkPhysicalDeviceMemoryProperties phd_props;
    vkGetPhysicalDeviceMemoryProperties(physical_device, &phd_props);
    assert(phd_props.memoryTypeCount < VK_MAX_MEMORY_TYPES);
    MargaretMemAllocator self = {.buffer_types = VecMargaretBufferKindInfo_new_zeroinit(buffer_types.len),
        .alpha = alpha, .device = device, .physical_device = physical_device,
        .device = device, .physical_device = physical_device,
        .mem_types = VecMargaretMemAllocatorOneMemType_new_zeroinit(phd_props.memoryTypeCount)};

    for (size_t i = 0; i < buffer_types.len; i++) {
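A minimal construction sketch for the new three-argument signature; `descs` is a placeholder SpanMargaretBufferKindDescription assumed to be prepared by the caller:

    MargaretMemAllocator alloc = MargaretMemAllocator_new(device, physical_device, descs);
    /* ... issue requests ... */
    MargaretMemAllocator_wipe_old(&alloc);  /* per _wipe_old below, this does nothing while nothing is old */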
@@ -525,25 +564,60 @@ MargaretMemAllocator MargaretMemAllocator_new(
    return self;
}

bool MargaretMemAllocator_request_needs_silence(MargaretMemAllocator* self, MargaretMemAllocatorRequest req){
    return false;
    // todo
}

bool MargaretMemAllocator_carry_out_request(MargaretMemAllocator* self, VkCommandBuffer cmd_buff, bool silence){
    return false;
    // todo: add OLD flag to mem allocator
    // todo:
MargaretMemAllocatorDemands MargaretMemAllocator_carry_out_request(
    MargaretMemAllocator* self, VkCommandBuffer cmd_buff, MargaretMemAllocatorRequest request
){
    MargaretMemAllocator_wipe_old(self);
    for (U8 mi = 0; mi < (U8)self->mem_types.len; mi++) {
        MargaretMemAllocatorOneMemType* x = &self->mem_types.buf[mi];
        // for (U64 i = 0; i < request->)
    }
    return (MargaretMemAllocatorDemands){.defragmented_mem_types = 0, .need_command_buffer = false};
}

void MargaretMemAllocator_wipe_old(MargaretMemAllocator* self){
    // todo
    for (U64 mi = 0; mi < self->mem_types.len; mi++) {
        MargaretMemAllocatorOneMemType* m = &self->mem_types.buf[mi];
        assert(!m->old_blocks.len || !m->old_buff_resize_record.len);
        while (m->old_blocks.len > 0) {
            MargaretMemAllocatorOneBlock block = VecMargaretMemAllocatorOneBlock_pop(&m->old_blocks);
            assert(((m->mem_properties & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) ==
                (block.mapped_memory != NULL));
            if (block.mapped_memory)
                vkUnmapMemory(self->device, block.mem_hand);
            { /* destroying images and buffers from this block. Binary tree traversal takes O(n) time */
                U64 set_it = BuffRBTree_SetMargaretMemoryOccupation_find_min(&block.occupied_memory);
                while (set_it > 0) {
                    assert(set_it < block.occupied_memory.tree.len && set_it > 0);
                    const MargaretMemoryOccupation* occ = &block.occupied_memory.el.buf[set_it - 1];
                    if (occ->variant == MargaretMemoryOccupation_Buffer) {
                        const MargaretMemoryOccupationBuffer* wb = &occ->buf;
                        vkDestroyBuffer(self->device, wb->buffer, NULL);
                    } else if (occ->variant == MargaretMemoryOccupation_Image) {
                        const MargaretMemoryOccupationImage* wi = &occ->img;
                        vkDestroyImage(self->device, wi->image, NULL);
                    }
                    set_it = BuffRBTree_SetMargaretMemoryOccupation_find_next(&block.occupied_memory, set_it);
                }
            }
            vkFreeMemory(self->device, block.mem_hand, NULL);
        }
        /* MargaretOldBufferResizeRecord is a primitive datatype */
        for (U64 ri = 0; ri < m->old_buff_resize_record.len; ri++) {
            const MargaretOldBufferResizeRecord* resize = &m->old_buff_resize_record.buf[ri];
            if (resize->old_mem_block_id != resize->new_mem_block_id || resize->old_start != resize->new_start) {
                // OptionMargaretBufferOccupationSubBuffer delete_me = BuffRBTree_SetMargaretBufferOccupationSubBuffer_pop(&)
                // todo: rewrite it all using maps
            }
        }
    }
}

char* MargaretMemAllocator_get_host_visible_buffer_ptr(
    const MargaretMemAllocator* self, const MargaretMemAllocatorBufferPosition* pos){
    check(pos->memory_type_id < VK_MAX_MEMORY_TYPES);
    const MargaretMemAllocatorOneMemType* memtype = &self->mem_types.buf[pos->memory_type_id];
    assert(memtype->old_blocks.len == 0);
    check((memtype->mem_properties & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
    check(pos->memory_allocation_id < memtype->blocks.len);
    const MargaretMemAllocatorOneBlock* bl = &memtype->blocks.buf[pos->memory_allocation_id];
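The set-iteration convention used in _wipe_old is worth spelling out, since the planned Map<K, V> rewrite will presumably mirror it: find_min/find_next return a 1-based slot index, 0 means "end", and the payload sits at el.buf[index - 1]. A minimal generic loop under that assumption:

    U64 it = BuffRBTree_SetMargaretMemoryOccupation_find_min(&block.occupied_memory);
    while (it > 0) {                                   /* 0 means no more elements */
        const MargaretMemoryOccupation* occ = &block.occupied_memory.el.buf[it - 1];
        /* ... inspect occ ... */
        it = BuffRBTree_SetMargaretMemoryOccupation_find_next(&block.occupied_memory, it);
    }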
@@ -169,4 +169,23 @@ void vkDestroyImage(
    VkImage image,
    const VkAllocationCallbacks* pAllocator);

typedef int VkMemoryMapFlags;

VkResult vkMapMemory(
    VkDevice device,
    VkDeviceMemory memory,
    VkDeviceSize offset,
    VkDeviceSize size,
    VkMemoryMapFlags flags,
    void** ppData);

void vkUnmapMemory(
    VkDevice device,
    VkDeviceMemory memory);

void vkFreeMemory(
    VkDevice device,
    VkDeviceMemory memory,
    const VkAllocationCallbacks* pAllocator);

#include "../../margaret/vulkan_memory_claire.h"
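These stub declarations follow the real Vulkan signatures; a hedged sketch of how a host-visible block's mapped_memory could be obtained through them (the variables, the allocation size, and the VK_SUCCESS check are assumptions, not taken from this commit):

    void* mapped = NULL;
    VkResult r = vkMapMemory(device, mem_hand, 0, alloc_size, 0, &mapped);
    if (r == VK_SUCCESS)                 /* assuming VK_SUCCESS is declared in the stub header */
        block.mapped_memory = mapped;    /* released later with vkUnmapMemory(device, mem_hand) */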