#ifndef _VULKAN_BUFFER_H
#define _VULKAN_BUFFER_H

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <vector>

#include <vulkan/vulkan.h>

/*
 * This class is intended to be used with Storage Buffers and Uniform Buffers.
 */
template <typename T>
class VulkanBuffer {
public:
    // TODO: Make these private (maybe make a getter for numObjects).
    // Externally, they are only used in resizeBufferSet.
    size_t capacity;

    // Temp field to help with UBO+SSBO resizing until those buffers are added to this class.
    // See if I need a separate field for this or if I can use other fields to check for it.
    // Maybe compare the uniform or storage buffer size to the size of the memory allocated here.
    bool resized;

    VulkanBuffer();
    VulkanBuffer(size_t capacity, size_t range, size_t minOffsetAlignment);
    VulkanBuffer(const VulkanBuffer&) = delete;
    VulkanBuffer(VulkanBuffer&& other);
    ~VulkanBuffer();

    VulkanBuffer& operator=(const VulkanBuffer&) = delete;
    VulkanBuffer& operator=(VulkanBuffer&& other) noexcept;

    size_t memorySize();
    T* data(); // Not sure I need to expose this
    T& get(uint32_t index);
    void add(T obj);
    void* mapped(size_t idx);
    void map(std::vector<VkDeviceMemory>& deviceMemory, VkDevice device);
    void unmap(std::vector<VkDeviceMemory>& deviceMemory, VkDevice device);
    void resize();

private:
    size_t alignment;   // byte stride of one aligned block of `range` objects
    size_t range;       // number of T objects per aligned block
    //size_t capacity;
    size_t numObjects;
    T* rawData;         // raw malloc'd storage; assumes T is trivially copyable
    std::vector<void*> mappedData;

    size_t memRequirement(size_t capacity);
    size_t memOffset(uint32_t index);
};

// Currently, for SSBOs, I store the per-object values (usually just the model matrix) on each object, so they
// are not in their own array and therefore cannot be pushed to the GPU as one block. The updates must happen
// separately per object.
// Since Sascha Willems' dynamic UBO example works the same way (iirc), I can implement dynamic UBOs like that
// as well for now. It would be nice to plan for potentially storing the UBO data on the CPU in a contiguous
// block in the future, assuming that would make updates easier. Keep in mind that this only makes sense if all
// or most of the objects in the UBO get updated every frame.
// =============================
// TODO: Also, check when it makes sense to have a staging buffer for copying data to the GPU and see if I
// actually need to use it everywhere I currently am. I think this is mentioned in Sascha Willems' dynamic UBO
// example or some other Vulkan website I recently bookmarked.
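
// ---------------------------------------------------------------------------
// Layout sketch (illustrative numbers only, not taken from the code): with a
// hypothetical 64-byte T, range = 192 bytes (3 objects per block), and
// minOffsetAlignment = 256, the constructor rounds `alignment` up to 256, so
// each 3-object block occupies a 256-byte stride:
//   memOffset(0..2)   = 0, 64, 128          // packed inside the first block
//   memOffset(3)      = 256                 // next block starts on the aligned boundary
//   memRequirement(6) = (6 / 3) * 256 = 512 // two full blocks
//   memRequirement(7) = 512 + 1 * 64  = 576 // plus a partial trailing block
// ---------------------------------------------------------------------------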
template <typename T>
VulkanBuffer<T>::VulkanBuffer()
    : capacity(0)
    , resized(false)
    , alignment(0)
    , range(0)
    , numObjects(0)
    , rawData(nullptr)
    , mappedData()
{
}

template <typename T>
VulkanBuffer<T>::VulkanBuffer(size_t capacity, size_t range, size_t minOffsetAlignment)
    : capacity(capacity)
    , resized(false)
    , alignment(range)
    , range(range / sizeof(T))
    , numObjects(0)
    , rawData(nullptr)
    , mappedData()
{
    // Round the per-block byte size up to the next multiple of minOffsetAlignment (needed for dynamic
    // offsets). Assumes minOffsetAlignment is a power of two, which Vulkan guarantees for the
    // min*BufferOffsetAlignment limits.
    if (minOffsetAlignment > 0) {
        alignment = (alignment + minOffsetAlignment - 1) & ~(minOffsetAlignment - 1);
    }

    rawData = (T*)malloc(memRequirement(capacity));
}

template <typename T>
VulkanBuffer<T>::VulkanBuffer(VulkanBuffer&& other)
    : capacity(other.capacity)
    , resized(other.resized)
    , alignment(other.alignment)
    , range(other.range)
    , numObjects(other.numObjects)
    , rawData(other.rawData)
    , mappedData(other.mappedData)
{
    // Leave `other` in a safe, destructible state (mirrors the move assignment below).
    other.capacity = 0;
    other.numObjects = 0;
    other.range = 0;
    other.mappedData.clear();
    other.rawData = nullptr;
}

template <typename T>
VulkanBuffer<T>::~VulkanBuffer()
{
    if (rawData != nullptr) {
        free(rawData);
    }
}

template <typename T>
VulkanBuffer<T>& VulkanBuffer<T>::operator=(VulkanBuffer&& other) noexcept
{
    if (this != &other) {
        capacity = other.capacity;
        numObjects = other.numObjects;
        resized = other.resized;
        alignment = other.alignment;
        range = other.range;
        mappedData = other.mappedData;

        if (rawData != nullptr) {
            free(rawData);
        }
        rawData = other.rawData;

        other.capacity = 0;
        other.numObjects = 0;
        other.range = 0;
        other.mappedData.clear();
        other.rawData = nullptr;
    }

    return *this;
}

template <typename T>
size_t VulkanBuffer<T>::memorySize()
{
    return memRequirement(capacity);
}

template <typename T>
T* VulkanBuffer<T>::data()
{
    return rawData;
}

template <typename T>
T& VulkanBuffer<T>::get(uint32_t index)
{
    // TODO: Check that index < numObjects
    T* obj = (T*)((size_t)rawData + memOffset(index));
    return *obj;
}

template <typename T>
void VulkanBuffer<T>::add(T obj)
{
    // TODO: Maybe copy this into the resize() function and call that function here
    if (numObjects == capacity) {
        // Once the Vulkan buffer objects are added in here, make sure this doesn't overlap with resizeBufferSet
        resized = true;

        size_t oldMemReq = memRequirement(capacity);
        capacity *= 2;
        size_t newMemReq = memRequirement(capacity);

        T* newData = (T*)malloc(newMemReq); // TODO: Check for failure
        memcpy(newData, rawData, oldMemReq);
        free(rawData);
        rawData = newData;
    }

    T* ptr = (T*)((size_t)rawData + memOffset(numObjects));
    *ptr = obj;
    numObjects++;
}

template <typename T>
void* VulkanBuffer<T>::mapped(size_t idx)
{
    return mappedData[idx];
}

template <typename T>
void VulkanBuffer<T>::map(std::vector<VkDeviceMemory>& deviceMemory, VkDevice device)
{
    // TODO: Make sure that mappedData initially has size 0. If not, the memory is already mapped
    // and I should return some kind of error or warning.
    mappedData.resize(deviceMemory.size());
    for (size_t i = 0; i < deviceMemory.size(); i++) {
        vkMapMemory(device, deviceMemory[i], 0, memorySize(), 0, &mappedData[i]);
    }
}

template <typename T>
void VulkanBuffer<T>::unmap(std::vector<VkDeviceMemory>& deviceMemory, VkDevice device)
{
    for (size_t i = 0; i < deviceMemory.size(); i++) {
        vkUnmapMemory(device, deviceMemory[i]);
    }
    mappedData.clear();
}

template <typename T>
void VulkanBuffer<T>::resize()
{
    resized = false;
}

// `capacity` is a number of objects; every full block of `range` objects is padded out to `alignment` bytes,
// and a trailing partial block only needs sizeof(T) per object.
template <typename T>
size_t VulkanBuffer<T>::memRequirement(size_t capacity)
{
    return (capacity / range) * alignment + (capacity % range) * sizeof(T);
}

// Byte offset of the `index`-th object: whole blocks before it take `alignment` bytes apiece,
// plus the object's position inside its own block.
template <typename T>
size_t VulkanBuffer<T>::memOffset(uint32_t index)
{
    return (index / range) * alignment + (index % range) * sizeof(T);
}

#endif // _VULKAN_BUFFER_H
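
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the class): roughly how the
// renderer side might drive this buffer as a per-frame UBO. `UniformData`,
// `frameMemory` (one VkDeviceMemory per frame in flight, each allocation at
// least memorySize() bytes), `minUboAlignment`, `device`, and `frameIndex`
// are hypothetical names for values the caller already owns.
//
//   VulkanBuffer<UniformData> ubo(16, sizeof(UniformData), minUboAlignment);
//   ubo.add(UniformData{});                        // CPU-side copy at an aligned offset
//   ubo.map(frameMemory, device);                  // map each frame's VkDeviceMemory
//   memcpy(ubo.mapped(frameIndex), ubo.data(), ubo.memorySize());
//   ubo.unmap(frameMemory, device);
// ---------------------------------------------------------------------------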