#ifndef VULKAN_BUFFER_H // renamed: identifiers starting with underscore + capital are reserved
#define VULKAN_BUFFER_H

#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <vector>

/*
 * CPU-side staging container intended to be used with Vulkan Storage Buffers
 * (SSBOs) and Uniform Buffers (UBOs).
 *
 * The buffer either owns a malloc'd staging area (srcData) of
 * capacity * alignment bytes, or borrows its contents from an external
 * std::vector (vData). rawData holds the pointer to the mapped video memory
 * and is never owned by this class.
 *
 * NOTE(review): elements live in raw malloc'd memory and are copied with
 * memcpy, so T is assumed to be trivially copyable (true for typical
 * GPU-bound data such as matrices) — confirm for new element types.
 */
template <typename T>
class VulkanBuffer
{
public:
    size_t alignment;   // per-element stride in bytes, rounded up to the device's min offset alignment
    size_t capacity;    // maximum number of elements the staging area can hold
    size_t numObjects;  // number of elements currently in use

    VulkanBuffer();
    VulkanBuffer(size_t capacity, size_t minOffsetAlignment);
    VulkanBuffer(std::vector<T>* vData, size_t capacity);
    ~VulkanBuffer();

    // Deep-copying copy operations (Rule of Three): without a user-defined
    // copy constructor the compiler-generated one would shallow-copy srcData
    // and cause a double free in the destructor.
    VulkanBuffer(const VulkanBuffer& other);
    VulkanBuffer& operator=(const VulkanBuffer& other);

    T* data();
    void* mappedData(); // TODO: Maybe rename this to just mapped()

    // TODO: Add a resize function

private:
    T* srcData;             // owned, malloc'd CPU staging copy (nullptr when vector-backed)
                            // TODO: Rename this to something else probably and rename rawData to data
    std::vector<T>* vData;  // non-owning alternative data source
    void* rawData;          // pointer to the mapped video memory (not owned);
                            // maybe rename to mappedData to make that clearer
};

// Currently, for SSBOs, the per-object values (usually just the model matrix) are stored on
// each object, so they are not in their own array and therefore cannot be pushed to the GPU
// as one block. The updates must happen separately per object.
// Since Sascha Willems' dynamic UBO example works the same way (iirc), dynamic UBOs can be
// implemented like that as well for now. Would be nice to plan for potentially storing the
// UBO data on the CPU in a contiguous block in the future, assuming that would make updates
// easier. Keep in mind that this only makes sense if all or most of the objects in the UBO
// get updated every frame.
// TODO: Also, check when it makes sense to have a staging buffer for copying data to the GPU
// and see if it is actually needed everywhere it is currently used. This is mentioned in
// Sascha Willems' dynamic UBO example or some other recently bookmarked Vulkan resource.

// Default constructor: empty buffer, no allocation.
template <typename T>
VulkanBuffer<T>::VulkanBuffer()
    : alignment(0)
    , capacity(0)
    , numObjects(0)
    , srcData(nullptr)
    , vData(nullptr)
    , rawData(nullptr)
{
}

// Allocates a CPU staging area for `capacity` elements, each padded out to
// `minOffsetAlignment` (needed for dynamic UBO/SSBO offsets).
// minOffsetAlignment must be 0 (no padding) or a power of two — which is what
// Vulkan guarantees for minUniformBufferOffsetAlignment; the round-up bit
// trick below is only valid for powers of two.
template <typename T>
VulkanBuffer<T>::VulkanBuffer(size_t capacity, size_t minOffsetAlignment)
    : alignment(sizeof(T))
    , capacity(capacity)
    , numObjects(0)
    , srcData(nullptr)
    , vData(nullptr)
    , rawData(nullptr)
{
    if (minOffsetAlignment > 0) {
        // Round sizeof(T) up to the next multiple of minOffsetAlignment.
        alignment = (alignment + minOffsetAlignment - 1) & ~(minOffsetAlignment - 1);
    }
    srcData = (T*)malloc(capacity * alignment); // TODO: Check for failure
}

// Wraps an externally owned vector; the buffer does not take ownership.
template <typename T>
VulkanBuffer<T>::VulkanBuffer(std::vector<T>* vData, size_t capacity)
    : alignment(sizeof(T))
    , capacity(capacity)
    , numObjects(0)
    , srcData(nullptr)
    , vData(vData)
    , rawData(nullptr)
{
    // TODO: Allocate initial capacity in vector
}

template <typename T>
VulkanBuffer<T>::~VulkanBuffer()
{
    free(srcData); // free(nullptr) is a guaranteed no-op
}

// Deep copy (see Rule of Three note in the class body).
template <typename T>
VulkanBuffer<T>::VulkanBuffer(const VulkanBuffer& other)
    : alignment(other.alignment)
    , capacity(other.capacity)
    , numObjects(other.numObjects)
    , srcData(nullptr)
    , vData(other.vData)
    , rawData(other.rawData)
{
    if (other.srcData != nullptr) {
        srcData = (T*)malloc(capacity * alignment); // TODO: Check for failure
        memcpy(srcData, other.srcData, capacity * alignment);
    }
}

// Copy assignment. Fixes two defects in the previous version:
//   * numObjects, vData and rawData were not copied, leaving *this stale;
//   * memcpy was called even when other.srcData was nullptr (UB when the
//     source is vector-backed or default-constructed).
template <typename T>
VulkanBuffer<T>& VulkanBuffer<T>::operator=(const VulkanBuffer& other)
{
    if (this == &other) {
        return *this;
    }

    free(srcData);
    srcData = nullptr;

    alignment  = other.alignment;
    capacity   = other.capacity;
    numObjects = other.numObjects;
    vData      = other.vData;   // non-owning; both buffers now view the same vector
    rawData    = other.rawData; // non-owning mapped pointer

    if (other.srcData != nullptr) {
        srcData = (T*)malloc(capacity * alignment); // TODO: Check for failure
        memcpy(srcData, other.srcData, capacity * alignment);
    }
    return *this;
}

// Returns the CPU-visible element array: the owned staging area when present,
// otherwise the wrapped vector's storage. Returns nullptr for a
// default-constructed buffer (previously this dereferenced a null vData).
template <typename T>
T* VulkanBuffer<T>::data()
{
    if (srcData != nullptr) {
        return srcData;
    }
    if (vData != nullptr) {
        return vData->data();
    }
    return nullptr;
}

// Pointer to the mapped video memory; nullptr until mapped elsewhere.
template <typename T>
void* VulkanBuffer<T>::mappedData()
{
    return rawData;
}

#endif // VULKAN_BUFFER_H