Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1074 #include <vulkan/vulkan.h>
1075 
1085 VK_DEFINE_HANDLE(VmaAllocator)
1086 
1087 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1089  VmaAllocator allocator,
1090  uint32_t memoryType,
1091  VkDeviceMemory memory,
1092  VkDeviceSize size);
1094 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1095  VmaAllocator allocator,
1096  uint32_t memoryType,
1097  VkDeviceMemory memory,
1098  VkDeviceSize size);
1099 
1113 
1143 
1146 typedef VkFlags VmaAllocatorCreateFlags;
1147 
1152 typedef struct VmaVulkanFunctions {
1153  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1154  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1155  PFN_vkAllocateMemory vkAllocateMemory;
1156  PFN_vkFreeMemory vkFreeMemory;
1157  PFN_vkMapMemory vkMapMemory;
1158  PFN_vkUnmapMemory vkUnmapMemory;
1159  PFN_vkBindBufferMemory vkBindBufferMemory;
1160  PFN_vkBindImageMemory vkBindImageMemory;
1161  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1162  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1163  PFN_vkCreateBuffer vkCreateBuffer;
1164  PFN_vkDestroyBuffer vkDestroyBuffer;
1165  PFN_vkCreateImage vkCreateImage;
1166  PFN_vkDestroyImage vkDestroyImage;
1167  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1168  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1170 
1173 {
1175  VmaAllocatorCreateFlags flags;
1177 
1178  VkPhysicalDevice physicalDevice;
1180 
1181  VkDevice device;
1183 
1186 
1187  const VkAllocationCallbacks* pAllocationCallbacks;
1189 
1228  const VkDeviceSize* pHeapSizeLimit;
1242 
1244 VkResult vmaCreateAllocator(
1245  const VmaAllocatorCreateInfo* pCreateInfo,
1246  VmaAllocator* pAllocator);
1247 
1249 void vmaDestroyAllocator(
1250  VmaAllocator allocator);
1251 
1257  VmaAllocator allocator,
1258  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1259 
1265  VmaAllocator allocator,
1266  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1267 
1275  VmaAllocator allocator,
1276  uint32_t memoryTypeIndex,
1277  VkMemoryPropertyFlags* pFlags);
1278 
1288  VmaAllocator allocator,
1289  uint32_t frameIndex);
1290 
1293 typedef struct VmaStatInfo
1294 {
1296  uint32_t blockCount;
1302  VkDeviceSize usedBytes;
1304  VkDeviceSize unusedBytes;
1305  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
1306  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
1307 } VmaStatInfo;
1308 
1310 typedef struct VmaStats
1311 {
1312  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1313  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1315 } VmaStats;
1316 
1318 void vmaCalculateStats(
1319  VmaAllocator allocator,
1320  VmaStats* pStats);
1321 
1322 #define VMA_STATS_STRING_ENABLED 1
1323 
1324 #if VMA_STATS_STRING_ENABLED
1325 
1327 
1329 void vmaBuildStatsString(
1330  VmaAllocator allocator,
1331  char** ppStatsString,
1332  VkBool32 detailedMap);
1333 
1334 void vmaFreeStatsString(
1335  VmaAllocator allocator,
1336  char* pStatsString);
1337 
1338 #endif // #if VMA_STATS_STRING_ENABLED
1339 
1348 VK_DEFINE_HANDLE(VmaPool)
1349 
1350 typedef enum VmaMemoryUsage
1351 {
1400 } VmaMemoryUsage;
1401 
1416 
1466 
1470 
1472 {
1474  VmaAllocationCreateFlags flags;
1485  VkMemoryPropertyFlags requiredFlags;
1490  VkMemoryPropertyFlags preferredFlags;
1498  uint32_t memoryTypeBits;
1511  void* pUserData;
1513 
1530 VkResult vmaFindMemoryTypeIndex(
1531  VmaAllocator allocator,
1532  uint32_t memoryTypeBits,
1533  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1534  uint32_t* pMemoryTypeIndex);
1535 
1549  VmaAllocator allocator,
1550  const VkBufferCreateInfo* pBufferCreateInfo,
1551  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1552  uint32_t* pMemoryTypeIndex);
1553 
1567  VmaAllocator allocator,
1568  const VkImageCreateInfo* pImageCreateInfo,
1569  const VmaAllocationCreateInfo* pAllocationCreateInfo,
1570  uint32_t* pMemoryTypeIndex);
1571 
1592 
1595 typedef VkFlags VmaPoolCreateFlags;
1596 
1599 typedef struct VmaPoolCreateInfo {
1605  VmaPoolCreateFlags flags;
1610  VkDeviceSize blockSize;
1639 
1642 typedef struct VmaPoolStats {
1645  VkDeviceSize size;
1648  VkDeviceSize unusedSize;
1661  VkDeviceSize unusedRangeSizeMax;
1662 } VmaPoolStats;
1663 
1670 VkResult vmaCreatePool(
1671  VmaAllocator allocator,
1672  const VmaPoolCreateInfo* pCreateInfo,
1673  VmaPool* pPool);
1674 
1677 void vmaDestroyPool(
1678  VmaAllocator allocator,
1679  VmaPool pool);
1680 
1687 void vmaGetPoolStats(
1688  VmaAllocator allocator,
1689  VmaPool pool,
1690  VmaPoolStats* pPoolStats);
1691 
1699  VmaAllocator allocator,
1700  VmaPool pool,
1701  size_t* pLostAllocationCount);
1702 
1727 VK_DEFINE_HANDLE(VmaAllocation)
1728 
1729 
1731 typedef struct VmaAllocationInfo {
1736  uint32_t memoryType;
1745  VkDeviceMemory deviceMemory;
1750  VkDeviceSize offset;
1755  VkDeviceSize size;
1769  void* pUserData;
1771 
1782 VkResult vmaAllocateMemory(
1783  VmaAllocator allocator,
1784  const VkMemoryRequirements* pVkMemoryRequirements,
1785  const VmaAllocationCreateInfo* pCreateInfo,
1786  VmaAllocation* pAllocation,
1787  VmaAllocationInfo* pAllocationInfo);
1788 
1796  VmaAllocator allocator,
1797  VkBuffer buffer,
1798  const VmaAllocationCreateInfo* pCreateInfo,
1799  VmaAllocation* pAllocation,
1800  VmaAllocationInfo* pAllocationInfo);
1801 
1803 VkResult vmaAllocateMemoryForImage(
1804  VmaAllocator allocator,
1805  VkImage image,
1806  const VmaAllocationCreateInfo* pCreateInfo,
1807  VmaAllocation* pAllocation,
1808  VmaAllocationInfo* pAllocationInfo);
1809 
1811 void vmaFreeMemory(
1812  VmaAllocator allocator,
1813  VmaAllocation allocation);
1814 
1832  VmaAllocator allocator,
1833  VmaAllocation allocation,
1834  VmaAllocationInfo* pAllocationInfo);
1835 
1850 VkBool32 vmaTouchAllocation(
1851  VmaAllocator allocator,
1852  VmaAllocation allocation);
1853 
1868  VmaAllocator allocator,
1869  VmaAllocation allocation,
1870  void* pUserData);
1871 
1883  VmaAllocator allocator,
1884  VmaAllocation* pAllocation);
1885 
1920 VkResult vmaMapMemory(
1921  VmaAllocator allocator,
1922  VmaAllocation allocation,
1923  void** ppData);
1924 
1929 void vmaUnmapMemory(
1930  VmaAllocator allocator,
1931  VmaAllocation allocation);
1932 
1934 typedef struct VmaDefragmentationInfo {
1939  VkDeviceSize maxBytesToMove;
1946 
1948 typedef struct VmaDefragmentationStats {
1950  VkDeviceSize bytesMoved;
1952  VkDeviceSize bytesFreed;
1958 
2041 VkResult vmaDefragment(
2042  VmaAllocator allocator,
2043  VmaAllocation* pAllocations,
2044  size_t allocationCount,
2045  VkBool32* pAllocationsChanged,
2046  const VmaDefragmentationInfo *pDefragmentationInfo,
2047  VmaDefragmentationStats* pDefragmentationStats);
2048 
2061 VkResult vmaBindBufferMemory(
2062  VmaAllocator allocator,
2063  VmaAllocation allocation,
2064  VkBuffer buffer);
2065 
2078 VkResult vmaBindImageMemory(
2079  VmaAllocator allocator,
2080  VmaAllocation allocation,
2081  VkImage image);
2082 
2109 VkResult vmaCreateBuffer(
2110  VmaAllocator allocator,
2111  const VkBufferCreateInfo* pBufferCreateInfo,
2112  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2113  VkBuffer* pBuffer,
2114  VmaAllocation* pAllocation,
2115  VmaAllocationInfo* pAllocationInfo);
2116 
2128 void vmaDestroyBuffer(
2129  VmaAllocator allocator,
2130  VkBuffer buffer,
2131  VmaAllocation allocation);
2132 
2134 VkResult vmaCreateImage(
2135  VmaAllocator allocator,
2136  const VkImageCreateInfo* pImageCreateInfo,
2137  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2138  VkImage* pImage,
2139  VmaAllocation* pAllocation,
2140  VmaAllocationInfo* pAllocationInfo);
2141 
2153 void vmaDestroyImage(
2154  VmaAllocator allocator,
2155  VkImage image,
2156  VmaAllocation allocation);
2157 
2158 #ifdef __cplusplus
2159 }
2160 #endif
2161 
2162 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
2163 
2164 // For Visual Studio IntelliSense.
2165 #ifdef __INTELLISENSE__
2166 #define VMA_IMPLEMENTATION
2167 #endif
2168 
2169 #ifdef VMA_IMPLEMENTATION
2170 #undef VMA_IMPLEMENTATION
2171 
2172 #include <cstdint>
2173 #include <cstdlib>
2174 #include <cstring>
2175 
2176 /*******************************************************************************
2177 CONFIGURATION SECTION
2178 
2179 Define some of these macros before each #include of this header or change them
2180 here if you need other then default behavior depending on your environment.
2181 */
2182 
2183 /*
2184 Define this macro to 1 to make the library fetch pointers to Vulkan functions
2185 internally, like:
2186 
2187  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
2188 
2189 Define to 0 if you are going to provide you own pointers to Vulkan functions via
2190 VmaAllocatorCreateInfo::pVulkanFunctions.
2191 */
2192 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
2193 #define VMA_STATIC_VULKAN_FUNCTIONS 1
2194 #endif
2195 
2196 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
2197 //#define VMA_USE_STL_CONTAINERS 1
2198 
2199 /* Set this macro to 1 to make the library including and using STL containers:
2200 std::pair, std::vector, std::list, std::unordered_map.
2201 
2202 Set it to 0 or undefined to make the library using its own implementation of
2203 the containers.
2204 */
2205 #if VMA_USE_STL_CONTAINERS
2206  #define VMA_USE_STL_VECTOR 1
2207  #define VMA_USE_STL_UNORDERED_MAP 1
2208  #define VMA_USE_STL_LIST 1
2209 #endif
2210 
2211 #if VMA_USE_STL_VECTOR
2212  #include <vector>
2213 #endif
2214 
2215 #if VMA_USE_STL_UNORDERED_MAP
2216  #include <unordered_map>
2217 #endif
2218 
2219 #if VMA_USE_STL_LIST
2220  #include <list>
2221 #endif
2222 
2223 /*
2224 Following headers are used in this CONFIGURATION section only, so feel free to
2225 remove them if not needed.
2226 */
2227 #include <cassert> // for assert
2228 #include <algorithm> // for min, max
2229 #include <mutex> // for std::mutex
2230 #include <atomic> // for std::atomic
2231 
2232 #if !defined(_WIN32) && !defined(__APPLE__)
2233  #include <malloc.h> // for aligned_alloc()
2234 #endif
2235 
2236 #ifndef VMA_NULL
2237  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
2238  #define VMA_NULL nullptr
2239 #endif
2240 
2241 #if defined(__APPLE__) || defined(__ANDROID__)
2242 #include <cstdlib>
2243 void *aligned_alloc(size_t alignment, size_t size)
2244 {
2245  // alignment must be >= sizeof(void*)
2246  if(alignment < sizeof(void*))
2247  {
2248  alignment = sizeof(void*);
2249  }
2250 
2251  void *pointer;
2252  if(posix_memalign(&pointer, alignment, size) == 0)
2253  return pointer;
2254  return VMA_NULL;
2255 }
2256 #endif
2257 
2258 // Normal assert to check for programmer's errors, especially in Debug configuration.
2259 #ifndef VMA_ASSERT
2260  #ifdef _DEBUG
2261  #define VMA_ASSERT(expr) assert(expr)
2262  #else
2263  #define VMA_ASSERT(expr)
2264  #endif
2265 #endif
2266 
2267 // Assert that will be called very often, like inside data structures e.g. operator[].
2268 // Making it non-empty can make program slow.
2269 #ifndef VMA_HEAVY_ASSERT
2270  #ifdef _DEBUG
2271  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
2272  #else
2273  #define VMA_HEAVY_ASSERT(expr)
2274  #endif
2275 #endif
2276 
2277 #ifndef VMA_ALIGN_OF
2278  #define VMA_ALIGN_OF(type) (__alignof(type))
2279 #endif
2280 
2281 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
2282  #if defined(_WIN32)
2283  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
2284  #else
2285  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
2286  #endif
2287 #endif
2288 
2289 #ifndef VMA_SYSTEM_FREE
2290  #if defined(_WIN32)
2291  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
2292  #else
2293  #define VMA_SYSTEM_FREE(ptr) free(ptr)
2294  #endif
2295 #endif
2296 
2297 #ifndef VMA_MIN
2298  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
2299 #endif
2300 
2301 #ifndef VMA_MAX
2302  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
2303 #endif
2304 
2305 #ifndef VMA_SWAP
2306  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
2307 #endif
2308 
2309 #ifndef VMA_SORT
2310  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
2311 #endif
2312 
2313 #ifndef VMA_DEBUG_LOG
2314  #define VMA_DEBUG_LOG(format, ...)
2315  /*
2316  #define VMA_DEBUG_LOG(format, ...) do { \
2317  printf(format, __VA_ARGS__); \
2318  printf("\n"); \
2319  } while(false)
2320  */
2321 #endif
2322 
2323 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
2324 #if VMA_STATS_STRING_ENABLED
2325  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
2326  {
2327  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
2328  }
2329  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
2330  {
2331  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
2332  }
2333  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
2334  {
2335  snprintf(outStr, strLen, "%p", ptr);
2336  }
2337 #endif
2338 
2339 #ifndef VMA_MUTEX
2340  class VmaMutex
2341  {
2342  public:
2343  VmaMutex() { }
2344  ~VmaMutex() { }
2345  void Lock() { m_Mutex.lock(); }
2346  void Unlock() { m_Mutex.unlock(); }
2347  private:
2348  std::mutex m_Mutex;
2349  };
2350  #define VMA_MUTEX VmaMutex
2351 #endif
2352 
2353 /*
2354 If providing your own implementation, you need to implement a subset of std::atomic:
2355 
2356 - Constructor(uint32_t desired)
2357 - uint32_t load() const
2358 - void store(uint32_t desired)
2359 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
2360 */
2361 #ifndef VMA_ATOMIC_UINT32
2362  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
2363 #endif
2364 
2365 #ifndef VMA_BEST_FIT
2366 
2378  #define VMA_BEST_FIT (1)
2379 #endif
2380 
2381 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
2382 
2386  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
2387 #endif
2388 
2389 #ifndef VMA_DEBUG_ALIGNMENT
2390 
2394  #define VMA_DEBUG_ALIGNMENT (1)
2395 #endif
2396 
2397 #ifndef VMA_DEBUG_MARGIN
2398 
2402  #define VMA_DEBUG_MARGIN (0)
2403 #endif
2404 
2405 #ifndef VMA_DEBUG_GLOBAL_MUTEX
2406 
2410  #define VMA_DEBUG_GLOBAL_MUTEX (0)
2411 #endif
2412 
2413 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
2414 
2418  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
2419 #endif
2420 
2421 #ifndef VMA_SMALL_HEAP_MAX_SIZE
2422  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
2424 #endif
2425 
2426 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
2427  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
2429 #endif
2430 
2431 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
2432 
2433 /*******************************************************************************
2434 END OF CONFIGURATION
2435 */
2436 
// VkAllocationCallbacks instance with every member (pUserData and all five
// callback pointers) set to null.
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
2439 
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Kernighan's method: each iteration clears the lowest set bit,
    // so the loop runs once per set bit.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
2450 
// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T. Works for any positive align, not
// just powers of two (integer division, not bit masking).
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    const T units = (val + align - 1) / align; // number of align-sized units needed
    return units * align;
}
2458 
// Division with mathematical rounding to nearest number.
// Intended for unsigned integer types; halves round up (e.g. 14/4 -> 4).
template <typename T>
inline T VmaRoundDiv(T x, T y)
{
    const T half = y / (T)2;
    return (x + half) / y;
}
2465 
2466 #ifndef VMA_SORT
2467 
// Partition step of VmaQuickSort. Uses the last element of [beg, end) as the
// pivot; moves every element for which cmp(elem, pivot) holds in front of the
// pivot, places the pivot at its final sorted position and returns an iterator
// to it. Precondition: the range is non-empty.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    // Pivot = last element of the range.
    Iterator centerValue = end; --centerValue;
    // insertIndex marks the boundary of the "less than pivot" prefix built so far.
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into place between the two partitions.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
2490 
2491 template<typename Iterator, typename Compare>
2492 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
2493 {
2494  if(beg < end)
2495  {
2496  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
2497  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
2498  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
2499  }
2500 }
2501 
2502 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
2503 
2504 #endif // #ifndef VMA_SORT
2505 
/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be in less memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    // NOTE: the mask arithmetic below assumes pageSize is a power of two
    // (true for bufferImageGranularity per the Vulkan spec).
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;    // last byte of A
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);     // page containing A's last byte
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); // page containing B's first byte
    return resourceAEndPage == resourceBStartPage;
}
2526 
// Type of content stored in a suballocation. Used to decide whether two
// neighboring suballocations must be separated by bufferImageGranularity
// (see VmaIsBufferImageGranularityConflict).
enum VmaSuballocationType
{
    // Unused range.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // Content unknown - treated conservatively as conflicting with everything.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Image whose tiling (linear vs optimal) is not known.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    // Forces a 32-bit underlying type, matching Vulkan enum convention.
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
2537 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // The conflict relation is symmetric; normalize so that
    // suballocType1 <= suballocType2 and handle only one ordering below.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown content: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        // Buffer conflicts with anything that may be an optimal image.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown tiling: may be linear or optimal, so conflicts with any image.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        // Linear image conflicts only with optimal image.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Optimal vs optimal is fine; other pairs were handled above.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
2578 
2579 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
2580 struct VmaMutexLock
2581 {
2582 public:
2583  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
2584  m_pMutex(useMutex ? &mutex : VMA_NULL)
2585  {
2586  if(m_pMutex)
2587  {
2588  m_pMutex->Lock();
2589  }
2590  }
2591 
2592  ~VmaMutexLock()
2593  {
2594  if(m_pMutex)
2595  {
2596  m_pMutex->Unlock();
2597  }
2598  }
2599 
2600 private:
2601  VMA_MUTEX* m_pMutex;
2602 };
2603 
2604 #if VMA_DEBUG_GLOBAL_MUTEX
2605  static VMA_MUTEX gDebugGlobalMutex;
2606  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
2607 #else
2608  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
2609 #endif
2610 
// Minimum size of a free suballocation to register it in the free suballocation collection.
// NOTE(review): free ranges smaller than this are presumably not worth indexing
// for best-fit search; the consuming code is outside this chunk - confirm at call sites.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
2613 
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp) - i.e. std::lower_bound semantics.

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename IterT, typename KeyT, typename CmpT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpT cmp)
{
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    while(lo < hi)
    {
        // Midpoint; identical value to (lo + hi) / 2.
        const size_t mid = lo + (hi - lo) / 2;
        if(cmp(*(beg + mid), key))
        {
            lo = mid + 1; // answer lies strictly to the right of mid
        }
        else
        {
            hi = mid; // mid itself may be the answer
        }
    }
    return beg + lo;
}
2641 
2643 // Memory allocation
2644 
2645 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
2646 {
2647  if((pAllocationCallbacks != VMA_NULL) &&
2648  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
2649  {
2650  return (*pAllocationCallbacks->pfnAllocation)(
2651  pAllocationCallbacks->pUserData,
2652  size,
2653  alignment,
2654  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2655  }
2656  else
2657  {
2658  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
2659  }
2660 }
2661 
2662 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
2663 {
2664  if((pAllocationCallbacks != VMA_NULL) &&
2665  (pAllocationCallbacks->pfnFree != VMA_NULL))
2666  {
2667  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
2668  }
2669  else
2670  {
2671  VMA_SYSTEM_FREE(ptr);
2672  }
2673 }
2674 
// Allocates raw, suitably aligned storage for a single object of type T.
// No constructor is run - use the vma_new macro to also construct the object.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
2680 
2681 template<typename T>
2682 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
2683 {
2684  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
2685 }
2686 
// Allocate + placement-construct a single object of given type through the callbacks.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Allocate storage for (count) objects of given type. NOTE(review): the
// placement-new expression constructs via (type) on the returned buffer -
// presumably relies on T being POD, as the rest of these helpers do; confirm
// against call sites.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
2690 
2691 template<typename T>
2692 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
2693 {
2694  ptr->~T();
2695  VmaFree(pAllocationCallbacks, ptr);
2696 }
2697 
2698 template<typename T>
2699 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
2700 {
2701  if(ptr != VMA_NULL)
2702  {
2703  for(size_t i = count; i--; )
2704  {
2705  ptr[i].~T();
2706  }
2707  VmaFree(pAllocationCallbacks, ptr);
2708  }
2709 }
2710 
// STL-compatible allocator. Forwards all allocations to VmaAllocateArray /
// VmaFree with the stored VkAllocationCallbacks pointer, so containers using
// it honor user-provided CPU allocation callbacks.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the C++ Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they use the same callbacks, i.e. memory
    // allocated through one can be deallocated through the other.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
2738 
2739 #if VMA_USE_STL_VECTOR
2740 
2741 #define VmaVector std::vector
2742 
// Inserts item into a std::vector at the given index (VMA_USE_STL_VECTOR branch).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
2748 
// Removes the element at the given index from a std::vector (VMA_USE_STL_VECTOR branch).
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
2754 
2755 #else // #if VMA_USE_STL_VECTOR
2756 
2757 /* Class with interface compatible with subset of std::vector.
2758 T must be POD because constructors and destructors are not called and memcpy is
2759 used for these objects. */
2760 template<typename T, typename AllocatorT>
2761 class VmaVector
2762 {
2763 public:
2764  typedef T value_type;
2765 
2766  VmaVector(const AllocatorT& allocator) :
2767  m_Allocator(allocator),
2768  m_pArray(VMA_NULL),
2769  m_Count(0),
2770  m_Capacity(0)
2771  {
2772  }
2773 
2774  VmaVector(size_t count, const AllocatorT& allocator) :
2775  m_Allocator(allocator),
2776  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
2777  m_Count(count),
2778  m_Capacity(count)
2779  {
2780  }
2781 
2782  VmaVector(const VmaVector<T, AllocatorT>& src) :
2783  m_Allocator(src.m_Allocator),
2784  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
2785  m_Count(src.m_Count),
2786  m_Capacity(src.m_Count)
2787  {
2788  if(m_Count != 0)
2789  {
2790  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
2791  }
2792  }
2793 
2794  ~VmaVector()
2795  {
2796  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2797  }
2798 
2799  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
2800  {
2801  if(&rhs != this)
2802  {
2803  resize(rhs.m_Count);
2804  if(m_Count != 0)
2805  {
2806  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
2807  }
2808  }
2809  return *this;
2810  }
2811 
2812  bool empty() const { return m_Count == 0; }
2813  size_t size() const { return m_Count; }
2814  T* data() { return m_pArray; }
2815  const T* data() const { return m_pArray; }
2816 
2817  T& operator[](size_t index)
2818  {
2819  VMA_HEAVY_ASSERT(index < m_Count);
2820  return m_pArray[index];
2821  }
2822  const T& operator[](size_t index) const
2823  {
2824  VMA_HEAVY_ASSERT(index < m_Count);
2825  return m_pArray[index];
2826  }
2827 
2828  T& front()
2829  {
2830  VMA_HEAVY_ASSERT(m_Count > 0);
2831  return m_pArray[0];
2832  }
2833  const T& front() const
2834  {
2835  VMA_HEAVY_ASSERT(m_Count > 0);
2836  return m_pArray[0];
2837  }
2838  T& back()
2839  {
2840  VMA_HEAVY_ASSERT(m_Count > 0);
2841  return m_pArray[m_Count - 1];
2842  }
2843  const T& back() const
2844  {
2845  VMA_HEAVY_ASSERT(m_Count > 0);
2846  return m_pArray[m_Count - 1];
2847  }
2848 
2849  void reserve(size_t newCapacity, bool freeMemory = false)
2850  {
2851  newCapacity = VMA_MAX(newCapacity, m_Count);
2852 
2853  if((newCapacity < m_Capacity) && !freeMemory)
2854  {
2855  newCapacity = m_Capacity;
2856  }
2857 
2858  if(newCapacity != m_Capacity)
2859  {
2860  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
2861  if(m_Count != 0)
2862  {
2863  memcpy(newArray, m_pArray, m_Count * sizeof(T));
2864  }
2865  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2866  m_Capacity = newCapacity;
2867  m_pArray = newArray;
2868  }
2869  }
2870 
2871  void resize(size_t newCount, bool freeMemory = false)
2872  {
2873  size_t newCapacity = m_Capacity;
2874  if(newCount > m_Capacity)
2875  {
2876  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
2877  }
2878  else if(freeMemory)
2879  {
2880  newCapacity = newCount;
2881  }
2882 
2883  if(newCapacity != m_Capacity)
2884  {
2885  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
2886  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
2887  if(elementsToCopy != 0)
2888  {
2889  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
2890  }
2891  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
2892  m_Capacity = newCapacity;
2893  m_pArray = newArray;
2894  }
2895 
2896  m_Count = newCount;
2897  }
2898 
2899  void clear(bool freeMemory = false)
2900  {
2901  resize(0, freeMemory);
2902  }
2903 
2904  void insert(size_t index, const T& src)
2905  {
2906  VMA_HEAVY_ASSERT(index <= m_Count);
2907  const size_t oldCount = size();
2908  resize(oldCount + 1);
2909  if(index < oldCount)
2910  {
2911  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
2912  }
2913  m_pArray[index] = src;
2914  }
2915 
2916  void remove(size_t index)
2917  {
2918  VMA_HEAVY_ASSERT(index < m_Count);
2919  const size_t oldCount = size();
2920  if(index < oldCount - 1)
2921  {
2922  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
2923  }
2924  resize(oldCount - 1);
2925  }
2926 
2927  void push_back(const T& src)
2928  {
2929  const size_t newIndex = size();
2930  resize(newIndex + 1);
2931  m_pArray[newIndex] = src;
2932  }
2933 
2934  void pop_back()
2935  {
2936  VMA_HEAVY_ASSERT(m_Count > 0);
2937  resize(size() - 1);
2938  }
2939 
2940  void push_front(const T& src)
2941  {
2942  insert(0, src);
2943  }
2944 
2945  void pop_front()
2946  {
2947  VMA_HEAVY_ASSERT(m_Count > 0);
2948  remove(0);
2949  }
2950 
2951  typedef T* iterator;
2952 
2953  iterator begin() { return m_pArray; }
2954  iterator end() { return m_pArray + m_Count; }
2955 
2956 private:
2957  AllocatorT m_Allocator;
2958  T* m_pArray;
2959  size_t m_Count;
2960  size_t m_Capacity;
2961 };
2962 
// Inserts item into a VmaVector at the given index (counterpart of the
// std::vector overload, used when STL containers are disabled).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
2968 
// Removes the element at the given index from a VmaVector.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
2974 
2975 #endif // #if VMA_USE_STL_VECTOR
2976 
// Inserts value into a vector kept sorted by CmpLess, preserving the order.
// Returns the index at which the value was inserted.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    typename VectorT::value_type* const pBegin = vector.data();
    typename VectorT::value_type* const pEnd = pBegin + vector.size();
    // Lower-bound position: first element not less than value.
    const size_t insertIndex = VmaBinaryFindFirstNotLess(pBegin, pEnd, value, CmpLess()) - pBegin;
    VmaVectorInsert(vector, insertIndex, value);
    return insertIndex;
}
2988 
// Removes the first element equivalent to value (per CmpLess) from a sorted
// vector. Returns true if an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    const typename VectorT::iterator endIt = vector.end();
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        endIt,
        value,
        comparator);
    if(it == endIt)
    {
        return false;
    }
    // Equivalence under a strict weak ordering: neither compares less.
    if(comparator(*it, value) || comparator(value, *it))
    {
        return false;
    }
    const size_t indexToRemove = it - vector.begin();
    VmaVectorRemove(vector, indexToRemove);
    return true;
}
3006 
// Finds the first element equivalent to value (per CmpLess) in a sorted
// vector. Returns its index, or vector.size() if not found.
template<typename CmpLess, typename VectorT>
size_t VmaVectorFindSorted(const VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        comparator);
    // Bug fix: the original compared the iterator against vector.size()
    // (pointer vs size_t - ill-formed once this template is instantiated)
    // and computed the index via non-const begin() on a const vector.
    // Both ends of the search range must come from data().
    if((it != vector.data() + vector.size()) &&
        !comparator(*it, value) &&
        !comparator(value, *it))
    {
        return it - vector.data();
    }
    else
    {
        return vector.size();
    }
}
3025 
3027 // class VmaPoolAllocator
3028 
3029 /*
3030 Allocator for objects of type T using a list of arrays (pools) to speed up
3031 allocation. Number of elements that can be allocated is not bounded because
3032 allocator can create multiple blocks.
3033 */
template<typename T>
class VmaPoolAllocator
{
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    // A slot is either a live T or a link in its block's free list.
    union Item
    {
        uint32_t NextFreeIndex; // UINT32_MAX is the end-of-free-list sentinel.
        T Value;
    };

    // One contiguous array of m_ItemsPerBlock items plus its free-list head.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
3063 
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // Blocks are created lazily on first Alloc(). itemsPerBlock must be
    // positive; CreateNewBlock indexes pItems[m_ItemsPerBlock - 1].
    VMA_ASSERT(itemsPerBlock > 0);
}
3072 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Releases all remaining item blocks.
    Clear();
}
3078 
3079 template<typename T>
3080 void VmaPoolAllocator<T>::Clear()
3081 {
3082  for(size_t i = m_ItemBlocks.size(); i--; )
3083  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
3084  m_ItemBlocks.clear();
3085 }
3086 
3087 template<typename T>
3088 T* VmaPoolAllocator<T>::Alloc()
3089 {
3090  for(size_t i = m_ItemBlocks.size(); i--; )
3091  {
3092  ItemBlock& block = m_ItemBlocks[i];
3093  // This block has some free items: Use first one.
3094  if(block.FirstFreeIndex != UINT32_MAX)
3095  {
3096  Item* const pItem = &block.pItems[block.FirstFreeIndex];
3097  block.FirstFreeIndex = pItem->NextFreeIndex;
3098  return &pItem->Value;
3099  }
3100  }
3101 
3102  // No block has free item: Create new one and use it.
3103  ItemBlock& newBlock = CreateNewBlock();
3104  Item* const pItem = &newBlock.pItems[0];
3105  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
3106  return &pItem->Value;
3107 }
3108 
// Returns an item obtained from Alloc() to its pool. O(number of blocks).
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union. memcpy avoids a strict-aliasing-violating
        // pointer cast from T* to Item* (Value is the first union member).
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        // NOTE(review): relational comparison of unrelated pointers is
        // technically unspecified for pointers outside this block's array;
        // kept as upstream wrote it.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the freed slot onto the block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
3132 
3133 template<typename T>
3134 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
3135 {
3136  ItemBlock newBlock = {
3137  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
3138 
3139  m_ItemBlocks.push_back(newBlock);
3140 
3141  // Setup singly-linked list of all free items in this block.
3142  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
3143  newBlock.pItems[i].NextFreeIndex = i + 1;
3144  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
3145  return m_ItemBlocks.back();
3146 }
3147 
3149 // class VmaRawList, VmaList
3150 
3151 #if VMA_USE_STL_LIST
3152 
3153 #define VmaList std::list
3154 
3155 #else // #if VMA_USE_STL_LIST
3156 
// Node of VmaRawList: intrusive prev/next links plus the stored value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
3164 
3165 // Doubly linked list.
template<typename T>
class VmaRawList
{
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Append/prepend a node with uninitialized Value and return it.
    ItemType* PushBack();
    ItemType* PushFront();
    // Append/prepend a copy of value and return the new node.
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    // Nodes come from a pool allocator to avoid one heap call per node.
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;

    // Declared not defined, to block copy constructor and assignment operator.
    VmaRawList(const VmaRawList<T>& src);
    VmaRawList<T>& operator=(const VmaRawList<T>& rhs);
};
3212 
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 list nodes per pool block.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
3222 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's destructor releases the node storage wholesale.
}
3229 
3230 template<typename T>
3231 void VmaRawList<T>::Clear()
3232 {
3233  if(IsEmpty() == false)
3234  {
3235  ItemType* pItem = m_pBack;
3236  while(pItem != VMA_NULL)
3237  {
3238  ItemType* const pPrevItem = pItem->pPrev;
3239  m_ItemAllocator.Free(pItem);
3240  pItem = pPrevItem;
3241  }
3242  m_pFront = VMA_NULL;
3243  m_pBack = VMA_NULL;
3244  m_Count = 0;
3245  }
3246 }
3247 
3248 template<typename T>
3249 VmaListItem<T>* VmaRawList<T>::PushBack()
3250 {
3251  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3252  pNewItem->pNext = VMA_NULL;
3253  if(IsEmpty())
3254  {
3255  pNewItem->pPrev = VMA_NULL;
3256  m_pFront = pNewItem;
3257  m_pBack = pNewItem;
3258  m_Count = 1;
3259  }
3260  else
3261  {
3262  pNewItem->pPrev = m_pBack;
3263  m_pBack->pNext = pNewItem;
3264  m_pBack = pNewItem;
3265  ++m_Count;
3266  }
3267  return pNewItem;
3268 }
3269 
3270 template<typename T>
3271 VmaListItem<T>* VmaRawList<T>::PushFront()
3272 {
3273  ItemType* const pNewItem = m_ItemAllocator.Alloc();
3274  pNewItem->pPrev = VMA_NULL;
3275  if(IsEmpty())
3276  {
3277  pNewItem->pNext = VMA_NULL;
3278  m_pFront = pNewItem;
3279  m_pBack = pNewItem;
3280  m_Count = 1;
3281  }
3282  else
3283  {
3284  pNewItem->pNext = m_pFront;
3285  m_pFront->pPrev = pNewItem;
3286  m_pFront = pNewItem;
3287  ++m_Count;
3288  }
3289  return pNewItem;
3290 }
3291 
// Appends a copy of value; allocation and linking done by PushBack().
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
3299 
// Prepends a copy of value; allocation and linking done by PushFront().
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
3307 
3308 template<typename T>
3309 void VmaRawList<T>::PopBack()
3310 {
3311  VMA_HEAVY_ASSERT(m_Count > 0);
3312  ItemType* const pBackItem = m_pBack;
3313  ItemType* const pPrevItem = pBackItem->pPrev;
3314  if(pPrevItem != VMA_NULL)
3315  {
3316  pPrevItem->pNext = VMA_NULL;
3317  }
3318  m_pBack = pPrevItem;
3319  m_ItemAllocator.Free(pBackItem);
3320  --m_Count;
3321 }
3322 
3323 template<typename T>
3324 void VmaRawList<T>::PopFront()
3325 {
3326  VMA_HEAVY_ASSERT(m_Count > 0);
3327  ItemType* const pFrontItem = m_pFront;
3328  ItemType* const pNextItem = pFrontItem->pNext;
3329  if(pNextItem != VMA_NULL)
3330  {
3331  pNextItem->pPrev = VMA_NULL;
3332  }
3333  m_pFront = pNextItem;
3334  m_ItemAllocator.Free(pFrontItem);
3335  --m_Count;
3336 }
3337 
// Unlinks pItem from the list (fixing up front/back if it was an end node)
// and returns it to the pool allocator.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Detach from predecessor, or advance m_pFront if pItem was first.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Detach from successor, or retreat m_pBack if pItem was last.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
3367 
3368 template<typename T>
3369 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
3370 {
3371  if(pItem != VMA_NULL)
3372  {
3373  ItemType* const prevItem = pItem->pPrev;
3374  ItemType* const newItem = m_ItemAllocator.Alloc();
3375  newItem->pPrev = prevItem;
3376  newItem->pNext = pItem;
3377  pItem->pPrev = newItem;
3378  if(prevItem != VMA_NULL)
3379  {
3380  prevItem->pNext = newItem;
3381  }
3382  else
3383  {
3384  VMA_HEAVY_ASSERT(m_pFront == pItem);
3385  m_pFront = newItem;
3386  }
3387  ++m_Count;
3388  return newItem;
3389  }
3390  else
3391  return PushBack();
3392 }
3393 
3394 template<typename T>
3395 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
3396 {
3397  if(pItem != VMA_NULL)
3398  {
3399  ItemType* const nextItem = pItem->pNext;
3400  ItemType* const newItem = m_ItemAllocator.Alloc();
3401  newItem->pNext = nextItem;
3402  newItem->pPrev = pItem;
3403  pItem->pNext = newItem;
3404  if(nextItem != VMA_NULL)
3405  {
3406  nextItem->pPrev = newItem;
3407  }
3408  else
3409  {
3410  VMA_HEAVY_ASSERT(m_pBack == pItem);
3411  m_pBack = newItem;
3412  }
3413  ++m_Count;
3414  return newItem;
3415  }
3416  else
3417  return PushFront();
3418 }
3419 
// Inserts a copy of value before pItem (null pItem appends at the end).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
3427 
// Inserts a copy of value after pItem (null pItem prepends at the front).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
3435 
// std::list-style wrapper over VmaRawList. AllocatorT must expose
// m_pCallbacks (a VkAllocationCallbacks pointer), as VmaStlAllocator does.
template<typename T, typename AllocatorT>
class VmaList
{
public:
    // Mutable bidirectional iterator. end() carries a null item pointer;
    // operator-- applied to end() steps back to the last element.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Only VmaList itself may construct positioned iterators.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before it; insert(end(), v) appends, like std::list.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
3619 
3620 #endif // #if VMA_USE_STL_LIST
3621 
3623 // class VmaMap
3624 
3625 // Unused in this version.
3626 #if 0
3627 
3628 #if VMA_USE_STL_UNORDERED_MAP
3629 
3630 #define VmaPair std::pair
3631 
3632 #define VMA_MAP_TYPE(KeyT, ValueT) \
3633  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
3634 
3635 #else // #if VMA_USE_STL_UNORDERED_MAP
3636 
// Minimal std::pair replacement used by VmaMap (this section is compiled out).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
3646 
3647 /* Class compatible with subset of interface of std::unordered_map.
3648 KeyT, ValueT must be POD because they will be stored in VmaVector.
3649 */
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a vector of pairs kept sorted by key (see insert/find).
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
3669 
3670 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
3671 
// Orders pairs by their first member. The second overload enables
// heterogeneous binary search by a raw key (used by VmaMap::find).
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
3684 
// Inserts pair at its lower-bound position, keeping m_Vector sorted by key.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
3695 
// Binary search by key (uses the VmaPairFirstLess overload taking a raw key).
// Returns end() when the key is absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
3713 
// Removes the pair at it, shifting following pairs left to stay dense.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
3719 
3720 #endif // #if VMA_USE_STL_UNORDERED_MAP
3721 
3722 #endif // #if 0
3723 
3725 
3726 class VmaDeviceMemoryBlock;
3727 
// Internal state behind a VmaAllocation handle. Either a sub-range of a
// VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or a dedicated VkDeviceMemory
// (ALLOCATION_TYPE_DEDICATED); the union at the bottom holds the
// type-specific data.
struct VmaAllocation_T
{
private:
    // High bit of m_MapCount: allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Constructed in NONE state; one of the Init* methods must follow.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes as a suballocation of an existing device memory block.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes as an already-lost block allocation (no backing block).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with stats describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

    void FreeUserDataString(VmaAllocator hAllocator);
};
3921 
3922 /*
3923 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
3924 allocated memory block or free.
3925 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Start of the region within the block, in bytes.
    VkDeviceSize size;   // Size of the region, in bytes.
    // NOTE(review): presumably null while the region is free - confirm
    // against VmaBlockMetadata::Alloc/Free.
    VmaAllocation hAllocation;
    VmaSuballocationType type;
};
3933 
// Ordered list of all suballocations (used + free) inside one memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
3938 
3939 /*
3940 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
3941 
3942 If canMakeOtherLost was false:
3943 - item points to a FREE suballocation.
3944 - itemsToMakeLostCount is 0.
3945 
3946 If canMakeOtherLost was true:
3947 - item points to first of sequence of suballocations, which are either FREE,
3948  or point to VmaAllocations that can become lost.
3949 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
3950  the requested allocation to succeed.
3951 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;

    // Heuristic cost of fulfilling this request: bytes of live allocations
    // sacrificed plus a fixed penalty (VMA_LOST_ALLOCATION_COST) per
    // allocation made lost. Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
3965 
3966 /*
3967 Data structure used for bookkeeping of allocations and unused ranges of memory
3968 in a single VkDeviceMemory block.
3969 */
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    ~VmaBlockMetadata();
    void Init(VkDeviceSize size);

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;
    VkDeviceSize GetSize() const { return m_Size; }
    // Live allocations = total suballocations minus free ones.
    size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    VkDeviceSize GetUnusedRangeSizeMax() const;
    // Returns true if this block is empty - contains only single free suballocation.
    bool IsEmpty() const;

    void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // Creates trivial request for case when block is empty.
    void CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest);

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        VmaAllocationRequest* pAllocationRequest);

    bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Makes actual allocation based on request. Request must already be checked and valid.
    void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        VmaAllocation hAllocation);

    // Frees suballocation assigned to given memory region.
    void Free(const VmaAllocation allocation);
    void FreeAtOffset(VkDeviceSize offset);

private:
    VkDeviceSize m_Size;        // Total size of the block in bytes.
    uint32_t m_FreeCount;       // Number of free suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Sum of sizes of all free suballocations.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
4066 
4067 /*
4068 Represents a single block of device memory (`VkDeviceMemory`) with all the
4069 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
4070 
4071 Thread-safety: This class must be externally synchronized.
4072 */
/*
Represents a single VkDeviceMemory allocation with suballocation metadata.
Lifecycle: construct, then Init(); Destroy() before destruction.
Thread-safety: externally synchronized except where m_Mutex applies (see below).
*/
class VmaDeviceMemoryBlock
{
public:
    // Suballocation bookkeeping (free/used ranges) for this block.
    VmaBlockMetadata m_Metadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    // Base pointer of the mapped block, or null when not mapped.
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    // Reference-counted mapping: increases the map count by `count`.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
4124 
// Strict-weak ordering of raw pointers by address; used as the comparator
// for pointer-keyed sorted containers in this file.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        const bool isLess = lhs < rhs;
        return isLess;
    }
};
4132 
4133 class VmaDefragmentator;
4134 
4135 /*
4136 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
4137 Vulkan memory type.
4138 
4139 Synchronized internally with a mutex.
4140 */
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool);
    ~VmaBlockVector();

    // Pre-allocates m_MinBlockCount empty blocks.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }

    // Tries existing blocks first; may create a new block up to m_MaxBlockCount.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);

    // Lazily creates (and caches) the defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    // Owned; created on demand by EnsureDefragmentator, freed by DestroyDefragmentator.
    VmaDefragmentator* m_pDefragmentator;

    size_t CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
4229 
// Implementation of the opaque VmaPool handle: a custom pool is simply a
// dedicated block vector with its own size/count limits.
struct VmaPool_T
{
public:
    VmaBlockVector m_BlockVector;

    // Takes ownership.
    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo);
    ~VmaPool_T();

    VmaBlockVector& GetBlockVector() { return m_BlockVector; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif
};
4247 
// Moves allocations between blocks of a single VmaBlockVector to compact memory.
// Usage: AddAllocation() for each candidate, then Defragment() with limits.
// Thread-safety: externally synchronized (driven under the owning vector's lock).
class VmaDefragmentator
{
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals reported via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, plus an optional
    // caller-owned flag that is set if the allocation gets moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by allocation size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during Defragment().
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when it holds more allocations
        // than were registered for defragmentation in it.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_Metadata.GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        // Maps the block's memory if not already mapped; *ppMappedData receives the base pointer.
        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparator: allows binary search of BlockInfo* by raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_Metadata.GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves, respecting the given limits.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    // Heuristic: is moving from src to dst actually an improvement?
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // pChanged (optional, caller-owned) is written VK_TRUE if the allocation moves.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
4375 
4376 // Main allocator object.
4377 struct VmaAllocator_T
4378 {
4379  bool m_UseMutex;
4380  bool m_UseKhrDedicatedAllocation;
4381  VkDevice m_hDevice;
4382  bool m_AllocationCallbacksSpecified;
4383  VkAllocationCallbacks m_AllocationCallbacks;
4384  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
4385 
4386  // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
4387  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
4388  VMA_MUTEX m_HeapSizeLimitMutex;
4389 
4390  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
4391  VkPhysicalDeviceMemoryProperties m_MemProps;
4392 
4393  // Default pools.
4394  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
4395 
4396  // Each vector is sorted by memory (handle value).
4397  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
4398  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
4399  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
4400 
4401  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
4402  ~VmaAllocator_T();
4403 
4404  const VkAllocationCallbacks* GetAllocationCallbacks() const
4405  {
4406  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
4407  }
4408  const VmaVulkanFunctions& GetVulkanFunctions() const
4409  {
4410  return m_VulkanFunctions;
4411  }
4412 
4413  VkDeviceSize GetBufferImageGranularity() const
4414  {
4415  return VMA_MAX(
4416  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
4417  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
4418  }
4419 
4420  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
4421  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
4422 
4423  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
4424  {
4425  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
4426  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
4427  }
4428 
4429  void GetBufferMemoryRequirements(
4430  VkBuffer hBuffer,
4431  VkMemoryRequirements& memReq,
4432  bool& requiresDedicatedAllocation,
4433  bool& prefersDedicatedAllocation) const;
4434  void GetImageMemoryRequirements(
4435  VkImage hImage,
4436  VkMemoryRequirements& memReq,
4437  bool& requiresDedicatedAllocation,
4438  bool& prefersDedicatedAllocation) const;
4439 
4440  // Main allocation function.
4441  VkResult AllocateMemory(
4442  const VkMemoryRequirements& vkMemReq,
4443  bool requiresDedicatedAllocation,
4444  bool prefersDedicatedAllocation,
4445  VkBuffer dedicatedBuffer,
4446  VkImage dedicatedImage,
4447  const VmaAllocationCreateInfo& createInfo,
4448  VmaSuballocationType suballocType,
4449  VmaAllocation* pAllocation);
4450 
4451  // Main deallocation function.
4452  void FreeMemory(const VmaAllocation allocation);
4453 
4454  void CalculateStats(VmaStats* pStats);
4455 
4456 #if VMA_STATS_STRING_ENABLED
4457  void PrintDetailedMap(class VmaJsonWriter& json);
4458 #endif
4459 
4460  VkResult Defragment(
4461  VmaAllocation* pAllocations,
4462  size_t allocationCount,
4463  VkBool32* pAllocationsChanged,
4464  const VmaDefragmentationInfo* pDefragmentationInfo,
4465  VmaDefragmentationStats* pDefragmentationStats);
4466 
4467  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
4468  bool TouchAllocation(VmaAllocation hAllocation);
4469 
4470  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
4471  void DestroyPool(VmaPool pool);
4472  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
4473 
4474  void SetCurrentFrameIndex(uint32_t frameIndex);
4475 
4476  void MakePoolAllocationsLost(
4477  VmaPool hPool,
4478  size_t* pLostAllocationCount);
4479 
4480  void CreateLostAllocation(VmaAllocation* pAllocation);
4481 
4482  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
4483  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
4484 
4485  VkResult Map(VmaAllocation hAllocation, void** ppData);
4486  void Unmap(VmaAllocation hAllocation);
4487 
4488  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
4489  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
4490 
4491 private:
4492  VkDeviceSize m_PreferredLargeHeapBlockSize;
4493 
4494  VkPhysicalDevice m_PhysicalDevice;
4495  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
4496 
4497  VMA_MUTEX m_PoolsMutex;
4498  // Protected by m_PoolsMutex. Sorted by pointer value.
4499  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
4500 
4501  VmaVulkanFunctions m_VulkanFunctions;
4502 
4503  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
4504 
4505  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
4506 
4507  VkResult AllocateMemoryOfType(
4508  const VkMemoryRequirements& vkMemReq,
4509  bool dedicatedAllocation,
4510  VkBuffer dedicatedBuffer,
4511  VkImage dedicatedImage,
4512  const VmaAllocationCreateInfo& createInfo,
4513  uint32_t memTypeIndex,
4514  VmaSuballocationType suballocType,
4515  VmaAllocation* pAllocation);
4516 
4517  // Allocates and registers new VkDeviceMemory specifically for single allocation.
4518  VkResult AllocateDedicatedMemory(
4519  VkDeviceSize size,
4520  VmaSuballocationType suballocType,
4521  uint32_t memTypeIndex,
4522  bool map,
4523  bool isUserDataString,
4524  void* pUserData,
4525  VkBuffer dedicatedBuffer,
4526  VkImage dedicatedImage,
4527  VmaAllocation* pAllocation);
4528 
4529  // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
4530  void FreeDedicatedMemory(VmaAllocation allocation);
4531 };
4532 
4534 // Memory allocation #2 after VmaAllocator_T definition
4535 
4536 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
4537 {
4538  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
4539 }
4540 
4541 static void VmaFree(VmaAllocator hAllocator, void* ptr)
4542 {
4543  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
4544 }
4545 
4546 template<typename T>
4547 static T* VmaAllocate(VmaAllocator hAllocator)
4548 {
4549  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
4550 }
4551 
4552 template<typename T>
4553 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
4554 {
4555  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
4556 }
4557 
4558 template<typename T>
4559 static void vma_delete(VmaAllocator hAllocator, T* ptr)
4560 {
4561  if(ptr != VMA_NULL)
4562  {
4563  ptr->~T();
4564  VmaFree(hAllocator, ptr);
4565  }
4566 }
4567 
4568 template<typename T>
4569 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
4570 {
4571  if(ptr != VMA_NULL)
4572  {
4573  for(size_t i = count; i--; )
4574  ptr[i].~T();
4575  VmaFree(hAllocator, ptr);
4576  }
4577 }
4578 
4580 // VmaStringBuilder
4581 
4582 #if VMA_STATS_STRING_ENABLED
4583 
// Simple growable character buffer used to build statistics strings.
// The buffer is NOT NUL-terminated; use GetLength() with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    // Appends the pointer value formatted as text (e.g. hex address).
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
4601 
4602 void VmaStringBuilder::Add(const char* pStr)
4603 {
4604  const size_t strLen = strlen(pStr);
4605  if(strLen > 0)
4606  {
4607  const size_t oldCount = m_Data.size();
4608  m_Data.resize(oldCount + strLen);
4609  memcpy(m_Data.data() + oldCount, pStr, strLen);
4610  }
4611 }
4612 
4613 void VmaStringBuilder::AddNumber(uint32_t num)
4614 {
4615  char buf[11];
4616  VmaUint32ToStr(buf, sizeof(buf), num);
4617  Add(buf);
4618 }
4619 
4620 void VmaStringBuilder::AddNumber(uint64_t num)
4621 {
4622  char buf[21];
4623  VmaUint64ToStr(buf, sizeof(buf), num);
4624  Add(buf);
4625 }
4626 
4627 void VmaStringBuilder::AddPointer(const void* ptr)
4628 {
4629  char buf[21];
4630  VmaPtrToStr(buf, sizeof(buf), ptr);
4631  Add(buf);
4632 }
4633 
4634 #endif // #if VMA_STATS_STRING_ENABLED
4635 
4637 // VmaJsonWriter
4638 
4639 #if VMA_STATS_STRING_ENABLED
4640 
// Streams a JSON document into a VmaStringBuilder. Objects are written as
// alternating key (string) / value pairs; a stack tracks nesting so the
// writer can emit separators and indentation automatically.
class VmaJsonWriter
{
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine = true suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value (or object key).
    void WriteString(const char* pStr);
    // Begin/Continue/End allow building one string value from pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values emitted so far; for objects, parity distinguishes keys from values.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // One entry per currently-open object/array.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
4688 
// Indentation unit appended once per open-collection level by WriteIndent.
const char* const VmaJsonWriter::INDENT = " ";
4690 
// Writer starts outside any string, with an empty nesting stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
4697 
VmaJsonWriter::~VmaJsonWriter()
{
    // A well-formed document closed every string and every object/array.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
4703 
4704 void VmaJsonWriter::BeginObject(bool singleLine)
4705 {
4706  VMA_ASSERT(!m_InsideString);
4707 
4708  BeginValue(false);
4709  m_SB.Add('{');
4710 
4711  StackItem item;
4712  item.type = COLLECTION_TYPE_OBJECT;
4713  item.valueCount = 0;
4714  item.singleLineMode = singleLine;
4715  m_Stack.push_back(item);
4716 }
4717 
4718 void VmaJsonWriter::EndObject()
4719 {
4720  VMA_ASSERT(!m_InsideString);
4721 
4722  WriteIndent(true);
4723  m_SB.Add('}');
4724 
4725  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
4726  m_Stack.pop_back();
4727 }
4728 
4729 void VmaJsonWriter::BeginArray(bool singleLine)
4730 {
4731  VMA_ASSERT(!m_InsideString);
4732 
4733  BeginValue(false);
4734  m_SB.Add('[');
4735 
4736  StackItem item;
4737  item.type = COLLECTION_TYPE_ARRAY;
4738  item.valueCount = 0;
4739  item.singleLineMode = singleLine;
4740  m_Stack.push_back(item);
4741 }
4742 
4743 void VmaJsonWriter::EndArray()
4744 {
4745  VMA_ASSERT(!m_InsideString);
4746 
4747  WriteIndent(true);
4748  m_SB.Add(']');
4749 
4750  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
4751  m_Stack.pop_back();
4752 }
4753 
// Writes a complete string value (or object key) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
4759 
4760 void VmaJsonWriter::BeginString(const char* pStr)
4761 {
4762  VMA_ASSERT(!m_InsideString);
4763 
4764  BeginValue(true);
4765  m_SB.Add('"');
4766  m_InsideString = true;
4767  if(pStr != VMA_NULL && pStr[0] != '\0')
4768  {
4769  ContinueString(pStr);
4770  }
4771 }
4772 
4773 void VmaJsonWriter::ContinueString(const char* pStr)
4774 {
4775  VMA_ASSERT(m_InsideString);
4776 
4777  const size_t strLen = strlen(pStr);
4778  for(size_t i = 0; i < strLen; ++i)
4779  {
4780  char ch = pStr[i];
4781  if(ch == '\'')
4782  {
4783  m_SB.Add("\\\\");
4784  }
4785  else if(ch == '"')
4786  {
4787  m_SB.Add("\\\"");
4788  }
4789  else if(ch >= 32)
4790  {
4791  m_SB.Add(ch);
4792  }
4793  else switch(ch)
4794  {
4795  case '\b':
4796  m_SB.Add("\\b");
4797  break;
4798  case '\f':
4799  m_SB.Add("\\f");
4800  break;
4801  case '\n':
4802  m_SB.Add("\\n");
4803  break;
4804  case '\r':
4805  m_SB.Add("\\r");
4806  break;
4807  case '\t':
4808  m_SB.Add("\\t");
4809  break;
4810  default:
4811  VMA_ASSERT(0 && "Character not currently supported.");
4812  break;
4813  }
4814  }
4815 }
4816 
// Appends a uint32_t as decimal text inside an open string.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
4822 
// Appends a uint64_t as decimal text inside an open string.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
4828 
// Appends a pointer value as text inside an open string.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
4834 
4835 void VmaJsonWriter::EndString(const char* pStr)
4836 {
4837  VMA_ASSERT(m_InsideString);
4838  if(pStr != VMA_NULL && pStr[0] != '\0')
4839  {
4840  ContinueString(pStr);
4841  }
4842  m_SB.Add('"');
4843  m_InsideString = false;
4844 }
4845 
// Writes a uint32_t as a standalone JSON number value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
4852 
// Writes a uint64_t as a standalone JSON number value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
4859 
4860 void VmaJsonWriter::WriteBool(bool b)
4861 {
4862  VMA_ASSERT(!m_InsideString);
4863  BeginValue(false);
4864  m_SB.Add(b ? "true" : "false");
4865 }
4866 
// Writes the JSON null literal.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
4873 
// Emits whatever punctuation must precede the next value — ": " after a key,
// ", " between siblings, indentation for the first element — based on the
// innermost open collection and how many values it already holds.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Inside an object, even positions (0, 2, ...) are keys; keys must be strings.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        // Odd position inside an object: this is the value following its key.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            // Next array element, or next key of an object.
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            // First element of the collection.
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
4902 
4903 void VmaJsonWriter::WriteIndent(bool oneLess)
4904 {
4905  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
4906  {
4907  m_SB.AddNewLine();
4908 
4909  size_t count = m_Stack.size();
4910  if(count > 0 && oneLess)
4911  {
4912  --count;
4913  }
4914  for(size_t i = 0; i < count; ++i)
4915  {
4916  m_SB.Add(INDENT);
4917  }
4918  }
4919 }
4920 
4921 #endif // #if VMA_STATS_STRING_ENABLED
4922 
4924 
4925 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
4926 {
4927  if(IsUserDataString())
4928  {
4929  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
4930 
4931  FreeUserDataString(hAllocator);
4932 
4933  if(pUserData != VMA_NULL)
4934  {
4935  const char* const newStrSrc = (char*)pUserData;
4936  const size_t newStrLen = strlen(newStrSrc);
4937  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
4938  memcpy(newStrDst, newStrSrc, newStrLen + 1);
4939  m_pUserData = newStrDst;
4940  }
4941  }
4942  else
4943  {
4944  m_pUserData = pUserData;
4945  }
4946 }
4947 
// Re-points this block allocation at a different block/offset (used by the
// defragmentator after moving the data). Transfers this allocation's mapping
// reference count from the old block to the new one so both blocks' map
// counts stay balanced.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        // Persistent mapping holds one extra reference beyond the low-bit count.
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
4969 
4970 VkDeviceSize VmaAllocation_T::GetOffset() const
4971 {
4972  switch(m_Type)
4973  {
4974  case ALLOCATION_TYPE_BLOCK:
4975  return m_BlockAllocation.m_Offset;
4976  case ALLOCATION_TYPE_DEDICATED:
4977  return 0;
4978  default:
4979  VMA_ASSERT(0);
4980  return 0;
4981  }
4982 }
4983 
4984 VkDeviceMemory VmaAllocation_T::GetMemory() const
4985 {
4986  switch(m_Type)
4987  {
4988  case ALLOCATION_TYPE_BLOCK:
4989  return m_BlockAllocation.m_Block->GetDeviceMemory();
4990  case ALLOCATION_TYPE_DEDICATED:
4991  return m_DedicatedAllocation.m_hMemory;
4992  default:
4993  VMA_ASSERT(0);
4994  return VK_NULL_HANDLE;
4995  }
4996 }
4997 
4998 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
4999 {
5000  switch(m_Type)
5001  {
5002  case ALLOCATION_TYPE_BLOCK:
5003  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
5004  case ALLOCATION_TYPE_DEDICATED:
5005  return m_DedicatedAllocation.m_MemoryTypeIndex;
5006  default:
5007  VMA_ASSERT(0);
5008  return UINT32_MAX;
5009  }
5010 }
5011 
5012 void* VmaAllocation_T::GetMappedData() const
5013 {
5014  switch(m_Type)
5015  {
5016  case ALLOCATION_TYPE_BLOCK:
5017  if(m_MapCount != 0)
5018  {
5019  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
5020  VMA_ASSERT(pBlockData != VMA_NULL);
5021  return (char*)pBlockData + m_BlockAllocation.m_Offset;
5022  }
5023  else
5024  {
5025  return VMA_NULL;
5026  }
5027  break;
5028  case ALLOCATION_TYPE_DEDICATED:
5029  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
5030  return m_DedicatedAllocation.m_pMappedData;
5031  default:
5032  VMA_ASSERT(0);
5033  return VMA_NULL;
5034  }
5035 }
5036 
5037 bool VmaAllocation_T::CanBecomeLost() const
5038 {
5039  switch(m_Type)
5040  {
5041  case ALLOCATION_TYPE_BLOCK:
5042  return m_BlockAllocation.m_CanBecomeLost;
5043  case ALLOCATION_TYPE_DEDICATED:
5044  return false;
5045  default:
5046  VMA_ASSERT(0);
5047  return false;
5048  }
5049 }
5050 
// Pool this allocation came from. Only block allocations belong to a pool.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
5056 
// Atomically marks this allocation as lost if it hasn't been used within the
// last frameInUseCount frames. Returns true when this call performed the
// transition; false when the allocation is still in use.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // CAS retry loop: re-read on failure because another thread may have
    // touched or lost the allocation concurrently.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — caller should not have asked twice.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still within the in-use window: cannot be made lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
5088 
5089 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
5090 {
5091  VMA_ASSERT(IsUserDataString());
5092  if(m_pUserData != VMA_NULL)
5093  {
5094  char* const oldStr = (char*)m_pUserData;
5095  const size_t oldStrLen = strlen(oldStr);
5096  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
5097  m_pUserData = VMA_NULL;
5098  }
5099 }
5100 
5101 void VmaAllocation_T::BlockAllocMap()
5102 {
5103  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5104 
5105  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
5106  {
5107  ++m_MapCount;
5108  }
5109  else
5110  {
5111  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
5112  }
5113 }
5114 
5115 void VmaAllocation_T::BlockAllocUnmap()
5116 {
5117  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
5118 
5119  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
5120  {
5121  --m_MapCount;
5122  }
5123  else
5124  {
5125  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
5126  }
5127 }
5128 
// Maps a dedicated allocation's memory, reference-counted: the first call
// performs vkMapMemory, subsequent calls return the cached pointer.
// Returns VK_ERROR_MEMORY_MAP_FAILED when the count would exceed its 7-bit cap.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: hand out the cached pointer and bump the count.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First map: the dedicated allocation owns the whole memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
5165 
// Unmaps a dedicated allocation, reference-counted: vkUnmapMemory is called
// only when the count drops to zero.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Clear the cached pointer before releasing the mapping.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
5186 
5187 #if VMA_STATS_STRING_ENABLED
5188 
// Correspond to values of enum VmaSuballocationType.
// The enum value is used directly as an index into this array when writing the
// JSON statistics dump, so the order here must match the enum declaration.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
5198 
5199 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
5200 {
5201  json.BeginObject();
5202 
5203  json.WriteString("Blocks");
5204  json.WriteNumber(stat.blockCount);
5205 
5206  json.WriteString("Allocations");
5207  json.WriteNumber(stat.allocationCount);
5208 
5209  json.WriteString("UnusedRanges");
5210  json.WriteNumber(stat.unusedRangeCount);
5211 
5212  json.WriteString("UsedBytes");
5213  json.WriteNumber(stat.usedBytes);
5214 
5215  json.WriteString("UnusedBytes");
5216  json.WriteNumber(stat.unusedBytes);
5217 
5218  if(stat.allocationCount > 1)
5219  {
5220  json.WriteString("AllocationSize");
5221  json.BeginObject(true);
5222  json.WriteString("Min");
5223  json.WriteNumber(stat.allocationSizeMin);
5224  json.WriteString("Avg");
5225  json.WriteNumber(stat.allocationSizeAvg);
5226  json.WriteString("Max");
5227  json.WriteNumber(stat.allocationSizeMax);
5228  json.EndObject();
5229  }
5230 
5231  if(stat.unusedRangeCount > 1)
5232  {
5233  json.WriteString("UnusedRangeSize");
5234  json.BeginObject(true);
5235  json.WriteString("Min");
5236  json.WriteNumber(stat.unusedRangeSizeMin);
5237  json.WriteString("Avg");
5238  json.WriteNumber(stat.unusedRangeSizeAvg);
5239  json.WriteString("Max");
5240  json.WriteNumber(stat.unusedRangeSizeMax);
5241  json.EndObject();
5242  }
5243 
5244  json.EndObject();
5245 }
5246 
5247 #endif // #if VMA_STATS_STRING_ENABLED
5248 
5249 struct VmaSuballocationItemSizeLess
5250 {
5251  bool operator()(
5252  const VmaSuballocationList::iterator lhs,
5253  const VmaSuballocationList::iterator rhs) const
5254  {
5255  return lhs->size < rhs->size;
5256  }
5257  bool operator()(
5258  const VmaSuballocationList::iterator lhs,
5259  VkDeviceSize rhsSize) const
5260  {
5261  return lhs->size < rhsSize;
5262  }
5263 };
5264 
5266 // class VmaBlockMetadata
5267 
// Constructs empty metadata; Init() must be called with the block size before
// use. Both containers route their CPU allocations through the allocator's
// custom allocation callbacks.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
5276 
// Trivial destructor: the containers release their memory through their
// VmaStlAllocator instances.
VmaBlockMetadata::~VmaBlockMetadata()
{
}
5280 
5281 void VmaBlockMetadata::Init(VkDeviceSize size)
5282 {
5283  m_Size = size;
5284  m_FreeCount = 1;
5285  m_SumFreeSize = size;
5286 
5287  VmaSuballocation suballoc = {};
5288  suballoc.offset = 0;
5289  suballoc.size = size;
5290  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
5291  suballoc.hAllocation = VK_NULL_HANDLE;
5292 
5293  m_Suballocations.push_back(suballoc);
5294  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
5295  --suballocItem;
5296  m_FreeSuballocationsBySize.push_back(suballocItem);
5297 }
5298 
// Exhaustive consistency check of this block's metadata, intended for
// VMA_HEAVY_ASSERT. Returns false on the first inconsistency found instead of
// asserting. Verifies: contiguous coverage of the block by suballocations, no
// two adjacent free ranges, handle/offset/size agreement with the owning
// VmaAllocation objects, and that m_FreeSuballocationsBySize is complete and
// sorted, with totals matching m_Size / m_SumFreeSize / m_FreeCount.
bool VmaBlockMetadata::Validate() const
{
    if(m_Suballocations.empty())
    {
        return false;
    }

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        if(subAlloc.offset != calculatedOffset)
        {
            return false;
        }

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        if(prevFree && currFree)
        {
            return false;
        }

        // Exactly the free suballocations must carry a null allocation handle.
        if(currFree != (subAlloc.hAllocation == VK_NULL_HANDLE))
        {
            return false;
        }

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only sufficiently large free ranges are kept in the sorted vector.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }
        }
        else
        {
            // The owning allocation object must agree with this list entry.
            if(subAlloc.hAllocation->GetOffset() != subAlloc.offset)
            {
                return false;
            }
            if(subAlloc.hAllocation->GetSize() != subAlloc.size)
            {
                return false;
            }
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    if(m_FreeSuballocationsBySize.size() != freeSuballocationsToRegister)
    {
        return false;
    }

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            return false;
        }
        // They must be sorted by size ascending.
        if(suballocItem->size < lastSize)
        {
            return false;
        }

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    if(!ValidateFreeSuballocationList() ||
        (calculatedOffset != m_Size) ||
        (calculatedSumFreeSize != m_SumFreeSize) ||
        (calculatedFreeCount != m_FreeCount))
    {
        return false;
    }

    return true;
}
5404 
5405 VkDeviceSize VmaBlockMetadata::GetUnusedRangeSizeMax() const
5406 {
5407  if(!m_FreeSuballocationsBySize.empty())
5408  {
5409  return m_FreeSuballocationsBySize.back()->size;
5410  }
5411  else
5412  {
5413  return 0;
5414  }
5415 }
5416 
5417 bool VmaBlockMetadata::IsEmpty() const
5418 {
5419  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
5420 }
5421 
5422 void VmaBlockMetadata::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
5423 {
5424  outInfo.blockCount = 1;
5425 
5426  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5427  outInfo.allocationCount = rangeCount - m_FreeCount;
5428  outInfo.unusedRangeCount = m_FreeCount;
5429 
5430  outInfo.unusedBytes = m_SumFreeSize;
5431  outInfo.usedBytes = m_Size - outInfo.unusedBytes;
5432 
5433  outInfo.allocationSizeMin = UINT64_MAX;
5434  outInfo.allocationSizeMax = 0;
5435  outInfo.unusedRangeSizeMin = UINT64_MAX;
5436  outInfo.unusedRangeSizeMax = 0;
5437 
5438  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5439  suballocItem != m_Suballocations.cend();
5440  ++suballocItem)
5441  {
5442  const VmaSuballocation& suballoc = *suballocItem;
5443  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
5444  {
5445  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
5446  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
5447  }
5448  else
5449  {
5450  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
5451  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
5452  }
5453  }
5454 }
5455 
5456 void VmaBlockMetadata::AddPoolStats(VmaPoolStats& inoutStats) const
5457 {
5458  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
5459 
5460  inoutStats.size += m_Size;
5461  inoutStats.unusedSize += m_SumFreeSize;
5462  inoutStats.allocationCount += rangeCount - m_FreeCount;
5463  inoutStats.unusedRangeCount += m_FreeCount;
5464  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
5465 }
5466 
5467 #if VMA_STATS_STRING_ENABLED
5468 
5469 void VmaBlockMetadata::PrintDetailedMap(class VmaJsonWriter& json) const
5470 {
5471  json.BeginObject();
5472 
5473  json.WriteString("TotalBytes");
5474  json.WriteNumber(m_Size);
5475 
5476  json.WriteString("UnusedBytes");
5477  json.WriteNumber(m_SumFreeSize);
5478 
5479  json.WriteString("Allocations");
5480  json.WriteNumber((uint64_t)m_Suballocations.size() - m_FreeCount);
5481 
5482  json.WriteString("UnusedRanges");
5483  json.WriteNumber(m_FreeCount);
5484 
5485  json.WriteString("Suballocations");
5486  json.BeginArray();
5487  size_t i = 0;
5488  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
5489  suballocItem != m_Suballocations.cend();
5490  ++suballocItem, ++i)
5491  {
5492  json.BeginObject(true);
5493 
5494  json.WriteString("Type");
5495  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[suballocItem->type]);
5496 
5497  json.WriteString("Size");
5498  json.WriteNumber(suballocItem->size);
5499 
5500  json.WriteString("Offset");
5501  json.WriteNumber(suballocItem->offset);
5502 
5503  if(suballocItem->type != VMA_SUBALLOCATION_TYPE_FREE)
5504  {
5505  const void* pUserData = suballocItem->hAllocation->GetUserData();
5506  if(pUserData != VMA_NULL)
5507  {
5508  json.WriteString("UserData");
5509  if(suballocItem->hAllocation->IsUserDataString())
5510  {
5511  json.WriteString((const char*)pUserData);
5512  }
5513  else
5514  {
5515  json.BeginString();
5516  json.ContinueString_Pointer(pUserData);
5517  json.EndString();
5518  }
5519  }
5520  }
5521 
5522  json.EndObject();
5523  }
5524  json.EndArray();
5525 
5526  json.EndObject();
5527 }
5528 
5529 #endif // #if VMA_STATS_STRING_ENABLED
5530 
/*
How many suitable free suballocations to analyze before choosing the best one.
- Set to 1 to use First-Fit algorithm - the first suitable free suballocation
  will be chosen.
- Set to UINT32_MAX to use Best-Fit/Worst-Fit algorithm - all suitable free
  suballocations will be analyzed and the best one will be chosen.
- Any other value is also acceptable.
*/
//static const uint32_t MAX_SUITABLE_SUBALLOCATIONS_TO_CHECK = 8;
5540 
5541 void VmaBlockMetadata::CreateFirstAllocationRequest(VmaAllocationRequest* pAllocationRequest)
5542 {
5543  VMA_ASSERT(IsEmpty());
5544  pAllocationRequest->offset = 0;
5545  pAllocationRequest->sumFreeSize = m_SumFreeSize;
5546  pAllocationRequest->sumItemSize = 0;
5547  pAllocationRequest->item = m_Suballocations.begin();
5548  pAllocationRequest->itemsToMakeLostCount = 0;
5549 }
5550 
// Tries to find room for an allocation of given size/alignment/type in this
// block. On success fills *pAllocationRequest and returns true.
// Phase 1 searches only genuinely free ranges via the size-sorted vector
// (binary search under VMA_BEST_FIT, largest-first otherwise).
// Phase 2 (only if canMakeOtherLost) brute-forces every suballocation,
// allowing existing lost-capable allocations to be sacrificed, and keeps the
// candidate with the lowest CalcCost().
bool VmaBlockMetadata::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fulfill the request: Early return.
    if(canMakeOtherLost == false && m_SumFreeSize < allocSize)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        if(VMA_BEST_FIT)
        {
            // Find first free suballocation with size not less than allocSize.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            // Alignment/granularity may disqualify a candidate; keep trying larger ones.
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else
        {
            // Search starting from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // VK_WHOLE_SIZE sentinels mean "no candidate found yet" and make the
        // initial CalcCost() worse than any real candidate.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    // Keep the candidate that sacrifices the least.
                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        // Sentinel replaced => at least one viable candidate was found.
        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
5679 
// Makes lost the allocations that a previously created allocation request
// scheduled for sacrifice (itemsToMakeLostCount > 0). Returns false if any of
// them can no longer be made lost. On success pAllocationRequest->item points
// at the resulting free suballocation.
bool VmaBlockMetadata::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free suballocation. Free neighbors are always merged, so
        // a single increment is guaranteed to land on a used item.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors and invalidates the
            // old iterator; continue from the merged free item it returns.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
5711 
5712 uint32_t VmaBlockMetadata::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
5713 {
5714  uint32_t lostAllocationCount = 0;
5715  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
5716  it != m_Suballocations.end();
5717  ++it)
5718  {
5719  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
5720  it->hAllocation->CanBecomeLost() &&
5721  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
5722  {
5723  it = FreeSuballocation(it);
5724  ++lostAllocationCount;
5725  }
5726  }
5727  return lostAllocationCount;
5728 }
5729 
// Commits an allocation into the free suballocation chosen by `request`:
// shrinks that free range to exactly [request.offset, request.offset + allocSize),
// inserts new free suballocations for leftover padding before/after it, and
// updates m_FreeCount / m_SumFreeSize.
// Note: the padding-end item is inserted before the padding-begin item so that
// request.item keeps referring to the suballocation being converted.
void VmaBlockMetadata::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range consumed, one re-added per non-empty padding.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
5793 
5794 void VmaBlockMetadata::Free(const VmaAllocation allocation)
5795 {
5796  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5797  suballocItem != m_Suballocations.end();
5798  ++suballocItem)
5799  {
5800  VmaSuballocation& suballoc = *suballocItem;
5801  if(suballoc.hAllocation == allocation)
5802  {
5803  FreeSuballocation(suballocItem);
5804  VMA_HEAVY_ASSERT(Validate());
5805  return;
5806  }
5807  }
5808  VMA_ASSERT(0 && "Not found!");
5809 }
5810 
5811 void VmaBlockMetadata::FreeAtOffset(VkDeviceSize offset)
5812 {
5813  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
5814  suballocItem != m_Suballocations.end();
5815  ++suballocItem)
5816  {
5817  VmaSuballocation& suballoc = *suballocItem;
5818  if(suballoc.offset == offset)
5819  {
5820  FreeSuballocation(suballocItem);
5821  return;
5822  }
5823  }
5824  VMA_ASSERT(0 && "Not found!");
5825 }
5826 
5827 bool VmaBlockMetadata::ValidateFreeSuballocationList() const
5828 {
5829  VkDeviceSize lastSize = 0;
5830  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
5831  {
5832  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
5833 
5834  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
5835  {
5836  VMA_ASSERT(0);
5837  return false;
5838  }
5839  if(it->size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
5840  {
5841  VMA_ASSERT(0);
5842  return false;
5843  }
5844  if(it->size < lastSize)
5845  {
5846  VMA_ASSERT(0);
5847  return false;
5848  }
5849 
5850  lastSize = it->size;
5851  }
5852  return true;
5853 }
5854 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at suballocItem. On success returns true with *pOffset set
// to the final, aligned offset.
// When canMakeOtherLost is true, the candidate range may span used
// suballocations that can be made lost: *itemsToMakeLostCount receives how
// many would have to be sacrificed and *pSumFreeSize / *pSumItemSize
// accumulate sizes for cost comparison by the caller.
// When canMakeOtherLost is false, suballocItem must itself be a free range
// big enough for the request.
bool VmaBlockMetadata::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // The starting item is used: it must be lost-capable and expired,
            // otherwise this starting point is unusable.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(m_Size - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > m_Size)
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // A used item in the candidate span must be sacrificable.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: the starting item must itself be a free range that fits.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if((VMA_DEBUG_MARGIN > 0) && suballocItem != m_Suballocations.cbegin())
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        const VkDeviceSize alignment = VMA_MAX(allocAlignment, static_cast<VkDeviceSize>(VMA_DEBUG_ALIGNMENT));
        *pOffset = VmaAlignUp(*pOffset, alignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end if this is not last suballocation.
        VmaSuballocationList::const_iterator next = suballocItem;
        ++next;
        const VkDeviceSize requiredEndMargin =
            (next != m_Suballocations.cend()) ? VMA_DEBUG_MARGIN : 0;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
6136 
6137 void VmaBlockMetadata::MergeFreeWithNext(VmaSuballocationList::iterator item)
6138 {
6139  VMA_ASSERT(item != m_Suballocations.end());
6140  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6141 
6142  VmaSuballocationList::iterator nextItem = item;
6143  ++nextItem;
6144  VMA_ASSERT(nextItem != m_Suballocations.end());
6145  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
6146 
6147  item->size += nextItem->size;
6148  --m_FreeCount;
6149  m_Suballocations.erase(nextItem);
6150 }
6151 
// Marks the given suballocation as free, merges it with adjacent free
// neighbors (preserving the invariant that no two free suballocations are
// adjacent), and registers the merged range in the size-sorted vector.
// Returns an iterator to the resulting free suballocation.
// Note the order: the next neighbor is merged into `suballocItem` first, then
// `suballocItem` (already enlarged) is merged into the previous one.
VmaSuballocationList::iterator VmaBlockMetadata::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // The neighbor leaves the sorted vector before being erased by the merge.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem absorbs suballocItem; its registered size changes, so it is
        // unregistered first and re-registered with the new size.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
6203 
6204 void VmaBlockMetadata::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
6205 {
6206  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
6207  VMA_ASSERT(item->size > 0);
6208 
6209  // You may want to enable this validation at the beginning or at the end of
6210  // this function, depending on what do you want to check.
6211  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6212 
6213  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
6214  {
6215  if(m_FreeSuballocationsBySize.empty())
6216  {
6217  m_FreeSuballocationsBySize.push_back(item);
6218  }
6219  else
6220  {
6221  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
6222  }
6223  }
6224 
6225  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
6226 }
6227 
6228 
// Removes the given free suballocation from m_FreeSuballocationsBySize.
// Only ranges >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are indexed
// there, so smaller items are silently ignored.
void VmaBlockMetadata::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary-search the first entry whose size is not less than item's,
        // then scan forward through the run of equal-sized entries to find
        // the exact iterator.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Walking past the run of equal sizes means the item is missing.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
6261 
6263 // class VmaDeviceMemoryBlock
6264 
// Constructs an unbound block: no VkDeviceMemory attached yet and not
// mapped. Init() must be called before the block is usable.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_Metadata(hAllocator),
    m_MemoryTypeIndex(UINT32_MAX),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
6273 
// Binds this block to an already-allocated VkDeviceMemory of the given
// memory type and size. May be called only once per block (m_hMemory must
// still be null). Ownership of newMemory passes to this block.
void VmaDeviceMemoryBlock::Init(
    uint32_t newMemoryTypeIndex,
    VkDeviceMemory newMemory,
    VkDeviceSize newSize)
{
    VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);

    m_MemoryTypeIndex = newMemoryTypeIndex;
    m_hMemory = newMemory;

    // Initialize suballocation metadata for the full block size.
    m_Metadata.Init(newSize);
}
6286 
// Releases the underlying VkDeviceMemory back to the allocator. The block
// must be completely empty (every suballocation freed) before this call.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_Metadata.IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_Metadata.GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;
}
6297 
6298 bool VmaDeviceMemoryBlock::Validate() const
6299 {
6300  if((m_hMemory == VK_NULL_HANDLE) ||
6301  (m_Metadata.GetSize() == 0))
6302  {
6303  return false;
6304  }
6305 
6306  return m_Metadata.Validate();
6307 }
6308 
// Maps the block's memory into host address space, reference-counted:
// the first caller performs vkMapMemory of the whole block; later callers
// only increase m_MapCount by `count`. On success, *ppData (if not null)
// receives the pointer to the start of the mapped block.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Serializes vkMapMemory/vkUnmapMemory on this VkDeviceMemory across threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped - just bump the reference count and hand out the pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            // Only start counting once the map actually succeeded.
            m_MapCount = count;
        }
        return result;
    }
}
6347 
6348 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
6349 {
6350  if(count == 0)
6351  {
6352  return;
6353  }
6354 
6355  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6356  if(m_MapCount >= count)
6357  {
6358  m_MapCount -= count;
6359  if(m_MapCount == 0)
6360  {
6361  m_pMappedData = VMA_NULL;
6362  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
6363  }
6364  }
6365  else
6366  {
6367  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
6368  }
6369 }
6370 
6371 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
6372  const VmaAllocator hAllocator,
6373  const VmaAllocation hAllocation,
6374  VkBuffer hBuffer)
6375 {
6376  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6377  hAllocation->GetBlock() == this);
6378  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6379  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6380  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
6381  hAllocator->m_hDevice,
6382  hBuffer,
6383  m_hMemory,
6384  hAllocation->GetOffset());
6385 }
6386 
6387 VkResult VmaDeviceMemoryBlock::BindImageMemory(
6388  const VmaAllocator hAllocator,
6389  const VmaAllocation hAllocation,
6390  VkImage hImage)
6391 {
6392  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
6393  hAllocation->GetBlock() == this);
6394  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
6395  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
6396  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
6397  hAllocator->m_hDevice,
6398  hImage,
6399  m_hMemory,
6400  hAllocation->GetOffset());
6401 }
6402 
// Resets outInfo to the identity element for VmaAddStatInfo accumulation:
// all counters/sums zero, minima seeded with UINT64_MAX so VMA_MIN works.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
6409 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counters and byte totals are summed; min/max fields are folded with
// VMA_MIN/VMA_MAX. The *Avg fields are NOT touched here - they are derived
// afterwards by VmaPostprocessCalcStatInfo.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
6423 
6424 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
6425 {
6426  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
6427  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
6428  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
6429  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
6430 }
6431 
// Constructs a custom pool: a single VmaBlockVector configured directly
// from createInfo. When IGNORE_BUFFER_IMAGE_GRANULARITY is requested, a
// granularity of 1 is used instead of the device's value.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true) // isCustomPool
{
}
6446 
// Trivial destructor: the owned m_BlockVector destroys its blocks itself.
VmaPool_T::~VmaPool_T()
{
}
6450 
6451 #if VMA_STATS_STRING_ENABLED
6452 
6453 #endif // #if VMA_STATS_STRING_ENABLED
6454 
// Stores the configuration for a vector of memory blocks of one memory
// type. No blocks are created here - see CreateMinBlocks()/Allocate().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_HasEmptyBlock(false),
    m_pDefragmentator(VMA_NULL)
{
}
6477 
6478 VmaBlockVector::~VmaBlockVector()
6479 {
6480  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
6481 
6482  for(size_t i = m_Blocks.size(); i--; )
6483  {
6484  m_Blocks[i]->Destroy(m_hAllocator);
6485  vma_delete(m_hAllocator, m_Blocks[i]);
6486  }
6487 }
6488 
6489 VkResult VmaBlockVector::CreateMinBlocks()
6490 {
6491  for(size_t i = 0; i < m_MinBlockCount; ++i)
6492  {
6493  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
6494  if(res != VK_SUCCESS)
6495  {
6496  return res;
6497  }
6498  }
6499  return VK_SUCCESS;
6500 }
6501 
6502 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
6503 {
6504  pStats->size = 0;
6505  pStats->unusedSize = 0;
6506  pStats->allocationCount = 0;
6507  pStats->unusedRangeCount = 0;
6508  pStats->unusedRangeSizeMax = 0;
6509 
6510  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6511 
6512  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6513  {
6514  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
6515  VMA_ASSERT(pBlock);
6516  VMA_HEAVY_ASSERT(pBlock->Validate());
6517  pBlock->m_Metadata.AddPoolStats(*pStats);
6518  }
6519 }
6520 
6521 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
6522 
6523 VkResult VmaBlockVector::Allocate(
6524  VmaPool hCurrentPool,
6525  uint32_t currentFrameIndex,
6526  const VkMemoryRequirements& vkMemReq,
6527  const VmaAllocationCreateInfo& createInfo,
6528  VmaSuballocationType suballocType,
6529  VmaAllocation* pAllocation)
6530 {
6531  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
6532  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
6533 
6534  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6535 
6536  // 1. Search existing allocations. Try to allocate without making other allocations lost.
6537  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6538  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6539  {
6540  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6541  VMA_ASSERT(pCurrBlock);
6542  VmaAllocationRequest currRequest = {};
6543  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6544  currentFrameIndex,
6545  m_FrameInUseCount,
6546  m_BufferImageGranularity,
6547  vkMemReq.size,
6548  vkMemReq.alignment,
6549  suballocType,
6550  false, // canMakeOtherLost
6551  &currRequest))
6552  {
6553  // Allocate from pCurrBlock.
6554  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
6555 
6556  if(mapped)
6557  {
6558  VkResult res = pCurrBlock->Map(m_hAllocator, 1, VMA_NULL);
6559  if(res != VK_SUCCESS)
6560  {
6561  return res;
6562  }
6563  }
6564 
6565  // We no longer have an empty Allocation.
6566  if(pCurrBlock->m_Metadata.IsEmpty())
6567  {
6568  m_HasEmptyBlock = false;
6569  }
6570 
6571  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6572  pCurrBlock->m_Metadata.Alloc(currRequest, suballocType, vkMemReq.size, *pAllocation);
6573  (*pAllocation)->InitBlockAllocation(
6574  hCurrentPool,
6575  pCurrBlock,
6576  currRequest.offset,
6577  vkMemReq.alignment,
6578  vkMemReq.size,
6579  suballocType,
6580  mapped,
6581  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6582  VMA_HEAVY_ASSERT(pCurrBlock->Validate());
6583  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6584  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6585  return VK_SUCCESS;
6586  }
6587  }
6588 
6589  const bool canCreateNewBlock =
6590  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
6591  (m_Blocks.size() < m_MaxBlockCount);
6592 
6593  // 2. Try to create new block.
6594  if(canCreateNewBlock)
6595  {
6596  // Calculate optimal size for new block.
6597  VkDeviceSize newBlockSize = m_PreferredBlockSize;
6598  uint32_t newBlockSizeShift = 0;
6599  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
6600 
6601  // Allocating blocks of other sizes is allowed only in default pools.
6602  // In custom pools block size is fixed.
6603  if(m_IsCustomPool == false)
6604  {
6605  // Allocate 1/8, 1/4, 1/2 as first blocks.
6606  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
6607  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
6608  {
6609  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6610  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= vkMemReq.size * 2)
6611  {
6612  newBlockSize = smallerNewBlockSize;
6613  ++newBlockSizeShift;
6614  }
6615  else
6616  {
6617  break;
6618  }
6619  }
6620  }
6621 
6622  size_t newBlockIndex = 0;
6623  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
6624  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
6625  if(m_IsCustomPool == false)
6626  {
6627  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
6628  {
6629  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
6630  if(smallerNewBlockSize >= vkMemReq.size)
6631  {
6632  newBlockSize = smallerNewBlockSize;
6633  ++newBlockSizeShift;
6634  res = CreateBlock(newBlockSize, &newBlockIndex);
6635  }
6636  else
6637  {
6638  break;
6639  }
6640  }
6641  }
6642 
6643  if(res == VK_SUCCESS)
6644  {
6645  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
6646  VMA_ASSERT(pBlock->m_Metadata.GetSize() >= vkMemReq.size);
6647 
6648  if(mapped)
6649  {
6650  res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
6651  if(res != VK_SUCCESS)
6652  {
6653  return res;
6654  }
6655  }
6656 
6657  // Allocate from pBlock. Because it is empty, dstAllocRequest can be trivially filled.
6658  VmaAllocationRequest allocRequest;
6659  pBlock->m_Metadata.CreateFirstAllocationRequest(&allocRequest);
6660  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6661  pBlock->m_Metadata.Alloc(allocRequest, suballocType, vkMemReq.size, *pAllocation);
6662  (*pAllocation)->InitBlockAllocation(
6663  hCurrentPool,
6664  pBlock,
6665  allocRequest.offset,
6666  vkMemReq.alignment,
6667  vkMemReq.size,
6668  suballocType,
6669  mapped,
6670  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6671  VMA_HEAVY_ASSERT(pBlock->Validate());
6672  VMA_DEBUG_LOG(" Created new allocation Size=%llu", allocInfo.allocationSize);
6673  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6674  return VK_SUCCESS;
6675  }
6676  }
6677 
6678  const bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
6679 
6680  // 3. Try to allocate from existing blocks with making other allocations lost.
6681  if(canMakeOtherLost)
6682  {
6683  uint32_t tryIndex = 0;
6684  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
6685  {
6686  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
6687  VmaAllocationRequest bestRequest = {};
6688  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
6689 
6690  // 1. Search existing allocations.
6691  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
6692  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
6693  {
6694  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
6695  VMA_ASSERT(pCurrBlock);
6696  VmaAllocationRequest currRequest = {};
6697  if(pCurrBlock->m_Metadata.CreateAllocationRequest(
6698  currentFrameIndex,
6699  m_FrameInUseCount,
6700  m_BufferImageGranularity,
6701  vkMemReq.size,
6702  vkMemReq.alignment,
6703  suballocType,
6704  canMakeOtherLost,
6705  &currRequest))
6706  {
6707  const VkDeviceSize currRequestCost = currRequest.CalcCost();
6708  if(pBestRequestBlock == VMA_NULL ||
6709  currRequestCost < bestRequestCost)
6710  {
6711  pBestRequestBlock = pCurrBlock;
6712  bestRequest = currRequest;
6713  bestRequestCost = currRequestCost;
6714 
6715  if(bestRequestCost == 0)
6716  {
6717  break;
6718  }
6719  }
6720  }
6721  }
6722 
6723  if(pBestRequestBlock != VMA_NULL)
6724  {
6725  if(mapped)
6726  {
6727  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
6728  if(res != VK_SUCCESS)
6729  {
6730  return res;
6731  }
6732  }
6733 
6734  if(pBestRequestBlock->m_Metadata.MakeRequestedAllocationsLost(
6735  currentFrameIndex,
6736  m_FrameInUseCount,
6737  &bestRequest))
6738  {
6739  // We no longer have an empty Allocation.
6740  if(pBestRequestBlock->m_Metadata.IsEmpty())
6741  {
6742  m_HasEmptyBlock = false;
6743  }
6744  // Allocate from this pBlock.
6745  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
6746  pBestRequestBlock->m_Metadata.Alloc(bestRequest, suballocType, vkMemReq.size, *pAllocation);
6747  (*pAllocation)->InitBlockAllocation(
6748  hCurrentPool,
6749  pBestRequestBlock,
6750  bestRequest.offset,
6751  vkMemReq.alignment,
6752  vkMemReq.size,
6753  suballocType,
6754  mapped,
6755  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
6756  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
6757  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
6758  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
6759  return VK_SUCCESS;
6760  }
6761  // else: Some allocations must have been touched while we are here. Next try.
6762  }
6763  else
6764  {
6765  // Could not find place in any of the blocks - break outer loop.
6766  break;
6767  }
6768  }
6769  /* Maximum number of tries exceeded - a very unlike event when many other
6770  threads are simultaneously touching allocations making it impossible to make
6771  lost at the same time as we try to allocate. */
6772  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
6773  {
6774  return VK_ERROR_TOO_MANY_OBJECTS;
6775  }
6776  }
6777 
6778  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
6779 }
6780 
6781 void VmaBlockVector::Free(
6782  VmaAllocation hAllocation)
6783 {
6784  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
6785 
6786  // Scope for lock.
6787  {
6788  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
6789 
6790  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
6791 
6792  if(hAllocation->IsPersistentMap())
6793  {
6794  pBlock->Unmap(m_hAllocator, 1);
6795  }
6796 
6797  pBlock->m_Metadata.Free(hAllocation);
6798  VMA_HEAVY_ASSERT(pBlock->Validate());
6799 
6800  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
6801 
6802  // pBlock became empty after this deallocation.
6803  if(pBlock->m_Metadata.IsEmpty())
6804  {
6805  // Already has empty Allocation. We don't want to have two, so delete this one.
6806  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
6807  {
6808  pBlockToDelete = pBlock;
6809  Remove(pBlock);
6810  }
6811  // We now have first empty Allocation.
6812  else
6813  {
6814  m_HasEmptyBlock = true;
6815  }
6816  }
6817  // pBlock didn't become empty, but we have another empty block - find and free that one.
6818  // (This is optional, heuristics.)
6819  else if(m_HasEmptyBlock)
6820  {
6821  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
6822  if(pLastBlock->m_Metadata.IsEmpty() && m_Blocks.size() > m_MinBlockCount)
6823  {
6824  pBlockToDelete = pLastBlock;
6825  m_Blocks.pop_back();
6826  m_HasEmptyBlock = false;
6827  }
6828  }
6829 
6830  IncrementallySortBlocks();
6831  }
6832 
6833  // Destruction of a free Allocation. Deferred until this point, outside of mutex
6834  // lock, for performance reason.
6835  if(pBlockToDelete != VMA_NULL)
6836  {
6837  VMA_DEBUG_LOG(" Deleted empty allocation");
6838  pBlockToDelete->Destroy(m_hAllocator);
6839  vma_delete(m_hAllocator, pBlockToDelete);
6840  }
6841 }
6842 
// Returns the size of the largest existing block. Scans newest-to-oldest
// and stops early once the running maximum reaches m_PreferredBlockSize,
// so the returned value is either the true maximum or any block size
// >= m_PreferredBlockSize (sufficient for the caller's comparison).
// NOTE(review): returns size_t while block sizes are VkDeviceSize - on a
// 32-bit target a block larger than 4 GiB would truncate; consider
// VkDeviceSize (requires matching the declaration elsewhere).
size_t VmaBlockVector::CalcMaxBlockSize() const
{
    size_t result = 0;
    for(size_t i = m_Blocks.size(); i--; )
    {
        result = VMA_MAX((uint64_t)result, (uint64_t)m_Blocks[i]->m_Metadata.GetSize());
        if(result >= m_PreferredBlockSize)
        {
            break;
        }
    }
    return result;
}
6856 
6857 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
6858 {
6859  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
6860  {
6861  if(m_Blocks[blockIndex] == pBlock)
6862  {
6863  VmaVectorRemove(m_Blocks, blockIndex);
6864  return;
6865  }
6866  }
6867  VMA_ASSERT(0);
6868 }
6869 
6870 void VmaBlockVector::IncrementallySortBlocks()
6871 {
6872  // Bubble sort only until first swap.
6873  for(size_t i = 1; i < m_Blocks.size(); ++i)
6874  {
6875  if(m_Blocks[i - 1]->m_Metadata.GetSumFreeSize() > m_Blocks[i]->m_Metadata.GetSumFreeSize())
6876  {
6877  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
6878  return;
6879  }
6880  }
6881 }
6882 
6883 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
6884 {
6885  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
6886  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
6887  allocInfo.allocationSize = blockSize;
6888  VkDeviceMemory mem = VK_NULL_HANDLE;
6889  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
6890  if(res < 0)
6891  {
6892  return res;
6893  }
6894 
6895  // New VkDeviceMemory successfully created.
6896 
6897  // Create new Allocation for it.
6898  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
6899  pBlock->Init(
6900  m_MemoryTypeIndex,
6901  mem,
6902  allocInfo.allocationSize);
6903 
6904  m_Blocks.push_back(pBlock);
6905  if(pNewBlockIndex != VMA_NULL)
6906  {
6907  *pNewBlockIndex = m_Blocks.size() - 1;
6908  }
6909 
6910  return VK_SUCCESS;
6911 }
6912 
6913 #if VMA_STATS_STRING_ENABLED
6914 
// Writes this block vector as a JSON object. Custom pools report
// MemoryTypeIndex, BlockSize, BlockCount (Min/Max/Cur) and (if nonzero)
// FrameInUseCount; default pools report only PreferredBlockSize. In both
// cases a "Blocks" array follows with each block's detailed map.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are emitted only when they constrain the pool.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginArray();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        m_Blocks[i]->m_Metadata.PrintDetailedMap(json);
    }
    json.EndArray();

    json.EndObject();
}
6967 
6968 #endif // #if VMA_STATS_STRING_ENABLED
6969 
6970 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
6971  VmaAllocator hAllocator,
6972  uint32_t currentFrameIndex)
6973 {
6974  if(m_pDefragmentator == VMA_NULL)
6975  {
6976  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
6977  hAllocator,
6978  this,
6979  currentFrameIndex);
6980  }
6981 
6982  return m_pDefragmentator;
6983 }
6984 
// Runs defragmentation on this block vector via its defragmentator
// (no-op returning VK_SUCCESS when none was created). maxBytesToMove and
// maxAllocationsToMove are in-out budgets: the amounts actually moved are
// subtracted from them and added to pDefragmentationStats (if provided).
// Afterwards, blocks that became empty are destroyed, subject to keeping
// m_MinBlockCount blocks (a retained empty block sets m_HasEmptyBlock).
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove does not
    // disturb indices not yet visited.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_Metadata.IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_Metadata.GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
7041 
7042 void VmaBlockVector::DestroyDefragmentator()
7043 {
7044  if(m_pDefragmentator != VMA_NULL)
7045  {
7046  vma_delete(m_hAllocator, m_pDefragmentator);
7047  m_pDefragmentator = VMA_NULL;
7048  }
7049 }
7050 
7051 void VmaBlockVector::MakePoolAllocationsLost(
7052  uint32_t currentFrameIndex,
7053  size_t* pLostAllocationCount)
7054 {
7055  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7056  size_t lostAllocationCount = 0;
7057  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7058  {
7059  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7060  VMA_ASSERT(pBlock);
7061  lostAllocationCount += pBlock->m_Metadata.MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
7062  }
7063  if(pLostAllocationCount != VMA_NULL)
7064  {
7065  *pLostAllocationCount = lostAllocationCount;
7066  }
7067 }
7068 
7069 void VmaBlockVector::AddStats(VmaStats* pStats)
7070 {
7071  const uint32_t memTypeIndex = m_MemoryTypeIndex;
7072  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
7073 
7074  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
7075 
7076  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
7077  {
7078  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
7079  VMA_ASSERT(pBlock);
7080  VMA_HEAVY_ASSERT(pBlock->Validate());
7081  VmaStatInfo allocationStatInfo;
7082  pBlock->m_Metadata.CalcAllocationStatInfo(allocationStatInfo);
7083  VmaAddStatInfo(pStats->total, allocationStatInfo);
7084  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7085  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7086  }
7087 }
7088 
7090 // VmaDefragmentator members definition
7091 
// Binds this defragmentator to one block vector. Move counters start at
// zero; the allocation and per-block info lists start empty (populated via
// AddAllocation() and Defragment()).
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
}
7105 
7106 VmaDefragmentator::~VmaDefragmentator()
7107 {
7108  for(size_t i = m_Blocks.size(); i--; )
7109  {
7110  vma_delete(m_hAllocator, m_Blocks[i]);
7111  }
7112 }
7113 
7114 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
7115 {
7116  AllocationInfo allocInfo;
7117  allocInfo.m_hAllocation = hAlloc;
7118  allocInfo.m_pChanged = pChanged;
7119  m_Allocations.push_back(allocInfo);
7120 }
7121 
// Provides (via *ppMappedData) a host pointer to this block's memory for
// defragmentation copies. Preference order: an existing defragmentation
// mapping, then the block's own persistent mapping, then a fresh Map()
// whose pointer is cached in m_pMappedDataForDefragmentation and released
// later by Unmap().
VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
{
    // It has already been mapped for defragmentation.
    if(m_pMappedDataForDefragmentation)
    {
        *ppMappedData = m_pMappedDataForDefragmentation;
        return VK_SUCCESS;
    }

    // It is originally mapped.
    if(m_pBlock->GetMappedData())
    {
        *ppMappedData = m_pBlock->GetMappedData();
        return VK_SUCCESS;
    }

    // Map on first usage.
    VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    *ppMappedData = m_pMappedDataForDefragmentation;
    return res;
}
7143 
7144 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
7145 {
7146  if(m_pMappedDataForDefragmentation != VMA_NULL)
7147  {
7148  m_pBlock->Unmap(hAllocator, 1);
7149  }
7150 }
7151 
// One round of defragmentation: repeatedly takes the next candidate
// allocation from the most "source" block and tries to move it into an
// earlier ("destination") block. Returns VK_INCOMPLETE when the byte or
// allocation budget would be exceeded, VK_SUCCESS when no candidates
// remain, or a mapping error from EnsureMapping.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX; // SIZE_MAX = "re-pick the last allocation of the current block".
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_Metadata.CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Update metadata: claim the destination range, release the source.
                pDstBlockInfo->m_pBlock->m_Metadata.Alloc(dstAllocRequest, suballocType, size, allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_Metadata.FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Report the move to the caller who registered this allocation.
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
7282 
7283 VkResult VmaDefragmentator::Defragment(
7284  VkDeviceSize maxBytesToMove,
7285  uint32_t maxAllocationsToMove)
7286 {
7287  if(m_Allocations.empty())
7288  {
7289  return VK_SUCCESS;
7290  }
7291 
7292  // Create block info for each block.
7293  const size_t blockCount = m_pBlockVector->m_Blocks.size();
7294  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7295  {
7296  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
7297  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
7298  m_Blocks.push_back(pBlockInfo);
7299  }
7300 
7301  // Sort them by m_pBlock pointer value.
7302  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
7303 
7304  // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
7305  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
7306  {
7307  AllocationInfo& allocInfo = m_Allocations[blockIndex];
7308  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
7309  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7310  {
7311  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
7312  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
7313  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
7314  {
7315  (*it)->m_Allocations.push_back(allocInfo);
7316  }
7317  else
7318  {
7319  VMA_ASSERT(0);
7320  }
7321  }
7322  }
7323  m_Allocations.clear();
7324 
7325  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7326  {
7327  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
7328  pBlockInfo->CalcHasNonMovableAllocations();
7329  pBlockInfo->SortAllocationsBySizeDescecnding();
7330  }
7331 
7332  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
7333  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
7334 
7335  // Execute defragmentation rounds (the main part).
7336  VkResult result = VK_SUCCESS;
7337  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
7338  {
7339  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
7340  }
7341 
7342  // Unmap blocks that were mapped for defragmentation.
7343  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
7344  {
7345  m_Blocks[blockIndex]->Unmap(m_hAllocator);
7346  }
7347 
7348  return result;
7349 }
7350 
7351 bool VmaDefragmentator::MoveMakesSense(
7352  size_t dstBlockIndex, VkDeviceSize dstOffset,
7353  size_t srcBlockIndex, VkDeviceSize srcOffset)
7354 {
7355  if(dstBlockIndex < srcBlockIndex)
7356  {
7357  return true;
7358  }
7359  if(dstBlockIndex > srcBlockIndex)
7360  {
7361  return false;
7362  }
7363  if(dstOffset < srcOffset)
7364  {
7365  return true;
7366  }
7367  return false;
7368 }
7369 
7371 // VmaAllocator_T
7372 
// Constructs the allocator from VmaAllocatorCreateInfo:
// - caches flags (external synchronization, KHR dedicated allocation),
//   device handles and allocation callbacks in the initializer list,
// - imports Vulkan function pointers, then queries physical-device and
//   memory properties through them,
// - applies optional per-heap size limits from pCreateInfo->pHeapSizeLimit,
// - creates one default VmaBlockVector and one dedicated-allocation list
//   per memory type.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks()))
{
    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

    // Zero-initialize POD members before any of them is read below.
    memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    memset(&m_MemProps, 0, sizeof(m_MemProps));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    // Default: no size limit on any heap.
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    // Must happen before the property queries below, which go through
    // m_VulkanFunctions.
    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    // Clamp the reported heap sizes to user-provided limits so all later
    // block-size and budgeting decisions respect them.
    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    // One default block vector + one dedicated-allocation registry per memory type.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
            this,
            memTypeIndex,
            preferredBlockSize,
            0,
            SIZE_MAX,
            GetBufferImageGranularity(),
            pCreateInfo->frameInUseCount,
            false); // isCustomPool
        // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
        // because minBlockCount is 0.
        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    }
}
7447 
7448 VmaAllocator_T::~VmaAllocator_T()
7449 {
7450  VMA_ASSERT(m_Pools.empty());
7451 
7452  for(size_t i = GetMemoryTypeCount(); i--; )
7453  {
7454  vma_delete(this, m_pDedicatedAllocations[i]);
7455  vma_delete(this, m_pBlockVectors[i]);
7456  }
7457 }
7458 
// Fills m_VulkanFunctions with the Vulkan entry points the allocator calls.
// Sources, in order:
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1: addresses of statically linked
//    core functions; the KHR dedicated-allocation functions are fetched via
//    vkGetDeviceProcAddr (only when that extension is enabled).
// 2. Any non-null pointers the user supplied in pVulkanFunctions override
//    the static ones.
// Afterwards asserts that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions are not statically exported; query them from the device.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a user-supplied function pointer only when it is non-null, so user
// values override the static defaults without erasing them.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
}
7532 
7533 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
7534 {
7535  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7536  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
7537  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
7538  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
7539 }
7540 
7541 VkResult VmaAllocator_T::AllocateMemoryOfType(
7542  const VkMemoryRequirements& vkMemReq,
7543  bool dedicatedAllocation,
7544  VkBuffer dedicatedBuffer,
7545  VkImage dedicatedImage,
7546  const VmaAllocationCreateInfo& createInfo,
7547  uint32_t memTypeIndex,
7548  VmaSuballocationType suballocType,
7549  VmaAllocation* pAllocation)
7550 {
7551  VMA_ASSERT(pAllocation != VMA_NULL);
7552  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
7553 
7554  VmaAllocationCreateInfo finalCreateInfo = createInfo;
7555 
7556  // If memory type is not HOST_VISIBLE, disable MAPPED.
7557  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7558  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
7559  {
7560  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
7561  }
7562 
7563  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
7564  VMA_ASSERT(blockVector);
7565 
7566  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
7567  bool preferDedicatedMemory =
7568  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
7569  dedicatedAllocation ||
7570  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
7571  vkMemReq.size > preferredBlockSize / 2;
7572 
7573  if(preferDedicatedMemory &&
7574  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
7575  finalCreateInfo.pool == VK_NULL_HANDLE)
7576  {
7578  }
7579 
7580  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
7581  {
7582  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7583  {
7584  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7585  }
7586  else
7587  {
7588  return AllocateDedicatedMemory(
7589  vkMemReq.size,
7590  suballocType,
7591  memTypeIndex,
7592  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7593  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7594  finalCreateInfo.pUserData,
7595  dedicatedBuffer,
7596  dedicatedImage,
7597  pAllocation);
7598  }
7599  }
7600  else
7601  {
7602  VkResult res = blockVector->Allocate(
7603  VK_NULL_HANDLE, // hCurrentPool
7604  m_CurrentFrameIndex.load(),
7605  vkMemReq,
7606  finalCreateInfo,
7607  suballocType,
7608  pAllocation);
7609  if(res == VK_SUCCESS)
7610  {
7611  return res;
7612  }
7613 
7614  // 5. Try dedicated memory.
7615  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7616  {
7617  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7618  }
7619  else
7620  {
7621  res = AllocateDedicatedMemory(
7622  vkMemReq.size,
7623  suballocType,
7624  memTypeIndex,
7625  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
7626  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
7627  finalCreateInfo.pUserData,
7628  dedicatedBuffer,
7629  dedicatedImage,
7630  pAllocation);
7631  if(res == VK_SUCCESS)
7632  {
7633  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
7634  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
7635  return VK_SUCCESS;
7636  }
7637  else
7638  {
7639  // Everything failed: Return error code.
7640  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
7641  return res;
7642  }
7643  }
7644  }
7645 }
7646 
// Allocates a whole, dedicated VkDeviceMemory object for a single allocation,
// optionally maps it persistently, and registers it in m_pDedicatedAllocations.
// When the VK_KHR_dedicated_allocation extension is in use, chains
// VkMemoryDedicatedAllocateInfoKHR naming the buffer or image this memory is
// dedicated to. If vkMapMemory fails, the freshly allocated memory is freed
// before the error is returned, so no memory leaks on the error path.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

    // Chain the dedicated-allocation info only when the extension is enabled
    // and a buffer or image was given. At most one of them may be non-null.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Optionally map the whole range persistently (VMA_ALLOCATION_CREATE_MAPPED_BIT).
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the memory allocated above before returning.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
7723 
7724 void VmaAllocator_T::GetBufferMemoryRequirements(
7725  VkBuffer hBuffer,
7726  VkMemoryRequirements& memReq,
7727  bool& requiresDedicatedAllocation,
7728  bool& prefersDedicatedAllocation) const
7729 {
7730  if(m_UseKhrDedicatedAllocation)
7731  {
7732  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
7733  memReqInfo.buffer = hBuffer;
7734 
7735  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7736 
7737  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7738  memReq2.pNext = &memDedicatedReq;
7739 
7740  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7741 
7742  memReq = memReq2.memoryRequirements;
7743  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7744  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7745  }
7746  else
7747  {
7748  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
7749  requiresDedicatedAllocation = false;
7750  prefersDedicatedAllocation = false;
7751  }
7752 }
7753 
7754 void VmaAllocator_T::GetImageMemoryRequirements(
7755  VkImage hImage,
7756  VkMemoryRequirements& memReq,
7757  bool& requiresDedicatedAllocation,
7758  bool& prefersDedicatedAllocation) const
7759 {
7760  if(m_UseKhrDedicatedAllocation)
7761  {
7762  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
7763  memReqInfo.image = hImage;
7764 
7765  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
7766 
7767  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
7768  memReq2.pNext = &memDedicatedReq;
7769 
7770  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
7771 
7772  memReq = memReq2.memoryRequirements;
7773  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
7774  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
7775  }
7776  else
7777  {
7778  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
7779  requiresDedicatedAllocation = false;
7780  prefersDedicatedAllocation = false;
7781  }
7782 }
7783 
7784 VkResult VmaAllocator_T::AllocateMemory(
7785  const VkMemoryRequirements& vkMemReq,
7786  bool requiresDedicatedAllocation,
7787  bool prefersDedicatedAllocation,
7788  VkBuffer dedicatedBuffer,
7789  VkImage dedicatedImage,
7790  const VmaAllocationCreateInfo& createInfo,
7791  VmaSuballocationType suballocType,
7792  VmaAllocation* pAllocation)
7793 {
7794  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
7795  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7796  {
7797  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
7798  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7799  }
7800  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
7802  {
7803  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
7804  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7805  }
7806  if(requiresDedicatedAllocation)
7807  {
7808  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
7809  {
7810  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
7811  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7812  }
7813  if(createInfo.pool != VK_NULL_HANDLE)
7814  {
7815  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
7816  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7817  }
7818  }
7819  if((createInfo.pool != VK_NULL_HANDLE) &&
7820  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
7821  {
7822  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
7823  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7824  }
7825 
7826  if(createInfo.pool != VK_NULL_HANDLE)
7827  {
7828  return createInfo.pool->m_BlockVector.Allocate(
7829  createInfo.pool,
7830  m_CurrentFrameIndex.load(),
7831  vkMemReq,
7832  createInfo,
7833  suballocType,
7834  pAllocation);
7835  }
7836  else
7837  {
7838  // Bit mask of memory Vulkan types acceptable for this allocation.
7839  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
7840  uint32_t memTypeIndex = UINT32_MAX;
7841  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7842  if(res == VK_SUCCESS)
7843  {
7844  res = AllocateMemoryOfType(
7845  vkMemReq,
7846  requiresDedicatedAllocation || prefersDedicatedAllocation,
7847  dedicatedBuffer,
7848  dedicatedImage,
7849  createInfo,
7850  memTypeIndex,
7851  suballocType,
7852  pAllocation);
7853  // Succeeded on first try.
7854  if(res == VK_SUCCESS)
7855  {
7856  return res;
7857  }
7858  // Allocation from this memory type failed. Try other compatible memory types.
7859  else
7860  {
7861  for(;;)
7862  {
7863  // Remove old memTypeIndex from list of possibilities.
7864  memoryTypeBits &= ~(1u << memTypeIndex);
7865  // Find alternative memTypeIndex.
7866  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
7867  if(res == VK_SUCCESS)
7868  {
7869  res = AllocateMemoryOfType(
7870  vkMemReq,
7871  requiresDedicatedAllocation || prefersDedicatedAllocation,
7872  dedicatedBuffer,
7873  dedicatedImage,
7874  createInfo,
7875  memTypeIndex,
7876  suballocType,
7877  pAllocation);
7878  // Allocation from this alternative memory type succeeded.
7879  if(res == VK_SUCCESS)
7880  {
7881  return res;
7882  }
7883  // else: Allocation from this memory type failed. Try next one - next loop iteration.
7884  }
7885  // No other matching memory type index could be found.
7886  else
7887  {
7888  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
7889  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
7890  }
7891  }
7892  }
7893  }
7894  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
7895  else
7896  return res;
7897  }
7898 }
7899 
7900 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
7901 {
7902  VMA_ASSERT(allocation);
7903 
7904  if(allocation->CanBecomeLost() == false ||
7905  allocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
7906  {
7907  switch(allocation->GetType())
7908  {
7909  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
7910  {
7911  VmaBlockVector* pBlockVector = VMA_NULL;
7912  VmaPool hPool = allocation->GetPool();
7913  if(hPool != VK_NULL_HANDLE)
7914  {
7915  pBlockVector = &hPool->m_BlockVector;
7916  }
7917  else
7918  {
7919  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
7920  pBlockVector = m_pBlockVectors[memTypeIndex];
7921  }
7922  pBlockVector->Free(allocation);
7923  }
7924  break;
7925  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
7926  FreeDedicatedMemory(allocation);
7927  break;
7928  default:
7929  VMA_ASSERT(0);
7930  }
7931  }
7932 
7933  allocation->SetUserData(this, VMA_NULL);
7934  vma_delete(this, allocation);
7935 }
7936 
7937 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
7938 {
7939  // Initialize.
7940  InitStatInfo(pStats->total);
7941  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
7942  InitStatInfo(pStats->memoryType[i]);
7943  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
7944  InitStatInfo(pStats->memoryHeap[i]);
7945 
7946  // Process default pools.
7947  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7948  {
7949  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
7950  VMA_ASSERT(pBlockVector);
7951  pBlockVector->AddStats(pStats);
7952  }
7953 
7954  // Process custom pools.
7955  {
7956  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
7957  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
7958  {
7959  m_Pools[poolIndex]->GetBlockVector().AddStats(pStats);
7960  }
7961  }
7962 
7963  // Process dedicated allocations.
7964  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
7965  {
7966  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
7967  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
7968  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
7969  VMA_ASSERT(pDedicatedAllocVector);
7970  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
7971  {
7972  VmaStatInfo allocationStatInfo;
7973  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
7974  VmaAddStatInfo(pStats->total, allocationStatInfo);
7975  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
7976  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
7977  }
7978  }
7979 
7980  // Postprocess.
7981  VmaPostprocessCalcStatInfo(pStats->total);
7982  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
7983  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
7984  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
7985  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
7986 }
7987 
// 4098 == 0x1002: PCI vendor ID of Advanced Micro Devices (AMD).
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
7989 
7990 VkResult VmaAllocator_T::Defragment(
7991  VmaAllocation* pAllocations,
7992  size_t allocationCount,
7993  VkBool32* pAllocationsChanged,
7994  const VmaDefragmentationInfo* pDefragmentationInfo,
7995  VmaDefragmentationStats* pDefragmentationStats)
7996 {
7997  if(pAllocationsChanged != VMA_NULL)
7998  {
7999  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
8000  }
8001  if(pDefragmentationStats != VMA_NULL)
8002  {
8003  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
8004  }
8005 
8006  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
8007 
8008  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
8009 
8010  const size_t poolCount = m_Pools.size();
8011 
8012  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
8013  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
8014  {
8015  VmaAllocation hAlloc = pAllocations[allocIndex];
8016  VMA_ASSERT(hAlloc);
8017  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
8018  // DedicatedAlloc cannot be defragmented.
8019  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
8020  // Only HOST_VISIBLE memory types can be defragmented.
8021  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) &&
8022  // Lost allocation cannot be defragmented.
8023  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
8024  {
8025  VmaBlockVector* pAllocBlockVector = VMA_NULL;
8026 
8027  const VmaPool hAllocPool = hAlloc->GetPool();
8028  // This allocation belongs to custom pool.
8029  if(hAllocPool != VK_NULL_HANDLE)
8030  {
8031  pAllocBlockVector = &hAllocPool->GetBlockVector();
8032  }
8033  // This allocation belongs to general pool.
8034  else
8035  {
8036  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
8037  }
8038 
8039  VmaDefragmentator* const pDefragmentator = pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
8040 
8041  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
8042  &pAllocationsChanged[allocIndex] : VMA_NULL;
8043  pDefragmentator->AddAllocation(hAlloc, pChanged);
8044  }
8045  }
8046 
8047  VkResult result = VK_SUCCESS;
8048 
8049  // ======== Main processing.
8050 
8051  VkDeviceSize maxBytesToMove = SIZE_MAX;
8052  uint32_t maxAllocationsToMove = UINT32_MAX;
8053  if(pDefragmentationInfo != VMA_NULL)
8054  {
8055  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
8056  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
8057  }
8058 
8059  // Process standard memory.
8060  for(uint32_t memTypeIndex = 0;
8061  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
8062  ++memTypeIndex)
8063  {
8064  // Only HOST_VISIBLE memory types can be defragmented.
8065  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8066  {
8067  result = m_pBlockVectors[memTypeIndex]->Defragment(
8068  pDefragmentationStats,
8069  maxBytesToMove,
8070  maxAllocationsToMove);
8071  }
8072  }
8073 
8074  // Process custom pools.
8075  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
8076  {
8077  result = m_Pools[poolIndex]->GetBlockVector().Defragment(
8078  pDefragmentationStats,
8079  maxBytesToMove,
8080  maxAllocationsToMove);
8081  }
8082 
8083  // ======== Destroy defragmentators.
8084 
8085  // Process custom pools.
8086  for(size_t poolIndex = poolCount; poolIndex--; )
8087  {
8088  m_Pools[poolIndex]->GetBlockVector().DestroyDefragmentator();
8089  }
8090 
8091  // Process standard memory.
8092  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
8093  {
8094  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
8095  {
8096  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
8097  }
8098  }
8099 
8100  return result;
8101 }
8102 
// Fills *pAllocationInfo for hAllocation. For lost-capable allocations this
// also "touches" the allocation: a CAS loop bumps its last-use frame index
// up to the current frame, unless the allocation has already been lost.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation was lost: report empty info (only size and user data remain valid).
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched in the current frame: report real info.
                // pMappedData is VMA_NULL here; lost-capable allocations are
                // presumably never persistently mapped - TODO confirm against
                // the MAPPED/CAN_BECOME_LOST validation in AllocateMemory.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame; on CAS failure
                // GetLastUseFrameIndex is re-read implicitly via the updated
                // localLastUseFrameIndex on the next loop iteration.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
        // Allocation can never be lost: report its info directly.
        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
8154 
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // Returns false if the allocation is lost, true otherwise; as a side
    // effect, advances the allocation's last-use frame index to the current
    // frame for lost-capable allocations (same CAS protocol as
    // GetAllocationInfo, without filling an info struct).
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation already lost.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Successfully touched in the current frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Attempt to bump the frame index; retry on CAS failure.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
        // Allocations that cannot become lost are always valid.
        return true;
    }
}
8186 
8187 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
8188 {
8189  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u", pCreateInfo->memoryTypeIndex);
8190 
8191  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
8192 
8193  if(newCreateInfo.maxBlockCount == 0)
8194  {
8195  newCreateInfo.maxBlockCount = SIZE_MAX;
8196  }
8197  if(newCreateInfo.blockSize == 0)
8198  {
8199  newCreateInfo.blockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
8200  }
8201 
8202  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
8203 
8204  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
8205  if(res != VK_SUCCESS)
8206  {
8207  vma_delete(this, *pPool);
8208  *pPool = VMA_NULL;
8209  return res;
8210  }
8211 
8212  // Add to m_Pools.
8213  {
8214  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8215  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
8216  }
8217 
8218  return VK_SUCCESS;
8219 }
8220 
8221 void VmaAllocator_T::DestroyPool(VmaPool pool)
8222 {
8223  // Remove from m_Pools.
8224  {
8225  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
8226  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
8227  VMA_ASSERT(success && "Pool not found in Allocator.");
8228  }
8229 
8230  vma_delete(this, pool);
8231 }
8232 
// Fills *pPoolStats with statistics of the given custom pool by delegating
// to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
8237 
// Atomically stores the current frame index, used by the lost-allocation
// machinery (see GetAllocationInfo / TouchAllocation).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
8242 
// Marks eligible allocations in the given pool as lost as of the current
// frame. If pLostAllocationCount is not null, it receives the number of
// allocations that were marked lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
8251 
// Creates a dummy allocation that is permanently in the "lost" state.
// Constructed with VMA_FRAME_INDEX_LOST and userDataString=false, then
// initialized as lost — it owns no device memory.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
8257 
// Calls vkAllocateMemory, enforcing the optional per-heap size limit and
// invoking the user's device-memory allocation callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Heap has a user-imposed budget. The mutex deliberately stays held
        // across the check, the allocation, and the budget update so that
        // concurrent allocations cannot oversubscribe the limit.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Charge the allocation against the remaining budget.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Budget exhausted: report as out of device memory.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        // No limit configured for this heap.
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify the user callback only after a successful allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
8291 
// Frees device memory via vkFreeMemory, invoking the user's free callback
// first and refunding the freed size to the heap budget if one is set.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Callback is invoked before the memory is actually freed.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Return the size to the per-heap budget, if a limit is configured.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
8308 
// Maps the allocation's memory and returns a pointer adjusted to the
// allocation's offset. Lost-capable allocations cannot be mapped.
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        // Suballocation inside a block: map the whole block (reference
        // counted), then offset into it and bump the allocation's own
        // map count.
        VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
        char *pBytes = VMA_NULL;
        VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
        if(res == VK_SUCCESS)
        {
            *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
            hAllocation->BlockAllocMap();
        }
        return res;
    }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        // Dedicated allocation maps its own VkDeviceMemory directly.
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
8337 
// Reverses a previous Map() call, decrementing the relevant map reference
// counts.
void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
{
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        // Drop the allocation's own map count first, then the block's
        // reference-counted mapping.
        VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
        hAllocation->BlockAllocUnmap();
        pBlock->Unmap(this, 1);
    }
    break;
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        hAllocation->DedicatedAllocUnmap(this);
        break;
    default:
        VMA_ASSERT(0);
    }
}
8356 
8357 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
8358 {
8359  VkResult res = VK_SUCCESS;
8360  switch(hAllocation->GetType())
8361  {
8362  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8363  res = GetVulkanFunctions().vkBindBufferMemory(
8364  m_hDevice,
8365  hBuffer,
8366  hAllocation->GetMemory(),
8367  0); //memoryOffset
8368  break;
8369  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8370  {
8371  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8372  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
8373  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
8374  break;
8375  }
8376  default:
8377  VMA_ASSERT(0);
8378  }
8379  return res;
8380 }
8381 
8382 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
8383 {
8384  VkResult res = VK_SUCCESS;
8385  switch(hAllocation->GetType())
8386  {
8387  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
8388  res = GetVulkanFunctions().vkBindImageMemory(
8389  m_hDevice,
8390  hImage,
8391  hAllocation->GetMemory(),
8392  0); //memoryOffset
8393  break;
8394  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
8395  {
8396  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
8397  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
8398  res = pBlock->BindImageMemory(this, hAllocation, hImage);
8399  break;
8400  }
8401  default:
8402  VMA_ASSERT(0);
8403  }
8404  return res;
8405 }
8406 
// Destroys a dedicated allocation: removes it from the per-memory-type
// registry, unmaps it if still mapped, and frees its VkDeviceMemory.
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Registry removal is done under the per-memory-type mutex.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    // Unmap before freeing, in case the allocation was persistently mapped.
    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
8431 
8432 #if VMA_STATS_STRING_ENABLED
8433 
// Writes the detailed allocation map into the JSON document: first
// dedicated allocations grouped per memory type, then the default block
// vectors per memory type, then custom pools.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object lazily, only if at
            // least one memory type has dedicated allocations.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                json.BeginObject(true);

                json.WriteString("Type");
                json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[hAlloc->GetSuballocationType()]);

                json.WriteString("Size");
                json.WriteNumber(hAlloc->GetSize());

                const void* pUserData = hAlloc->GetUserData();
                if(pUserData != VMA_NULL)
                {
                    json.WriteString("UserData");
                    if(hAlloc->IsUserDataString())
                    {
                        json.WriteString((const char*)pUserData);
                    }
                    else
                    {
                        // Non-string user data is serialized as a pointer value.
                        json.BeginString();
                        json.ContinueString_Pointer(pUserData);
                        json.EndString();
                    }
                }

                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        // Default (non-pool) block vectors, one per memory type; the
        // "DefaultPools" object is opened lazily as above.
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    {
        // Custom pools, enumerated under the pools mutex.
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginArray();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndArray();
        }
    }
}
8536 
8537 #endif // #if VMA_STATS_STRING_ENABLED
8538 
8539 static VkResult AllocateMemoryForImage(
8540  VmaAllocator allocator,
8541  VkImage image,
8542  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8543  VmaSuballocationType suballocType,
8544  VmaAllocation* pAllocation)
8545 {
8546  VMA_ASSERT(allocator && (image != VK_NULL_HANDLE) && pAllocationCreateInfo && pAllocation);
8547 
8548  VkMemoryRequirements vkMemReq = {};
8549  bool requiresDedicatedAllocation = false;
8550  bool prefersDedicatedAllocation = false;
8551  allocator->GetImageMemoryRequirements(image, vkMemReq,
8552  requiresDedicatedAllocation, prefersDedicatedAllocation);
8553 
8554  return allocator->AllocateMemory(
8555  vkMemReq,
8556  requiresDedicatedAllocation,
8557  prefersDedicatedAllocation,
8558  VK_NULL_HANDLE, // dedicatedBuffer
8559  image, // dedicatedImage
8560  *pAllocationCreateInfo,
8561  suballocType,
8562  pAllocation);
8563 }
8564 
8566 // Public interface
8567 
8568 VkResult vmaCreateAllocator(
8569  const VmaAllocatorCreateInfo* pCreateInfo,
8570  VmaAllocator* pAllocator)
8571 {
8572  VMA_ASSERT(pCreateInfo && pAllocator);
8573  VMA_DEBUG_LOG("vmaCreateAllocator");
8574  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
8575  return VK_SUCCESS;
8576 }
8577 
// Destroys the allocator. Destroying a null handle is a no-op.
void vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        // Copy the callbacks out first: the allocator object (which owns the
        // original copy) is freed through these very callbacks, so they must
        // outlive it.
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
        vma_delete(&allocationCallbacks, allocator);
    }
}
8588 
8590  VmaAllocator allocator,
8591  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
8592 {
8593  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
8594  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
8595 }
8596 
8598  VmaAllocator allocator,
8599  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
8600 {
8601  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
8602  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
8603 }
8604 
8606  VmaAllocator allocator,
8607  uint32_t memoryTypeIndex,
8608  VkMemoryPropertyFlags* pFlags)
8609 {
8610  VMA_ASSERT(allocator && pFlags);
8611  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
8612  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
8613 }
8614 
8616  VmaAllocator allocator,
8617  uint32_t frameIndex)
8618 {
8619  VMA_ASSERT(allocator);
8620  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
8621 
8622  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8623 
8624  allocator->SetCurrentFrameIndex(frameIndex);
8625 }
8626 
8627 void vmaCalculateStats(
8628  VmaAllocator allocator,
8629  VmaStats* pStats)
8630 {
8631  VMA_ASSERT(allocator && pStats);
8632  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8633  allocator->CalculateStats(pStats);
8634 }
8635 
8636 #if VMA_STATS_STRING_ENABLED
8637 
// Builds a null-terminated JSON statistics string into *ppStatsString.
// The string is allocated with the allocator's CPU callbacks and must be
// released with vmaFreeStatsString. When detailedMap is VK_TRUE, the full
// per-allocation map is appended.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the JSON writer flushes into sb before the string
        // is copied out below.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, with its memory types nested inside.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap stats only if the heap actually has blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Memory types belonging to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a null-terminated heap string owned
    // by the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
8745 
8746 void vmaFreeStatsString(
8747  VmaAllocator allocator,
8748  char* pStatsString)
8749 {
8750  if(pStatsString != VMA_NULL)
8751  {
8752  VMA_ASSERT(allocator);
8753  size_t len = strlen(pStatsString);
8754  vma_delete_array(allocator, pStatsString, len + 1);
8755  }
8756 }
8757 
8758 #endif // #if VMA_STATS_STRING_ENABLED
8759 
8760 /*
8761 This function is not protected by any mutex because it just reads immutable data.
8762 */
8763 VkResult vmaFindMemoryTypeIndex(
8764  VmaAllocator allocator,
8765  uint32_t memoryTypeBits,
8766  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8767  uint32_t* pMemoryTypeIndex)
8768 {
8769  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8770  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8771  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8772 
8773  if(pAllocationCreateInfo->memoryTypeBits != 0)
8774  {
8775  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
8776  }
8777 
8778  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
8779  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
8780 
8781  // Convert usage to requiredFlags and preferredFlags.
8782  switch(pAllocationCreateInfo->usage)
8783  {
8785  break;
8787  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8788  break;
8790  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
8791  break;
8793  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8794  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
8795  break;
8797  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8798  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
8799  break;
8800  default:
8801  break;
8802  }
8803 
8804  *pMemoryTypeIndex = UINT32_MAX;
8805  uint32_t minCost = UINT32_MAX;
8806  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
8807  memTypeIndex < allocator->GetMemoryTypeCount();
8808  ++memTypeIndex, memTypeBit <<= 1)
8809  {
8810  // This memory type is acceptable according to memoryTypeBits bitmask.
8811  if((memTypeBit & memoryTypeBits) != 0)
8812  {
8813  const VkMemoryPropertyFlags currFlags =
8814  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
8815  // This memory type contains requiredFlags.
8816  if((requiredFlags & ~currFlags) == 0)
8817  {
8818  // Calculate cost as number of bits from preferredFlags not present in this memory type.
8819  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
8820  // Remember memory type with lowest cost.
8821  if(currCost < minCost)
8822  {
8823  *pMemoryTypeIndex = memTypeIndex;
8824  if(currCost == 0)
8825  {
8826  return VK_SUCCESS;
8827  }
8828  minCost = currCost;
8829  }
8830  }
8831  }
8832  }
8833  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
8834 }
8835 
8837  VmaAllocator allocator,
8838  const VkBufferCreateInfo* pBufferCreateInfo,
8839  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8840  uint32_t* pMemoryTypeIndex)
8841 {
8842  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8843  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
8844  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8845  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8846 
8847  const VkDevice hDev = allocator->m_hDevice;
8848  VkBuffer hBuffer = VK_NULL_HANDLE;
8849  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
8850  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
8851  if(res == VK_SUCCESS)
8852  {
8853  VkMemoryRequirements memReq = {};
8854  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
8855  hDev, hBuffer, &memReq);
8856 
8857  res = vmaFindMemoryTypeIndex(
8858  allocator,
8859  memReq.memoryTypeBits,
8860  pAllocationCreateInfo,
8861  pMemoryTypeIndex);
8862 
8863  allocator->GetVulkanFunctions().vkDestroyBuffer(
8864  hDev, hBuffer, allocator->GetAllocationCallbacks());
8865  }
8866  return res;
8867 }
8868 
8870  VmaAllocator allocator,
8871  const VkImageCreateInfo* pImageCreateInfo,
8872  const VmaAllocationCreateInfo* pAllocationCreateInfo,
8873  uint32_t* pMemoryTypeIndex)
8874 {
8875  VMA_ASSERT(allocator != VK_NULL_HANDLE);
8876  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
8877  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
8878  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
8879 
8880  const VkDevice hDev = allocator->m_hDevice;
8881  VkImage hImage = VK_NULL_HANDLE;
8882  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
8883  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
8884  if(res == VK_SUCCESS)
8885  {
8886  VkMemoryRequirements memReq = {};
8887  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
8888  hDev, hImage, &memReq);
8889 
8890  res = vmaFindMemoryTypeIndex(
8891  allocator,
8892  memReq.memoryTypeBits,
8893  pAllocationCreateInfo,
8894  pMemoryTypeIndex);
8895 
8896  allocator->GetVulkanFunctions().vkDestroyImage(
8897  hDev, hImage, allocator->GetAllocationCallbacks());
8898  }
8899  return res;
8900 }
8901 
8902 VkResult vmaCreatePool(
8903  VmaAllocator allocator,
8904  const VmaPoolCreateInfo* pCreateInfo,
8905  VmaPool* pPool)
8906 {
8907  VMA_ASSERT(allocator && pCreateInfo && pPool);
8908 
8909  VMA_DEBUG_LOG("vmaCreatePool");
8910 
8911  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8912 
8913  return allocator->CreatePool(pCreateInfo, pPool);
8914 }
8915 
8916 void vmaDestroyPool(
8917  VmaAllocator allocator,
8918  VmaPool pool)
8919 {
8920  VMA_ASSERT(allocator);
8921 
8922  if(pool == VK_NULL_HANDLE)
8923  {
8924  return;
8925  }
8926 
8927  VMA_DEBUG_LOG("vmaDestroyPool");
8928 
8929  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8930 
8931  allocator->DestroyPool(pool);
8932 }
8933 
8934 void vmaGetPoolStats(
8935  VmaAllocator allocator,
8936  VmaPool pool,
8937  VmaPoolStats* pPoolStats)
8938 {
8939  VMA_ASSERT(allocator && pool && pPoolStats);
8940 
8941  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8942 
8943  allocator->GetPoolStats(pool, pPoolStats);
8944 }
8945 
8947  VmaAllocator allocator,
8948  VmaPool pool,
8949  size_t* pLostAllocationCount)
8950 {
8951  VMA_ASSERT(allocator && pool);
8952 
8953  VMA_DEBUG_GLOBAL_MUTEX_LOCK
8954 
8955  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
8956 }
8957 
// General-purpose allocation entry point: allocates memory for explicitly
// given VkMemoryRequirements, with no dedicated-allocation hints. Optionally
// fills *pAllocationInfo on success.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);

    // Info is only meaningful (and *pAllocation only valid) on success.
    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
8988 
8990  VmaAllocator allocator,
8991  VkBuffer buffer,
8992  const VmaAllocationCreateInfo* pCreateInfo,
8993  VmaAllocation* pAllocation,
8994  VmaAllocationInfo* pAllocationInfo)
8995 {
8996  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
8997 
8998  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
8999 
9000  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9001 
9002  VkMemoryRequirements vkMemReq = {};
9003  bool requiresDedicatedAllocation = false;
9004  bool prefersDedicatedAllocation = false;
9005  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
9006  requiresDedicatedAllocation,
9007  prefersDedicatedAllocation);
9008 
9009  VkResult result = allocator->AllocateMemory(
9010  vkMemReq,
9011  requiresDedicatedAllocation,
9012  prefersDedicatedAllocation,
9013  buffer, // dedicatedBuffer
9014  VK_NULL_HANDLE, // dedicatedImage
9015  *pCreateInfo,
9016  VMA_SUBALLOCATION_TYPE_BUFFER,
9017  pAllocation);
9018 
9019  if(pAllocationInfo && result == VK_SUCCESS)
9020  {
9021  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9022  }
9023 
9024  return result;
9025 }
9026 
9027 VkResult vmaAllocateMemoryForImage(
9028  VmaAllocator allocator,
9029  VkImage image,
9030  const VmaAllocationCreateInfo* pCreateInfo,
9031  VmaAllocation* pAllocation,
9032  VmaAllocationInfo* pAllocationInfo)
9033 {
9034  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
9035 
9036  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
9037 
9038  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9039 
9040  VkResult result = AllocateMemoryForImage(
9041  allocator,
9042  image,
9043  pCreateInfo,
9044  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
9045  pAllocation);
9046 
9047  if(pAllocationInfo && result == VK_SUCCESS)
9048  {
9049  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9050  }
9051 
9052  return result;
9053 }
9054 
9055 void vmaFreeMemory(
9056  VmaAllocator allocator,
9057  VmaAllocation allocation)
9058 {
9059  VMA_ASSERT(allocator && allocation);
9060 
9061  VMA_DEBUG_LOG("vmaFreeMemory");
9062 
9063  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9064 
9065  allocator->FreeMemory(allocation);
9066 }
9067 
9069  VmaAllocator allocator,
9070  VmaAllocation allocation,
9071  VmaAllocationInfo* pAllocationInfo)
9072 {
9073  VMA_ASSERT(allocator && allocation && pAllocationInfo);
9074 
9075  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9076 
9077  allocator->GetAllocationInfo(allocation, pAllocationInfo);
9078 }
9079 
9080 VkBool32 vmaTouchAllocation(
9081  VmaAllocator allocator,
9082  VmaAllocation allocation)
9083 {
9084  VMA_ASSERT(allocator && allocation);
9085 
9086  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9087 
9088  return allocator->TouchAllocation(allocation);
9089 }
9090 
9092  VmaAllocator allocator,
9093  VmaAllocation allocation,
9094  void* pUserData)
9095 {
9096  VMA_ASSERT(allocator && allocation);
9097 
9098  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9099 
9100  allocation->SetUserData(allocator, pUserData);
9101 }
9102 
9104  VmaAllocator allocator,
9105  VmaAllocation* pAllocation)
9106 {
9107  VMA_ASSERT(allocator && pAllocation);
9108 
9109  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
9110 
9111  allocator->CreateLostAllocation(pAllocation);
9112 }
9113 
9114 VkResult vmaMapMemory(
9115  VmaAllocator allocator,
9116  VmaAllocation allocation,
9117  void** ppData)
9118 {
9119  VMA_ASSERT(allocator && allocation && ppData);
9120 
9121  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9122 
9123  return allocator->Map(allocation, ppData);
9124 }
9125 
9126 void vmaUnmapMemory(
9127  VmaAllocator allocator,
9128  VmaAllocation allocation)
9129 {
9130  VMA_ASSERT(allocator && allocation);
9131 
9132  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9133 
9134  allocator->Unmap(allocation);
9135 }
9136 
9137 VkResult vmaDefragment(
9138  VmaAllocator allocator,
9139  VmaAllocation* pAllocations,
9140  size_t allocationCount,
9141  VkBool32* pAllocationsChanged,
9142  const VmaDefragmentationInfo *pDefragmentationInfo,
9143  VmaDefragmentationStats* pDefragmentationStats)
9144 {
9145  VMA_ASSERT(allocator && pAllocations);
9146 
9147  VMA_DEBUG_LOG("vmaDefragment");
9148 
9149  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9150 
9151  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
9152 }
9153 
9154 VkResult vmaBindBufferMemory(
9155  VmaAllocator allocator,
9156  VmaAllocation allocation,
9157  VkBuffer buffer)
9158 {
9159  VMA_ASSERT(allocator && allocation && buffer);
9160 
9161  VMA_DEBUG_LOG("vmaBindBufferMemory");
9162 
9163  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9164 
9165  return allocator->BindBufferMemory(allocation, buffer);
9166 }
9167 
9168 VkResult vmaBindImageMemory(
9169  VmaAllocator allocator,
9170  VmaAllocation allocation,
9171  VkImage image)
9172 {
9173  VMA_ASSERT(allocator && allocation && image);
9174 
9175  VMA_DEBUG_LOG("vmaBindImageMemory");
9176 
9177  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9178 
9179  return allocator->BindImageMemory(allocation, image);
9180 }
9181 
9182 VkResult vmaCreateBuffer(
9183  VmaAllocator allocator,
9184  const VkBufferCreateInfo* pBufferCreateInfo,
9185  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9186  VkBuffer* pBuffer,
9187  VmaAllocation* pAllocation,
9188  VmaAllocationInfo* pAllocationInfo)
9189 {
9190  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
9191 
9192  VMA_DEBUG_LOG("vmaCreateBuffer");
9193 
9194  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9195 
9196  *pBuffer = VK_NULL_HANDLE;
9197  *pAllocation = VK_NULL_HANDLE;
9198 
9199  // 1. Create VkBuffer.
9200  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
9201  allocator->m_hDevice,
9202  pBufferCreateInfo,
9203  allocator->GetAllocationCallbacks(),
9204  pBuffer);
9205  if(res >= 0)
9206  {
9207  // 2. vkGetBufferMemoryRequirements.
9208  VkMemoryRequirements vkMemReq = {};
9209  bool requiresDedicatedAllocation = false;
9210  bool prefersDedicatedAllocation = false;
9211  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
9212  requiresDedicatedAllocation, prefersDedicatedAllocation);
9213 
9214  // Make sure alignment requirements for specific buffer usages reported
9215  // in Physical Device Properties are included in alignment reported by memory requirements.
9216  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
9217  {
9218  VMA_ASSERT(vkMemReq.alignment %
9219  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
9220  }
9221  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
9222  {
9223  VMA_ASSERT(vkMemReq.alignment %
9224  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
9225  }
9226  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
9227  {
9228  VMA_ASSERT(vkMemReq.alignment %
9229  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
9230  }
9231 
9232  // 3. Allocate memory using allocator.
9233  res = allocator->AllocateMemory(
9234  vkMemReq,
9235  requiresDedicatedAllocation,
9236  prefersDedicatedAllocation,
9237  *pBuffer, // dedicatedBuffer
9238  VK_NULL_HANDLE, // dedicatedImage
9239  *pAllocationCreateInfo,
9240  VMA_SUBALLOCATION_TYPE_BUFFER,
9241  pAllocation);
9242  if(res >= 0)
9243  {
9244  // 3. Bind buffer with memory.
9245  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
9246  if(res >= 0)
9247  {
9248  // All steps succeeded.
9249  if(pAllocationInfo != VMA_NULL)
9250  {
9251  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9252  }
9253  return VK_SUCCESS;
9254  }
9255  allocator->FreeMemory(*pAllocation);
9256  *pAllocation = VK_NULL_HANDLE;
9257  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
9258  *pBuffer = VK_NULL_HANDLE;
9259  return res;
9260  }
9261  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
9262  *pBuffer = VK_NULL_HANDLE;
9263  return res;
9264  }
9265  return res;
9266 }
9267 
9268 void vmaDestroyBuffer(
9269  VmaAllocator allocator,
9270  VkBuffer buffer,
9271  VmaAllocation allocation)
9272 {
9273  if(buffer != VK_NULL_HANDLE)
9274  {
9275  VMA_ASSERT(allocator);
9276 
9277  VMA_DEBUG_LOG("vmaDestroyBuffer");
9278 
9279  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9280 
9281  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
9282 
9283  allocator->FreeMemory(allocation);
9284  }
9285 }
9286 
9287 VkResult vmaCreateImage(
9288  VmaAllocator allocator,
9289  const VkImageCreateInfo* pImageCreateInfo,
9290  const VmaAllocationCreateInfo* pAllocationCreateInfo,
9291  VkImage* pImage,
9292  VmaAllocation* pAllocation,
9293  VmaAllocationInfo* pAllocationInfo)
9294 {
9295  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
9296 
9297  VMA_DEBUG_LOG("vmaCreateImage");
9298 
9299  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9300 
9301  *pImage = VK_NULL_HANDLE;
9302  *pAllocation = VK_NULL_HANDLE;
9303 
9304  // 1. Create VkImage.
9305  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
9306  allocator->m_hDevice,
9307  pImageCreateInfo,
9308  allocator->GetAllocationCallbacks(),
9309  pImage);
9310  if(res >= 0)
9311  {
9312  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
9313  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
9314  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
9315 
9316  // 2. Allocate memory using allocator.
9317  res = AllocateMemoryForImage(allocator, *pImage, pAllocationCreateInfo, suballocType, pAllocation);
9318  if(res >= 0)
9319  {
9320  // 3. Bind image with memory.
9321  res = allocator->BindImageMemory(*pAllocation, *pImage);
9322  if(res >= 0)
9323  {
9324  // All steps succeeded.
9325  if(pAllocationInfo != VMA_NULL)
9326  {
9327  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
9328  }
9329  return VK_SUCCESS;
9330  }
9331  allocator->FreeMemory(*pAllocation);
9332  *pAllocation = VK_NULL_HANDLE;
9333  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9334  *pImage = VK_NULL_HANDLE;
9335  return res;
9336  }
9337  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
9338  *pImage = VK_NULL_HANDLE;
9339  return res;
9340  }
9341  return res;
9342 }
9343 
9344 void vmaDestroyImage(
9345  VmaAllocator allocator,
9346  VkImage image,
9347  VmaAllocation allocation)
9348 {
9349  if(image != VK_NULL_HANDLE)
9350  {
9351  VMA_ASSERT(allocator);
9352 
9353  VMA_DEBUG_LOG("vmaDestroyImage");
9354 
9355  VMA_DEBUG_GLOBAL_MUTEX_LOCK
9356 
9357  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
9358 
9359  allocator->FreeMemory(allocation);
9360  }
9361 }
9362 
9363 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1153
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:1415
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1178
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Compacts memory by moving allocations.
Represents single memory allocation.
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1163
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1372
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1157
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:1745
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1175
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:1944
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:1591
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:1645
Definition: vk_mem_alloc.h:1452
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1146
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:1490
Definition: vk_mem_alloc.h:1399
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1187
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you define VMA_STATIC_VULKAN_FUNCTIONS to 1.
Definition: vk_mem_alloc.h:1240
Description of an Allocator to be created.
Definition: vk_mem_alloc.h:1172
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:1403
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1305
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1160
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1304
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR
Definition: vk_mem_alloc.h:1168