├── .gitignore
├── LICENSE
├── README.md
├── WebAssemblySample
│   ├── GameAllocator.css
│   ├── GameAllocator.wasm
│   ├── build-debug.bat
│   ├── build-linux.sh
│   ├── build-windows.bat
│   ├── index.html
│   └── mem.js
├── Win32Sample
│   ├── Win32.cpp
│   ├── Win32.png
│   ├── Win32.sln
│   ├── Win32.vcxproj
│   ├── Win32.vcxproj.filters
│   └── Win32Small.png
├── mem.cpp
├── mem.h
├── mem.js
└── notes.md
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.rsuser
8 | *.suo
9 | *.user
10 | *.userosscache
11 | *.sln.docstates
12 |
13 | # User-specific files (MonoDevelop/Xamarin Studio)
14 | *.userprefs
15 |
16 | # Mono auto generated files
17 | mono_crash.*
18 |
19 | # Build results
20 | [Dd]ebug/
21 | [Dd]ebugPublic/
22 | [Rr]elease/
23 | [Rr]eleases/
24 | x64/
25 | x86/
26 | [Aa][Rr][Mm]/
27 | [Aa][Rr][Mm]64/
28 | bld/
29 | [Bb]in/
30 | [Oo]bj/
31 | [Ll]og/
32 | [Ll]ogs/
33 |
34 | # Visual Studio 2015/2017 cache/options directory
35 | .vs/
36 | # Uncomment if you have tasks that create the project's static files in wwwroot
37 | #wwwroot/
38 |
39 | # Visual Studio 2017 auto generated files
40 | Generated\ Files/
41 |
42 | # MSTest test Results
43 | [Tt]est[Rr]esult*/
44 | [Bb]uild[Ll]og.*
45 |
46 | # NUnit
47 | *.VisualState.xml
48 | TestResult.xml
49 | nunit-*.xml
50 |
51 | # Build Results of an ATL Project
52 | [Dd]ebugPS/
53 | [Rr]eleasePS/
54 | dlldata.c
55 |
56 | # Benchmark Results
57 | BenchmarkDotNet.Artifacts/
58 |
59 | # .NET Core
60 | project.lock.json
61 | project.fragment.lock.json
62 | artifacts/
63 |
64 | # StyleCop
65 | StyleCopReport.xml
66 |
67 | # Files built by Visual Studio
68 | *_i.c
69 | *_p.c
70 | *_h.h
71 | *.ilk
72 | *.meta
73 | *.obj
74 | *.iobj
75 | *.pch
76 | *.pdb
77 | *.ipdb
78 | *.pgc
79 | *.pgd
80 | *.rsp
81 | *.sbr
82 | *.tlb
83 | *.tli
84 | *.tlh
85 | *.tmp
86 | *.tmp_proj
87 | *_wpftmp.csproj
88 | *.log
89 | *.vspscc
90 | *.vssscc
91 | .builds
92 | *.pidb
93 | *.svclog
94 | *.scc
95 |
96 | # Chutzpah Test files
97 | _Chutzpah*
98 |
99 | # Visual C++ cache files
100 | ipch/
101 | *.aps
102 | *.ncb
103 | *.opendb
104 | *.opensdf
105 | *.sdf
106 | *.cachefile
107 | *.VC.db
108 | *.VC.VC.opendb
109 |
110 | # Visual Studio profiler
111 | *.psess
112 | *.vsp
113 | *.vspx
114 | *.sap
115 |
116 | # Visual Studio Trace Files
117 | *.e2e
118 |
119 | # TFS 2012 Local Workspace
120 | $tf/
121 |
122 | # Guidance Automation Toolkit
123 | *.gpState
124 |
125 | # ReSharper is a .NET coding add-in
126 | _ReSharper*/
127 | *.[Rr]e[Ss]harper
128 | *.DotSettings.user
129 |
130 | # TeamCity is a build add-in
131 | _TeamCity*
132 |
133 | # DotCover is a Code Coverage Tool
134 | *.dotCover
135 |
136 | # AxoCover is a Code Coverage Tool
137 | .axoCover/*
138 | !.axoCover/settings.json
139 |
140 | # Visual Studio code coverage results
141 | *.coverage
142 | *.coveragexml
143 |
144 | # NCrunch
145 | _NCrunch_*
146 | .*crunch*.local.xml
147 | nCrunchTemp_*
148 |
149 | # MightyMoose
150 | *.mm.*
151 | AutoTest.Net/
152 |
153 | # Web workbench (sass)
154 | .sass-cache/
155 |
156 | # Installshield output folder
157 | [Ee]xpress/
158 |
159 | # DocProject is a documentation generator add-in
160 | DocProject/buildhelp/
161 | DocProject/Help/*.HxT
162 | DocProject/Help/*.HxC
163 | DocProject/Help/*.hhc
164 | DocProject/Help/*.hhk
165 | DocProject/Help/*.hhp
166 | DocProject/Help/Html2
167 | DocProject/Help/html
168 |
169 | # Click-Once directory
170 | publish/
171 |
172 | # Publish Web Output
173 | *.[Pp]ublish.xml
174 | *.azurePubxml
175 | # Note: Comment the next line if you want to checkin your web deploy settings,
176 | # but database connection strings (with potential passwords) will be unencrypted
177 | *.pubxml
178 | *.publishproj
179 |
180 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
181 | # checkin your Azure Web App publish settings, but sensitive information contained
182 | # in these scripts will be unencrypted
183 | PublishScripts/
184 |
185 | # NuGet Packages
186 | *.nupkg
187 | # NuGet Symbol Packages
188 | *.snupkg
189 | # The packages folder can be ignored because of Package Restore
190 | **/[Pp]ackages/*
191 | # except build/, which is used as an MSBuild target.
192 | !**/[Pp]ackages/build/
193 | # Uncomment if necessary however generally it will be regenerated when needed
194 | #!**/[Pp]ackages/repositories.config
195 | # NuGet v3's project.json files produces more ignorable files
196 | *.nuget.props
197 | *.nuget.targets
198 |
199 | # Microsoft Azure Build Output
200 | csx/
201 | *.build.csdef
202 |
203 | # Microsoft Azure Emulator
204 | ecf/
205 | rcf/
206 |
207 | # Windows Store app package directories and files
208 | AppPackages/
209 | BundleArtifacts/
210 | Package.StoreAssociation.xml
211 | _pkginfo.txt
212 | *.appx
213 | *.appxbundle
214 | *.appxupload
215 |
216 | # Visual Studio cache files
217 | # files ending in .cache can be ignored
218 | *.[Cc]ache
219 | # but keep track of directories ending in .cache
220 | !?*.[Cc]ache/
221 |
222 | # Others
223 | ClientBin/
224 | ~$*
225 | *~
226 | *.dbmdl
227 | *.dbproj.schemaview
228 | *.jfm
229 | *.pfx
230 | *.publishsettings
231 | orleans.codegen.cs
232 |
233 | # Including strong name files can present a security risk
234 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
235 | #*.snk
236 |
237 | # Since there are multiple workflows, uncomment next line to ignore bower_components
238 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
239 | #bower_components/
240 |
241 | # RIA/Silverlight projects
242 | Generated_Code/
243 |
244 | # Backup & report files from converting an old project file
245 | # to a newer Visual Studio version. Backup files are not needed,
246 | # because we have git ;-)
247 | _UpgradeReport_Files/
248 | Backup*/
249 | UpgradeLog*.XML
250 | UpgradeLog*.htm
251 | ServiceFabricBackup/
252 | *.rptproj.bak
253 |
254 | # SQL Server files
255 | *.mdf
256 | *.ldf
257 | *.ndf
258 |
259 | # Business Intelligence projects
260 | *.rdl.data
261 | *.bim.layout
262 | *.bim_*.settings
263 | *.rptproj.rsuser
264 | *- [Bb]ackup.rdl
265 | *- [Bb]ackup ([0-9]).rdl
266 | *- [Bb]ackup ([0-9][0-9]).rdl
267 |
268 | # Microsoft Fakes
269 | FakesAssemblies/
270 |
271 | # GhostDoc plugin setting file
272 | *.GhostDoc.xml
273 |
274 | # Node.js Tools for Visual Studio
275 | .ntvs_analysis.dat
276 | node_modules/
277 |
278 | # Visual Studio 6 build log
279 | *.plg
280 |
281 | # Visual Studio 6 workspace options file
282 | *.opt
283 |
284 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
285 | *.vbw
286 |
287 | # Visual Studio LightSwitch build output
288 | **/*.HTMLClient/GeneratedArtifacts
289 | **/*.DesktopClient/GeneratedArtifacts
290 | **/*.DesktopClient/ModelManifest.xml
291 | **/*.Server/GeneratedArtifacts
292 | **/*.Server/ModelManifest.xml
293 | _Pvt_Extensions
294 |
295 | # Paket dependency manager
296 | .paket/paket.exe
297 | paket-files/
298 |
299 | # FAKE - F# Make
300 | .fake/
301 |
302 | # CodeRush personal settings
303 | .cr/personal
304 |
305 | # Python Tools for Visual Studio (PTVS)
306 | __pycache__/
307 | *.pyc
308 |
309 | # Cake - Uncomment if you are using it
310 | # tools/**
311 | # !tools/packages.config
312 |
313 | # Tabs Studio
314 | *.tss
315 |
316 | # Telerik's JustMock configuration file
317 | *.jmconfig
318 |
319 | # BizTalk build output
320 | *.btp.cs
321 | *.btm.cs
322 | *.odx.cs
323 | *.xsd.cs
324 |
325 | # OpenCover UI analysis results
326 | OpenCover/
327 |
328 | # Azure Stream Analytics local run output
329 | ASALocalRun/
330 |
331 | # MSBuild Binary and Structured Log
332 | *.binlog
333 |
334 | # NVidia Nsight GPU debugger configuration file
335 | *.nvuser
336 |
337 | # MFractors (Xamarin productivity tool) working folder
338 | .mfractor/
339 |
340 | # Local History for Visual Studio
341 | .localhistory/
342 |
343 | # BeatPulse healthcheck temp database
344 | healthchecksdb
345 |
346 | # Backup folder for Package Reference Convert tool in Visual Studio 2017
347 | MigrationBackup/
348 |
349 | # Ionide (cross platform F# VS Code tools) working folder
350 | .ionide/
351 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Gabor Szauer
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Game Allocator
2 |
3 | [Game Allocator](https://gabormakesgames.com/blog_memory.html) is a generic memory manager intended for games, embedded devices, and WebAssembly. Given a large array of memory, the library provides functions to allocate and release that memory, similar to malloc / free.
4 |
5 | The memory will be broken up into pages (4 KiB by default) and tracked at page granularity.
6 | A sub-allocator is provided which breaks a page up into a fast free list for smaller allocations.
7 |
8 | 
9 |
10 | ## Usage
11 |
12 | Let's assume you have a ```void*``` to some large area of memory and know how large that area is in bytes. Call the ```Memory::Initialize``` function to create an allocator. The first two arguments are the memory and its size; the third argument is the page size with which the memory should be managed. The default page size is 4 KiB. The pointer passed to ```Memory::Initialize``` should be 8-byte aligned, and the size of the memory should be a multiple of the ```pageSize``` argument.
13 |
14 | The ```Memory::AlignAndTrim``` helper function will align a region of memory so it's ready for ```Memory::Initialize```. This function modifies the ```memory``` and ```size``` variables that are passed to it. ```Memory::AlignAndTrim``` returns the number of bytes lost.
15 |
16 | You can allocate memory with the allocator's ```Allocate``` function, and release memory with its ```Release``` function. Allocations that don't specify an alignment can take advantage of a faster pool allocator. The allocator struct also provides ```New``` and ```Delete``` methods to call constructors and destructors, similar to new and delete. ```New``` is set up to forward up to 3 arguments; adding additional arguments is trivial.
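
As a rough sketch of the ```New``` / ```Delete``` pair: this assumes ```New``` takes the type as a template argument and forwards the constructor arguments, ```Particle``` is a made-up example type, and ```allocator``` is the pointer returned by ```Memory::Initialize```.

```
// Illustrative only: "Particle" is not part of the library.
struct Particle {
    Particle(float x, float y) : x(x), y(y) { }
    float x;
    float y;
};

Particle* p = allocator->New<Particle>(1.0f, 2.0f); // constructs in allocator-owned memory
allocator->Delete(p);                               // runs ~Particle, then releases the memory
```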
17 |
18 | When you are finished with an allocator, clean it up by calling ```Memory::Shutdown```. The shutdown function will assert in debug builds if there are any memory leaks.
19 |
20 | # Example
21 |
22 | ```
23 | void run() {
24 |     // Declare how much memory to use
25 |     unsigned int size = MB(512);
26 | 
27 |     // Allocate memory from the operating system
28 |     LPVOID memory = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); // Windows
29 | 
30 |     // Initialize the global allocator
31 |     u32 lost = Memory::AlignAndTrim(&memory, &size, Memory::AllocatorAlignment, Memory::DefaultPageSize);
32 |     Memory::Allocator* allocator = Memory::Initialize(memory, size, Memory::DefaultPageSize);
33 | 
34 |     // Allocate & release memory (could also call new / delete)
35 |     int* number = (int*)allocator->Allocate(sizeof(int)); // Only the number of bytes is required
36 |     allocator->Release(number); // Only the void* is required
37 | 
38 |     // Cleanup the global allocator
39 |     Memory::Shutdown(allocator);
40 | 
41 |     // Release memory back to operating system
42 |     VirtualFree(memory, 0, MEM_RELEASE);
43 | }
44 | ```
45 |
46 | # Compile flags
47 |
48 | * ```MEM_FIRST_FIT```: This affects how fast memory is allocated. If it's set, every allocation searches for the first available page starting from the beginning of the memory. If it's not set, an allocation header is maintained; it's advanced with each allocation, and new allocations search for free memory starting from it.
49 | * ```MEM_CLEAR_ON_ALLOC```: When set, memory will be cleared to 0 before being returned from ```Memory::Allocate```. If both clear and debug on alloc are set, clear takes precedence.
50 | * ```MEM_DEBUG_ON_ALLOC```: If set, full page allocations will fill the padding of the page with "```-MEMORY```".
51 | * ```MEM_USE_SUBALLOCATORS```: If set, small allocations will be made using a free list allocator. There are free list allocators for 64, 128, 256, 512, 1024, and 2048 byte allocations. Only allocations that don't specify an alignment can use the fast free list allocator. The sub-allocator provides better page utilization; for example, a 4096 byte page can hold 32 separate 128 byte allocations.
52 | * ```MEM_TRACK_LOCATION```: If set, a ```const char*``` will be added to ```Memory::Allocation``` which tracks the ```__LINE__``` and ```__FILE__``` of each allocation. Setting this bit will add 8 bytes to the ```Memory::Allocation``` struct.
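
These flags need to be visible when ```mem.cpp``` is compiled, so the simplest place to set them is the compiler command line. As an illustration only (the shipped ```WebAssemblySample/build-debug.bat``` defines just the ```WASM32``` and ```DEBUG``` macros), enabling the sub-allocators and location tracking would mean adding two more ```-D``` lines to that clang call:

```
-D MEM_USE_SUBALLOCATORS=1 ^
-D MEM_TRACK_LOCATION=1 ^
```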
53 |
54 | # Debugging
55 |
56 | There are a few debug functions exposed in the ```Memory::Debug``` namespace. When an allocator is initialized, the page immediately before the first allocatable page is reserved as a debug page. You can fill this page with whatever data is needed. Any function in ```Memory::Debug``` might overwrite the contents of the debug page. You can get a pointer to the debug page of an allocator with the ```RequestDbgPage``` function of the allocator. Be sure to release the page with ```ReleaseDbgPage``` after you are done using it.
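
For example (a sketch only; the README names these functions but doesn't show their signatures, so the ```u8*``` return type and the argument-free ```ReleaseDbgPage``` call are assumptions):

```
// Assumed return type: a byte pointer to the reserved debug page.
u8* debugPage = allocator->RequestDbgPage(); // reserve the debug page for your own use
// ... fill the page with whatever scratch data is needed ...
allocator->ReleaseDbgPage();                 // any Memory::Debug call may overwrite it afterwards
```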
57 |
58 | The ```Memory::Debug::MemInfo``` function can be used to retrieve information about the state of the memory allocator. It provides metadata such as how many pages are in use, a list of active allocations, and a visual bitmap chart to make debugging the memory bitmask easy. You can write this information to a file like so:
59 |
60 | ```
61 | DeleteFile(L"MemInfo.txt");
62 | HANDLE hFile = CreateFile(L"MemInfo.txt", GENERIC_WRITE, FILE_SHARE_READ, NULL, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL);
63 | Memory::Debug::MemInfo(Memory::GlobalAllocator, [](const u8* mem, u32 size, void* fileHandle) {
64 |     HANDLE file = *(HANDLE*)fileHandle;
65 |     DWORD bytesWritten;
66 |     WriteFile(file, mem, size, &bytesWritten, nullptr);
67 | }, &hFile);
68 | CloseHandle(hFile);
69 | ```
70 |
71 | There is a similar ```Memory::Debug::PageContent``` function which, given a page number, will dump the binary content of a page. The output of the above log will look something like this:
72 |
73 | ```
74 | Tracking 256 pages, Page size: 4096 bytes
75 | Total memory size: 1024 KiB (1 MiB)
76 | Page state: 231 free, 23 used, 2 overhead
77 | Requested: 60656 bytes, Served: 94208 bytes
78 |
79 | Active allocations:
80 | 114688, size: 4096, padded: 4120, alignment: 0, first page: 28,
81 | prev: 86016, next: 110592, location: On line: 881, in file:
82 | C:\Users\Gabor\Git\GameAllocator\GameAllocator\WinMain.cpp
83 | 12160, size: 64, padded: 88, alignment: 0, first page: 2,
84 | prev: 16320, next: 0, location: On line: 1064, in file:
85 | C:\Users\Gabor\Git\GameAllocator\GameAllocator\WinMain.cpp
86 | [Rest of active list removed for brevity]
87 |
88 | Page chart:
89 | 000000-0000000-0-00-0000-00000--------------------------------------------------
90 | --------------------------------------------------------------------------------
91 | --------------------------------------------------------------------------------
92 | ----------------
93 | ```
94 |
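```Memory::Debug::PageContent``` can be driven with the same callback pattern as ```MemInfo```. The parameter order below is an assumption (this README describes the function but doesn't declare it); it mirrors the ```MemInfo``` call above with a page number added, using page 28 from the dump above:

```
// Assumed signature: PageContent(allocator, pageNumber, writeCallback, userData).
HANDLE hPage = CreateFile(L"Page28.bin", GENERIC_WRITE, FILE_SHARE_READ, NULL, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL);
Memory::Debug::PageContent(Memory::GlobalAllocator, 28, [](const u8* mem, u32 size, void* fileHandle) {
    HANDLE file = *(HANDLE*)fileHandle;
    DWORD bytesWritten;
    WriteFile(file, mem, size, &bytesWritten, nullptr);
}, &hPage);
CloseHandle(hPage);
```
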
95 | # Resources
96 |
97 | * [Compile without CRT](https://yal.cc/cpp-a-very-tiny-dll/)
98 | * Ready Set Allocate: [Part 1](https://web.archive.org/web/20120419125628/http://www.altdevblogaday.com/2011/04/11/ready-set-allocate-part-1/), [Part 2](https://web.archive.org/web/20120419125404/http://www.altdevblogaday.com/2011/04/26/ready-set-allocate-part-2/), [Part 3](https://web.archive.org/web/20120419010208/http://www.altdevblogaday.com/2011/05/15/ready-set-allocate-part-3/), [Part 4](https://web.archive.org/web/20120418212016/http://www.altdevblogaday.com/2011/05/26/ready-set-allocate-part-4/), [Part 5](https://web.archive.org/web/20120413201435/http://www.altdevblogaday.com/2011/06/08/ready-set-allocate-part-5/), [Part 6](https://web.archive.org/web/20120321205231/http://www.altdevblogaday.com/2011/06/30/ready-set-allocate-part-6/)
99 | * [Concatenate \_\_LINE\_\_ and \_\_FILE\_\_](https://stackoverflow.com/questions/2653214/stringification-of-a-macro-value)
100 | * C++ overload [new](https://cplusplus.com/reference/new/operator%20new/), [new[]](https://cplusplus.com/reference/new/operator%20new[]/), [delete](https://cplusplus.com/reference/new/operator%20delete/), and [delete[]](https://cplusplus.com/reference/new/operator%20delete[]/)
101 | * [Stack overflow memory alignment discussion](https://stackoverflow.com/questions/227897/how-to-allocate-aligned-memory-only-using-the-standard-library)
102 | * [Scott Schurr's const string](https://www.youtube.com/watch?v=BUnNA2dLRsU)
103 | * [Surma, C to WASM without emscripten](https://surma.dev/things/c-to-webassembly/)
104 | * [Schell, C to WASM without emscripten](https://schellcode.github.io/webassembly-without-emscripten#getting-wasm-opt)
--------------------------------------------------------------------------------
/WebAssemblySample/GameAllocator.css:
--------------------------------------------------------------------------------
1 | html, body {
2 | min-height: 100% !important;
3 | height: 100%;
4 | border: 0px;
5 | padding: 0px;
6 | margin: 0px;
7 | background-color: rgb(30, 30, 30);
8 | color: rgb(220, 220, 220);
9 |
10 |
11 | text-decoration: none;
12 | }
13 |
14 | a:link {
15 | color: rgb(220, 220, 220);
16 | text-decoration: none;
17 | }
18 |
19 | a:visited {
20 | color: rgb(200, 200, 200);
21 | text-decoration: none;
22 | }
23 |
24 | a:hover {
25 | color: rgb(180, 180, 180);
26 | text-decoration: underline;
27 | }
28 |
29 | a:active {
30 | color: rgb(160, 160, 160);
31 | text-decoration: underline;
32 | }
33 |
34 | #mem {
35 | border: 0px;
36 | padding: 0px;
37 | margin: 0px;
38 | width: 100%;
39 | height: 100%;
40 | overflow: hidden;
41 | display: none;
42 | }
43 | #bottom {
44 | height: 40%;
45 | width: 100%;
46 | border: 0px;
47 | padding: 0px;
48 | margin: 0px;
49 | display:flex;
50 | }
51 | #command {
52 | height: 100%;
53 | width: 30%;
54 | border: 0px;
55 | padding: 0px;
56 | margin: 0px;
57 | background-color: rgb(40, 40, 40);
58 | font-weight: bold;
59 | }
60 | #allocations {
61 | height: 100%;
62 | width: 70%;
63 | border: 0px;
64 | padding: 5px;
65 | margin: 0px;
66 | background-color: rgb(30, 30, 30);
67 | color: rgb(220, 220, 220);
68 | }
69 | .commandrow {
70 | display:flex;
71 | width: auto;
72 | height: 30px;
73 | margin: 5px;
74 | }
75 | .onethird {
76 | padding: 0px;
77 | border: 0px;
78 | margin: 5px;
79 | margin-bottom: 0px;
80 | width: 33%;
81 | min-width: 50px;
82 | max-width: 400px;
83 | overflow: hidden;
84 | }
85 | .fullrow {
86 | padding: 0px;
87 | border: 0px;
88 | margin: 0px;
89 |
90 | width: 95%;
91 | min-width: 95%;
92 | max-width: 95%;
93 |
94 | height: 22px;
95 | min-height: 22px;
96 | max-height: 22px;
97 | margin-left: 5px;
98 | }
99 | #chart {
100 | height: 60%;
101 | width: 100%;
102 | background-color: rgb(55, 55, 55);
103 | border: 0px;
104 | padding: 0px;
105 | margin: 0px;
106 | display: flex;
107 | overflow: scroll;
108 | overflow-x: hidden;
109 | flex-wrap: wrap;
110 | align-content: start;
111 | }
112 | .red,
113 | .green,
114 | .blue {
115 | background-color: aquamarine;
116 | margin: 1px;
117 | padding: 0px;
118 | border: 0px;
119 | width: 5px;
120 | min-width: 5px;
121 | max-width: 5px;
122 | height: 7px;
123 | min-height: 7px;
124 | max-height: 7px;
125 | }
126 | .red {
127 | background-color: rgb(200, 50, 25);
128 | }
129 | .green {
130 | background-color: rgb(50, 200, 25);
131 | }
132 | .blue {
133 | background-color: rgb(50, 25, 200);
134 | }
135 | #config {
136 | position: absolute;
137 |
138 | width: 400px;
139 | min-width: 400px;
140 | max-width: 400px;
141 | height: 50px;
142 | max-height: 25px;
143 | min-height: 25px;
144 |
145 | left: 50%;
146 | top: 50%;
147 |
148 | margin: 0px;
149 | padding: 0px;
150 | border: 0px;
151 | margin-left: -200px;
152 | margin-top: -25px;
153 | }
--------------------------------------------------------------------------------
/WebAssemblySample/GameAllocator.wasm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gszauer/GameAllocator/3906f35f7502baffbe2b5c4f16456abce83a00db/WebAssemblySample/GameAllocator.wasm
--------------------------------------------------------------------------------
/WebAssemblySample/build-debug.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | C:/WASM/clang.exe -x c++ ^
4 | --target=wasm32 ^
5 | -nostdinc ^
6 | -nostdlib ^
7 | -O0 ^
8 | -g ^
9 | -fno-threadsafe-statics ^
10 | -Wl,--allow-undefined ^
11 | -Wl,--import-memory ^
12 | -Wl,--no-entry ^
13 | -Wl,--export-dynamic ^
14 | -Wl,-z,stack-size=4194304 ^
15 | -D WASM32=1 ^
16 | -D _WASM32=1 ^
17 | -D DEBUG=1 ^
18 | -D _DEBUG=1 ^
19 | -o GameAllocator.wasm ^
20 | WebAssembly.cpp ../mem.cpp
--------------------------------------------------------------------------------
/WebAssemblySample/build-linux.sh:
--------------------------------------------------------------------------------
1 |
2 |
3 | clang -x c++ \
4 | --target=wasm32 \
5 | -nostdinc \
6 | -nostdlib \
7 | -O3 \
8 | -flto \
9 | -Wl,--allow-undefined \
10 | -Wl,--import-memory \
11 | -Wl,--no-entry \
12 | -Wl,--export-dynamic \
13 | -Wl,--lto-O3 \
14 | -Wl,-z,stack-size=4194304 \
15 | -D WASM32=1 \
16 | -D _WASM32=1 \
17 | -o GameAllocator.wasm \
18 | ../mem.cpp
--------------------------------------------------------------------------------
/WebAssemblySample/build-windows.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | C:/WASM/clang.exe -x c++ ^
4 | --target=wasm32 ^
5 | -nostdinc ^
6 | -nostdlib ^
7 | -O3 ^
8 | -flto ^
9 | -Wl,--allow-undefined ^
10 | -Wl,--import-memory ^
11 | -Wl,--no-entry ^
12 | -Wl,--export-dynamic ^
13 | -Wl,--lto-O3 ^
14 | -Wl,-z,stack-size=4194304 ^
15 | -D WASM32=1 ^
16 | -D _WASM32=1 ^
17 | -o GameAllocator.wasm ^
18 | WebAssembly.cpp ../mem.cpp
--------------------------------------------------------------------------------
/WebAssemblySample/index.html:
--------------------------------------------------------------------------------
[index.html did not survive this export intact: the HTML tags were stripped and lines 11-225 (the page's inline script and styles) are missing. The recoverable text content is the page title "Game Memory Allocator, WASM", a "Configure allocator" dialog, and the status labels "Tracking XX Pages, XX KiB (XX MiB)", "Pages: XX free, XX used, XX overhead", "Requested: XX bytes (~XX MiB)", and "Served: XX bytes (~XX MiB)".]
--------------------------------------------------------------------------------
/WebAssemblySample/mem.js:
--------------------------------------------------------------------------------
1 | class GameAllocator {
2 |
3 | constructor(totalMemoryBytes, heapSize) {
4 | this.WebAssemblyMemory = null;
5 | this.memory = null // Alias for WebAssemblyMemory
6 | this.AllocatorPtr = null;
7 | this.WasmExports = null;
8 | this.RequestedBytes = totalMemoryBytes;
9 | this.HeapSizeBytes = 0;
10 | this.RequestedHeapSize = heapSize;
11 | this.GlobalDumpState = "";
12 |
13 | let self = this;
14 |
15 | // WASM is 64 KiB / page (our allocator is 4 KiB);
16 | let wasmPageSize = 64 * 1024; // 64 KiB
17 | let wasmNumPages = Math.ceil(totalMemoryBytes / wasmPageSize);
18 | self.WebAssemblyMemory = new WebAssembly.Memory( {
19 | initial: wasmNumPages,
20 | maximum: wasmNumPages
21 | });
22 | self.memory = this.WebAssemblyMemory;
23 | }
24 |
25 | logError(str) {
26 | console.error(str);
27 | }
28 |
29 | logInfo(str) {
30 | console.log(str);
31 | }
32 |
33 | InjectWebAssemblyImportObject(importObject) {
34 | if (!importObject.hasOwnProperty("env")) {
35 | importObject.env = {};
36 | }
37 | importObject.env.memory = this.WebAssemblyMemory;
38 | let self = this;
39 |
40 | importObject.env["GameAllocator_jsBuildMemState"] = function(ptr, len) {
41 | const array = new Uint8Array(self.WebAssemblyMemory.buffer, ptr, len);
42 | const decoder = new TextDecoder()
43 | const string = decoder.decode(array);
44 | self.GlobalDumpState += string;
45 | };
46 | }
47 |
48 | InitializeWebAssembly(wasmExports) {
49 | if (!wasmExports) {
50 | this.logError("invalid exports object");
51 | }
52 | this.WasmExports = wasmExports;
53 | this.AllocatorPtr = this.WasmExports.GameAllocator_wasmInitialize(this.RequestedHeapSize);
54 | this.HeapSizeBytes = this.WasmExports.GameAllocator_wasmHeapSize(this.RequestedBytes);
55 | this.logInfo("Requested heap: " + this.RequestedHeapSize + ", actual heap: " + this.HeapSizeBytes);
56 | if (this.HeapSizeBytes < this.RequestedHeapSize) {
57 | this.logError("Actual heap size is less than requested heap size");
58 | }
59 | }
60 |
61 | ShutdownWebAssembly() {
62 | this.WasmExports.GameAllocator_wasmShutdown(this.AllocatorPtr);
63 | this.AllocatorPtr = 0;
64 | }
65 |
66 | Allocte(bytes, alignment) {
67 | if (!alignment) {
68 | alignment = 0;
69 | }
70 | if (!bytes || bytes <= 0) {
71 | this.logError("Can't allocate <=0 bytes!");
72 | bytes = 1;
73 | }
74 |
75 | return this.WasmExports.GameAllocator_wasmAllocate(this.AllocatorPtr, bytes, alignment);
76 | }
77 |
78 | Release(ptr) {
79 | this.WasmExports.GameAllocator_wasmRelease(this.AllocatorPtr, ptr);
80 | }
81 |
82 | Set(ptr, value, size) {
83 | this.WasmExports.GameAllocator_wasmSet(ptr, value, size);
84 | }
85 |
86 | Copy(dest_ptr, src_ptr, size) {
87 | this.WasmExports.GameAllocator_wasmCopy(dest_ptr, src_ptr, size);
88 | }
89 |
90 | GetNumPages() {
91 | return this.WasmExports.GameAllocator_wasmGetNumPages(this.AllocatorPtr);
92 | }
93 |
94 | GetNumPagesInUse() {
95 | return this.WasmExports.GameAllocator_wasmGetNumPagesInUse(this.AllocatorPtr);
96 | }
97 |
98 | GetPeekPagesUsed() {
99 | return this.WasmExports.GameAllocator_wasmGetPeekPagesUsed(this.AllocatorPtr);
100 | }
101 |
102 | GetRequestedBytes() {
103 | return this.WasmExports.GameAllocator_wasmGetRequestedBytes(this.AllocatorPtr);
104 | }
105 |
106 | GetServedBytes() {
107 | return this.WasmExports.GameAllocator_wasmGetServedBytes(this.AllocatorPtr);
108 | }
109 |
110 | IsPageInUse(page) {
111 | if (page < 0) {
112 | this.logError("invalid page");
113 | page = 0;
114 | }
115 | let result = this.WasmExports.GameAllocator_wasmIsPageInUse(this.AllocatorPtr, page);
116 | return result != 0;
117 | }
118 |
119 | Size() {
120 | return this.WasmExports.GameAllocator_wasmGetSize(this.AllocatorPtr);
121 | }
122 |
123 | GetNumOverheadPages() {
124 | return this.WasmExports.GameAllocator_wasmGetNumOverheadPages(this.AllocatorPtr);
125 | }
126 |
127 | StrLen(str_ptr) {
128 | return this.WasmExports.GameAllocator_wasmStrLen(str_ptr);
129 | }
130 |
131 | GetAllocationDebugInfo(alloc_ptr) {
132 | return this.WasmExports.GameAllocator_wasmGetAllocationDebugName(this.AllocatorPtr, alloc_ptr);
133 | }
134 |
135 | DebugDumpState() {
136 | this.GlobalDumpState = "";
137 | this.WasmExports.GameAllocator_wasmDumpState(this.AllocatorPtr);
138 | return this.GlobalDumpState;
139 | }
140 | }
--------------------------------------------------------------------------------
/Win32Sample/Win32.cpp:
--------------------------------------------------------------------------------
1 | #include <windows.h>
2 | #include <commctrl.h>
3 |
4 | #include "../mem.h"
5 |
6 | #pragma warning(disable:6011)
7 |
8 | void runtime_assert(bool condition, const char* file, int line) {
9 | char* data = (char*)((void*)0);
10 | if (condition == false) {
11 | *data = '\0';
12 | }
13 | }
14 |
15 | #define WinAssert(condition) runtime_assert(condition, __FILE__, __LINE__)
16 |
17 | #define IDT_TIMER1 1001
18 | #define IDC_LIST 1
19 | #define IDC_STATIC 2
20 | #define ID_UPDOWN 3
21 | #define ID_EDIT 4
22 | #define ID_ALLOCATE_MEM 5
23 | #define ID_FREE_MEM 6
24 | #define ID_FREE_MEM_ALL 7
25 | #define ID_REFRESH_MEM 8
26 | #define ID_DUMP_ALLOC 9
27 |
28 | #define UD_MAX_POS (4096*4)
29 | #define UD_MIN_POS 0
30 |
31 | #ifndef HINST_THISCOMPONENT
32 | EXTERN_C IMAGE_DOS_HEADER __ImageBase;
33 | #define HINST_THISCOMPONENT ((HINSTANCE)&__ImageBase)
34 | #endif
35 |
36 | #pragma comment(lib, "kernel32.lib")
37 | #pragma comment(lib, "user32.lib")
38 | #pragma comment(lib, "gdi32.lib")
39 |
40 | extern "C" int _fltused = 0;
41 |
42 | i32 scrollY;
43 | u32 oldNumRows;
44 | HWND gMemoryWindow;
45 | struct FrameBuffer* gFrameBuffer;
46 | struct Win32Color* bgColor;
47 | struct Win32Color* freeMemoryColor;
48 | struct Win32Color* usedMemoryColor;
49 | struct Win32Color* trackMemoryColor;
50 | struct Win32Color* boxColor;
51 | struct Win32Color* textColor;
52 |
53 | #define KB(x) ((size_t) (x) << 10)
54 | #define MB(x) ((size_t) (x) << 20)
55 | #define GB(x) ((size_t) (x) << 30)
56 |
57 | namespace Memory {
58 | Allocator* GlobalAllocator = 0;
59 | }
60 |
61 | struct MemoryDebugInfo {
62 | u8* PageMask;
63 | u32 NumberOfPages;
64 |
65 | u32 NumFreePages;
66 | u32 NumUsedPages;
67 | u32 NumOverheadPages;
68 |
69 | MemoryDebugInfo(Memory::Allocator* allocator) {
70 | #if ATLAS_64
71 | u64 allocatorHeaderSize = sizeof(Memory::Allocator);
72 | #elif ATLAS_32
73 | u32 allocatorHeaderSize = sizeof(Memory::Allocator);
74 | #else
75 | #error Unknown Platform
76 | #endif
77 | PageMask = ((u8*)allocator) + allocatorHeaderSize;
78 |
79 | NumberOfPages = allocator->size / allocator->pageSize; // 1 page = 4096 bytes, how many are needed
80 | WinAssert(allocator->size % allocator->pageSize == 0); // Allocator size should line up with page size
81 |
82 | u32 maskSize = AllocatorPageMaskSize(allocator) / (sizeof(u32) / sizeof(u8)); // convert from u8 to u32
83 | u32 metaDataSizeBytes = AllocatorPaddedSize() + (maskSize * sizeof(u32));
84 | u32 numberOfMasksUsed = metaDataSizeBytes / allocator->pageSize;
85 | if (metaDataSizeBytes % allocator->pageSize != 0) {
86 | numberOfMasksUsed += 1;
87 | }
88 | metaDataSizeBytes = numberOfMasksUsed * allocator->pageSize; // This way, allocatable will start on a page boundary
89 |
90 | // Account for meta data
91 | metaDataSizeBytes += allocator->pageSize;
92 | numberOfMasksUsed += 1;
93 |
94 | u32 allocatorOverheadBytes = metaDataSizeBytes;
95 | WinAssert(allocatorOverheadBytes % allocator->pageSize == 0); // Offset to allocatable should always line up with page size
96 |
97 | NumFreePages = 0;
98 | NumUsedPages = 0;
99 | NumOverheadPages = allocatorOverheadBytes / allocator->pageSize; // No need for a +1 padding allocatorOverheadBytes should be aligned to Memory::PageSize
100 | //NumOverheadPages += 1; // Debug tracker page
101 |
102 | u32* mask = (u32*)PageMask;
103 | for (u32 page = NumOverheadPages; page < NumberOfPages; ++page) { // Don't start at page 0?
104 | const u32 block = page / Memory::TrackingUnitSize;
105 | const int bit = page % Memory::TrackingUnitSize;
106 |
107 | const bool used = mask[block] & (1 << bit);
108 | if (!used) {
109 | NumFreePages += 1;
110 | }
111 | else {
112 | NumUsedPages += 1;
113 | }
114 | }
115 |
116 | // These are super useless right now
117 | WinAssert(NumFreePages + NumUsedPages + NumOverheadPages == NumberOfPages);// Page number does not add up
118 | WinAssert(NumUsedPages + NumOverheadPages == allocator->numPagesUsed);// Added up wrong number of used pages?!!
119 | }
120 | private:
121 | inline u32 AllocatorPageMaskSize(Memory::Allocator* allocator) { // This is the number of u8's that make up the AllocatorPageMask array
122 | u32 allocatorNumberOfPages = allocator->size / allocator->pageSize; // 1 page = 4096 bytes, how many are needed
123 | //assert(allocator->size % PageSize == 0, "Allocator size should line up with page size");
124 | // allocatorNumberOfPages is the number of bits that are required to track memory
125 |
126 | // Pad out to sizeof(32) (if MaskTrackerSize is 32). This is because AllocatorPageMask will often be used as a u32 array
127 | // and we want to make sure that enough space is reserved.
128 | u32 allocatorPageArraySize = allocatorNumberOfPages / Memory::TrackingUnitSize + (allocatorNumberOfPages % Memory::TrackingUnitSize ? 1 : 0);
129 | //assert(allocatorPageArraySize % (TrackingUnitSize / 8) == 0, "allocatorPageArraySize should always be a multiple of 8");
130 | return allocatorPageArraySize * (Memory::TrackingUnitSize / 8); // In bytes, not bits
131 | }
132 |
133 | inline u32 AllocatorPaddedSize() {
134 | u32 allocatorHeaderSize = sizeof(Memory::Allocator);
135 | return allocatorHeaderSize;
136 | }
137 | };
138 |
139 | struct Win32Color {
140 | HBRUSH brush;
141 | COLORREF color;
142 | unsigned char r;
143 | unsigned char g;
144 | unsigned char b;
145 | unsigned char a;
146 |
147 | Win32Color() {
148 | color = RGB(0, 0, 0);
149 | brush = 0;
150 | r = 0;
151 | g = 0;
152 | b = 0;
153 | a = 255;
154 | CreateBrushObject();
155 | }
156 |
157 | Win32Color(unsigned char _r, unsigned char _g, unsigned char _b) {
158 | color = RGB(_r, _g, _b);
159 | r = _r;
160 | g = _g;
161 | b = _b;
162 | a = 255;
163 | CreateBrushObject();
164 | }
165 |
166 | Win32Color(unsigned char v) {
167 | color = RGB(v, v, v);
168 | r = v;
169 | g = v;
170 | b = v;
171 | a = 255;
172 | CreateBrushObject();
173 | }
174 |
175 | ~Win32Color() {
176 | DestroyBrushObject();
177 | }
178 |
179 | void Init(unsigned char _r, unsigned char _g, unsigned char _b) {
180 | color = RGB(_r, _g, _b);
181 | r = _r;
182 | g = _g;
183 | b = _b;
184 | a = 255;
185 | DestroyBrushObject();
186 | brush = CreateSolidBrush(color);
187 | }
188 |
189 | protected:
190 | void CreateBrushObject() {
191 | WinAssert(brush == 0);
192 | brush = CreateSolidBrush(color);
193 | }
194 |
195 | void DestroyBrushObject() {
196 | WinAssert(brush != 0);
197 | DeleteObject(brush);
198 | brush = 0;
199 | }
200 |
201 | void Copy(const Win32Color& other) {
202 | if (brush != 0) {
203 | DeleteObject(brush);
204 | }
205 | brush = 0;
206 |
207 | color = RGB(other.r, other.g, other.b);
208 | if (other.brush != 0) {
209 | brush = CreateSolidBrush(color);
210 | }
211 |
212 | r = other.r;
213 | g = other.g;
214 | b = other.b;
215 | a = 255;
216 | }
217 | };
218 |
219 | struct FrameBuffer { // For double buffered window
220 | BITMAPINFO RenderBufferInfo;
221 | unsigned char* Memory;
222 | unsigned int Width;
223 | unsigned int Height;
224 |
225 | FrameBuffer() {
226 | Memory = 0;
227 | Width = 0;
228 | Height = 0;
229 | Memory::Set(&RenderBufferInfo, 0, sizeof(BITMAPINFO), 0);
230 | }
231 |
232 | void Initialize() {
233 | WinAssert(Memory == 0);
234 |
235 | Width = GetSystemMetrics(SM_CXSCREEN);
236 | Height = GetSystemMetrics(SM_CYSCREEN);
237 |
238 | RenderBufferInfo.bmiHeader.biSize = sizeof(RenderBufferInfo.bmiHeader);
239 | RenderBufferInfo.bmiHeader.biWidth = Width;
240 | RenderBufferInfo.bmiHeader.biHeight = -((int)Height);
241 | RenderBufferInfo.bmiHeader.biPlanes = 1;
242 | RenderBufferInfo.bmiHeader.biBitCount = 32;
243 | RenderBufferInfo.bmiHeader.biCompression = BI_RGB;
244 |
245 | int bitmapMemorySize = (Width * Height) * 4;
246 | Memory = (unsigned char*)VirtualAlloc(0, bitmapMemorySize, MEM_COMMIT, PAGE_READWRITE);
247 | //memset(Memory, 0, bitmapMemorySize);
248 | }
249 |
250 | void Destroy() {
251 | WinAssert(Memory != 0);
252 | VirtualFree(Memory, 0, MEM_RELEASE);
253 | Memory = 0;
254 | }
255 | };
256 |
257 | void log(const char* pszFormat, ...) {
258 | char buf[1024];
259 | va_list argList;
260 | va_start(argList, pszFormat);
261 | wvsprintfA(buf, pszFormat, argList);
262 | va_end(argList);
263 | DWORD done;
264 | unsigned int sLen = 0;
265 | for (int i = 0; i < 1024; ++i) {
266 | if (buf[i] == '\0') {
267 | break;
268 | }
269 | sLen += 1;
270 | }
271 | WriteFile(GetStdHandle(STD_OUTPUT_HANDLE), buf, /*strlen(buf)*/sLen, &done, NULL);
272 | }
273 |
274 | #define COPY_RECT(a, b) \
275 | a.left = b.left; a.right = b.right; a.top = b.top; a.bottom = b.bottom;
276 |
277 | #define CLEAR_RECT(a) \
278 | a.left = 0; a.right = 0; a.top = 0; a.bottom = 0;
279 |
280 | struct Win32WindowLayout {
281 | RECT topArea;
282 | RECT bottomArea;
283 | RECT bottomLeftArea;
284 | RECT bottomCenterArea;
285 |
286 | Win32WindowLayout() {
287 | CLEAR_RECT(topArea);
288 | CLEAR_RECT(bottomArea);
289 | CLEAR_RECT(bottomLeftArea);
290 | CLEAR_RECT(bottomCenterArea);
291 | }
292 |
293 | Win32WindowLayout(const RECT& _top, const RECT& _bottom, const RECT& _bottomLeft, const RECT& _bottomCenter) {
294 | COPY_RECT(topArea, _top);
295 | COPY_RECT(bottomArea, _bottom);
296 | COPY_RECT(bottomLeftArea, _bottomLeft);
297 | COPY_RECT(bottomCenterArea, _bottomCenter);
298 | }
299 | };
300 |
301 | void SetWindowLayout(const Win32WindowLayout& layout, HWND chart, HWND* labels, HWND list, HWND* buttons,HWND upDown, HWND upDownEdit, HWND combo) {
302 | MemoryDebugInfo memInfo(Memory::GlobalAllocator);
303 |
304 | SetWindowPos(chart, 0, layout.topArea.left, layout.topArea.top, layout.topArea.right - layout.topArea.left, layout.topArea.bottom - layout.topArea.top, /*SWP_NOZORDER*/0);
305 |
306 | const u32 labelHeight = 18;
307 | const u32 buttonHeight = 25;
308 |
309 | RECT labelRect;
310 | COPY_RECT(labelRect, layout.bottomLeftArea);
311 | labelRect.top += 125;
312 |
313 | for (u32 i = 0; i < 9; ++i) {
314 | labelRect.top = layout.bottomLeftArea.top + 125 + i * labelHeight;
315 | labelRect.bottom = (i == 10)? layout.bottomCenterArea.bottom : labelRect.top + labelHeight;
316 |
317 | SetWindowPos(labels[i], 0, labelRect.left, labelRect.top, labelRect.right - labelRect.left, labelRect.bottom - labelRect.top, /*SWP_NOZORDER*/0);
318 | }
319 |
320 | Memory::Allocator* allocator = Memory::GlobalAllocator;
321 | wchar_t displaybuffer[1024];
322 |
323 | int kib = allocator->size / 1024;
324 | int mib = kib / 1024;// +(kib % 1024 ? 1 : 0);
325 | //kib = allocator->size / 1024 + (allocator->size % 1024 ? 1 : 0);
326 |
327 | wsprintfW(displaybuffer, L"Tracking %d Pages, %d KiB (%d MiB)", memInfo.NumberOfPages, kib, mib); // Removed with %.2f %% overhead
328 | SetWindowText(labels[0], displaybuffer);
329 |
330 | wsprintfW(displaybuffer, L"Pages: %d free, %d used, %d overhead", memInfo.NumFreePages, memInfo.NumUsedPages, memInfo.NumOverheadPages);
331 | SetWindowText(labels[1], displaybuffer);
332 |
333 | kib = allocator->requested / 1024;
334 | mib = kib / 1024;// + (kib % 1024 ? 1 : 0);
335 | //kib = allocator->requested / 1024 + (allocator->requested % 1024 ? 1 : 0);
336 |
337 | wsprintfW(displaybuffer, L"Requested: %d bytes (~%d MiB)", allocator->requested, mib);
338 | SetWindowText(labels[2], displaybuffer);
339 |
340 | kib = (memInfo.NumUsedPages * allocator->pageSize) / 1024;
341 | mib = kib / 1024;// +(kib % 1024 ? 1 : 0);
342 | // kib = (memInfo.NumUsedPages * Memory::PageSize) / 1024 + ((memInfo.NumUsedPages * Memory::PageSize) % 1024 ? 1 : 0);
343 |
344 | wsprintfW(displaybuffer, L"Served: %d bytes (~%d MiB)", memInfo.NumUsedPages * allocator->pageSize, mib);
345 | SetWindowText(labels[3], displaybuffer);
346 |
347 | SetWindowPos(list, 0, layout.bottomCenterArea.left, layout.bottomCenterArea.top, layout.bottomCenterArea.right - layout.bottomCenterArea.left, layout.bottomCenterArea.bottom - layout.bottomCenterArea.top, /*SWP_NOZORDER*/0);
348 |
349 | //SetWindowPos(right, 0, layout.bottomRightArea.left, layout.bottomRightArea.top, layout.bottomRightArea.right - layout.bottomRightArea.left, layout.bottomRightArea.bottom - layout.bottomRightArea.top, /*SWP_NOZORDER*/0);
350 |
351 | RECT allocationRect = { 0 };
352 | COPY_RECT(allocationRect, layout.bottomLeftArea);
353 | allocationRect.left += 5;
354 | allocationRect.right -= 5;
355 | allocationRect.top += 5;
356 |
357 | allocationRect.right = allocationRect.left + 75 + 50;
358 | allocationRect.bottom = allocationRect.top + buttonHeight;
359 | SetWindowPos(upDownEdit, 0, allocationRect.left, allocationRect.top, allocationRect.right - allocationRect.left, allocationRect.bottom - allocationRect.top, /*SWP_NOZORDER*/0);
360 | SendMessageW(upDown, UDM_SETBUDDY, (WPARAM)upDownEdit, 0);
361 |
362 | allocationRect.left = allocationRect.right + 5;
363 | allocationRect.right = allocationRect.left + 65 + 25;
364 | SetWindowPos(combo, 0, allocationRect.left, allocationRect.top, allocationRect.right - allocationRect.left, allocationRect.bottom - allocationRect.top + 150, /*SWP_NOZORDER*/0);
365 |
366 | allocationRect.left = allocationRect.right + 5;
367 | allocationRect.right = allocationRect.left + 100 + 30;
368 | SetWindowPos(buttons[0], 0, allocationRect.left, allocationRect.top, allocationRect.right - allocationRect.left, allocationRect.bottom - allocationRect.top, /*SWP_NOZORDER*/0);
369 |
370 | allocationRect.top += buttonHeight + 5;
371 | allocationRect.bottom = allocationRect.top + buttonHeight;
372 | allocationRect.left = layout.bottomLeftArea.left + 5;
373 | allocationRect.right = allocationRect.left + 75 + 50 + 65 + 25 + 5;
374 | SetWindowPos(buttons[1], 0, allocationRect.left, allocationRect.top, allocationRect.right - allocationRect.left, allocationRect.bottom - allocationRect.top, /*SWP_NOZORDER*/0);
375 |
376 | //allocationRect.top += buttonHeight + 5;
377 | //allocationRect.bottom = allocationRect.top + buttonHeight;
378 | allocationRect.left = allocationRect.right + 5;
379 | allocationRect.right = allocationRect.left + 100 + 30;
380 | SetWindowPos(buttons[4], 0, allocationRect.left, allocationRect.top, allocationRect.right - allocationRect.left, allocationRect.bottom - allocationRect.top, /*SWP_NOZORDER*/0);
381 |
382 | allocationRect.left = layout.bottomLeftArea.left + 5;
383 | allocationRect.top += buttonHeight + 5;
384 | allocationRect.bottom = allocationRect.top + buttonHeight;
385 | SetWindowPos(buttons[3], 0, allocationRect.left, allocationRect.top, allocationRect.right - allocationRect.left, allocationRect.bottom - allocationRect.top, /*SWP_NOZORDER*/0);
386 |
387 | allocationRect.top += buttonHeight + 5;
388 | allocationRect.bottom = allocationRect.top + buttonHeight;
389 | SetWindowPos(buttons[2], 0, allocationRect.left, allocationRect.top, allocationRect.right - allocationRect.left, allocationRect.bottom - allocationRect.top, /*SWP_NOZORDER*/0);
390 | }
391 |
392 | void ResetListBoxContent(Memory::Allocator* allocator, HWND list) {
393 | auto selection = SendMessage(list, LB_GETCURSEL, 0, 0);
394 | SendMessage(list, LB_RESETCONTENT, 0, 0);
395 |
396 | wchar_t displaybuffer[1024];
397 | for (Memory::Allocation* iter = allocator->active; iter != 0; iter = (iter->nextOffset == 0) ? 0 : (Memory::Allocation*)((u8*)allocator + iter->nextOffset)) {
398 | size_t len = 1; // To account for '\0' at the end of the string.
399 | char* it = 0;
400 | #if MEM_TRACK_LOCATION
401 | it =(char*)iter->location;
402 | #endif
403 | while (it != 0 && len < 1024 - 256) {
404 | it += 1;
405 | len += 1;
406 | }
407 |
408 | u32 allocationHeaderPadding = 0;
409 | if (iter->alignment != 0) { // Add padding to the header to compensate for alignment
410 | allocationHeaderPadding = iter->alignment - 1; // Somewhere in this range, we will be aligned
411 | }
412 |
413 | u32 paddedSize = iter->size + sizeof(Memory::Allocation) + allocationHeaderPadding;
414 | u32 pages = paddedSize / allocator->pageSize + (paddedSize % allocator->pageSize ? 1 : 0);
415 | wsprintfW(displaybuffer, L"Size: %d/%d bytes, Pages: %d, >", iter->size, paddedSize, pages);
416 | wchar_t* print_to = displaybuffer;
417 | while (*print_to != L'>') {
418 | print_to++;
419 | }
420 | #if MEM_TRACK_LOCATION
421 | if (iter->location != 0) {
422 | MultiByteToWideChar(0, 0, iter->location, (int)len, print_to, (int)len);
423 | }
424 | #endif
425 |
426 | SendMessageW(list, LB_ADDSTRING, 0, (LPARAM)displaybuffer);
427 | }
428 |
429 | SendMessage(list, LB_SETCURSEL, selection, 0);
430 | }
431 |
432 | void FillBox(RECT& rect, Win32Color& c) {
433 | const unsigned char r = c.r;
434 | const unsigned char g = c.g;
435 | const unsigned char b = c.b;
436 |
437 | const unsigned int BufferSize = gFrameBuffer->Width * gFrameBuffer->Height * 4;
438 |
439 | i32 top = rect.top < 0 ? 0 : rect.top;
440 | i32 bottom = rect.bottom < 0 ? 0 : rect.bottom;
441 | i32 left = rect.left < 0 ? 0 : rect.left;
442 | i32 right = rect.right < 0 ? 0 : rect.right;
443 |
444 | for (int row = top; row < bottom; ++row) {
445 | for (int col = left; col < right; ++col) {
446 | unsigned int pixel = (row * gFrameBuffer->Width + col) * 4;
447 | if (pixel >= BufferSize) {
448 | break;
449 | }
450 | gFrameBuffer->Memory[pixel + 0] = b;
451 | gFrameBuffer->Memory[pixel + 1] = g;
452 | gFrameBuffer->Memory[pixel + 2] = r;
453 | gFrameBuffer->Memory[pixel + 3] = 255;
454 | }
455 | }
456 | }
457 |
458 | void RedrawMemoryChart(HWND hwnd, Win32Color& bgColor, Win32Color& trackMemoryColor, Win32Color& usedMemoryColor, Win32Color& freeMemoryColor) {
459 | RECT clientRect;
460 | GetClientRect(hwnd, &clientRect);
461 | int clientWidth = clientRect.right - clientRect.left;
462 | int clientHeight = clientRect.bottom - clientRect.top;
463 |
464 | const u32 pageWidth = 3;
465 | const u32 pageHeight = 5;
466 | const u32 pagePadding = 1;
467 |
468 | const u32 numColumns = clientWidth / (pagePadding + pageWidth + pagePadding);
469 | const u32 numRows = clientHeight / (pagePadding + pageHeight + pagePadding) + (clientHeight % (pagePadding + pageHeight + pagePadding) ? 1 : 0);
470 |
471 | MemoryDebugInfo memInfo(Memory::GlobalAllocator);
472 | u32* mask = (u32*)memInfo.PageMask;
473 |
474 | u32 firstVisibleRow = scrollY / (pagePadding + pageHeight + pagePadding);
475 | if (scrollY % (pagePadding + pageHeight + pagePadding) != 0 && firstVisibleRow >= 1) {
476 | firstVisibleRow -= 1; // the row above is partially visible.
477 | }
478 | u32 lastVisibleRow = (memInfo.NumberOfPages) / numColumns;
479 | if ((memInfo.NumberOfPages) % numColumns != 0) {
480 | lastVisibleRow += 1;
481 | }
482 |
483 | if (lastVisibleRow - firstVisibleRow > numRows) {
484 | lastVisibleRow = firstVisibleRow + numRows;
485 | }
486 |
487 | FillBox(clientRect, bgColor);
488 |
489 | RECT draw;
490 | for (u32 row = firstVisibleRow; row <= lastVisibleRow; ++row) {
491 | for (u32 col = 0; col < numColumns; ++col) {
492 | u32 index = row * numColumns + col;
493 | if (index > memInfo.NumberOfPages) {
494 | break;
495 | }
496 |
497 | // Get memory, see if it's in use
498 | const u32 m = index / Memory::TrackingUnitSize;
499 | const u32 b = index % Memory::TrackingUnitSize;
500 | const bool used = mask[m] & (1 << b);
501 |
502 | draw.left = col * (pagePadding + pageWidth + pagePadding) + pagePadding;
503 | draw.right = draw.left + pageWidth;
504 | draw.top = row * (pagePadding + pageHeight + pagePadding) + pagePadding;
505 | draw.bottom = draw.top + pageHeight;
506 |
507 | draw.top -= scrollY;
508 | draw.bottom -= scrollY;
509 |
510 | if (index < memInfo.NumOverheadPages) {
511 | FillBox(draw, trackMemoryColor);
512 | }
513 | else if (used) {
514 | FillBox(draw, usedMemoryColor);
515 | }
516 | else {
517 | FillBox(draw, freeMemoryColor);
518 | }
519 | }
520 | }
521 | }
522 |
523 | Win32WindowLayout GetWindowLayout(HWND hwnd) {
524 | const i32 sideMargin = 50;
525 | const i32 topMargin = 25;
526 |
527 | const i32 middleSeperator = 40;
528 | const i32 minChartWidth = 150;
529 | const i32 minChartHeight = 150;
530 |
531 | const i32 bottomAreaHeight = 300;
532 | const i32 bottomSeperator = 10;
533 |
534 | const i32 bottomLeftWidth = 365;
535 |
536 | RECT clientRect = {0};
537 | GetClientRect(hwnd, &clientRect);
538 | const i32 clientHeight = clientRect.bottom - clientRect.top;
539 | const i32 clientWidth = clientRect.right - clientRect.left;
540 |
541 | // Layout info
542 | RECT topArea;
543 | RECT bottomArea;
544 | RECT bottomLeft;
545 | RECT bottomRight;
546 |
547 | // Set left and right
548 | topArea.left = clientRect.left + sideMargin;
549 | if (clientRect.right < sideMargin || (clientRect.right - sideMargin) < topArea.left || (clientRect.right - sideMargin) - topArea.left < minChartWidth) {
550 | topArea.right = clientRect.left + minChartWidth;
551 | }
552 | else {
553 | topArea.right = clientRect.right - sideMargin;
554 | }
555 | bottomArea.left = topArea.left;
556 | bottomArea.right = topArea.right;
557 | const u32 chartWidth = topArea.right - topArea.left;
558 |
559 | // Set top and bottom
560 | topArea.top = clientRect.top + topMargin;
561 | i32 maybeBottom = (i32)(clientRect.top + clientHeight);
562 | maybeBottom -= (i32)topMargin + (i32)bottomAreaHeight + (i32)middleSeperator;
563 |
564 | if (maybeBottom <= 0 || maybeBottom < topArea.top || maybeBottom - topArea.top < minChartHeight) {
565 | topArea.bottom = topArea.top + minChartHeight;
566 | }
567 | else {
568 | topArea.bottom = maybeBottom;
569 | }
570 | bottomArea.top = topArea.bottom + middleSeperator / 2;
571 | bottomArea.bottom = bottomArea.top + bottomAreaHeight;
572 |
573 | // Sub-divide the bottom area
574 | WinAssert(minChartWidth / 3 > bottomSeperator /2);
575 | const u32 bottomSectorMinWidth = minChartWidth / 3 - bottomSeperator /2;
576 | u32 bottomSectorWidth = bottomSectorMinWidth;
577 | if (chartWidth / 3 > bottomSeperator /2 && (chartWidth / 3 - bottomSeperator /2) > bottomSectorMinWidth) {
578 | bottomSectorWidth = chartWidth / 3 - bottomSeperator /2;
579 | }
580 |
581 | COPY_RECT(bottomLeft, bottomArea);
582 | COPY_RECT(bottomRight, bottomArea);
583 |
584 | bottomLeft.right = bottomLeft.left + bottomLeftWidth;
585 | bottomRight.left = bottomLeft.right + bottomSeperator;
586 | bottomRight.right = bottomArea.right;
587 |
588 | if (bottomRight.left >= bottomRight.right || bottomRight.right - bottomRight.left < bottomLeftWidth) {
589 | bottomRight.right = bottomRight.left + bottomLeftWidth;
590 | }
591 |
592 | return Win32WindowLayout(topArea, bottomArea, bottomLeft, bottomRight);
593 | }
594 |
595 | LRESULT CALLBACK MemoryChartProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam) {
596 | // Great orange: (255, 125, 64);
597 |
598 | switch (iMsg) {
599 | case WM_CREATE:
600 | scrollY = 0;
601 | SetClassLongPtr(hwnd, GCLP_HBRBACKGROUND, (LONG_PTR)bgColor->brush);
602 | break;
603 | case WM_DESTROY:
604 |
605 | break;
606 | case WM_VSCROLL:
607 | {
608 | auto action = LOWORD(wParam);
609 | HWND hScroll = (HWND)lParam;
610 | int pos = -1;
611 | if (action == SB_THUMBPOSITION || action == SB_THUMBTRACK) {
612 | pos = HIWORD(wParam);
613 | }
614 | else if (action == SB_LINEDOWN) {
615 | pos = scrollY + 30;
616 | }
617 | else if (action == SB_LINEUP) {
618 | pos = scrollY - 30;
619 | }
620 | if (pos == -1) {
621 | break;
622 | }
623 | SCROLLINFO si = { 0 };
624 | si.cbSize = sizeof(SCROLLINFO);
625 | si.fMask = SIF_POS;
626 | si.nPos = pos;
627 | si.nTrackPos = 0;
628 | SetScrollInfo(hwnd, SB_VERT, &si, true);
629 | GetScrollInfo(hwnd, SB_VERT, &si);
630 | pos = si.nPos;
631 | POINT pt;
632 | pt.x = 0;
633 | pt.y = pos - scrollY;
634 | auto hdc = GetDC(hwnd);
635 | LPtoDP(hdc, &pt, 1);
636 | ReleaseDC(hwnd, hdc);
637 | ScrollWindow(hwnd, 0, -pt.y, NULL, NULL);
638 | scrollY = pos;
639 |
640 | RedrawMemoryChart(hwnd, *bgColor, *trackMemoryColor, *usedMemoryColor, *freeMemoryColor);
641 | InvalidateRect(hwnd, 0, false);
642 | }
643 | break;
644 | case WM_SIZE:
645 | {
646 | RECT clientRect;
647 |
648 | GetClientRect(hwnd, &clientRect);
649 | int clientWidth = clientRect.right - clientRect.left;
650 | int clientHeight = clientRect.bottom - clientRect.top;
651 |
652 | const u32 pageWidth = 3;
653 | const u32 pageHeight = 5;
654 | const u32 pagePadding = 1;
655 |
656 | const u32 numColumns = clientWidth / (pagePadding + pageWidth + pagePadding);
657 | MemoryDebugInfo memInfo(Memory::GlobalAllocator);
658 | const u32 maxRows = memInfo.NumberOfPages / numColumns + (memInfo.NumberOfPages % numColumns ? 1 : 0) + 1;
659 |
660 | if (oldNumRows != maxRows) {
661 | SCROLLINFO si = { 0 };
662 | si.cbSize = sizeof(SCROLLINFO);
663 | si.fMask = SIF_ALL;
664 | si.nMin = 0;
665 | si.nMax = maxRows * (pagePadding + pageHeight + pagePadding);
666 | si.nPage = (clientRect.bottom - clientRect.top);
667 | si.nPos = 0;
668 | si.nTrackPos = 0;
669 | SetScrollInfo(hwnd, SB_VERT, &si, true);
670 | oldNumRows = maxRows;
671 | }
672 | }
673 | break;
674 | case WM_PAINT:
675 | case WM_ERASEBKGND:
676 | {
677 | //MemoryDebugInfo memInfo(Memory::GlobalAllocator);
678 | //u32* mask = (u32*)memInfo.PageMask;
679 |
680 | PAINTSTRUCT ps;
681 | RECT clientRect;
682 |
683 | HDC hdc = BeginPaint(hwnd, &ps);
684 |
685 | GetClientRect(hwnd, &clientRect);
686 | int clientWidth = clientRect.right - clientRect.left;
687 | int clientHeight = clientRect.bottom - clientRect.top;
688 |
689 | WinAssert(clientWidth < (int)gFrameBuffer->Width);
690 | WinAssert(clientHeight < (int)gFrameBuffer->Height);
691 |
692 | if (clientWidth > (int)gFrameBuffer->Width) {
693 | clientWidth = (int)gFrameBuffer->Width;
694 | }
695 | if (clientHeight > (int)gFrameBuffer->Height) {
696 | clientHeight = (int)gFrameBuffer->Height;
697 | }
698 |
699 | StretchDIBits(hdc,
700 | 0, 0, clientWidth, clientHeight,
701 | 0, 0, clientWidth, clientHeight,
702 | gFrameBuffer->Memory, &gFrameBuffer->RenderBufferInfo,
703 | DIB_RGB_COLORS, SRCCOPY);
704 |
705 | #if 0
706 | GetClientRect(hwnd, &clientRect);
707 | clientWidth = clientRect.right - clientRect.left;
708 | clientHeight = clientRect.bottom - clientRect.top;
709 |
710 | const u32 pageWidth = 3;
711 | const u32 pageHeight = 5;
712 | const u32 pagePadding = 1;
713 |
714 | const u32 numColumns = clientWidth / (pagePadding + pageWidth + pagePadding);
715 | const u32 numRows = clientHeight / (pagePadding + pageHeight + pagePadding) + (clientHeight % (pagePadding + pageHeight + pagePadding) ? 1 : 0);
716 |
717 | if (oldNumRows != numRows) {
718 | SCROLLINFO si = { 0 };
719 | si.cbSize = sizeof(SCROLLINFO);
720 | si.fMask = SIF_ALL;
721 | si.nMin = 0;
722 | si.nMax = numRows * (pagePadding + pageHeight + pagePadding);
723 | si.nPage = (clientRect.bottom - clientRect.top);
724 | si.nPos = 0;
725 | si.nTrackPos = 0;
726 | SetScrollInfo(hwnd, SB_VERT, &si, true);
727 | oldNumRows = numRows;
728 | }
729 |
730 | u32 firstVisibleRow = scrollY / (pagePadding + pageHeight + pagePadding);
731 | if (scrollY % (pagePadding + pageHeight + pagePadding) != 0 && firstVisibleRow >= 1) {
732 | firstVisibleRow -= 1; // the row above is partially visible.
733 | }
734 | u32 lastVisibleRow = (memInfo.NumberOfPages) / numColumns;
735 | if ((memInfo.NumberOfPages) % numColumns != 0) {
736 | lastVisibleRow += 1;
737 | }
738 |
739 | if (lastVisibleRow - firstVisibleRow > numRows) {
740 | lastVisibleRow = firstVisibleRow + numRows;
741 | }
742 |
743 | FillRect(hdc, &clientRect, bgColor.brush);
744 |
745 | RECT draw;
746 | for (u32 row = firstVisibleRow; row <= lastVisibleRow; ++row) {
747 | for (u32 col = 0; col < numColumns; ++col) {
748 | u32 index = row * numColumns + col;
749 | if (index > memInfo.NumberOfPages) {
750 | break;
751 | }
752 |
753 | // Get memory, see if it's in use
754 | const u32 m = index / Memory::TrackingUnitSize;
755 | const u32 b = index % Memory::TrackingUnitSize;
756 | const bool used = mask[m] & (1 << b);
757 |
758 | draw.left = col * (pagePadding + pageWidth + pagePadding) + pagePadding;
759 | draw.right = draw.left + pageWidth;
760 | draw.top = row * (pagePadding + pageHeight + pagePadding) + pagePadding;
761 | draw.bottom = draw.top + pageHeight;
762 |
763 | if (index < memInfo.NumOverheadPages) {
764 | FillRect(hdc, &draw, trackMemoryColor.brush);
765 | }
766 | else if (used) {
767 | FillRect(hdc, &draw, usedMemoryColor.brush);
768 | }
769 | else {
770 | FillRect(hdc, &draw, freeMemoryColor.brush);
771 | }
772 | }
773 | }
774 | #endif
775 |
776 | EndPaint(hwnd, &ps);
777 | }
778 | return 0;
779 | }
780 | return DefWindowProc(hwnd, iMsg, wParam, lParam);
781 | }
782 |
783 | LRESULT CALLBACK WndProc(HWND hwnd, UINT iMsg, WPARAM wParam, LPARAM lParam) {
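	// These three messages can arrive before the WM_NCCREATE handler has stored the handle in gMemoryWindow, so they are excluded from the sanity check below.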
784 | if (iMsg != WM_GETMINMAXINFO && iMsg != WM_NCCREATE && iMsg != WM_NCCALCSIZE) {
785 | WinAssert(gMemoryWindow != 0);
786 | }
787 |
788 | static HWND hwndChart = 0;
789 | static HWND hwndLabels[9] = { 0 };
790 | static HWND hwndList = 0;
791 | static HWND hwndButtons[5] = { 0 };
792 | static HWND hwndUpDown = { 0 };
793 | static HWND hwndUpDownEdit = { 0 };
794 | static HWND hwndCombo = { 0 };
795 |
796 | switch (iMsg) {
797 | case WM_NCCREATE:
798 | bgColor->Init(30, 30, 30);
799 | boxColor->Init(50, 50, 50);
800 | textColor->Init(220, 220, 220);
801 | freeMemoryColor->Init(110, 110, 220);
802 | usedMemoryColor->Init(110, 240, 110);
803 | trackMemoryColor->Init(255, 110, 110);
804 |
805 | WinAssert(gMemoryWindow == 0);
806 | gMemoryWindow = hwnd;
807 | break;
808 | case WM_NCDESTROY:
809 | WinAssert(gMemoryWindow != 0);
810 | WinAssert(gMemoryWindow == hwnd);
811 | gMemoryWindow = 0;
812 | break;
813 | case WM_CREATE:
814 | WinAssert(gMemoryWindow == hwnd);
815 | gFrameBuffer->Initialize();
816 | { // Set up render styles
817 | SetClassLongPtr(hwnd, GCLP_HBRBACKGROUND, (LONG_PTR)bgColor->brush);
818 | }
819 | { // Create Memory chart window
820 | WNDCLASSEX wc;
821 | wc.cbSize = sizeof(WNDCLASSEX);
822 | wc.style = CS_HREDRAW | CS_VREDRAW;
823 | wc.lpfnWndProc = MemoryChartProc;
824 | wc.cbClsExtra = 0;
825 | wc.cbWndExtra = 0;
826 | wc.hInstance = GetModuleHandle(NULL);
827 | wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
828 | wc.hIconSm = LoadIcon(NULL, IDI_APPLICATION);
829 | wc.hCursor = LoadCursor(NULL, IDC_ARROW);
830 | wc.hbrBackground = (HBRUSH)(COLOR_WINDOW + 1);
831 | wc.lpszMenuName = 0;
832 | wc.lpszClassName = L"MemoryDebuggerParams";
833 | RegisterClassEx(&wc);
834 |
835 | hwndChart = CreateWindowEx(WS_EX_CLIENTEDGE, L"MemoryDebuggerParams", L"",
836 | WS_CHILD | WS_VISIBLE, 10, 10, 50, 50, hwnd, NULL, GetModuleHandle(NULL), NULL);
837 | }
838 | { // Create labels for the left side of the window
839 | for (u32 i = 0; i < 9; ++i) {
840 | hwndLabels[i] = CreateWindowW(L"Static", L"",
841 | WS_CHILD | WS_VISIBLE | SS_LEFT,
842 | 10, 10, 50, 50,
843 | hwnd, (HMENU)1, NULL, NULL);
844 | }
845 | }
846 | { // Create list box to hold a visual reference to all the allocations
847 | hwndList = CreateWindowW(WC_LISTBOXW, NULL, WS_CHILD
848 | | WS_VISIBLE | LBS_NOTIFY | WS_VSCROLL, 10, 10, 50, 50, hwnd,
849 | (HMENU)IDC_LIST, NULL, NULL);
850 | }
851 | { // Create the right side
852 | hwndButtons[0] = CreateWindowW(L"Button", L"Allocate", WS_CHILD | WS_VISIBLE, 10, 10, 50, 50, hwnd, (HMENU)ID_ALLOCATE_MEM, NULL, NULL);
853 | hwndButtons[1] = CreateWindowW(L"Button", L"Free Selected", WS_CHILD | WS_VISIBLE, 10, 10, 50, 50, hwnd, (HMENU)ID_FREE_MEM, NULL, NULL);
854 | hwndButtons[2] = CreateWindowW(L"Button", L"Refresh Display", WS_CHILD | WS_VISIBLE, 10, 10, 50, 50, hwnd, (HMENU)ID_REFRESH_MEM, NULL, NULL);
855 | hwndButtons[3] = CreateWindowW(L"Button", L"Dump Allocator", WS_CHILD | WS_VISIBLE, 10, 10, 50, 50, hwnd, (HMENU)ID_DUMP_ALLOC, NULL, NULL);
856 | hwndButtons[4] = CreateWindowW(L"Button", L"Free All", WS_CHILD | WS_VISIBLE, 10, 10, 50, 50, hwnd, (HMENU)ID_FREE_MEM_ALL, NULL, NULL);
857 |
858 | hwndUpDown = CreateWindowW(UPDOWN_CLASSW, NULL, WS_CHILD | WS_VISIBLE | UDS_SETBUDDYINT | UDS_ALIGNRIGHT, 0, 0, 0, 0, hwnd, (HMENU)ID_UPDOWN, NULL, NULL);
859 | hwndUpDownEdit = CreateWindowExW(WS_EX_CLIENTEDGE, WC_EDITW, NULL, WS_CHILD | WS_VISIBLE | ES_RIGHT, 10, 10, 75, 25, hwnd, (HMENU)ID_EDIT, NULL, NULL);
860 | SendMessageW(hwndUpDown, UDM_SETBUDDY, (WPARAM)hwndUpDownEdit, 0);
861 | SendMessageW(hwndUpDown, UDM_SETRANGE, 0, MAKELPARAM(UD_MAX_POS, UD_MIN_POS));
862 | SendMessageW(hwndUpDown, UDM_SETPOS32, 0, 1);
863 |
864 | hwndCombo = CreateWindow(WC_COMBOBOX, TEXT(""), CBS_DROPDOWN | CBS_HASSTRINGS | WS_CHILD | WS_OVERLAPPED | WS_VISIBLE, 0, 0, 50, 50, hwnd, NULL, HINST_THISCOMPONENT, NULL);
865 | static const wchar_t* items[] = { L"bytes", L"KiB", L"MiB" };
866 | int debug_0 = (int)SendMessage(hwndCombo, (UINT)CB_ADDSTRING, 0, (LPARAM)items[0]);
867 | int debug_1 = (int)SendMessage(hwndCombo, (UINT)CB_ADDSTRING, 0, (LPARAM)items[1]);
868 | int debug_2 = (int)SendMessage(hwndCombo, (UINT)CB_ADDSTRING, 0, (LPARAM)items[2]);
869 | SendMessage(hwndCombo, CB_SETCURSEL, (WPARAM)1, (LPARAM)0);
870 | }
871 | 		{ // Lay out the window elements that were just created to fit the size of the window
872 | Win32WindowLayout layout = GetWindowLayout(hwnd);
873 | SetWindowLayout(layout, hwndChart, hwndLabels, hwndList, hwndButtons, hwndUpDown, hwndUpDownEdit, hwndCombo);
874 | ResetListBoxContent(Memory::GlobalAllocator, hwndList);
875 | RedrawMemoryChart(hwndChart, *bgColor, *trackMemoryColor, *usedMemoryColor, *freeMemoryColor);
876 | InvalidateRect(hwnd, 0, false);
877 | }
878 | break;
879 | case WM_TIMER:
880 | if (wParam == IDT_TIMER1) {
881 | }
882 | break;
883 | case WM_CLOSE:
884 | WinAssert(gMemoryWindow == hwnd);
885 | gFrameBuffer->Destroy();
886 | break;
887 | case WM_COMMAND:
888 | {
889 | bool update = false;
890 | if (LOWORD(wParam) == ID_ALLOCATE_MEM) {
891 | int howMany = (int)SendMessage(hwndUpDown, UDM_GETPOS, 0, 0);
892 | LRESULT units = SendMessage(hwndCombo, CB_GETCURSEL, 0, 0);
893 | if (units == 0) {
894 | units = 1;
895 | }
896 | else if (units == 1) {
897 | units = 1024;
898 | }
899 | else {
900 | units = 1024 * 1024;
901 | }
902 |
903 | Memory::GlobalAllocator->Allocate(howMany * (u32)units);
904 |
905 | update = true;
906 | }
907 | if (LOWORD(wParam) == ID_FREE_MEM) {
908 | int selection = (int)SendMessage(hwndList, LB_GETCURSEL, 0, 0);
909 | if (selection >= 0) {
910 | int counter = 0;
911 | Memory::Allocation* iter = Memory::GlobalAllocator->active;
912 | WinAssert(iter != 0);
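				// Walk the active allocation list until the entry matching the list-box selection is reached; links are byte offsets from the allocator, 0 meaning end of list.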
913 | for (; iter != 0 && counter != selection; iter = (iter->nextOffset == 0)? 0 : (Memory::Allocation*)((u8*)Memory::GlobalAllocator + iter->nextOffset), counter++);
914 | WinAssert(counter == selection);
915 | u8* mem = (u8*)iter + sizeof(Memory::Allocation);
916 | Memory::GlobalAllocator->Release(mem);
917 | }
918 | update = true;
919 | }
920 | if (LOWORD(wParam) == ID_FREE_MEM_ALL) {
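				// Release every active allocation; the next link is resolved before Release() because releasing unlinks the node from the list.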
921 | Memory::Allocation* iter = Memory::GlobalAllocator->active;
922 | while (iter != 0) {
923 | Memory::Allocation* next = 0;
924 | if (iter->nextOffset != 0) {
925 | next = (Memory::Allocation*)((u8*)Memory::GlobalAllocator + iter->nextOffset);
926 | }
927 |
928 | u8* mem = (u8*)iter + sizeof(Memory::Allocation);
929 | Memory::GlobalAllocator->Release(mem);
930 |
931 | iter = next;
932 | }
933 | update = true;
934 | }
935 | if (LOWORD(wParam) == ID_REFRESH_MEM) {
936 | update = true;
937 | }
938 | if (LOWORD(wParam) == ID_DUMP_ALLOC) {
939 | update = true;
940 |
941 | DeleteFile(L"MemInfo.txt");
942 | HANDLE hFile = CreateFile(
943 | L"MemInfo.txt", // Filename
944 | GENERIC_WRITE, // Desired access
945 | FILE_SHARE_READ, // Share mode
946 | NULL, // Security attributes
947 | CREATE_NEW, // Creates a new file, only if it doesn't already exist
948 | FILE_ATTRIBUTE_NORMAL, // Flags and attributes
949 | NULL); // Template file handle
950 | WinAssert(hFile != INVALID_HANDLE_VALUE);
951 |
952 | Memory::Debug::MemInfo(Memory::GlobalAllocator, [](const u8* mem, u32 size, void* fileHandle) {
953 | HANDLE file = *(HANDLE*)fileHandle;
954 | DWORD bytesWritten;
955 | WriteFile(
956 | file, // Handle to the file
957 | mem, // Buffer to write
958 | size, // Buffer size
959 | &bytesWritten, // Bytes written
960 | nullptr); // Overlapped
961 | }, &hFile);
962 | CloseHandle(hFile);
963 | }
964 | if (update) {
965 | SetWindowLayout(GetWindowLayout(hwnd), hwndChart, hwndLabels, hwndList, hwndButtons, hwndUpDown, hwndUpDownEdit, hwndCombo);
966 | ResetListBoxContent(Memory::GlobalAllocator, hwndList);
967 | RedrawMemoryChart(hwndChart, *bgColor, *trackMemoryColor, *usedMemoryColor, *freeMemoryColor);
968 | InvalidateRect(hwnd, 0, false);
969 | return 0;
970 | }
971 | break;
972 | }
973 | case WM_NOTIFY:
974 | break;
975 | case WM_CTLCOLORSTATIC:
976 | case WM_CTLCOLORLISTBOX:
977 | 		for (u32 i = 0; i < 9; ++i) { // hwndLabels holds 9 entries
978 | HDC hdcStatic = (HDC)wParam;
979 | if (hwndLabels[i] == (HWND)lParam) {
980 | SetTextColor(hdcStatic, textColor->color);
981 | SetBkColor(hdcStatic, boxColor->color);
982 | return (INT_PTR)boxColor->brush;
983 | }
984 | }
985 | if (hwndList == (HWND)lParam) {
986 | HDC hdcStatic = (HDC)wParam;
987 | SetTextColor(hdcStatic, textColor->color);
988 | SetBkColor(hdcStatic, boxColor->color);
989 | return (INT_PTR)boxColor->brush;
990 | }
991 | break;
992 | case WM_SIZE:
993 | {
994 | UINT width = LOWORD(lParam);
995 | UINT height = HIWORD(lParam);
996 |
997 | #if _DEBUG
998 | RECT clientRect = { 0 };
999 | GetClientRect(hwnd, &clientRect);
1000 | WinAssert(clientRect.right - clientRect.left == width);
1001 | WinAssert(clientRect.bottom - clientRect.top == height);
1002 | #endif
1003 | Win32WindowLayout layout = GetWindowLayout(hwnd);
1004 | SetWindowLayout(layout, hwndChart, hwndLabels, hwndList, hwndButtons, hwndUpDown, hwndUpDownEdit, hwndCombo);
1005 | InvalidateRect(hwnd, 0, false);
1006 | }
1007 | break;
1008 | }
1009 |
1010 | return DefWindowProc(hwnd, iMsg, wParam, lParam);
1011 | }
1012 |
1013 | void CreateMemoryWindow() {
1014 | HINSTANCE hInstance = GetModuleHandle(NULL);
1015 | const wchar_t className[] = L"MemoryDebug";
1016 |
1017 | WNDCLASSEX wc;
1018 | wc.cbSize = sizeof(WNDCLASSEX);
1019 | wc.style = CS_HREDRAW | CS_VREDRAW;
1020 | wc.lpfnWndProc = WndProc;
1021 | wc.cbClsExtra = 0;
1022 | wc.cbWndExtra = 0;
1023 | wc.hInstance = hInstance;
1024 | wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
1025 | wc.hIconSm = LoadIcon(NULL, IDI_APPLICATION);
1026 | wc.hCursor = LoadCursor(NULL, IDC_ARROW);
1027 | wc.hbrBackground = (HBRUSH)(COLOR_WINDOW + 1);
1028 | wc.lpszMenuName = 0;
1029 | wc.lpszClassName = className;
1030 | RegisterClassEx(&wc);
1031 |
1032 | // Create the window.
1033 | int screenWidth = GetSystemMetrics(SM_CXSCREEN);
1034 | int screenHeight = GetSystemMetrics(SM_CYSCREEN);
1035 | int clientWidth = 1900;
1036 | int clientHeight = 1000;
1037 | RECT windowRect;
1038 | SetRect(&windowRect, (screenWidth / 2) - (clientWidth / 2), (screenHeight / 2) - (clientHeight / 2), (screenWidth / 2) + (clientWidth / 2), (screenHeight / 2) + (clientHeight / 2));
1039 |
1040 | //DWORD style = (WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU | WS_MINIMIZEBOX | WS_MAXIMIZEBOX); // WS_THICKFRAME to resize
1041 | DWORD style = (WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU | WS_MINIMIZEBOX | WS_MAXIMIZEBOX | WS_THICKFRAME);
1042 | AdjustWindowRectEx(&windowRect, style, FALSE, 0);
1043 |
1044 | HWND hwnd = CreateWindowEx(
1045 | 0, // Optional window styles.
1046 | className, // Window class
1047 | L"Memory Viewer", // Window text
1048 | style, // Window style
1049 |
1050 | // Size and position
1051 | windowRect.left, windowRect.top,
1052 | windowRect.right - windowRect.left,
1053 | windowRect.bottom - windowRect.top,
1054 |
1055 | NULL, // Parent window
1056 | NULL, // Menu
1057 | hInstance, // Instance handle
1058 | NULL // Additional application data
1059 | );
1060 | WinAssert(gMemoryWindow != 0);
1061 | WinAssert(gMemoryWindow == hwnd);
1062 |
1063 | ShowWindow(gMemoryWindow, SW_SHOWDEFAULT);
1064 | UpdateWindow(gMemoryWindow);
1065 | }
1066 |
1067 | extern "C" DWORD CALLBACK run() {
1068 | scrollY = 0;
1069 | oldNumRows = 0;
1070 | //_fltused = 0;
1071 | unsigned int size = MB(512);
1072 |
1073 | LPVOID memory = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1074 |
1075 | void* m = memory;
1076 | u32 trimmed = Memory::AlignAndTrim(&m, &size, Memory::DefaultPageSize);
1077 | Memory::GlobalAllocator = Memory::Initialize(m, size, Memory::DefaultPageSize);
1078 | #if ATLAS_64
1079 | WinAssert((u64)((void*)Memory::GlobalAllocator) % 8 == 0);
1080 | #elif ATLAS_32
1081 | WinAssert((u32)((void*)Memory::GlobalAllocator) % 8 == 0);
1082 | #else
1083 | #error Unknown platform
1084 | #endif
1085 |
1086 | gFrameBuffer = Memory::GlobalAllocator->New();
1087 | 	bgColor = Memory::GlobalAllocator->New<Win32Color>(255);
1088 | 	freeMemoryColor = Memory::GlobalAllocator->New<Win32Color>();
1089 | 	usedMemoryColor = Memory::GlobalAllocator->New<Win32Color>();
1090 | 	trackMemoryColor = Memory::GlobalAllocator->New<Win32Color>();
1091 | 	boxColor = Memory::GlobalAllocator->New<Win32Color>();
1092 | 	textColor = Memory::GlobalAllocator->New<Win32Color>();
1093 |
1094 | 	int* x = Memory::GlobalAllocator->New<int>();
1095 | Memory::GlobalAllocator->Delete(x);
1096 |
1097 | CreateMemoryWindow();
1098 |
1099 | MSG msg = { 0 };
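	// Poll the message queue until WM_QUIT arrives or WM_NCDESTROY clears gMemoryWindow; Sleep(1) keeps this polling loop from spinning a full core.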
1100 | do {
1101 | if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE)) {
1102 | if (msg.message == WM_QUIT) {
1103 | break;
1104 | }
1105 | TranslateMessage(&msg);
1106 | DispatchMessage(&msg);
1107 | }
1108 | Sleep(1);
1109 | } while (gMemoryWindow != 0);
1110 |
1111 | Memory::GlobalAllocator->Delete(gFrameBuffer);
1112 | Memory::GlobalAllocator->Delete(bgColor);
1113 | Memory::GlobalAllocator->Delete(freeMemoryColor);
1114 | Memory::GlobalAllocator->Delete(usedMemoryColor);
1115 | Memory::GlobalAllocator->Delete(trackMemoryColor);
1116 | Memory::GlobalAllocator->Delete(boxColor);
1117 | Memory::GlobalAllocator->Delete(textColor);
1118 |
1119 | // Free up any dangling memory (maybe add to debug?)
1120 | Memory::Allocation* iter = Memory::GlobalAllocator->active;
1121 | while (iter != 0) {
1122 | Memory::Allocation* next = (iter->nextOffset == 0) ? 0 : (Memory::Allocation*)((u8*)Memory::GlobalAllocator + iter->nextOffset);
1123 | u8* mem = (u8*)iter + sizeof(Memory::Allocation);
1124 | Memory::GlobalAllocator->Release(mem);
1125 |
1126 | iter = next;
1127 | }
1128 |
1129 | Memory::Shutdown(Memory::GlobalAllocator);
1130 | Memory::GlobalAllocator = 0;
1131 | VirtualFree(memory, 0, MEM_RELEASE);
1132 |
1133 | return 0;
1134 | }
--------------------------------------------------------------------------------
/Win32Sample/Win32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gszauer/GameAllocator/3906f35f7502baffbe2b5c4f16456abce83a00db/Win32Sample/Win32.png
--------------------------------------------------------------------------------
/Win32Sample/Win32.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | # Visual Studio Version 17
4 | VisualStudioVersion = 17.1.32421.90
5 | MinimumVisualStudioVersion = 10.0.40219.1
6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Win32", "Win32.vcxproj", "{CB2AC196-A16C-454E-B41A-CC75AE56C4AD}"
7 | EndProject
8 | Global
9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
10 | Debug|x64 = Debug|x64
11 | Debug|x86 = Debug|x86
12 | Release|x64 = Release|x64
13 | Release|x86 = Release|x86
14 | EndGlobalSection
15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
16 | {CB2AC196-A16C-454E-B41A-CC75AE56C4AD}.Debug|x64.ActiveCfg = Debug|x64
17 | {CB2AC196-A16C-454E-B41A-CC75AE56C4AD}.Debug|x64.Build.0 = Debug|x64
18 | {CB2AC196-A16C-454E-B41A-CC75AE56C4AD}.Debug|x86.ActiveCfg = Debug|Win32
19 | {CB2AC196-A16C-454E-B41A-CC75AE56C4AD}.Debug|x86.Build.0 = Debug|Win32
20 | {CB2AC196-A16C-454E-B41A-CC75AE56C4AD}.Release|x64.ActiveCfg = Release|x64
21 | {CB2AC196-A16C-454E-B41A-CC75AE56C4AD}.Release|x64.Build.0 = Release|x64
22 | {CB2AC196-A16C-454E-B41A-CC75AE56C4AD}.Release|x86.ActiveCfg = Release|Win32
23 | {CB2AC196-A16C-454E-B41A-CC75AE56C4AD}.Release|x86.Build.0 = Release|Win32
24 | EndGlobalSection
25 | GlobalSection(SolutionProperties) = preSolution
26 | HideSolutionNode = FALSE
27 | EndGlobalSection
28 | GlobalSection(ExtensibilityGlobals) = postSolution
29 | SolutionGuid = {FB86E000-59E8-4FE6-B887-B5806E6F85A0}
30 | EndGlobalSection
31 | EndGlobal
32 |
--------------------------------------------------------------------------------
/Win32Sample/Win32.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | Win32
7 |
8 |
9 | Release
10 | Win32
11 |
12 |
13 | Debug
14 | x64
15 |
16 |
17 | Release
18 | x64
19 |
20 |
21 |
22 | 16.0
23 | Win32Proj
24 | {cb2ac196-a16c-454e-b41a-cc75ae56c4ad}
25 | Win32
26 | 10.0
27 |
28 |
29 |
30 | Application
31 | true
32 | v143
33 | Unicode
34 |
35 |
36 | Application
37 | false
38 | v143
39 | true
40 | Unicode
41 |
42 |
43 | Application
44 | true
45 | v143
46 | Unicode
47 |
48 |
49 | Application
50 | false
51 | v143
52 | true
53 | Unicode
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 | true
75 |
76 |
77 | false
78 |
79 |
80 | true
81 |
82 |
83 | false
84 |
85 |
86 |
87 | Level3
88 | false
89 | WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
90 | true
91 | MultiThreadedDebug
92 | false
93 | Default
94 | false
95 |
96 |
97 | Console
98 | true
99 | kernel32.lib;user32.lib;gdi32.lib
100 | true
101 | run
102 |
103 |
104 |
105 |
106 | Level3
107 | true
108 | false
109 | false
110 | WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
111 | true
112 | MultiThreaded
113 | false
114 | false
115 | false
116 | MaxSpeed
117 |
118 |
119 | Console
120 | true
121 | true
122 | true
123 | kernel32.lib;user32.lib;gdi32.lib
124 | true
125 | run
126 |
127 |
128 |
129 |
130 | Level3
131 | false
132 | _DEBUG;_CONSOLE;%(PreprocessorDefinitions)
133 | true
134 | MultiThreadedDebug
135 | false
136 | Default
137 | false
138 |
139 |
140 | Console
141 | true
142 | kernel32.lib;user32.lib;gdi32.lib
143 | true
144 | run
145 |
146 |
147 |
148 |
149 | Level3
150 | true
151 | false
152 | false
153 | NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
154 | true
155 | MultiThreaded
156 | false
157 | false
158 | false
159 | MaxSpeed
160 |
161 |
162 | Console
163 | true
164 | true
165 | true
166 | kernel32.lib;user32.lib;gdi32.lib
167 | true
168 | run
169 |
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 |
--------------------------------------------------------------------------------
/Win32Sample/Win32.vcxproj.filters:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
6 | cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx
7 |
8 |
9 | {93995380-89BD-4b04-88EB-625FBE52EBFB}
10 | h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd
11 |
12 |
13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
15 |
16 |
17 |
18 |
19 | Header Files
20 |
21 |
22 |
23 |
24 | Source Files
25 |
26 |
27 | Source Files
28 |
29 |
30 |
--------------------------------------------------------------------------------
/Win32Sample/Win32Small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gszauer/GameAllocator/3906f35f7502baffbe2b5c4f16456abce83a00db/Win32Sample/Win32Small.png
--------------------------------------------------------------------------------
/mem.cpp:
--------------------------------------------------------------------------------
1 | #include "mem.h"
2 |
3 | #pragma warning(disable:6011)
4 | #pragma warning(disable:28182)
5 |
6 | #ifndef ATLAS_U16
7 | #define ATLAS_U16
8 | typedef unsigned short u16;
9 | static_assert (sizeof(u16) == 2, "u16 should be defined as a 2 byte type");
10 | #endif
11 |
12 | #if _DEBUG
13 | #define assert(cond, msg) Memory::Assert(cond, msg, __LINE__, __FILE__)
14 | #else
15 | #define assert(cond, msg) ;
16 | #endif
17 |
18 | #if _WASM32
19 | namespace Memory {
20 | Allocator* wasmGlobalAllocator = 0;
21 | }
22 | #define NotImplementedException() __builtin_trap()
23 | #else
24 | #define NotImplementedException() (*(char*)((void*)0) = '\0')
25 | #endif
26 |
27 | extern "C" void* __cdecl memset(void* _mem, i32 _value, Memory::ptr_type _size) {
28 | return Memory::Set(_mem, (u8)_value, (u32)_size, "internal - memset");
29 | }
30 |
31 | namespace Memory {
32 | namespace Debug {
33 | u32 u32toa(u8* dest, u32 destSize, u32 num);
34 | }
35 | static void Assert(bool condition, const char* msg, u32 line, const char* file) {
36 | #if _WASM32
37 | if (condition == false) {
38 | u32 wasmLen = 0;
39 | for (const char* i = msg; msg != 0 && *i != '\0'; ++i, ++wasmLen);
40 | //wasmConsoleLog(msg, wasmLen);
41 | __builtin_trap();
42 | }
43 | #else
44 | char* data = (char*)((void*)0);
45 | if (condition == false) {
46 | *data = '\0';
47 | }
48 | #endif
49 | }
50 |
51 | static inline u32 AllocatorPaddedSize() {
52 | static_assert (sizeof(Memory::Allocator) % AllocatorAlignment == 0, "Memory::Allocator size needs to be 8 byte aligned for the allocation mask to start on this alignment without any padding");
53 | return sizeof(Allocator);
54 | }
55 |
56 | static inline u8* AllocatorPageMask(Allocator* allocator) {
57 | static_assert (sizeof(Memory::Allocator) % AllocatorAlignment == 0, "Memory::Allocator size needs to be 8 byte aligned for the allocation mask to start on this alignment without any padding");
58 | return ((u8*)allocator) + sizeof(Allocator);
59 | }
60 |
61 | static inline u32 AllocatorPageMaskSize(Allocator* allocator) { // This is the number of u8's that make up the AllocatorPageMask array
62 | const u32 allocatorNumberOfPages = allocator->size / allocator->pageSize; // 1 page = (probably) 4096 bytes, how many are needed
63 | assert(allocator->size % allocator->pageSize == 0, "Allocator size should line up with page size");
64 | // allocatorNumberOfPages is the number of bits that are required to track memory
65 |
66 | 		// Pad out to the tracking unit size (32 bits if TrackingUnitSize is 32). This is because AllocatorPageMask will often be used as a u32 array
67 | // and we want to make sure that enough space is reserved.
68 | const u32 allocatorPageArraySize = allocatorNumberOfPages / TrackingUnitSize + (allocatorNumberOfPages % TrackingUnitSize ? 1 : 0);
69 | return allocatorPageArraySize * (TrackingUnitSize / 8); // In bytes, not bits
70 | }
71 |
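	// Allocation lists are doubly linked through u32 byte offsets from the allocator base; an offset of 0 means "no link".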
72 | static inline void RemoveFromList(Allocator* allocator, Allocation** list, Allocation* allocation) {
73 | u32 allocationOffset = (u32)((u8*)allocation - (u8*)allocator);
74 | u32 listOffset = (u32)((u8*)(*list) - (u8*)allocator);
75 |
76 | Allocation* head = *list;
77 |
78 | if (head == allocation) { // Removing head
79 | if (head->nextOffset != 0) { // There is a next
80 | Allocation* allocNext = 0;
81 | if (allocation->nextOffset != 0) {
82 | allocNext = (Allocation*)((u8*)allocator + allocation->nextOffset);
83 | }
84 | Allocation* headerNext = 0;
85 | if (head->nextOffset != 0) {
86 | headerNext = (Allocation*)((u8*)allocator + head->nextOffset);
87 | }
88 | assert(allocNext == headerNext, __LOCATION__);
89 | assert(headerNext->prevOffset == allocationOffset, __LOCATION__);
90 | headerNext->prevOffset = 0;
91 | }
92 | Allocation* next = 0;
93 | if (head != 0 && head->nextOffset != 0) {
94 | next = (Allocation*)((u8*)allocator + head->nextOffset);
95 | }
96 | *list = next;
97 | }
98 | else {
99 | if (allocation->nextOffset != 0) {
100 | Allocation* _next = (Allocation*)((u8*)allocator + allocation->nextOffset);
101 | assert(_next->prevOffset == allocationOffset, __LOCATION__);
102 | _next->prevOffset = allocation->prevOffset;
103 | }
104 | if (allocation->prevOffset != 0) {
105 | Allocation* _prev = (Allocation*)((u8*)allocator + allocation->prevOffset);
106 | assert(_prev->nextOffset == allocationOffset, __LOCATION__);
107 | _prev->nextOffset = allocation->nextOffset;
108 | }
109 | }
110 |
111 | allocation->prevOffset = 0;
112 | allocation->nextOffset = 0;
113 | }
114 |
115 | static inline void AddtoList(Allocator* allocator, Allocation** list, Allocation* allocation) {
116 | u32 allocationOffset = (u32)((u8*)allocation - (u8*)allocator);
117 | u32 listOffset = (u32)((u8*)(*list) - (u8*)allocator);
118 | Allocation* head = *list;
119 |
120 | allocation->prevOffset = 0;
121 | allocation->nextOffset = 0;
122 | if (head != 0) {
123 | allocation->nextOffset = listOffset;
124 | head->prevOffset = allocationOffset;
125 | }
126 | *list = allocation;
127 | }
128 |
129 | 	// Returns 0 on error. Since the first page always holds tracking overhead, page 0 is never a valid start for a range.
130 | static inline u32 FindRange(Allocator* allocator, u32 numPages, u32 searchStartBit) {
131 | assert(allocator != 0, __LOCATION__);
132 | assert(numPages != 0, __LOCATION__);
133 |
134 | u32 * mask = (u32*)AllocatorPageMask(allocator);
135 | u32 numBitsInMask = AllocatorPageMaskSize(allocator) * 8;
136 | u32 numElementsInMask = AllocatorPageMaskSize(allocator) / (TrackingUnitSize / 8);
137 | assert(allocator->size % allocator->pageSize == 0, "Memory::FindRange, the allocators size must be a multiple of Memory::PageSize, otherwise there would be a partial page at the end");
138 | assert(mask != 0, __LOCATION__);
139 | assert(numBitsInMask != 0, __LOCATION__);
140 |
141 | u32 startBit = 0;
142 | u32 numBits = 0;
143 |
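		// First pass: scan forward from searchStartBit for numPages consecutive clear bits; if that fails, the second pass below wraps around and scans from bit 0.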
144 | for (u32 i = searchStartBit; i < numBitsInMask; ++i) {
145 | u32 m = i / TrackingUnitSize;
146 | u32 b = i % TrackingUnitSize;
147 |
148 | assert(m < numElementsInMask, "indexing mask out of range");
149 | bool set = mask[m] & (1 << b);
150 |
151 | if (!set) {
152 | if (startBit == 0) {
153 | startBit = i;
154 | numBits = 1;
155 | }
156 | else {
157 | numBits++;
158 | }
159 | }
160 | else {
161 | startBit = 0;
162 | numBits = 0;
163 | }
164 |
165 | if (numBits == numPages) {
166 | break;
167 | }
168 | }
169 |
170 | if (numBits != numPages || startBit == 0) {
171 | startBit = 0;
172 | numBits = 0;
173 |
174 | for (u32 i = 0; i < searchStartBit; ++i) {
175 | u32 m = i / TrackingUnitSize;
176 | u32 b = i % TrackingUnitSize;
177 |
178 | bool set = mask[m] & (1 << b);
179 |
180 | if (!set) {
181 | if (startBit == 0) {
182 | startBit = i;
183 | numBits = 1;
184 | }
185 | else {
186 | numBits++;
187 | }
188 | }
189 | else {
190 | startBit = 0;
191 | numBits = 0;
192 | }
193 |
194 | if (numBits == numPages) {
195 | break;
196 | }
197 | }
198 | }
199 |
200 | allocator->scanBit = startBit + numPages;
201 |
202 | 		assert(numBits == numPages, "Memory::FindRange Could not find enough memory to fulfill request");
203 | 		assert(startBit != 0, "Memory::FindRange Could not fulfill memory request");
204 | if (numBits != numPages || startBit == 0 || allocator->size % allocator->pageSize != 0) {
205 | assert(false, __LOCATION__);
206 | return 0;
207 | }
208 |
209 | return startBit;
210 | }
211 |
212 | static inline void SetRange(Allocator* allocator, u32 startBit, u32 bitCount) {
213 | assert(allocator != 0, __LOCATION__);
214 | assert(bitCount != 0, __LOCATION__);
215 |
216 | u32* mask = (u32*)AllocatorPageMask(allocator);
217 | 		assert(allocator->size % allocator->pageSize == 0, "Memory::SetRange, the allocators size must be a multiple of Memory::PageSize, otherwise there would be a partial page at the end");
218 | assert(mask != 0, __LOCATION__);
219 |
220 | #if _DEBUG
221 | u32 numBitsInMask = AllocatorPageMaskSize(allocator) * 8;
222 | assert(numBitsInMask != 0, __LOCATION__);
223 | #endif
224 | u32 numElementsInMask = AllocatorPageMaskSize(allocator) / (TrackingUnitSize / 8);
225 |
226 | for (u32 i = startBit; i < startBit + bitCount; ++i) {
227 |
228 | u32 m = i / TrackingUnitSize;
229 | u32 b = i % TrackingUnitSize;
230 |
231 | assert(m < numElementsInMask, "indexing mask out of range");
232 | #if _DEBUG
233 | assert(i < numBitsInMask, __LOCATION__);
234 | bool set = mask[m] & (1 << b);
235 | assert(!set, __LOCATION__);
236 | #endif
237 |
238 | mask[m] |= (1 << b);
239 | }
240 |
241 | 		assert(allocator->numPagesUsed <= numBitsInMask, "Memory::SetRange, over allocating");
242 | 		assert(allocator->numPagesUsed + bitCount <= numBitsInMask, "Memory::SetRange, over allocating");
243 | allocator->numPagesUsed += bitCount;
244 | if (allocator->numPagesUsed > allocator->peekPagesUsed) {
245 | allocator->peekPagesUsed = allocator->numPagesUsed;
246 | }
247 | }
248 |
249 | static inline void ClearRange(Allocator* allocator, u32 startBit, u32 bitCount) {
250 | assert(allocator != 0, __LOCATION__);
251 | assert(bitCount != 0, __LOCATION__);
252 |
253 | u32* mask = (u32*)AllocatorPageMask(allocator);
254 | 		assert(allocator->size % allocator->pageSize == 0, "Memory::ClearRange, the allocators size must be a multiple of Memory::PageSize, otherwise there would be a partial page at the end");
255 | assert(mask != 0, __LOCATION__);
256 |
257 | #if _DEBUG
258 | u32 numBitsInMask = AllocatorPageMaskSize(allocator) * 8;
259 | assert(numBitsInMask != 0, __LOCATION__);
260 | #endif
261 |
262 | u32 numElementsInMask = AllocatorPageMaskSize(allocator) / (TrackingUnitSize / 8);
263 |
264 | for (u32 i = startBit; i < startBit + bitCount; ++i) {
265 |
266 | u32 m = i / TrackingUnitSize;
267 | u32 b = i % TrackingUnitSize;
268 |
269 | assert(m < numElementsInMask, "indexing mask out of range");
270 |
271 | #if _DEBUG
272 | assert(i < numBitsInMask, __LOCATION__);
273 | bool set = mask[m] & (1 << b);
274 | assert(set, __LOCATION__);
275 | #endif
276 |
277 | mask[m] &= ~(1 << b);
278 | }
279 |
280 | assert(allocator->numPagesUsed != 0, __LOCATION__);
281 | 		assert(allocator->numPagesUsed >= bitCount, "underflow");
282 | allocator->numPagesUsed -= bitCount;
283 | }
284 |
285 | #if MEM_USE_SUBALLOCATORS
286 | // This function will chop the provided page into several blocks. Since the block size is constant, we
287 | // know that headers will be laid out at a stride of blockSize. There is no additional tracking needed.
288 | void* SubAllocate(u32 requestedBytes, u32 blockSize, Allocation** freeList, const char* location, Allocator* allocator) {
289 | assert(blockSize < allocator->pageSize, "Block size must be less than page size");
290 |
291 | 		// If there are no blocks of the requested size available, reserve 1 page and carve it up into blocks.
292 | bool grabNewPage = *freeList == 0;
293 | if (*freeList == 0) {
294 | // Find and reserve 1 free page
295 | #if MEM_FIRST_FIT
296 | const u32 page = FindRange(allocator, 1, 0);
297 | #else
298 | const u32 page = FindRange(allocator, 1, allocator->scanBit);
299 | #endif
300 | SetRange(allocator, page, 1);
301 |
302 | // Zero out the pages memory
303 | u8* mem = (u8*)allocator + allocator->pageSize * page;
304 | Set(mem, 0, allocator->pageSize, __LOCATION__);
305 |
306 | // Figure out how many blocks fit into this page
307 | const u32 numBlocks = allocator->pageSize / blockSize;
308 | assert(numBlocks > 0, __LOCATION__);
309 | assert(numBlocks < 128, __LOCATION__);
310 |
311 | 			// For each block in this page, initialize its header and add it to the free list
312 | for (u32 i = 0; i < numBlocks; ++i) {
313 | Allocation* alloc = (Allocation*)mem;
314 | mem += blockSize;
315 |
316 | // Initialize the allocation header
317 | alloc->prevOffset = 0;
318 | alloc->nextOffset = 0;
319 | alloc->size = 0;
320 | alloc->alignment = 0;
321 | #if MEM_TRACK_LOCATION
322 | alloc->location = location;
323 | #endif
324 |
325 | AddtoList(allocator, freeList, alloc);
326 | }
327 | }
328 | assert(*freeList != 0, "The free list literally can't be zero here...");
329 |
330 | // At this point we know the free list has some number of blocks in it.
331 | // Save a reference to the current header & advance the free list
332 | // Advance the free list, we're going to be using this one.
333 | Allocation* block = *freeList;
334 | #if MEM_CLEAR_ON_ALLOC
335 | Set((u8*)block + sizeof(Allocation), 0, blockSize - sizeof(Allocation), location);
336 | #elif MEM_DEBUG_ON_ALLOC
337 | {
338 | const u8 stamp[] = "-MEMORY-";
339 | u8* mem = (u8*)block + sizeof(Allocation);
340 | u32 size = blockSize - sizeof(Allocation);
341 | for (u32 i = requestedBytes; i < size; ++i) {
342 | mem[i] = stamp[(i - requestedBytes) % 7];
343 | }
344 | }
345 | #endif
346 | if ((*freeList)->nextOffset != 0) { // Advance one
347 | Allocation* _next = (Allocation*)((u8*)allocator + (*freeList)->nextOffset);
348 | _next->prevOffset = 0;
349 | *freeList = (Allocation*)((u8*)allocator + (*freeList)->nextOffset); // freeList = freeList.next
350 | }
351 | else {
352 | *freeList = 0;
353 | }
354 |
355 | block->prevOffset = 0;
356 | block->size = requestedBytes;
357 | block->alignment = 0;
358 | #if MEM_TRACK_LOCATION
359 | block->location = location;
360 | #endif
361 |
362 | AddtoList(allocator, &allocator->active, block); // Sets block->next
363 |
364 | if (allocator->allocateCallback != 0) {
365 | u32 firstPage = ((u32)((u8*)block - (u8*)allocator)) / allocator->pageSize;
366 | allocator->allocateCallback(allocator, block, requestedBytes, blockSize, firstPage, grabNewPage? 1 : 0);
367 | }
368 |
369 | // Memory always follows the header
370 | return (u8*)block + sizeof(Allocation);
371 | }
372 | #endif
373 |
374 | #if MEM_USE_SUBALLOCATORS
375 | void SubRelease(void* memory, u32 blockSize, Allocation** freeList, const char* location, Allocator* allocator) {
376 | // Find the allocation header and mark it as free. Early out on double free to avoid breaking.
377 | Allocation* header = (Allocation*)((u8*)memory - sizeof(Allocation));
378 | assert(header->size != 0, "Double Free!"); // Make sure it's not a double free
379 | if (header->size == 0) {
380 | assert(false, __LOCATION__);
381 | return;
382 | }
383 | u32 oldSize = header->size;
384 | header->size = 0;
385 |
386 | // Now remove from the active list.
387 | RemoveFromList(allocator, &allocator->active, header);
388 | // Add memory back into the free list
389 | AddtoList(allocator, freeList, header);
390 | #if _DEBUG & MEM_TRACK_LOCATION
391 | header->location = "SubRelease released this block";
392 | #endif
393 |
394 | // Find the first allocation inside the page
395 | u32 startPage = (u32)((u8*)header - (u8*)allocator) / allocator->pageSize;
396 |
397 | u8* mem =(u8*)allocator + startPage * allocator->pageSize;
398 |
399 | 		// Each sub-allocator page contains multiple blocks. Check if all of the blocks
400 | // belonging to a single page are free, if they are, release the page.
401 | bool releasePage = true;
402 |
403 | const u32 numAllocationsPerPage = allocator->pageSize / blockSize;
404 | assert(numAllocationsPerPage >= 1, __LOCATION__);
405 | for (u32 i = 0; i < numAllocationsPerPage; ++i) {
406 | Allocation* alloc = (Allocation*)mem;
407 | if (alloc->size > 0) {
408 | releasePage = false;
409 | break;
410 | }
411 | mem += blockSize;
412 | }
413 |
414 | // If appropriate, release entire page
415 | if (releasePage) {
416 | // Remove from free list
417 | mem = (u8*)allocator + startPage * allocator->pageSize;
418 | for (u32 i = 0; i < numAllocationsPerPage; ++i) {
419 | Allocation* iter = (Allocation*)mem;
420 | mem += blockSize;
421 | assert(iter != 0, __LOCATION__);
422 |
423 | RemoveFromList(allocator, freeList, iter);
424 | }
425 |
426 | // Clear the tracking bits
427 | assert(startPage > 0, __LOCATION__);
428 | ClearRange(allocator, startPage, 1);
429 | }
430 |
431 | if (allocator->releaseCallback != 0) {
432 | allocator->releaseCallback(allocator, header, oldSize, blockSize, startPage, releasePage ? 1 : 0);
433 | }
434 | }
435 | #endif
436 | } // Namespace Memory
437 |
438 | #if _WASM32
439 | #define export __attribute__ (( visibility( "default" ) )) extern "C"
440 |
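// __heap_base and __data_end are linker-provided symbols: __data_end marks the end of static data, and __heap_base marks the start of the memory available for heap allocation.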
441 | extern unsigned char __heap_base;
442 | extern unsigned char __data_end;
443 |
444 | // These are wasm shim functions
445 |
446 | export int GameAllocator_wasmHeapSize(int memSize) {
447 | void* heapPtr = &__heap_base;
448 |
449 | Memory::ptr_type heapAddr = (Memory::ptr_type)heapPtr;
450 | Memory::ptr_type maxAddr = (Memory::ptr_type)memSize;
451 |
452 | Memory::ptr_type heapSize = maxAddr - heapAddr;
453 | return (int)heapSize;
454 | }
455 |
456 | export Memory::Allocator* GameAllocator_wasmInitialize(int heapSize) {
457 | void* memory = &__heap_base;
458 | u32 size = (u32)heapSize; //GameAllocator_wasmHeapSize(totalMemorySize);
459 |
460 | Memory::AlignAndTrim(&memory, &size);
461 | Memory::Allocator* allocator = Memory::Initialize(memory, size);
462 | Memory::wasmGlobalAllocator = allocator;
463 |
464 | return allocator;
465 | }
466 |
467 | export void GameAllocator_wasmShutdown(Memory::Allocator* allocator) {
468 | Memory::Shutdown(allocator);
469 | }
470 |
471 | export void* GameAllocator_wasmAllocate(Memory::Allocator* allocator, int bytes, int alignment) {
472 | return Memory::wasmGlobalAllocator->Allocate(bytes, alignment, "GameAllocator_wasmAllocate");
473 | }
474 |
475 | export void GameAllocator_wasmRelease(Memory::Allocator* allocator, void* mem) {
476 | 	Memory::wasmGlobalAllocator->Release(mem, "GameAllocator_wasmRelease");
477 | }
478 |
479 | export void GameAllocator_wasmSet(void* mem, int val, int size) {
480 | 	Memory::Set(mem, (u8)val, (u32)size, "GameAllocator_wasmSet");
481 | }
482 |
483 | export void GameAllocator_wasmCopy(void* dst, const void* src, int size) {
484 | 	Memory::Copy(dst, src, (u32)size, "GameAllocator_wasmCopy");
485 | }
486 |
487 | export int GameAllocator_wasmGetNumPages(Memory::Allocator* a) {
488 | return a->size / a->pageSize;
489 | }
490 |
491 | export int GameAllocator_wasmGetNumPagesInUse(Memory::Allocator* a) {
492 | return a->numPagesUsed;
493 | }
494 |
495 | export int GameAllocator_wasmGetPeekPagesUsed(Memory::Allocator* a) {
496 | return a->peekPagesUsed;
497 | }
498 |
499 | export int GameAllocator_wasmGetRequestedBytes(Memory::Allocator* a) {
500 | return a->requested;
501 | }
502 |
503 | export int GameAllocator_wasmGetServedBytes(Memory::Allocator* a) {
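	// Recompute how many pages the allocator header, page-tracking mask, and debug page occupy, then report the bytes held by pages that serve real allocations.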
504 | u32 maskSize = AllocatorPageMaskSize(a) / (sizeof(u32) / sizeof(u8)); // convert from u8 to u32
505 | u32 metaDataSizeBytes = sizeof(Memory::Allocator) + (maskSize * sizeof(u32));
506 | u32 numberOfMasksUsed = metaDataSizeBytes / a->pageSize;
507 | if (metaDataSizeBytes % a->pageSize != 0) {
508 | numberOfMasksUsed += 1;
509 | }
510 | metaDataSizeBytes = numberOfMasksUsed * a->pageSize; // This way, allocatable will start on a page boundary
511 | // Account for meta data
512 | metaDataSizeBytes += a->pageSize;
513 | numberOfMasksUsed += 1;
514 |
515 | u32 numPages = a->size / a->pageSize;
516 | u32 usedPages = a->numPagesUsed;
517 | u32 freePages = numPages - usedPages;
518 | u32 overheadPages = metaDataSizeBytes / a->pageSize;
519 |
520 | return (usedPages - overheadPages) * a->pageSize;
521 | }
522 |
523 | export int GameAllocator_wasmIsPageInUse(Memory::Allocator* a, int page) {
524 | u32 m = page / Memory::TrackingUnitSize;
525 | u32 b = page % Memory::TrackingUnitSize;
526 | u32 * mask = (u32*)Memory::AllocatorPageMask(a);
527 |
528 | bool set = mask[m] & (1 << b);
529 | return set;
530 | }
531 |
532 | export int GameAllocator_wasmGetSize(Memory::Allocator* a) {
533 | return a->size;
534 | }
535 |
536 | export int GameAllocator_wasmGetNumOverheadPages(Memory::Allocator* a) {
537 | u32 maskSize = Memory::AllocatorPageMaskSize(a) / (sizeof(u32) / sizeof(u8)); // convert from u8 to u32
538 | u32 metaDataSizeBytes = Memory::AllocatorPaddedSize() + (maskSize * sizeof(u32));
539 | u32 numberOfMasksUsed = metaDataSizeBytes / a->pageSize;
540 | if (metaDataSizeBytes % a->pageSize != 0) {
541 | numberOfMasksUsed += 1;
542 | }
543 | metaDataSizeBytes = numberOfMasksUsed * a->pageSize; // This way, allocatable will start on a page boundary
544 | // Account for meta data
545 | metaDataSizeBytes += a->pageSize;
546 | numberOfMasksUsed += 1;
547 |
548 | u32 overheadPages = metaDataSizeBytes / a->pageSize;
549 |
550 | return (int)overheadPages;
551 | }
552 |
553 | // Helper functions
554 | export int GameAllocator_wasmStrLen(const char* str) {
555 | if (str == 0) {
556 | return 0;
557 | }
558 |
559 | const char *s = str;
560 | while (*s) {
561 | ++s;
562 | }
563 | return (s - str);
564 | }
565 |
566 | extern "C" void GameAllocator_jsBuildMemState(const u8* msg, int len);
567 |
568 | export void GameAllocator_wasmDumpState(Memory::Allocator* allocator) {
569 | Memory::Debug::MemInfo(allocator, [](const u8* mem, u32 size, void* userdata) {
570 | GameAllocator_jsBuildMemState(mem, (int)size);
571 | }, 0);
572 | }
573 |
574 | export void* GameAllocator_wasmGetAllocationDebugName(Memory::Allocator* allocator, void* _m) {
575 | const char* l = "mem_GetAllocationDebugName";
576 |
577 | u8* debugPage = allocator->RequestDbgPage();
578 | u32 debugSize = allocator->pageSize;
579 |
580 | // Reset memory buffer
581 | Memory::Set(debugPage, 0, debugSize, l);
582 | u8* i_to_a_buff = debugPage; // Used to convert numbers to strings
583 | const u32 i_to_a_buff_size = GameAllocator_wasmStrLen((const char*)"18446744073709551615") + 1; // u64 max
584 |
585 | u8* mem = i_to_a_buff + i_to_a_buff_size;
586 | u32 memSize = allocator->pageSize - i_to_a_buff_size;
587 |
588 | u8* m = (u8*)_m - sizeof(Memory::Allocation);
589 | Memory::Allocation* iter = (Memory::Allocation*)m;
590 |
591 | Memory::Copy(mem, "Address: ", 9, l);
592 | mem += 9; memSize -= 9;
593 |
594 | u32 allocationOffset = (u32)((u8*)iter - (u8*)allocator);
595 | i32 i_len = Memory::Debug::u32toa(i_to_a_buff, i_to_a_buff_size, allocationOffset);
596 | Memory::Copy(mem, i_to_a_buff, i_len, l);
597 | mem += i_len;
598 | memSize -= i_len;
599 |
600 | Memory::Copy(mem, ", size: ", 8, l);
601 | mem += 8; memSize -= 8;
602 |
603 | i_len = Memory::Debug::u32toa(i_to_a_buff, i_to_a_buff_size, iter->size);
604 | Memory::Copy(mem, i_to_a_buff, i_len, l);
605 | mem += i_len;
606 | memSize -= i_len;
607 |
608 | Memory::Copy(mem, ", padded: ", 10, l);
609 | mem += 10; memSize -= 10;
610 |
611 | u32 alignment = iter->alignment;
612 | u32 allocationHeaderPadding = 0;
613 | if (alignment != 0) { // Add padding to the header to compensate for alignment
614 | allocationHeaderPadding = alignment - 1; // Somewhere in this range, we will be aligned
615 | }
616 |
617 | u32 realSize = iter->size + (u32)(sizeof(Memory::Allocation)) + allocationHeaderPadding;
618 | i_len = Memory::Debug::u32toa(i_to_a_buff, i_to_a_buff_size, realSize);
619 | Memory::Copy(mem, i_to_a_buff, i_len, l);
620 | mem += i_len;
621 | memSize -= i_len;
622 |
623 | Memory::Copy(mem, ", alignment: ", 13, l);
624 | mem += 13; memSize -= 13;
625 |
626 | i_len = Memory::Debug::u32toa(i_to_a_buff, i_to_a_buff_size, iter->alignment);
627 | Memory::Copy(mem, i_to_a_buff, i_len, l);
628 | mem += i_len;
629 | memSize -= i_len;
630 |
631 | Memory::Copy(mem, ", first page: ", 14, l);
632 | mem += 14; memSize -= 14;
633 |
634 | i_len = Memory::Debug::u32toa(i_to_a_buff, i_to_a_buff_size, (allocationOffset) / allocator->pageSize);
635 | Memory::Copy(mem, i_to_a_buff, i_len, l);
636 | mem += i_len;
637 | memSize -= i_len;
638 |
639 | Memory::Copy(mem, ", prev: ", 8, l);
640 | mem += 8; memSize -= 8;
641 |
642 | i_len = Memory::Debug::u32toa(i_to_a_buff, i_to_a_buff_size, iter->prevOffset);
643 | Memory::Copy(mem, i_to_a_buff, i_len, l);
644 | mem += i_len;
645 | memSize -= i_len;
646 |
647 | Memory::Copy(mem, ", next: ", 8, l);
648 | mem += 8; memSize -= 8;
649 |
650 | i_len = Memory::Debug::u32toa(i_to_a_buff, i_to_a_buff_size, iter->nextOffset);
651 | Memory::Copy(mem, i_to_a_buff, i_len, l);
652 | mem += i_len;
653 | memSize -= i_len;
654 |
655 | u32 pathLen = 0;
656 | #if MEM_TRACK_LOCATION
657 | if (iter->location != 0) {
658 | pathLen = GameAllocator_wasmStrLen((const char*)iter->location);
659 | }
660 | #endif
661 |
662 | Memory::Copy(mem, ", location: ", 12, l);
663 | mem += 12; memSize -= 12;
664 |
665 | #if MEM_TRACK_LOCATION
666 | if (iter->location == 0) {
667 | #else
668 | {
669 | #endif
670 | Memory::Copy(mem, "null", 4, l);
671 | mem += 4; memSize -= 4;
672 | }
673 | #if MEM_TRACK_LOCATION
674 | else {
675 | Memory::Copy(mem, iter->location, pathLen, l);
676 | mem += pathLen;
677 | memSize -= pathLen;
678 | }
679 | #endif
680 |
681 | *mem = '\0';
682 |
683 | allocator->ReleaseDbgPage();
684 |
685 | return debugPage + i_to_a_buff_size;
686 | }
687 | #endif
688 |
689 |
690 | u32 Memory::AlignAndTrim(void** memory, u32* size, u32 alignment, u32 pageSize) {
691 | #if ATLAS_64
692 | u64 ptr = (u64)((const void*)(*memory));
693 | #elif ATLAS_32
694 | u32 ptr = (u32)((const void*)(*memory));
695 | #else
696 | #error Unknown Platform
697 | #endif
698 | u32 delta = 0;
699 |
700 | if (alignment != 0) {
701 | // Align to 8 byte boundary. This is so the mask array lines up on a u64
702 | u32 alignmentDelta = alignment - (u32)(ptr % alignment);
703 | assert(alignmentDelta <= (*size), __LOCATION__);
704 | if (alignmentDelta > *size) { // In release mode, we want to fail on asserts
705 | *memory = 0;
706 | *size = 0;
707 | return 0;
708 | }
709 |
710 | if (ptr % alignment != 0) {
711 | u8* mem = (u8*)(*memory);
712 |
713 | delta += alignmentDelta;
714 | mem += alignmentDelta;
715 | *size -= alignmentDelta;
716 | *memory = mem;
717 | }
718 | }
719 |
720 | // Trim to page size (4096) to make sure the provided memory can be chunked up perfectly
721 | if ((*size) % pageSize != 0) {
722 | u32 diff = (*size) % pageSize;
723 | assert(*size >= diff, __LOCATION__);
724 | if (*size < diff) { // In release mode, fail on assert
725 | *memory = 0;
726 | *size = 0;
727 | return 0;
728 | }
729 | *size -= diff;
730 | delta += diff;
731 | }
732 |
733 | return delta;
734 | }
735 |
736 | Memory::Allocator* Memory::Initialize(void* memory, u32 bytes, u32 pageSize) {
737 | assert(pageSize % AllocatorAlignment == 0, "Memory::Initialize, Page boundaries are expected to be on 8 bytes");
738 | // First, make sure that the memory being passed in is aligned well
739 | #if ATLAS_64
740 | u64 ptr = (u64)((const void*)memory);
741 | #elif ATLAS_32
742 | u32 ptr = (u32)((const void*)memory);
743 | #else
744 | #error Unknown platform
745 | #endif
746 | assert(ptr % AllocatorAlignment == 0, "Memory::Initialize, Memory being managed should be 8 byte aligned. Consider using Memory::AlignAndTrim");
747 | assert(bytes % pageSize == 0, "Memory::Initialize, the size of the memory being managed must be aligned to Memory::PageSize");
748 | assert(bytes / pageSize >= 10, "Memory::Initialize, minimum memory size is 10 pages, page size is Memory::PageSize");
749 |
750 | // Set up the allocator
751 | Allocator* allocator = (Allocator*)memory;
752 | Set(memory, 0, sizeof(Allocator), "Memory::Initialize");
753 | allocator->size = bytes;
754 | allocator->pageSize = pageSize;
755 | allocator->mask = 0;
756 |
757 | // Set up the mask that will track our allocation data
758 | u32* mask = (u32*)AllocatorPageMask(allocator);
759 | u32 maskSize = AllocatorPageMaskSize(allocator) / (sizeof(u32) / sizeof(u8)); // convert from u8 to u32
760 | Set(mask, 0, sizeof(u32) * maskSize, __LOCATION__);
761 |
762 | // Find how many pages the meta data for the header + allocation mask will take up.
763 | // Store the offset to first allocatable,
764 | u32 metaDataSizeBytes = AllocatorPaddedSize() + (maskSize * sizeof(u32));
765 | u32 numberOfMasksUsed = metaDataSizeBytes / pageSize;
766 | if (metaDataSizeBytes % pageSize != 0) {
767 | numberOfMasksUsed += 1;
768 | }
769 | metaDataSizeBytes = numberOfMasksUsed * pageSize; // This way, allocatable will start on a page boundary
770 |
771 | // Add a debug page at the end
772 | metaDataSizeBytes += pageSize;
773 | numberOfMasksUsed += 1;
774 |
775 | //allocator->offsetToAllocatable = metaDataSizeBytes;
776 | allocator->scanBit = 0;
777 | SetRange(allocator, 0, numberOfMasksUsed);
778 | allocator->requested = 0;
779 |
780 | if (ptr % AllocatorAlignment != 0 || bytes % pageSize != 0 || bytes / pageSize < 10) {
781 | assert(false, __LOCATION__);
782 | return 0;
783 | }
784 |
785 | return (Allocator*)memory;
786 | }
787 |
788 | void Memory::Shutdown(Allocator* allocator) {
789 | assert(allocator != 0, "Memory::Shutdown called without it being initialized");
790 | u32* mask = (u32*)AllocatorPageMask(allocator);
791 | u32 maskSize = AllocatorPageMaskSize(allocator) / (sizeof(u32) / sizeof(u8)); // convert from u8 to u32
792 | assert(allocator->size > 0, "Memory::Shutdown, trying to shut down an un-initialized allocator");
793 |
794 | // Unset tracking bits
795 | u32 metaDataSizeBytes = AllocatorPaddedSize() + (maskSize * sizeof(u32));
796 | u32 numberOfMasksUsed = metaDataSizeBytes / allocator->pageSize;
797 | if (metaDataSizeBytes % allocator->pageSize != 0) {
798 | numberOfMasksUsed += 1;
799 | }
800 | metaDataSizeBytes = numberOfMasksUsed * allocator->pageSize;
801 |
802 | 	// There is a debug page between the memory bitmask and allocatable memory
803 | metaDataSizeBytes += allocator->pageSize;
804 | numberOfMasksUsed += 1;
805 |
806 | ClearRange(allocator, 0, numberOfMasksUsed);
807 | assert(allocator->requested == 0, "Memory::Shutdown, not all memory has been released");
808 |
809 | assert(allocator->active == 0, "There are active allocations in Memory::Shutdown, leaking memory");
810 | assert(allocator->free_64 == 0, "Free list is not empty in Memory::Shutdown, leaking memory");
811 | assert(allocator->free_128 == 0, "Free list is not empty in Memory::Shutdown, leaking memory");
812 | assert(allocator->free_256 == 0, "Free list is not empty in Memory::Shutdown, leaking memory");
813 | assert(allocator->free_512 == 0, "Free list is not empty in Memory::Shutdown, leaking memory");
814 | assert(allocator->free_1024 == 0, "Free list is not empty in Memory::Shutdown, leaking memory");
815 | assert(allocator->free_2048 == 0, "Free list is not empty in Memory::Shutdown, leaking memory");
816 |
817 | #if _DEBUG
818 | // In debug mode only, we will scan the entire mask to make sure all memory has been free-d
819 | for (u32 i = 0; i < maskSize; ++i) {
820 | assert(mask[i] == 0, "Page tracking unit isn't empty in Memory::Shutdown, leaking memory.");
821 | }
822 | #endif
823 | }
824 |
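// Copy in progressively smaller strides (u64, then u32, u16, u8 on 64-bit builds) so most of the work happens in word-sized chunks; unaligned pointers fall back to a plain byte copy.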
825 | void Memory::Copy(void* dest, const void* source, u32 size, const char* location) {
826 | #if ATLAS_64
827 | u64 dst_ptr = (u64)((const void*)(dest));
828 | u64 src_ptr = (u64)((const void*)(source));
829 | u64 alignment = sizeof(u64);
830 | #elif ATLAS_32
831 | u32 dst_ptr = (u32)((const void*)(dest));
832 | u32 src_ptr = (u32)((const void*)(source));
833 | u32 alignment = sizeof(u32);
834 | #else
835 | #error Unknown Platform
836 | #endif
837 |
838 | if (dst_ptr % alignment != 0 || src_ptr % alignment != 0) {
839 | // Memory is not aligned well, fall back on slow copy
840 | u8* dst = (u8*)dest;
841 | const u8* src = (const u8*)source;
842 | for (u32 i = 0; i < size; ++i) {
843 | dst[i] = src[i];
844 | }
845 | return;
846 | }
847 |
848 | #if ATLAS_64
849 | u64 size_64 = size / sizeof(u64);
850 | u64* dst_64 = (u64*)dest;
851 | const u64* src_64 = (const u64*)source;
852 | for (u32 i = 0; i < size_64; ++i) {
853 | dst_64[i] = src_64[i];
854 | }
855 | #endif
856 |
857 | #if ATLAS_64
858 | u32 size_32 = (u32)((size - size_64 * sizeof(u64)) / sizeof(u32));
859 | u32* dst_32 = (u32*)(dst_64 + size_64);
860 | const u32* src_32 = (const u32*)(src_64 + size_64);
861 | #else
862 | u32 size_32 = size / sizeof(u32);
863 | u32* dst_32 = (u32*)dest;
864 | const u32* src_32 = (u32*)source;
865 | #endif
866 | for (u32 i = 0; i < size_32; ++i) {
867 | dst_32[i] = src_32[i];
868 | }
869 |
870 | #if ATLAS_64
871 | u32 size_16 = (u32)((size - size_64 * sizeof(u64) - size_32 * sizeof(u32)) / sizeof(u16));
872 | #else
873 | u32 size_16 = (size - size_32 * sizeof(u32)) / sizeof(u16);
874 | #endif
875 | u16* dst_16 = (u16*)(dst_32 + size_32);
876 | const u16* src_16 = (const u16*)(src_32 + size_32);
877 | for (u32 i = 0; i < size_16; ++i) {
878 | dst_16[i] = src_16[i];
879 | }
880 |
881 | #if ATLAS_64
882 | u32 size_8 = (u32)(size - size_64 * sizeof(u64) - size_32 * sizeof(u32) - size_16 * sizeof(u16));
883 | #else
884 | u32 size_8 = (size - size_32 * sizeof(u32) - size_16 * sizeof(u16));
885 | #endif
886 | u8* dst_8 = (u8*)(dst_16 + size_16);
887 | const u8* src_8 = (const u8*)(src_16 + size_16);
888 | for (u32 i = 0; i < size_8; ++i) {
889 | dst_8[i] = src_8[i];
890 | }
891 |
892 | #if ATLAS_64
893 | assert(size_64 * sizeof(u64) + size_32 * sizeof(u32) + size_16 * sizeof(u16) + size_8 == size, "Number of pages not adding up");
894 | #elif ATLAS_32
895 | assert(size_32 * sizeof(u32) + size_16 * sizeof(u16) + size_8 == size, "Number of pages not adding up");
896 | #else
897 | #error Unknown Platform
898 | #endif
899 | }
900 |
901 | // MSVC generates a recursive memset with this implementation. The naive one works fine.
902 | //#pragma optimize( "", off )
903 | void* Memory::Set(void* memory, u8 value, u32 size, const char* location) {
904 | if (memory == 0) {
905 | return 0; // Can't set null!
906 | }
907 |
908 | #if ATLAS_64
909 | u64 ptr = (u64)((const void*)(memory));
910 | u64 alignment = sizeof(u64);
911 | #elif ATLAS_32
912 | u32 ptr = (u32)((const void*)(memory));
913 | u32 alignment = sizeof(u32);
914 | #else
915 | #error Unknown Platform
916 | #endif
917 |
918 | if (size <= alignment) {
919 | u8* mem = (u8*)memory;
920 | 		/* MSVC was optimizing this loop into a recursive call?
921 | for (u32 i = 0; i < size; ++i) {
922 | mem[i] = value;
923 | }*/
924 | 		while ((size--) > 0) {
925 | 			*mem++ = value;
926 | 		}
927 | return memory;
928 | }
929 |
930 | 	// Align memory if needed
931 | assert(alignment >= (ptr % alignment), __LOCATION__);
932 | u32 alignDelta = (u32)(alignment - (ptr % alignment));
933 | assert(alignDelta <= alignment, __LOCATION__);
934 | assert(size >= alignDelta, __LOCATION__);
935 |
936 | u8* mem = (u8*)(memory);
937 | if (alignDelta != 0) {
938 | if (alignDelta > size) {
939 | alignDelta = size;
940 | }
941 | for (u32 iter = 0; iter < alignDelta; ++iter) {
942 | mem[iter] = value;
943 | }
944 |
945 | mem += alignDelta;
946 | size -= alignDelta;
947 | }
948 |
949 | #if ATLAS_64
950 | u64 size_64 = size / sizeof(u64);
951 | u64* ptr_64 = (u64*)mem;
952 | u32 v32 = (((u32)value) << 8) | (((u32)value) << 16) | (((u32)value) << 24) | ((u32)value);
953 | u64 val_64 = (((u64)v32) << 32) | ((u64)v32);
954 | for (u32 i = 0; i < size_64; ++i) {
955 | ptr_64[i] = val_64;
956 | }
957 | #endif
958 |
959 | #if ATLAS_64
960 | u32 size_32 = (u32)((size - size_64 * sizeof(u64)) / sizeof(u32));
961 | u32* ptr_32 = (u32*)(ptr_64 + size_64);
962 | #else
963 | u32 size_32 = size / sizeof(u32);
964 | u32* ptr_32 = (u32*)memory;
965 | #endif
966 | u32 val_32 = (((u32)value) << 8) | (((u32)value) << 16) | (((u32)value) << 24) | ((u32)value);
967 | for (u32 i = 0; i < size_32; ++i) {
968 | ptr_32[i] = val_32;
969 | }
970 |
971 | #if ATLAS_64
972 | u32 size_16 = (u32)((size - size_64 * sizeof(u64) - size_32 * sizeof(u32)) / sizeof(u16));
973 | #else
974 | u32 size_16 = (size - size_32 * sizeof(u32)) / sizeof(u16);
975 | #endif
976 | u16* ptr_16 = (u16*)(ptr_32 + size_32);
977 | u32 val_16 = (((u16)value) << 8) | ((u16)value);
978 | for (u32 i = 0; i < size_16; ++i) {
979 | ptr_16[i] = val_16;
980 | }
981 |
982 | #if ATLAS_64
983 | u32 size_8 = (u32)((size - size_64 * sizeof(u64) - size_32 * sizeof(u32) - size_16 * sizeof(u16)) / sizeof(u8));
984 | #else
985 | u32 size_8 = (size - size_32 * sizeof(u32) - size_16 * sizeof(u16));
986 | #endif
987 | u8* ptr_8 = (u8*)(ptr_16 + size_16);
988 | for (u32 i = 0; i < size_8; ++i) {
989 | ptr_8[i] = value;
990 | }
991 |
992 | #if ATLAS_64
993 | assert(size_64 * sizeof(u64) + size_32 * sizeof(u32) + size_16 * sizeof(u16) + size_8 == size, "Number of pages not adding up");
994 | #elif ATLAS_32
995 | assert(size_32 * sizeof(u32) + size_16 * sizeof(u16) + size_8 == size, "Number of pages not adding up");
996 | #else
997 | #error Unknown Platform
998 | #endif
999 |
1000 | return memory;
1001 | }
1002 | //#pragma optimize( "", on )
1003 |
1004 | u8* Memory::Allocator::RequestDbgPage() {
1005 | Memory::Allocator* allocator = this;
1006 |
1007 | assert(allocator->mask == 0, "Debug page already in use");
1008 | allocator->mask = 1;
1009 |
1010 | // Set up the mask that will track our allocation data
1011 | u32* mask = (u32*)AllocatorPageMask(allocator);
1012 | u32 maskSize = AllocatorPageMaskSize(allocator) / (sizeof(u32) / sizeof(u8)); // convert from u8 to u32
1013 |
1014 | // Find how many pages the meta data for the header + allocation mask will take up.
1015 | // Store the offset to first allocatable,
1016 | u32 metaDataSizeBytes = AllocatorPaddedSize() + (maskSize * sizeof(u32));
1017 | u32 numberOfMasksUsed = metaDataSizeBytes / allocator->pageSize;
1018 | if (metaDataSizeBytes % allocator->pageSize != 0) {
1019 | numberOfMasksUsed += 1;
1020 | }
1021 | metaDataSizeBytes = numberOfMasksUsed * allocator->pageSize; // This way, allocatable will start on a page boundary
1022 |
1023 | // Add a debug page at the end
1024 | metaDataSizeBytes += allocator->pageSize;
1025 | numberOfMasksUsed += 1;
1026 |
1027 | u8* debugPage = (u8*)allocator + metaDataSizeBytes - allocator->pageSize; // Debug page is always one page before allocatable
1028 | return debugPage;
1029 | }
1030 |
1031 | void Memory::Allocator::ReleaseDbgPage() {
1032 | Memory::Allocator* allocator = this;
1033 |
1034 | assert(allocator->mask != 0, "Debug page not in use");
1035 | allocator->mask = 0;
1036 | }
1037 |
1038 | void* Memory::Allocator::Allocate(u32 bytes, u32 alignment, const char* location) {
1039 | if (bytes == 0) {
1040 | bytes = 1; // At least one byte required
1041 | }
1042 | Memory::Allocator* allocator = this;
1043 | assert(bytes < allocator->size, "Memory::Allocate trying to allocate more memory than is available");
1044 | assert(bytes < allocator->size - allocator->requested, "Memory::Allocate trying to allocate more memory than is available");
1045 |
1046 | u32 allocationHeaderPadding = 0;
1047 | if (alignment != 0) { // Add padding to make sure we can align the memory
1048 | allocationHeaderPadding = alignment - 1; // Somewhere in this range, we will be aligned
1049 | }
1050 | u32 allocationHeaderSize = sizeof(Allocation) + allocationHeaderPadding;
1051 |
1052 | // Add the header size to our allocation size
1053 | u32 allocationSize = bytes; // Add enough space to pad out for alignment
1054 | allocationSize += allocationHeaderSize;
1055 |
1056 | // Figure out how many pages are going to be needed to hold that much memory
1057 | u32 numPagesRequested = allocationSize / allocator->pageSize + (allocationSize % allocator->pageSize ? 1 : 0);
1058 | assert(numPagesRequested > 0, "Memory::Allocate needs to request at least 1 page");
1059 |
1060 | // We can record the request here. It's made before the allocation callback, and is valid for sub-allocations too.
1061 | allocator->requested += bytes;
1062 | assert(allocator->requested < allocator->size, __LOCATION__);
1063 |
1064 | #if MEM_USE_SUBALLOCATORS
1065 | if (alignment == 0) {
1066 | if (allocationSize <= 64) {
1067 | return SubAllocate(bytes, 64, &allocator->free_64, location, allocator);
1068 | }
1069 | else if (allocationSize <= 128) {
1070 | return SubAllocate(bytes, 128, &allocator->free_128, location, allocator);
1071 | }
1072 | else if (allocationSize <= 256) {
1073 | return SubAllocate(bytes, 256, &allocator->free_256, location, allocator);
1074 | }
1075 | else if (allocationSize <= 512) {
1076 | return SubAllocate(bytes, 512, &allocator->free_512, location, allocator);
1077 | }
1078 | else if (allocationSize <= 1024) {
1079 | return SubAllocate(bytes, 1024, &allocator->free_1024, location, allocator);
1080 | }
1081 | else if (allocationSize <= 2048) {
1082 | return SubAllocate(bytes, 2048, &allocator->free_2048, location, allocator);
1083 | }
1084 | }
1085 | #endif
1086 |
1087 | // Find enough memory to allocate
1088 | #if MEM_FIRST_FIT
1089 | u32 firstPage = FindRange(allocator, numPagesRequested, 0);
1090 | #else
1091 | u32 firstPage = FindRange(allocator, numPagesRequested, allocator->scanBit);
1092 | #endif
1093 | assert(firstPage != 0, "Memory::Allocate failed to find enough pages to fulfill allocation");
1094 |
1095 | SetRange(allocator, firstPage, numPagesRequested);
1096 |
1097 | if (firstPage == 0 || allocator->size % allocator->pageSize != 0) {
1098 | assert(false, __LOCATION__);
1099 | return 0; // Fail this allocation in release mode
1100 | }
1101 |
1102 | // Fill out header
1103 | u8* mem = (u8*)allocator + firstPage * allocator->pageSize;
1104 |
1105 | u32 alignmentOffset = 0;
1106 | if (alignment != 0) {
1107 | #if ATLAS_64
1108 | u64 mem_addr = (u64)((void*)mem) + sizeof(Allocation);
1109 | #elif ATLAS_32
1110 | u32 mem_addr = (u32)((void*)mem) + sizeof(Allocation);
1111 | #else
1112 | #error Unknown platform
1113 | #endif
1114 | if (mem_addr % alignment != 0) {
1115 | mem_addr = (mem_addr + (alignment - 1)) / alignment * alignment;
1116 | mem = (u8*)(mem_addr - sizeof(Allocation));
1117 | }
1118 | }
1119 |
1120 | Allocation* allocation = (Allocation*)mem;
1121 | mem += sizeof(Allocation);
1122 |
1123 | allocation->alignment = alignment;
1124 | allocation->size = bytes;
1125 | allocation->prevOffset = 0;
1126 | allocation->nextOffset = 0;
1127 | #if MEM_TRACK_LOCATION
1128 | allocation->location = location;
1129 | #endif
1130 |
1131 | // Track allocated memory
1132 | assert(allocation != allocator->active, __LOCATION__); // Should be impossible, but we could have bugs...
1133 | AddtoList(allocator, &allocator->active, allocation);
1134 |
1135 | // Return memory
1136 | #if MEM_CLEAR_ON_ALLOC
1137 | Set(mem, 0, bytes, location);
1138 | #elif MEM_DEBUG_ON_ALLOC
1139 | const u8 stamp[] = "-MEMORY-";
1140 | u32 size = PageSize - allocationHeaderPadding - sizeof(Allocation);
1141 | for (u32 i = bytes; i < size; ++i) {
1142 | mem[i] = stamp[(i - bytes) % 7];
1143 | }
1144 | #endif
1145 |
1146 | if (allocator->allocateCallback != 0) {
1147 | u8* _mem = (u8*)allocator + firstPage * allocator->pageSize;
1148 | _mem += allocationHeaderPadding;
1149 | Allocation* _allocation = (Allocation*)_mem;
1150 | allocator->allocateCallback(allocator, _allocation, bytes, allocationSize, firstPage, numPagesRequested);
1151 | }
1152 |
1153 | return mem;
1154 | }
1155 |
1156 | void Memory::Allocator::Release(void* memory, const char* location) {
1157 | assert(memory != 0, "Memory::Free can't free a null pointer");
1158 | Allocator* allocator = this;
1159 |
1160 | // Retrieve allocation information from header. The allocation header always
1161 | // precedes the allocation.
1162 | u8* mem = (u8*)memory;
1163 | mem -= sizeof(Allocation);
1164 | Allocation* allocation = (Allocation*)mem;
1165 | assert(allocation != 0, "Can't free null");
1166 | u32 alignment = allocation->alignment;
1167 |
1168 | u32 allocationSize = allocation->size; // The unpadded size that was originally requested
1169 |
1170 | u32 allocationHeaderPadding = 0;
1171 | if (alignment != 0) { // Add padding to the header to compensate for alignment
1172 | allocationHeaderPadding = alignment - 1; // Somewhere in this range, we will be aligned
1173 | }
1174 | u32 paddedAllocationSize = allocationSize + allocationHeaderPadding + sizeof(Allocation);
1175 | assert(allocationSize != 0, "Memory::Free, double free");
1176 |
1177 | assert(allocator->requested >= allocation->size, "Memory::Free releasing more memory than was requested");
1178 | assert(allocator->requested != 0, "Memory::Free releasing more memory, but there is nothing to release");
1179 | allocator->requested -= allocation->size;
1180 |
1181 | #if MEM_USE_SUBALLOCATORS
1182 | if (alignment == 0) {
1183 | if (paddedAllocationSize <= 64) {
1184 | SubRelease(memory, 64, &allocator->free_64, location, allocator);
1185 | return;
1186 | }
1187 | else if (paddedAllocationSize <= 128) {
1188 | SubRelease(memory, 128, &allocator->free_128, location, allocator);
1189 | return;
1190 | }
1191 | else if (paddedAllocationSize <= 256) {
1192 | SubRelease(memory, 256, &allocator->free_256, location, allocator);
1193 | return;
1194 | }
1195 | else if (paddedAllocationSize <= 512) {
1196 | SubRelease(memory, 512, &allocator->free_512, location, allocator);
1197 | return;
1198 | }
1199 | else if (paddedAllocationSize <= 1024) {
1200 | SubRelease(memory, 1024, &allocator->free_1024, location, allocator);
1201 | return;
1202 | }
1203 | else if (paddedAllocationSize <= 2048) {
1204 | SubRelease(memory, 2048, &allocator->free_2048, location, allocator);
1205 | return;
1206 | }
1207 | }
1208 | #endif
1209 |
1210 | // Clear the bits that were tracking this memory
1211 | u8* firstMemory = (u8*)allocator;
1212 | u32 address = (u32)((u8*)mem - (u8*)firstMemory);
1213 |
1214 | u32 firstPage = address / allocator->pageSize;
1215 | u32 numPages = paddedAllocationSize / allocator->pageSize + (paddedAllocationSize % allocator->pageSize ? 1 : 0);
1216 | ClearRange(allocator, firstPage, numPages);
1217 |
1218 | // Unlink tracking
1219 | RemoveFromList(allocator, &allocator->active, allocation);
1220 |
1221 | // Set the size to 0, to indicate that this header has been freed
1222 | u32 oldSize = allocation->size;
1223 | allocation->size = 0;
1224 |
1225 | if (allocator->releaseCallback != 0) {
1226 | allocator->releaseCallback(allocator, allocation, oldSize, paddedAllocationSize, firstPage, numPages);
1227 | }
1228 | }
1229 |
1230 | namespace Memory {
1231 | namespace Debug {
1232 | class str_const { // constexpr string
1233 | private:
1234 | const char* const p_;
1235 | const ptr_type sz_;
1236 | private:
1237 | str_const& operator= (const str_const& other) = delete;
1238 | str_const(const str_const&& other) = delete;
1239 | str_const& operator= (const str_const&& other) = delete;
1240 | public:
1241 | template <ptr_type N>
1242 | constexpr str_const(const char(&a)[N]) noexcept : // ctor
1243 | p_(a), sz_(N - 1) {
1244 | }
1245 | constexpr char operator[](ptr_type n) const noexcept { // []
1246 | #if _WASM32
1247 | if (n >= sz_) {
1248 | __builtin_trap();
1249 | }
1250 | return p_[n];
1251 | #else
1252 | return n < sz_ ? p_[n] : (*(char*)((void*)0) = '\0');
1253 | #endif
1254 | }
1255 | constexpr u32 size() const noexcept { // string length
1256 | return (u32)sz_;
1257 | } // size()
1258 | const char* begin() const noexcept { // start iterator
1259 | return p_;
1260 | } // begin()
1261 | const char* end() const noexcept { // End iterator
1262 | return p_ + sz_;
1263 | } // end()
1264 | template <typename T>
1265 | T& operator<<(T& stream) { // Stream op
1266 | stream << p_;
1267 | return stream;
1268 | } // <<
1269 | };
1270 |
1271 | u32 u32toa(u8* dest, u32 destSize, u32 num) { // Returns length of string
1272 | Set(dest, 0, destSize, "Memory::Debug::u32toa");
1273 |
1274 | u32 count = 0;
1275 | u32 tmp = num;
1276 | while (tmp != 0) {
1277 | tmp = tmp / 10;
1278 | count = count + 1;
1279 | }
1280 |
1281 | if (count == 0) {
1282 | *dest = '0';
1283 | return 1;
1284 | }
1285 |
1286 | u8* last = dest + count - 1;
1287 | while (num != 0) {
1288 | u32 digit = num % 10;
1289 | num = num / 10;
1290 |
1291 | *last-- = '0' + digit;
1292 | }
1293 |
1294 | return count;
1295 | }
1296 |
1297 | u32 strlen(const u8* str) {
1298 | const u8* s;
1299 | for (s = str; *s; ++s);
1300 | return (u32)(s - str);
1301 | }
1302 | } // namespace Debug
1303 | } // namespace Memory
1304 |
1305 | void Memory::Debug::MemInfo(Allocator* allocator, WriteCallback callback, void* userdata) {
1306 | const char* l = "Memory::Debug::DumpAllocationHeaders";
1307 |
1308 | u8* debugPage = allocator->RequestDbgPage();
1309 | u32 debugSize = allocator->pageSize;
1310 |
1311 | // Reset memory buffer
1312 | Set(debugPage, 0, debugSize, l);
1313 | u8* i_to_a_buff = debugPage; // Used to convert numbers to strings
1314 | const u32 i_to_a_buff_size = strlen((const u8*)"18446744073709551615") + 1; // u64 max
1315 | u8* mem = i_to_a_buff + i_to_a_buff_size;
1316 | u32 memSize = allocator->pageSize - i_to_a_buff_size;
1317 |
1318 | { // Tracking %d pages, Page size: %d bytes, Total memory size: %d KiB (%d MiB)
1319 | constexpr str_const out0("Tracking ");
1320 | Copy(mem, out0.begin(), out0.size(), l);
1321 | mem += out0.size();
1322 | memSize -= out0.size();
1323 |
1324 | u32 numPages = allocator->size / allocator->pageSize;
1325 | assert(allocator->size % allocator->pageSize == 0, l);
1326 |
1327 | u32 i_len = u32toa(i_to_a_buff, i_to_a_buff_size, numPages);
1328 | Copy(mem, i_to_a_buff, i_len, l);
1329 | mem += i_len;
1330 | memSize -= i_len;
1331 |
1332 | constexpr str_const out1(" pages, Page size: ");
1333 | Copy(mem, out1.begin(), out1.size(), l);
1334 | mem += out1.size();
1335 | memSize -= out1.size();
1336 |
1337 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, allocator->pageSize);
1338 | Copy(mem, i_to_a_buff, i_len, l);
1339 | mem += i_len;
1340 | memSize -= i_len;
1341 |
1342 | constexpr str_const out11(" bytes\nTotal memory size: ");
1343 | Copy(mem, out11.begin(), out11.size(), l);
1344 | mem += out11.size();
1345 | memSize -= out11.size();
1346 |
1347 | u32 kib = allocator->size / 1024;
1348 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, kib);
1349 | Copy(mem, i_to_a_buff, i_len, l);
1350 | mem += i_len;
1351 | memSize -= i_len;
1352 |
1353 | constexpr str_const out2(" KiB (");
1354 | Copy(mem, out2.begin(), out2.size(), l);
1355 | mem += out2.size();
1356 | memSize -= out2.size();
1357 |
1358 | u32 mib = kib / 1024;
1359 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, mib);
1360 | Copy(mem, i_to_a_buff, i_len, l);
1361 | mem += i_len;
1362 | memSize -= i_len;
1363 |
1364 | constexpr str_const out3(" MiB)\n");
1365 | Copy(mem, out3.begin(), out3.size(), l);
1366 | mem += out3.size();
1367 | memSize -= out3.size();
1368 | }
1369 |
1370 | // Dump what's been written so far
1371 | mem = i_to_a_buff + i_to_a_buff_size;
1372 | callback(mem, (allocator->pageSize - i_to_a_buff_size) - memSize, userdata);
1373 |
1374 | // Reset memory buffer
1375 | Set(debugPage, 0, debugSize, l);
1376 | i_to_a_buff = debugPage; // Used to convert numbers to strings
1377 | mem = i_to_a_buff + i_to_a_buff_size;
1378 | memSize = allocator->pageSize - i_to_a_buff_size;
1379 |
1380 | { // Pages: %d free, %d used, %d overhead
1381 | constexpr str_const out0("Page state: ");
1382 | Copy(mem, out0.begin(), out0.size(), l);
1383 | mem += out0.size();
1384 | memSize -= out0.size();
1385 |
1386 | u32 maskSize = AllocatorPageMaskSize(allocator) / (sizeof(u32) / sizeof(u8)); // convert from u8 to u32
1387 | u32 metaDataSizeBytes = AllocatorPaddedSize() + (maskSize * sizeof(u32));
1388 | u32 numberOfMasksUsed = metaDataSizeBytes / allocator->pageSize;
1389 | if (metaDataSizeBytes % allocator->pageSize != 0) {
1390 | numberOfMasksUsed += 1;
1391 | }
1392 | metaDataSizeBytes = numberOfMasksUsed * allocator->pageSize; // This way, allocatable will start on a page boundary
1393 | // Account for meta data
1394 | metaDataSizeBytes += allocator->pageSize;
1395 | numberOfMasksUsed += 1;
1396 |
1397 | u32 numPages = allocator->size / allocator->pageSize;
1398 | assert(allocator->size % allocator->pageSize == 0, l);
1399 | u32 usedPages = allocator->numPagesUsed;
1400 | assert(usedPages <= numPages, l);
1401 | u32 freePages = numPages - usedPages;
1402 | u32 overheadPages = metaDataSizeBytes / allocator->pageSize;
1403 | assert(usedPages >= overheadPages, l);
1404 | usedPages -= overheadPages;
1405 |
1406 | u32 i_len = u32toa(i_to_a_buff, i_to_a_buff_size, freePages);
1407 | Copy(mem, i_to_a_buff, i_len, l);
1408 | mem += i_len;
1409 | memSize -= i_len;
1410 |
1411 | constexpr str_const out1(" free, ");
1412 | Copy(mem, out1.begin(), out1.size(), l);
1413 | mem += out1.size();
1414 | memSize -= out1.size();
1415 |
1416 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, usedPages);
1417 | Copy(mem, i_to_a_buff, i_len, l);
1418 | mem += i_len;
1419 | memSize -= i_len;
1420 |
1421 | constexpr str_const out2(" used, ");
1422 | Copy(mem, out2.begin(), out2.size(), l);
1423 | mem += out2.size();
1424 | memSize -= out2.size();
1425 |
1426 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, overheadPages);
1427 | Copy(mem, i_to_a_buff, i_len, l);
1428 | mem += i_len;
1429 | memSize -= i_len;
1430 |
1431 | constexpr str_const out3(" overhead\nRequested: ");
1432 | Copy(mem, out3.begin(), out3.size(), l);
1433 | mem += out3.size();
1434 | memSize -= out3.size();
1435 |
1436 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, allocator->requested);
1437 | Copy(mem, i_to_a_buff, i_len, l);
1438 | mem += i_len;
1439 | memSize -= i_len;
1440 |
1441 | constexpr str_const out4(" bytes, Served: ");
1442 | Copy(mem, out4.begin(), out4.size(), l);
1443 | mem += out4.size();
1444 | memSize -= out4.size();
1445 |
1446 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, usedPages * allocator->pageSize);
1447 | Copy(mem, i_to_a_buff, i_len, l);
1448 | mem += i_len;
1449 | memSize -= i_len;
1450 |
1451 | constexpr str_const out5(" bytes\n");
1452 | Copy(mem, out5.begin(), out5.size(), l);
1453 | mem += out5.size();
1454 | memSize -= out5.size();
1455 | }
1456 |
1457 | // Dump what's been written so far
1458 | mem = i_to_a_buff + i_to_a_buff_size;
1459 | callback(mem, (allocator->pageSize - i_to_a_buff_size) - memSize, userdata);
1460 |
1461 | // Reset memory buffer
1462 | Set(debugPage, 0, debugSize, l);
1463 | i_to_a_buff = debugPage; // Used to convert numbers to strings
1464 | mem = i_to_a_buff + i_to_a_buff_size;
1465 | memSize = allocator->pageSize - i_to_a_buff_size;
1466 |
1467 | { // Dump active list
1468 | constexpr str_const out0("\nActive allocations:\n");
1469 | Copy(mem, out0.begin(), out0.size(), l);
1470 | mem += out0.size();
1471 | memSize -= out0.size();
1472 |
1473 | for (Allocation* iter = allocator->active; iter != 0; iter = (iter->nextOffset == 0)? 0 : (Allocation*)((u8*)allocator + iter->nextOffset)) {
1474 | //u64 address = (u64)((void*)iter);
1475 | u64 alloc_address = (u64)((void*)allocator);
1476 |
1477 | constexpr str_const out5("\t");
1478 | Copy(mem, out5.begin(), out5.size(), l);
1479 | mem += out5.size();
1480 | memSize -= out5.size();
1481 |
1482 | u32 allocationOffset = (u32)((u8*)iter - (u8*)allocator);
1483 | i32 i_len = u32toa(i_to_a_buff, i_to_a_buff_size, allocationOffset);
1484 | Copy(mem, i_to_a_buff, i_len, l);
1485 | mem += i_len;
1486 | memSize -= i_len;
1487 |
1488 | constexpr str_const out2(", size: ");
1489 | Copy(mem, out2.begin(), out2.size(), l);
1490 | mem += out2.size();
1491 | memSize -= out2.size();
1492 |
1493 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, iter->size);
1494 | Copy(mem, i_to_a_buff, i_len, l);
1495 | mem += i_len;
1496 | memSize -= i_len;
1497 |
1498 | constexpr str_const out3(", padded: ");
1499 | Copy(mem, out3.begin(), out3.size(), l);
1500 | mem += out3.size();
1501 | memSize -= out3.size();
1502 |
1503 | u32 alignment = iter->alignment;
1504 | u32 allocationHeaderPadding = 0;
1505 | if (alignment != 0) { // Add padding to the header to compensate for alignment
1506 | allocationHeaderPadding = alignment - 1; // Somewhere in this range, we will be aligned
1507 | }
1508 |
1509 | u32 realSize = iter->size + (u32)(sizeof(Allocation)) + allocationHeaderPadding;
1510 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, realSize);
1511 | Copy(mem, i_to_a_buff, i_len, l);
1512 | mem += i_len;
1513 | memSize -= i_len;
1514 |
1515 | constexpr str_const out6(", alignment: ");
1516 | Copy(mem, out6.begin(), out6.size(), l);
1517 | mem += out6.size();
1518 | memSize -= out6.size();
1519 |
1520 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, iter->alignment);
1521 | Copy(mem, i_to_a_buff, i_len, l);
1522 | mem += i_len;
1523 | memSize -= i_len;
1524 |
1525 | constexpr str_const outfp(", first page: ");
1526 | Copy(mem, outfp.begin(), outfp.size(), l);
1527 | mem += outfp.size();
1528 | memSize -= outfp.size();
1529 |
1530 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, (allocationOffset) / allocator->pageSize);
1531 | Copy(mem, i_to_a_buff, i_len, l);
1532 | mem += i_len;
1533 | memSize -= i_len;
1534 |
1535 | constexpr str_const out0(", prev: ");
1536 | Copy(mem, out0.begin(), out0.size(), l);
1537 | mem += out0.size();
1538 | memSize -= out0.size();
1539 |
1540 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, iter->prevOffset);
1541 | Copy(mem, i_to_a_buff, i_len, l);
1542 | mem += i_len;
1543 | memSize -= i_len;
1544 |
1545 | constexpr str_const out1(", next: ");
1546 | Copy(mem, out1.begin(), out1.size(), l);
1547 | mem += out1.size();
1548 | memSize -= out1.size();
1549 |
1550 | i_len = u32toa(i_to_a_buff, i_to_a_buff_size, iter->nextOffset);
1551 | Copy(mem, i_to_a_buff, i_len, l);
1552 | mem += i_len;
1553 | memSize -= i_len;
1554 |
1555 | u32 pathLen = 0;
1556 | #if MEM_TRACK_LOCATION
1557 | if (iter->location != 0) {
1558 | pathLen = strlen((const u8*)iter->location);
1559 | }
1560 | #endif
1561 |
1562 | if (memSize < allocator->pageSize / 4 || memSize < (pathLen + pathLen / 4)) { // Drain occasionally
1563 | // Dump what's been written so far
1564 | mem = i_to_a_buff + i_to_a_buff_size;
1565 | callback(mem, (allocator->pageSize - i_to_a_buff_size) - memSize, userdata);
1566 |
1567 | // Reset memory buffer
1568 | Set(debugPage, 0, debugSize, l);
1569 | i_to_a_buff = debugPage; // Used to convert numbers to strings
1570 | mem = i_to_a_buff + i_to_a_buff_size;
1571 | memSize = allocator->pageSize - i_to_a_buff_size;
1572 | }
1573 |
1574 | constexpr str_const out_loc(", location: ");
1575 | Copy(mem, out_loc.begin(), out_loc.size(), l);
1576 | mem += out_loc.size();
1577 | memSize -= out_loc.size();
1578 |
1579 | #if MEM_TRACK_LOCATION
1580 | if (iter->location == 0) {
1581 | #else
1582 | {
1583 | #endif
1584 | assert(pathLen == 0, __LOCATION__);
1585 |
1586 | constexpr str_const out_loc("null");
1587 | Copy(mem, out_loc.begin(), out_loc.size(), l);
1588 | mem += out_loc.size();
1589 | memSize -= out_loc.size();
1590 | }
1591 | #if MEM_TRACK_LOCATION
1592 | else {
1593 | assert(pathLen != 0, __LOCATION__);
1594 | Copy(mem, iter->location, pathLen, l);
1595 | mem += pathLen;
1596 | memSize -= pathLen;
1597 | }
1598 | #endif
1599 |
1600 | constexpr str_const out4("\n");
1601 | Copy(mem, out4.begin(), out4.size(), l);
1602 | mem += out4.size();
1603 | memSize -= out4.size();
1604 | }
1605 |
1606 | if (memSize != allocator->pageSize - i_to_a_buff_size) { // Drain if needed
1607 | // Dump what's been written so far
1608 | mem = i_to_a_buff + i_to_a_buff_size;
1609 | callback(mem, (allocator->pageSize - i_to_a_buff_size) - memSize, userdata);
1610 |
1611 | // Reset memory buffer
1612 | Set(debugPage, 0, debugSize, l);
1613 | i_to_a_buff = debugPage; // Used to convert numbers to strings
1614 | mem = i_to_a_buff + i_to_a_buff_size;
1615 | memSize = allocator->pageSize - i_to_a_buff_size;
1616 | }
1617 | }
1618 |
1619 | // Reset memory buffer
1620 | Set(debugPage, 0, debugSize, l);
1621 | i_to_a_buff = debugPage; // Used to convert numbers to strings
1622 | mem = i_to_a_buff + i_to_a_buff_size;
1623 | memSize = allocator->pageSize - i_to_a_buff_size;
1624 |
1625 | constexpr str_const newline("\n\t");
1626 | constexpr str_const isSet("0");
1627 | constexpr str_const notSet("-");
1628 |
1629 | { // Draw a pretty graph
1630 | u32 numPages = allocator->size / allocator->pageSize;
1631 | u32* mask = (u32*)AllocatorPageMask(allocator);
1632 |
1633 | constexpr str_const out5("\nPage chart:\n\t");
1634 | Copy(mem, out5.begin(), out5.size(), l);
1635 | mem += out5.size();
1636 | memSize -= out5.size();
1637 |
1638 | for (u32 i = 0; i < numPages; ++i) {
1639 | u32 m = i / TrackingUnitSize;
1640 | u32 b = i % TrackingUnitSize;
1641 |
1642 | bool set = mask[m] & (1 << b);
1643 | if (set) {
1644 | Copy(mem, isSet.begin(), isSet.size(), l);
1645 | mem += isSet.size();
1646 | memSize -= isSet.size();
1647 | }
1648 | else {
1649 | Copy(mem, notSet.begin(), notSet.size(), l);
1650 | mem += notSet.size();
1651 | memSize -= notSet.size();
1652 | }
1653 |
1654 | if ((i + 1) % 80 == 0) {
1655 | Copy(mem, newline.begin(), newline.size(), l);
1656 | mem += newline.size();
1657 | memSize -= newline.size();
1658 | }
1659 |
1660 | if (memSize < allocator->pageSize / 4) { // Drain occasionally
1661 | // Dump what's been written so far
1662 | mem = i_to_a_buff + i_to_a_buff_size;
1663 | callback(mem, (allocator->pageSize - i_to_a_buff_size) - memSize, userdata);
1664 |
1665 | // Reset memory buffer
1666 | Set(debugPage, 0, debugSize, l);
1667 | i_to_a_buff = debugPage; // Used to convert numbers to strings
1668 | mem = i_to_a_buff + i_to_a_buff_size;
1669 | memSize = allocator->pageSize - i_to_a_buff_size;
1670 | }
1671 | }
1672 |
1673 | if (memSize != allocator->pageSize - i_to_a_buff_size) { // Drain if needed
1674 | // Dump what's been written so far
1675 | mem = i_to_a_buff + i_to_a_buff_size;
1676 | callback(mem, (allocator->pageSize - i_to_a_buff_size) - memSize, userdata);
1677 |
1678 | // Reset memory buffer
1679 | Set(debugPage, 0, debugSize, l);
1680 | i_to_a_buff = debugPage; // Used to convert numbers to strings
1681 | mem = i_to_a_buff + i_to_a_buff_size;
1682 | memSize = allocator->pageSize - i_to_a_buff_size;
1683 | }
1684 | }
1685 |
1686 | allocator->ReleaseDbgPage();
1687 | }
1688 |
1689 | void Memory::Debug::PageContent(Allocator* allocator, u32 page, WriteCallback callback, void* userdata) {
1690 | u8* mem = (u8*)allocator + page * allocator->pageSize;
1691 | u32 chunk = allocator->pageSize / 4; // pageSize does not need to be a multiple of 4; the last callback covers the remainder
1692 |
1693 | callback(mem, chunk, userdata);
1694 | mem += chunk;
1695 | callback(mem, chunk, userdata);
1696 | mem += chunk;
1697 | callback(mem, chunk, userdata);
1698 | mem += chunk;
1699 | callback(mem, allocator->pageSize - (allocator->pageSize / 4) * 3, userdata);
1700 | }
1701 |
1702 | #pragma warning(default:6011)
1703 | #pragma warning(default:28182)
1704 |
--------------------------------------------------------------------------------
/mem.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | /*
4 | Game Memory Allocator:
5 |
6 | Game Allocator is a generic memory manager intended for games, embedded devices, and web assembly.
7 | Given a large array of memory, the library provides functions to allocate and release that memory similar to malloc / free.
8 | The memory will be broken up into pages (4 KiB by default) and tracked at the page granularity.
9 | A sub-allocator is provided which breaks pages up into fast free lists for smaller allocations.
10 |
11 | Usage:
12 |
13 | Let's assume you have a void* to some large area of memory and know how many bytes large that area is.
14 |
15 | Call the Memory::Initialize function to create an allocator. The first two arguments are the memory and size,
16 | the third argument is the page size with which the memory should be managed. The default page size is 4 KiB
17 |
18 | The memory being passed in should be 8 byte aligned, and the size of the memory should be a multiple of pageSize.
19 | The Memory::AlignAndTrim helper function will align a region of memory so it's ready for initialize.
20 | It modifies the memory and size variables that are passed to the function. AlignAndTrim returns the number of bytes lost.
21 |
22 | Allocate memory with the allocator object's Allocate function, and release memory with its Release function.
23 | Allocate takes an optional alignment, which by default is 0. Only unaligned allocations utilize a fast free list allocator.
24 | Both functions also take a const char* which is optionally the location of the allocation.
25 |
26 | New and Delete functions are also provided; these will invoke the constructor / destructor of the class they are
27 | being invoked on. New will forward up to three arguments and takes an optional location pointer.
28 |
29 | When you are finished with an allocator, clean it up by calling Memory::Shutdown. The shutdown function
30 | will assert in debug builds if there are any memory leaks.
31 |
32 | Example:
33 |
34 | void run() {
35 | // Declare how much memory to use
36 | // Adding (DefaultPageSize - 1) to size ensures that there is enough space for padding
37 | unsigned int size = MB(512) + (DefaultPageSize - 1);
38 |
39 | // Allocate memory from the operating system
40 | LPVOID memory = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); // Windows
41 |
42 | // Align the memory and initialize the allocator
43 | u32 lost = Memory::AlignAndTrim(&memory, &size, Memory::AllocatorAlignment, Memory::DefaultPageSize);
44 | Memory::Allocator* allocator = Memory::Initialize(memory, size, Memory::DefaultPageSize);
45 |
46 | // Allocate & release memory
47 | int* number = (int*)allocator->Allocate(sizeof(int)); // Only the number of bytes is required
48 | allocator->Release(number); // Only the void* is required
49 |
50 | // New and delete can also be used:
51 | SomeClass* obj = allocator->New<SomeClass>("arguments");
52 | allocator->Delete(obj);
53 |
54 | // Cleanup the allocator
55 | Memory::Shutdown(allocator);
56 | allocator = 0;
57 |
58 | // Release memory back to operating system
59 | VirtualFree(memory, 0, MEM_RELEASE);
60 | }
61 |
62 | Compile flags:
63 |
64 | MEM_FIRST_FIT -> This affects how fast memory is allocated. If it's set then every allocation
65 | searches for the first available page from the start of the memory. If it's not
66 | set, then an allocation header is maintained. It's advanced with each allocation,
67 | and new allocations search for memory from the allocation header.
68 | MEM_CLEAR_ON_ALLOC -> When set, memory will be cleared to 0 before being returned from Memory::Allocate
69 | If both clear and debug on alloc are set, clear will take precedence
70 | MEM_DEBUG_ON_ALLOC -> If set, full page allocations will fill the padding of the page with "-MEMORY"
71 | MEM_USE_SUBALLOCATORS -> If set, small allocations will be made using a free list allocator. There are free list
72 | allocators for 64, 128, 256, 512, 1024 and 2048 byte allocations. Only allocations that
73 | don't specify an alignment can use the fast free list allocator. The sub-allocator will
74 | provide better page utilization, for example a 4096 byte page can hold thirty-two 128 byte allocations.
75 | MEM_TRACK_LOCATION -> If set, a const char* will be added to Memory::Allocation which tracks the __LINE__ and __FILE__
76 | of each allocation. Setting this bit will add 8 bytes to the Memory::Allocation struct.
77 |
78 | Debugging:
79 |
80 | There are a few debug functions exposed in the Memory::Debug namespace. When an allocator is initialized, the page
81 | immediateley before the first allocatable page is reserved as a debug page. You can fill this page with whatever
82 | immediately before the first allocatable page is reserved as a debug page. You can fill this page with whatever
83 | to the debug page of an allocator with the RequestDbgPage function. Be sure to release the page after you are done
84 | using it by calling ReleaseDbgPage();
85 |
86 | The Memory::Debug::MemInfo function can be used to retrieve information about the state of the memory allocator.
87 | It provides meta data like how many pages are in use, a list of active allocations, and a visual bitmap chart to
88 | make debugging the memory bitmask easy. You can write this information to a file like so:
89 |
90 | DeleteFile(L"MemInfo.txt");
91 | HANDLE hFile = CreateFile(L"MemInfo.txt", GENERIC_WRITE, FILE_SHARE_READ, NULL, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL);
92 | Memory::Debug::MemInfo(Memory::GlobalAllocator, [](const u8* mem, u32 size, void* fileHandle) {
93 | HANDLE file = *(HANDLE*)fileHandle;
94 | DWORD bytesWritten;
95 | WriteFile(file, mem, size, &bytesWritten, nullptr);
96 | }, &hFile);
97 | CloseHandle(hFile);
98 |
99 | There is a similar Memory::Debug::PageContent, which given a page number will dump the binary content of a page.
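For example (a sketch mirroring the MemInfo example above; the file name and page number are arbitrary):

HANDLE hFile = CreateFile(L"Page3.bin", GENERIC_WRITE, FILE_SHARE_READ, NULL, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL);
Memory::Debug::PageContent(Memory::GlobalAllocator, 3, [](const u8* mem, u32 size, void* fileHandle) {
HANDLE file = *(HANDLE*)fileHandle;
DWORD bytesWritten;
WriteFile(file, mem, size, &bytesWritten, nullptr);
}, &hFile);
CloseHandle(hFile);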
100 |
101 | Resources:
102 |
103 | Compile without CRT
104 | https://yal.cc/cpp-a-very-tiny-dll/
105 |
106 | Ready Set Allocate:
107 | https://web.archive.org/web/20120419125628/http://www.altdevblogaday.com/2011/04/11/ready-set-allocate-part-1/
108 | https://web.archive.org/web/20120419125404/http://www.altdevblogaday.com/2011/04/26/ready-set-allocate-part-2/
109 | https://web.archive.org/web/20120419010208/http://www.altdevblogaday.com/2011/05/15/ready-set-allocate-part-3/
110 | https://web.archive.org/web/20120418212016/http://www.altdevblogaday.com/2011/05/26/ready-set-allocate-part-4/
111 | https://web.archive.org/web/20120413201435/http://www.altdevblogaday.com/2011/06/08/ready-set-allocate-part-5/
112 | https://web.archive.org/web/20120321205231/http://www.altdevblogaday.com/2011/06/30/ready-set-allocate-part-6/
113 |
114 | How to combine __LINE__ and __FILE__ into a c string:
115 | https://stackoverflow.com/questions/2653214/stringification-of-a-macro-value
116 |
117 | C++ overload new, new[], delete, and delete[]
118 | https://cplusplus.com/reference/new/operator%20new/
119 | https://cplusplus.com/reference/new/operator%20delete/
120 | https://cplusplus.com/reference/new/operator%20new[]/
121 | https://cplusplus.com/reference/new/operator%20delete[]/
122 |
123 | Memory alignment discussion:
124 | https://stackoverflow.com/questions/227897/how-to-allocate-aligned-memory-only-using-the-standard-library
125 |
126 | Scott Schurr's const string
127 | https://www.youtube.com/watch?v=BUnNA2dLRsU
128 | */
129 |
130 | #pragma warning(disable:28251)
131 |
132 | // When allocating new memory, if MEM_FIRST_FIT is defined and set to 1 every allocation will scan
133 | // the available memory from the first bit to the last bit looking for enough space to satisfy the
134 | allocation. If MEM_FIRST_FIT is set to 0, then the memory is searched iteratively. I.e., when we
135 | // allocate the position in memory after the allocation is saved, and the next allocation starts
136 | // searching from there.
137 | #define MEM_FIRST_FIT 1
138 |
139 | // If set to 1, the allocator will clear or fill memory when allocating it
140 | #define MEM_CLEAR_ON_ALLOC 0 // Clears memory on each allocation
141 | #define MEM_DEBUG_ON_ALLOC 0 // Fills memory with Memory- on each allocation
142 |
143 | // Set to 0 to disable the sub-allocators
144 | #define MEM_USE_SUBALLOCATORS 1
145 |
146 | // If set to 1, adds a const char* location to each allocation
147 | #define MEM_TRACK_LOCATION 1
148 |
149 | #ifndef ATLAS_U8
150 | #define ATLAS_U8
151 | typedef unsigned char u8;
152 | static_assert (sizeof(u8) == 1, "u8 should be defined as a 1 byte type");
153 | #endif
154 |
155 | #ifndef ATLAS_U32
156 | #define ATLAS_U32
157 | typedef unsigned int u32;
158 | static_assert (sizeof(u32) == 4, "u32 should be defined as a 4 byte type");
159 | #endif
160 |
161 | #ifndef ATLAS_I32
162 | #define ATLAS_I32
163 | typedef int i32;
164 | static_assert (sizeof(i32) == 4, "i32 should be defined as a 4 byte type");
165 | #endif
166 |
167 | #ifndef ATLAS_U64
168 | #define ATLAS_U64
169 | typedef unsigned long long u64;
170 | static_assert (sizeof(u64) == 8, "u64 should be defined as an 8 byte type");
171 | #endif
172 |
173 | #ifndef ATLAS_I64
174 | #define ATLAS_I64
175 | typedef long long i64;
176 | static_assert (sizeof(i64) == 8, "i64 should be defined as an 8 byte type");
177 | #endif
178 |
179 |
180 | #if _WIN64
181 | #ifdef ATLAS_32
182 | #error Can't define both 32 and 64 bit system
183 | #endif
184 | #define ATLAS_64 1
185 | namespace Memory {
186 | typedef u64 ptr_type;
187 | typedef i64 diff_type;
188 | static_assert (sizeof(ptr_type) == 8, "ptr_type should be defined as an 8 byte type on a 64 bit system");
189 | static_assert (sizeof(diff_type) == 8, "diff_type should be defined as an 8 byte type on a 64 bit system");
190 | }
191 | #elif _WIN32
192 | #ifdef ATLAS_64
193 | #error Can't define both 32 and 64 bit system
194 | #endif
195 | #define ATLAS_32 1
196 |
197 | namespace Memory {
198 | typedef u32 ptr_type;
199 | typedef i32 diff_type;
200 | static_assert (sizeof(ptr_type) == 4, "ptr_type should be defined as a 4 byte type on a 32 bit system");
201 | static_assert (sizeof(diff_type) == 4, "diff_type should be defined as a 4 byte type on a 32 bit system");
202 | }
203 | #elif _WASM32
204 | #ifdef ATLAS_64
205 | #error Can't define both 32 and 64 bit system
206 | #endif
207 | #define ATLAS_32 1
208 |
209 | namespace Memory {
210 | typedef unsigned long ptr_type;
211 | typedef long diff_type;
212 | static_assert (sizeof(ptr_type) == 4, "ptr_type should be defined as a 4 byte type on a 32 bit system");
213 | static_assert (sizeof(diff_type) == 4, "diff_type should be defined as a 4 byte type on a 32 bit system");
214 | }
215 | #else
216 | #error Unknown platform
217 | #endif
218 |
219 | inline void* operator new (Memory::ptr_type n, void* ptr) {
220 | return ptr;
221 | };
222 |
223 | namespace Memory {
224 | // A callback can be registered with each allocator. The same callback signature is used for both Allocate and Release
225 | typedef void (*Callback)(struct Allocator* allocator, void* allocationHeaderAddress, u32 bytesRequested, u32 bytesServed, u32 firstPage, u32 numPages);
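// For illustration only (not part of the library API): a logging hook could be installed after Initialize
// by assigning to the allocateCallback / releaseCallback members of Allocator. The OnAllocate name below is hypothetical.
//
//     void OnAllocate(Allocator* a, void* header, u32 requested, u32 served, u32 firstPage, u32 numPages) {
//         // e.g. record that 'requested' bytes were served using 'numPages' pages starting at 'firstPage'
//     }
//     allocator->allocateCallback = OnAllocate;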
226 |
227 | // Allocation struct uses a 32 bit offset instead of a pointer. This caps the maximum amount of memory GameAllocator can manage at 4 GiB
228 | typedef u32 Offset32;
229 |
230 | struct Allocation {
231 | #if MEM_TRACK_LOCATION
232 | const char* location;
233 | #if ATLAS_32
234 | u32 padding_32bit; // Keep sizeof(Allocation) consistent between x64 & x86
235 | #endif
236 | #endif
237 | Offset32 prevOffset; // Offsets are the number of bytes from allocator
238 | Offset32 nextOffset;
239 | u32 size; // Unpadded allocation size, ie what you pass to malloc
240 | u32 alignment;
241 | };
242 |
243 | // Unlike Allocation, Allocator uses pointers. There is only ever one allocator
244 | // and saving a few bytes here isn't that important. Similarly, the free list
245 | // pointers exist even if MEM_USE_SUBALLOCATORS is off. This is done to keep the
246 | // size of this struct consistent for debugging.
247 | struct Allocator {
248 | Callback allocateCallback; // Callback for malloc / new
249 | Callback releaseCallback; // Callback for free / delete
250 |
251 | Allocation* free_64; // The max size for each of these lists is whatever the number after the
252 | Allocation* free_128; // underscore is, minus the size of the Allocation structure, which is
253 | Allocation* free_256; // either 16 or 24 bytes (depending on whether the location is tracked or not).
254 | Allocation* free_512; // For example, the largest allocation the 64 byte free list can hold is 40 bytes (48 without location tracking)
255 | Allocation* free_1024; // There isn't much significance to these numbers, tune them to better match your structs
256 | Allocation* free_2048; // Only unaligned allocations (alignment of 0) can utilize the sub allocators.
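// For example (following the sub-allocator selection in mem.cpp): with location tracking enabled,
// sizeof(Allocation) is 24 bytes, so an unaligned 100 byte request becomes 124 bytes and is served
// from the free_128 list.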
257 |
258 | Allocation* active; // Memory that has been allocated, but not released
259 |
260 | u32 size; // In bytes, how much total memory is the allocator managing
261 | u32 requested; // How many bytes were requested (raw)
262 | u32 pageSize; // Default is 4096, but each allocator can have a unique size
263 | u32 scanBit; // Only used if MEM_FIRST_FIT is off
264 |
265 | u32 numPagesUsed;
266 | u32 peekPagesUsed; // Use this to monitor how much memory your application actually needs
267 | u32 mask;
268 | u32 mask_padding;
269 |
270 | #if ATLAS_32
271 | u32 padding_32bit[9]; // Padding to make sure the struct stays the same size in x64 / x86 builds
272 | #endif
273 |
274 | void* Allocate(u32 bytes, u32 alignment = 0, const char* location = 0);
275 | void Release(void* t, const char* location = 0);
276 |
277 | u8* RequestDbgPage();
278 | void ReleaseDbgPage();
279 |
280 | template <typename T, typename A1>
281 | inline T* New(A1&& a1, const char* location = 0) {
282 | const u32 bytes = sizeof(T);
283 | const u32 alignment = 0;
284 | void* memory = this->Allocate(bytes, alignment, location);
285 | T* object = ::new (memory) T(a1);
286 | return object;
287 | }
288 |
289 | template <typename T, typename A1, typename A2>
290 | inline T* New(A1&& a1, A2&& a2, const char* location = 0) {
291 | const u32 bytes = sizeof(T);
292 | const u32 alignment = 0;
293 | void* memory = this->Allocate(bytes, alignment, location);
294 | T* object = ::new (memory) T(a1, a2);
295 | return object;
296 | }
297 |
298 | template <typename T, typename A1, typename A2, typename A3>
299 | inline T* New(A1&& a1, A2&& a2, A3&& a3, const char* location = 0) {
300 | const u32 bytes = sizeof(T);
301 | const u32 alignment = 0;
302 | void* memory = this->Allocate(bytes, alignment, location);
303 | T* object = ::new (memory) T(a1, a2, a3);
304 | return object;
305 | }
306 |
307 | template <typename T>
308 | inline T* New(const char* location = 0) {
309 | const u32 bytes = sizeof(T);
310 | const u32 alignment = 0;
311 | void* memory = this->Allocate(bytes, alignment, location);
312 | T* object = ::new (memory) T();
313 | return object;
314 | }
315 |
316 | template <typename T>
317 | inline void Delete(T* ptr, const char* location = 0) {
318 | T* obj = (T*)ptr;
319 | obj->T::~T();
320 | this->Release(ptr, location);
321 | }
322 | };
323 |
324 | // 4 KiB is a good default page size. Most of your small allocations will go through the sub-allocators
325 | // so this page size is mostly important for larger allocations. Feel free to change to something more
326 | // appropriate if needed.
327 | const u32 DefaultPageSize = 4096;
328 |
329 | // Don't change tracking unit size. The bitmask that tracks which pages are free is stored as an
330 | // array of 32 bit integers. Changing this number would require changing how mem.cpp is implemented
331 | const u32 TrackingUnitSize = 32;
332 | // Don't change allocator alignment. Every allocator should start at an 8 byte aligned memory address.
333 | // Internally the allocator uses offsets to access some data, the start alignment is important.
334 | const u32 AllocatorAlignment = 8; // Should stay 8, even on 32 bit platforms
335 |
336 | // Call AlignAndTrim before Initialize to make sure that memory is aligned to alignment
337 | // and to make sure that the size of the memory (after it's been aligned) is a multiple of pageSize
338 | // both arguments are modified, the return value is how many bytes were removed
339 | u32 AlignAndTrim(void** memory, u32* size, u32 alignment = AllocatorAlignment, u32 pageSize = DefaultPageSize);
340 |
341 | // The initialize function will place the Allocator struct at the start of the provided memory.
342 | // The allocator struct is followed by a bitmask, in which each bit tracks if a page is in use or not.
343 | // The bitmask is a u32 array. If the end of the bitmask is in the middle of a page, the rest of that
344 | // page is lost as padding. The next page is a debug page that you can use for anything, only functions in
345 | // the Memory::Debug namespace mess with the debug page, anything in Memory:: doesn't touch it.
346 | // The allocator that's returned should be used to set the global allocator.
347 | Allocator* Initialize(void* memory, u32 bytes, u32 pageSize = DefaultPageSize);
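// Rough overhead math (assuming the default 4 KiB page size): managing 512 MiB means 131,072 pages,
// so the tracking bitmask needs 131,072 bits = 16 KiB; together with the Allocator struct that rounds
// up to 5 pages, and the debug page brings the total to roughly 6 pages (24 KiB) of overhead.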
348 |
349 | // After you are finished with an allocator, shut it down. The shutdown function will assert in a debug build
350 | // if you have any memory that was allocated but not released. This function doesn't do much, it exists
351 | // to provide a bunch of asserts that ensure that an application is shutting down cleanly.
352 | void Shutdown(Allocator* allocator);
353 |
354 | // Memset and Memcpy utility functions. One big difference is that this set function only takes a u8.
355 | // both of these functions work on larger data types, then work their way down. IE: they try to set or
356 | // copy the memory using u64's, then u32's, then u16's, and finally u8's
357 | void* Set(void* memory, u8 value, u32 size, const char* location = 0);
358 | void Copy(void* dest, const void* source, u32 size, const char* location = 0);
359 |
360 | // The debug namespace lets you access information about the current state of the allocator,
361 | // gives you access to the contents of a page for debugging, and contains a debug page that
362 | // you can use for whatever. Be careful though, MemInfo and PageContent might write to the
363 | // debug page, invalidating what was previously in there.
364 | namespace Debug {
365 | typedef void (*WriteCallback)(const u8* mem, u32 size, void* userdata);
366 |
367 | void MemInfo(Allocator* allocator, WriteCallback callback, void* userdata = 0);
368 | void PageContent(Allocator* allocator, u32 page, WriteCallback callback, void* userdata = 0);
369 | }
370 | }
371 |
372 |
373 |
374 | // Some compile time asserts to make sure that all our memory is sized correctly and aligns well
375 | static_assert (sizeof(Memory::Allocator) % 8 == 0, "Memory::Allocator size needs to be 8 byte alignable for the allocation mask to start on u64 alignment without any padding");
376 | static_assert (Memory::TrackingUnitSize % Memory::AllocatorAlignment == 0, "Memory::TrackingUnitSize must be a multiple of 8 (bits / byte)");
377 | static_assert (sizeof(Memory::Allocator) == 96 + 8, "Memory::Allocator is not the expected size");
378 | #if MEM_TRACK_LOCATION
379 | static_assert (sizeof(Memory::Allocation) == 24, "Memory::Allocation should be 24 bytes (192 bits)");
380 | #else
381 | static_assert (sizeof(Memory::Allocation) == 16, "Memory::Allocation should be 16 bytes (128 bits)");
382 | #endif
383 |
384 | // Use the __LOCATION__ macro to pack both __LINE__ and __FILE__ into a c string
385 | #define atlas_xstr(a) atlas_str(a)
386 | #define atlas_str(a) #a
387 | #define __LOCATION__ "On line: " atlas_xstr(__LINE__) ", in file: " __FILE__
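// For example (illustrative), a call site that wants its file / line recorded can pass the macro explicitly:
//     void* block = allocator->Allocate(64, 0, __LOCATION__);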
388 |
389 | // Make sure the platform is set
390 | #ifndef ATLAS_32
391 | #ifndef ATLAS_64
392 | #error "Unknown platform type. Is this 32 or 64 bit?"
393 | #endif
394 | #endif
395 |
396 | #pragma warning(default:28251)
--------------------------------------------------------------------------------
/mem.js:
--------------------------------------------------------------------------------
1 | class GameAllocator {
2 |
3 | constructor(totalMemoryBytes, heapSize) {
4 | this.WebAssemblyMemory = null;
5 | this.memory = null // Alias for WebAssemblyMemory
6 | this.AllocatorPtr = null;
7 | this.WasmExports = null;
8 | this.RequestedBytes = totalMemoryBytes;
9 | this.HeapSizeBytes = 0;
10 | this.RequestedHeapSize = heapSize;
11 | this.GlobalDumpState = "";
12 |
13 | let self = this;
14 |
15 | // WASM is 64 KiB / page (our allocator is 4 KiB);
16 | let wasmPageSize = 64 * 1024; // 64 KiB
17 | let wasmNumPages = Math.ceil(totalMemoryBytes / wasmPageSize);
18 | self.WebAssemblyMemory = new WebAssembly.Memory( {
19 | initial: wasmNumPages,
20 | maximum: wasmNumPages
21 | });
22 | self.memory = this.WebAssemblyMemory;
23 | }
24 |
25 | logError(str) {
26 | console.error(str);
27 | }
28 |
29 | logInfo(str) {
30 | console.log(str);
31 | }
32 |
33 | InjectWebAssemblyImportObject(importObject) {
34 | if (!importObject.hasOwnProperty("env")) {
35 | importObject.env = {};
36 | }
37 | importObject.env.memory = this.WebAssemblyMemory;
38 | let self = this;
39 |
40 | importObject.env["GameAllocator_jsBuildMemState"] = function(ptr, len) {
41 | const array = new Uint8Array(self.WebAssemblyMemory.buffer, ptr, len);
42 | const decoder = new TextDecoder()
43 | const string = decoder.decode(array);
44 | self.GlobalDumpState += string;
45 | };
46 | }
47 |
48 | InitializeWebAssembly(wasmExports) {
49 | if (!wasmExports) {
50 | this.logError("invalid exports object");
51 | }
52 | this.WasmExports = wasmExports;
53 | this.AllocatorPtr = this.WasmExports.GameAllocator_wasmInitialize(this.RequestedHeapSize);
54 | this.HeapSizeBytes = this.WasmExports.GameAllocator_wasmHeapSize(this.RequestedBytes);
55 | this.logInfo("Requested heap: " + this.RequestedHeapSize + ", actual heap: " + this.HeapSizeBytes);
56 | if (this.HeapSizeBytes < this.RequestedHeapSize) {
57 | this.logError("Actual heap size is less than requested heap size");
58 | }
59 | }
60 |
61 | ShutdownWebAssembly() {
62 | this.WasmExports.GameAllocator_wasmShutdown(this.AllocatorPtr);
63 | this.AllocatorPtr = 0;
64 | }
65 |
66 | Allocate(bytes, alignment) {
67 | if (!alignment) {
68 | alignment = 0;
69 | }
70 | if (!bytes || bytes <= 0) {
71 | this.logError("Can't allocate <=0 bytes!");
72 | bytes = 1;
73 | }
74 |
75 | return this.WasmExports.GameAllocator_wasmAllocate(this.AllocatorPtr, bytes, alignment);
76 | }
77 |
78 | Release(ptr) {
79 | this.WasmExports.GameAllocator_wasmRelease(this.AllocatorPtr, ptr);
80 | }
81 |
82 | Set(ptr, value, size) {
83 | this.WasmExports.GameAllocator_wasmSet(ptr, value, size);
84 | }
85 |
86 | Copy(dest_ptr, src_ptr, size) {
87 | this.WasmExports.GameAllocator_wasmCopy(dest_ptr, src_ptr, size);
88 | }
89 |
90 | GetNumPages() {
91 | return this.WasmExports.GameAllocator_wasmGetNumPages(this.AllocatorPtr);
92 | }
93 |
94 | GetNumPagesInUse() {
95 | return this.WasmExports.GameAllocator_wasmGetNumPagesInUse(this.AllocatorPtr);
96 | }
97 |
98 | GetPeekPagesUsed() {
99 | return this.WasmExports.GameAllocator_wasmGetPeekPagesUsed(this.AllocatorPtr);
100 | }
101 |
102 | GetRequestedBytes() {
103 | return this.WasmExports.GameAllocator_wasmGetRequestedBytes(this.AllocatorPtr);
104 | }
105 |
106 | GetServedBytes() {
107 | return this.WasmExports.GameAllocator_wasmGetServedBytes(this.AllocatorPtr);
108 | }
109 |
110 | IsPageInUse(page) {
111 | if (page < 0) {
112 | this.logError("invalid page");
113 | page = 0;
114 | }
115 | let result = this.WasmExports.GameAllocator_wasmIsPageInUse(this.AllocatorPtr, page);
116 | return result != 0;
117 | }
118 |
119 | Size() {
120 | return this.WasmExports.GameAllocator_wasmGetSize(this.AllocatorPtr);
121 | }
122 |
123 | GetNumOverheadPages() {
124 | return this.WasmExports.GameAllocator_wasmGetNumOverheadPages(this.AllocatorPtr);
125 | }
126 |
127 | StrLen(str_ptr) {
128 | return this.WasmExports.GameAllocator_wasmStrLen(str_ptr);
129 | }
130 |
131 | GetAllocationDebugInfo(alloc_ptr) {
132 | return this.WasmExports.GameAllocator_wasmGetAllocationDebugName(this.AllocatorPtr, alloc_ptr);
133 | }
134 |
135 | DebugDumpState() {
136 | this.GlobalDumpState = "";
137 | this.WasmExports.GameAllocator_wasmDumpState(this.AllocatorPtr);
138 | return this.GlobalDumpState;
139 | }
140 | }
--------------------------------------------------------------------------------
/notes.md:
--------------------------------------------------------------------------------
1 | # Notes
2 |
3 | I didn't put enough importance on aligned allocations. If the goal of the memory manager is to run in a browser, every allocation (even the sub-allocators) should be aligned on a 4 byte boundary, so it's easy to access / set as a uint32 array.
4 |
5 | The easiest way to accommodate this is probably to ensure that all allocation headers land on a 4 byte alignment boundary (unless explicitly requested?), and that the header struct is a multiple of 4 bytes as well.
6 |
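For example (a small sketch, assuming an allocator wrapper like the one in mem.js; method names approximate): a Uint32Array view over wasm memory can only start at an offset that is a multiple of 4, otherwise the constructor throws a RangeError, which is what pushes unaligned allocations down a slower byte-wise path.

```
let ptr = allocator.Allocate(64);
if (ptr % 4 === 0) {
    // Aligned: the block can be read / written 32 bits at a time
    let view = new Uint32Array(allocator.memory.buffer, ptr, 64 / 4);
    view.fill(0);
} else {
    // Unaligned: new Uint32Array(buffer, ptr, ...) would throw a RangeError,
    // so the block has to be touched one byte at a time
    let bytes = new Uint8Array(allocator.memory.buffer, ptr, 64);
    bytes.fill(0);
}
```
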
7 | The memory library should expose free-standing functions as needed
8 |
9 | ```
10 | importObject.env.memcpy = function(ptr_dest, ptr_src, int_len) {
11 | let dst_buffer = new Uint8Array(allocator.memory.buffer, ptr_dest, int_len);
12 | let src_buffer = new Uint8Array(allocator.memory.buffer, ptr_src, int_len);
13 | for (let i = 0; i < int_len; ++i) {
14 | dst_buffer[i] = src_buffer[i];
15 | }
16 | return ptr_dest;
17 | }
18 | ```
--------------------------------------------------------------------------------