├── .gitignore ├── GarbageCollection.cpp ├── GarbageCollection.h ├── GarbageCollection.sln ├── GarbageCollection.vcxproj ├── GarbageCollection.vcxproj.filters ├── LICENSE ├── README.md ├── makefile └── test.cpp /.gitignore: -------------------------------------------------------------------------------- 1 | #custom 2 | 3 | *.exe 4 | *.patch 5 | 6 | # Created by https://www.gitignore.io/api/linux,windows,notepadpp,visualstudio 7 | # Edit at https://www.gitignore.io/?templates=linux,windows,notepadpp,visualstudio 8 | 9 | ### Linux ### 10 | *~ 11 | 12 | # temporary files which can be created if a process still has a handle open of a deleted file 13 | .fuse_hidden* 14 | 15 | # KDE directory preferences 16 | .directory 17 | 18 | # Linux trash folder which might appear on any partition or disk 19 | .Trash-* 20 | 21 | # .nfs files are created when an open file is removed but is still being accessed 22 | .nfs* 23 | 24 | ### NotepadPP ### 25 | # Notepad++ backups # 26 | *.bak 27 | 28 | ### Windows ### 29 | # Windows thumbnail cache files 30 | Thumbs.db 31 | ehthumbs.db 32 | ehthumbs_vista.db 33 | 34 | # Dump file 35 | *.stackdump 36 | 37 | # Folder config file 38 | [Dd]esktop.ini 39 | 40 | # Recycle Bin used on file shares 41 | $RECYCLE.BIN/ 42 | 43 | # Windows Installer files 44 | *.cab 45 | *.msi 46 | *.msix 47 | *.msm 48 | *.msp 49 | 50 | # Windows shortcuts 51 | *.lnk 52 | 53 | ### VisualStudio ### 54 | ## Ignore Visual Studio temporary files, build results, and 55 | ## files generated by popular Visual Studio add-ons. 56 | ## 57 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore 58 | 59 | # User-specific files 60 | *.rsuser 61 | *.suo 62 | *.user 63 | *.userosscache 64 | *.sln.docstates 65 | 66 | # User-specific files (MonoDevelop/Xamarin Studio) 67 | *.userprefs 68 | 69 | # Build results 70 | [Dd]ebug/ 71 | [Dd]ebugPublic/ 72 | [Rr]elease/ 73 | [Rr]eleases/ 74 | x64/ 75 | x86/ 76 | bld/ 77 | [Bb]in/ 78 | [Oo]bj/ 79 | [Ll]og/ 80 | 81 | # Visual Studio 2015/2017 cache/options directory 82 | .vs/ 83 | # Uncomment if you have tasks that create the project's static files in wwwroot 84 | #wwwroot/ 85 | 86 | # Visual Studio 2017 auto generated files 87 | Generated\ Files/ 88 | 89 | # MSTest test Results 90 | [Tt]est[Rr]esult*/ 91 | [Bb]uild[Ll]og.* 92 | 93 | # NUNIT 94 | *.VisualState.xml 95 | TestResult.xml 96 | 97 | # Build Results of an ATL Project 98 | [Dd]ebugPS/ 99 | [Rr]eleasePS/ 100 | dlldata.c 101 | 102 | # Benchmark Results 103 | BenchmarkDotNet.Artifacts/ 104 | 105 | # .NET Core 106 | project.lock.json 107 | project.fragment.lock.json 108 | artifacts/ 109 | 110 | # StyleCop 111 | StyleCopReport.xml 112 | 113 | # Files built by Visual Studio 114 | *_i.c 115 | *_p.c 116 | *_h.h 117 | *.ilk 118 | *.meta 119 | *.obj 120 | *.iobj 121 | *.pch 122 | *.pdb 123 | *.ipdb 124 | *.pgc 125 | *.pgd 126 | *.rsp 127 | *.sbr 128 | *.tlb 129 | *.tli 130 | *.tlh 131 | *.tmp 132 | *.tmp_proj 133 | *_wpftmp.csproj 134 | *.log 135 | *.vspscc 136 | *.vssscc 137 | .builds 138 | *.pidb 139 | *.svclog 140 | *.scc 141 | 142 | # Chutzpah Test files 143 | _Chutzpah* 144 | 145 | # Visual C++ cache files 146 | ipch/ 147 | *.aps 148 | *.ncb 149 | *.opendb 150 | *.opensdf 151 | *.sdf 152 | *.cachefile 153 | *.VC.db 154 | *.VC.VC.opendb 155 | 156 | # Visual Studio profiler 157 | *.psess 158 | *.vsp 159 | *.vspx 160 | *.sap 161 | 162 | # Visual Studio Trace Files 163 | *.e2e 164 | 165 | # TFS 2012 Local Workspace 166 | $tf/ 167 | 168 | # Guidance Automation Toolkit 169 | *.gpState 
170 | 171 | # ReSharper is a .NET coding add-in 172 | _ReSharper*/ 173 | *.[Rr]e[Ss]harper 174 | *.DotSettings.user 175 | 176 | # JustCode is a .NET coding add-in 177 | .JustCode 178 | 179 | # TeamCity is a build add-in 180 | _TeamCity* 181 | 182 | # DotCover is a Code Coverage Tool 183 | *.dotCover 184 | 185 | # AxoCover is a Code Coverage Tool 186 | .axoCover/* 187 | !.axoCover/settings.json 188 | 189 | # Visual Studio code coverage results 190 | *.coverage 191 | *.coveragexml 192 | 193 | # NCrunch 194 | _NCrunch_* 195 | .*crunch*.local.xml 196 | nCrunchTemp_* 197 | 198 | # MightyMoose 199 | *.mm.* 200 | AutoTest.Net/ 201 | 202 | # Web workbench (sass) 203 | .sass-cache/ 204 | 205 | # Installshield output folder 206 | [Ee]xpress/ 207 | 208 | # DocProject is a documentation generator add-in 209 | DocProject/buildhelp/ 210 | DocProject/Help/*.HxT 211 | DocProject/Help/*.HxC 212 | DocProject/Help/*.hhc 213 | DocProject/Help/*.hhk 214 | DocProject/Help/*.hhp 215 | DocProject/Help/Html2 216 | DocProject/Help/html 217 | 218 | # Click-Once directory 219 | publish/ 220 | 221 | # Publish Web Output 222 | *.[Pp]ublish.xml 223 | *.azurePubxml 224 | # Note: Comment the next line if you want to checkin your web deploy settings, 225 | # but database connection strings (with potential passwords) will be unencrypted 226 | *.pubxml 227 | *.publishproj 228 | 229 | # Microsoft Azure Web App publish settings. Comment the next line if you want to 230 | # checkin your Azure Web App publish settings, but sensitive information contained 231 | # in these scripts will be unencrypted 232 | PublishScripts/ 233 | 234 | # NuGet Packages 235 | *.nupkg 236 | # The packages folder can be ignored because of Package Restore 237 | **/[Pp]ackages/* 238 | # except build/, which is used as an MSBuild target. 239 | !**/[Pp]ackages/build/ 240 | # Uncomment if necessary however generally it will be regenerated when needed 241 | #!**/[Pp]ackages/repositories.config 242 | # NuGet v3's project.json files produces more ignorable files 243 | *.nuget.props 244 | *.nuget.targets 245 | 246 | # Microsoft Azure Build Output 247 | csx/ 248 | *.build.csdef 249 | 250 | # Microsoft Azure Emulator 251 | ecf/ 252 | rcf/ 253 | 254 | # Windows Store app package directories and files 255 | AppPackages/ 256 | BundleArtifacts/ 257 | Package.StoreAssociation.xml 258 | _pkginfo.txt 259 | *.appx 260 | 261 | # Visual Studio cache files 262 | # files ending in .cache can be ignored 263 | *.[Cc]ache 264 | # but keep track of directories ending in .cache 265 | !*.[Cc]ache/ 266 | 267 | # Others 268 | ClientBin/ 269 | ~$* 270 | *.dbmdl 271 | *.dbproj.schemaview 272 | *.jfm 273 | *.pfx 274 | *.publishsettings 275 | orleans.codegen.cs 276 | 277 | # Including strong name files can present a security risk 278 | # (https://github.com/github/gitignore/pull/2483#issue-259490424) 279 | #*.snk 280 | 281 | # Since there are multiple workflows, uncomment next line to ignore bower_components 282 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) 283 | #bower_components/ 284 | 285 | # RIA/Silverlight projects 286 | Generated_Code/ 287 | 288 | # Backup & report files from converting an old project file 289 | # to a newer Visual Studio version. 
Backup files are not needed, 290 | # because we have git ;-) 291 | _UpgradeReport_Files/ 292 | Backup*/ 293 | UpgradeLog*.XML 294 | UpgradeLog*.htm 295 | ServiceFabricBackup/ 296 | *.rptproj.bak 297 | 298 | # SQL Server files 299 | *.mdf 300 | *.ldf 301 | *.ndf 302 | 303 | # Business Intelligence projects 304 | *.rdl.data 305 | *.bim.layout 306 | *.bim_*.settings 307 | *.rptproj.rsuser 308 | 309 | # Microsoft Fakes 310 | FakesAssemblies/ 311 | 312 | # GhostDoc plugin setting file 313 | *.GhostDoc.xml 314 | 315 | # Node.js Tools for Visual Studio 316 | .ntvs_analysis.dat 317 | node_modules/ 318 | 319 | # Visual Studio 6 build log 320 | *.plg 321 | 322 | # Visual Studio 6 workspace options file 323 | *.opt 324 | 325 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 326 | *.vbw 327 | 328 | # Visual Studio LightSwitch build output 329 | **/*.HTMLClient/GeneratedArtifacts 330 | **/*.DesktopClient/GeneratedArtifacts 331 | **/*.DesktopClient/ModelManifest.xml 332 | **/*.Server/GeneratedArtifacts 333 | **/*.Server/ModelManifest.xml 334 | _Pvt_Extensions 335 | 336 | # Paket dependency manager 337 | .paket/paket.exe 338 | paket-files/ 339 | 340 | # FAKE - F# Make 341 | .fake/ 342 | 343 | # JetBrains Rider 344 | .idea/ 345 | *.sln.iml 346 | 347 | # CodeRush personal settings 348 | .cr/personal 349 | 350 | # Python Tools for Visual Studio (PTVS) 351 | __pycache__/ 352 | *.pyc 353 | 354 | # Cake - Uncomment if you are using it 355 | # tools/** 356 | # !tools/packages.config 357 | 358 | # Tabs Studio 359 | *.tss 360 | 361 | # Telerik's JustMock configuration file 362 | *.jmconfig 363 | 364 | # BizTalk build output 365 | *.btp.cs 366 | *.btm.cs 367 | *.odx.cs 368 | *.xsd.cs 369 | 370 | # OpenCover UI analysis results 371 | OpenCover/ 372 | 373 | # Azure Stream Analytics local run output 374 | ASALocalRun/ 375 | 376 | # MSBuild Binary and Structured Log 377 | *.binlog 378 | 379 | # NVidia Nsight GPU debugger configuration file 380 | *.nvuser 381 | 382 | # MFractors (Xamarin productivity tool) working folder 383 | .mfractor/ 384 | 385 | # Local History for Visual Studio 386 | .localhistory/ 387 | 388 | # End of https://www.gitignore.io/api/linux,windows,notepadpp,visualstudio 389 | -------------------------------------------------------------------------------- /GarbageCollection.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include "GarbageCollection.h" 12 | 13 | // ------------------------------------------------------------- // 14 | 15 | // -- dev build settings - you probably want all of these off -- // 16 | 17 | // ------------------------------------------------------------- // 18 | 19 | // iff nonzero, prints log messages for disjunction handle activity 20 | #define DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_HANDLE_LOGGING 0 21 | 22 | // iff nonzero, does extra und safety checks for atomic disjuction handle internals 23 | #define DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_HANDLE_DATA_UND_SAFETY 1 24 | 25 | // if nonzero, displays a message on cerr that an object was added to gc database (+ its address) 26 | #define DRAGAZO_GARBAGE_COLLECT_SHOW_CREATMSG 0 27 | 28 | // if nonzero, displays a message on cerr that an object was deleted (+ its address) 29 | #define DRAGAZO_GARBAGE_COLLECT_SHOW_DELMSG 0 30 | 31 | // if nonzero, displays info messages on cerr during GC::collect() 32 | #define 
DRAGAZO_GARBAGE_COLLECT_MSG 0 33 | 34 | // ------------- // 35 | 36 | // -- globals -- // 37 | 38 | // ------------- // 39 | 40 | std::atomic GC::_strategy(GC::strategies::timed | GC::strategies::allocfail); 41 | 42 | std::atomic GC::_sleep_time(std::chrono::milliseconds(60000)); 43 | 44 | // ---------- // 45 | 46 | // -- misc -- // 47 | 48 | // ---------- // 49 | 50 | void *GC::aligned_malloc(std::size_t size, std::size_t align) 51 | { 52 | // calling with 0 yields nullptr 53 | if (size == 0) return nullptr; 54 | 55 | // allocate enough space for a void*, padding, and the array 56 | size += sizeof(void*) + align - 1; 57 | 58 | // grab that much space - if that fails, return null 59 | void *raw = std::malloc(size); 60 | if (!raw) return nullptr; 61 | 62 | // get the pointer to return (before alignment) 63 | void *ret = (char*)raw + sizeof(void*); 64 | 65 | // align the return pointer 66 | ret = (char*)ret + (-(std::intptr_t)ret & (align - 1)); 67 | 68 | // store the raw pointer before start of ret array 69 | *(void**)((char*)ret - sizeof(void*)) = raw; 70 | 71 | // return ret pointer 72 | return ret; 73 | } 74 | void GC::aligned_free(void *ptr) 75 | { 76 | // free the raw pointer (freeing nullptr does nothing) 77 | if (ptr) std::free(*(void**)((char*)ptr - sizeof(void*))); 78 | } 79 | 80 | GC::bind_new_obj_t GC::bind_new_obj; 81 | 82 | // ------------------------------------ // 83 | 84 | // -- object database implementation -- // 85 | 86 | // ------------------------------------ // 87 | 88 | GC::obj_list::obj_list() : first(nullptr), last(nullptr) {} 89 | 90 | void GC::obj_list::add(info *obj) 91 | { 92 | // put it at the end of the list 93 | obj->prev = last; 94 | obj->next = nullptr; 95 | 96 | // link the other way as well - account for edge cases 97 | if (last) last = last->next = obj; 98 | else first = last = obj; 99 | } 100 | void GC::obj_list::remove(info *obj) 101 | { 102 | // not using first == last for the first case because in the (illegal) case where 103 | // handle is not in the gc database this would unlink an unrelated object. 104 | if (obj == first && obj == last) first = last = nullptr; 105 | else if (obj == first) (first = first->next)->prev = nullptr; 106 | else if (obj == last) (last = last->prev)->next = nullptr; 107 | else 108 | { 109 | obj->prev->next = obj->next; 110 | obj->next->prev = obj->prev; 111 | } 112 | } 113 | 114 | void GC::obj_list::merge(obj_list &&other) 115 | { 116 | // don't do anything if other is us 117 | if (&other == this) return; 118 | 119 | // if we're empty 120 | if (!first) 121 | { 122 | // just take other's stuff 123 | first = other.first; 124 | last = other.last; 125 | } 126 | // otherwise if other isn't empty 127 | else if (other.first) 128 | { 129 | // do an actual splice 130 | last->next = other.first; 131 | other.first->prev = last; 132 | 133 | // repoint the last pointer 134 | last = other.last; 135 | } 136 | 137 | // empty other 138 | other.first = other.last = nullptr; 139 | } 140 | 141 | bool GC::obj_list::contains(info *obj) const noexcept 142 | { 143 | for (info *i = first; i; i = i->next) 144 | if (i == obj) return true; 145 | 146 | return false; 147 | } 148 | 149 | // --------------- // 150 | 151 | // -- gc module -- // 152 | 153 | // --------------- // 154 | 155 | GC::disjoint_module::~disjoint_module() 156 | { 157 | // getting here means there's no longer any owning handles for this disjoint module. 158 | // that means we're about to release control of all our stuff - the only thing that can leak is objects. 
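// for reference, the aligned allocator earlier in this file works by stashing the raw std::malloc
// pointer in the sizeof(void*) bytes immediately before the aligned region so aligned_free can
// recover it. a minimal, non-compiled usage sketch follows - the function name is invented for
// illustration, and align must be a power of two:
#if 0
#include <cassert>
#include <cstdint>
void aligned_malloc_usage_sketch()
{
    void *buf = GC::aligned_malloc(256, 64); // request 256 bytes on a 64-byte boundary
    if (buf)
    {
        assert(reinterpret_cast<std::uintptr_t>(buf) % 64 == 0); // the returned pointer honors the alignment
        GC::aligned_free(buf);                                   // must be released with the matching helper
    }
    GC::aligned_free(nullptr); // like std::free, freeing null is a no-op
}
#endif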
159 | // however we can't perform a collection because this thread no longer as living handle to this module. 160 | // so all we can do is enforce the fact that there should not be any memory leaks. 161 | 162 | // if we still have objects, bad news - the user probably violated a disjunction barrier 163 | if (!objs.empty()) 164 | { 165 | std::cerr << "\n\nYOU MADE A USAGE VIOLATION!!\ndestruction of a disjoint gc module had leftover objects\n\n"; 166 | std::cerr << objs.front() << ' ' << objs.front()->next << '\n' << roots.size() << '\n'; 167 | std::abort(); 168 | } 169 | // same thing for roots - less important cause this can't leak, but we don't want dangling pointers floating around out there. 170 | if (!roots.empty()) 171 | { 172 | std::cerr << "\n\nYOU MADE A USAGE VIOLATION!!\ndestruction of a disjoint gc module had leftover roots\n\n"; 173 | std::cerr << roots.size() << '\n' << *roots.begin() << '\n'; 174 | std::abort(); 175 | } 176 | } 177 | 178 | void GC::info::mark_sweep() 179 | { 180 | // mark this handle 181 | this->marked = true; 182 | 183 | // for each outgoing arc 184 | this->route(+[](const smart_handle &arc) 185 | { 186 | // get the current arc value - this is only safe because we're in a collect action 187 | info *raw = arc.raw_handle(); 188 | 189 | // if it hasn't been marked, recurse to it (only if non-null) 190 | if (raw && !raw->marked) raw->mark_sweep(); 191 | }); 192 | } 193 | 194 | bool GC::disjoint_module::collect() 195 | { 196 | // -- begin the collection action -- // 197 | 198 | { 199 | std::lock_guard internal_lock(internal_mutex); 200 | 201 | // if there are 1 or more ignore sentries acting on this module, do nothing and return true. 202 | // true is to prevent a deadlock case where we do a blocking collection is made to an ignoring module. 203 | if (ignore_collect_count > 0) return true; 204 | 205 | // if there's already a collection in progress for this module, we do nothing. 206 | // if the collector is us, return true - this is to prevent a deadlock case where we do a blocking collection from a router/destructor. 207 | // otherwise return false - someone else is doing something. 208 | if (collector_thread != std::thread::id()) return collector_thread == std::this_thread::get_id(); 209 | 210 | // otherwise mark the calling thread as the collector thread 211 | collector_thread = std::this_thread::get_id(); 212 | 213 | // begin caching ref count deletion events 214 | cache_ref_count_del_actions = true; 215 | 216 | // since we just came out of no-collect phase, all the caches should be empty 217 | assert(objs_add_cache.empty()); 218 | 219 | assert(roots_add_cache.empty()); 220 | assert(roots_remove_cache.empty()); 221 | 222 | assert(handle_repoint_cache.empty()); 223 | 224 | // ref count del cache should also be empty 225 | assert(ref_count_del_cache.empty()); 226 | 227 | // the del list should also be empty 228 | assert(del_list.empty()); 229 | } 230 | 231 | // -- initialize the collection data -- // 232 | 233 | // we've now started the collection action, so we have lock-free access to collector-only resources. 234 | 235 | // to ensure all unused objects are deleted in one pass, we need to unroot all mutables arcs. 236 | // this requires going through all the obj list entities, so we might as well clear their marks. 
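// for reference, the overall structure here is classic mark-and-sweep. a self-contained toy
// version of the algorithm is sketched below - Node and the helper functions are invented for
// the sketch and are not types from this library:
#if 0
#include <vector>
struct Node
{
    bool marked = false;
    std::vector<Node*> children; // outgoing arcs, analogous to what route() visits
};
void mark(Node *n)
{
    n->marked = true;
    for (Node *c : n->children)
        if (c && !c->marked) mark(c); // recurse only into non-null, unmarked targets
}
void sweep(std::vector<Node*> &all, const std::vector<Node*> &roots)
{
    for (Node *n : all) n->marked = false;   // clear every mark
    for (Node *r : roots) if (r) mark(r);    // mark everything reachable from a root
    for (Node *&n : all)
        if (n && !n->marked) { delete n; n = nullptr; } // whatever is still unmarked is unreachable
}
#endif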
237 | 238 | // for each object we'll examine 239 | for (info *i = objs.front(); i; i = i->next) 240 | { 241 | // clear the marked flag 242 | i->marked = false; 243 | 244 | // route to mutable arcs and directly unroot from the collector-only root set. 245 | // this is only safe because we're guaranteed to be the (only) collector at this point. 246 | i->mutable_route(+[](const smart_handle &arc) { local()->roots.erase(&arc.raw_handle()); }); 247 | } 248 | 249 | // clear the root objs set 250 | root_objs.clear(); 251 | 252 | { 253 | std::lock_guard lock(internal_mutex); 254 | 255 | // at this point we've directly unrooted all the mutables in the obj list. 256 | // however, other threads could have destroyed rooted handles. 257 | // this means the roots set at this point may contain dangling pointers. 258 | // thus, we need to apply the cached root/unroot actions. 259 | // this will ensure there are no dangling pointers that are represented. 260 | 261 | // the reason we can't just do the cached unroot actions is because it could exclude live objects. 262 | // for instance, suppose we have a pre-existing dynamic rooted handle A. 263 | // now, say a new dynamic rooted handle B is created and initialized to A, then A is destroyed. 264 | // thus, A was unrooted, but the object it refered to is still reachable through B. 265 | // this could happen e.g. on a std::vector> reallocation. 266 | // this of course could only happen for live objects, so we want to make sure we keep them that way. 267 | // thus we account for both root and unroot cached actions. 268 | 269 | // for much the same reasons, we need to apply the handle repoint cache. 270 | // and, because we now have roots that may point to objects in the obj add cache, we need to add those as well. 271 | // however, we need to clear their marks first. 272 | // we can't perform the routing step for them because we need the mutex to be locked during this process. 273 | // but that's not a problem as collect() only guarantees it will collect all objects unreachable prior to invocation. 274 | // the routing logic is just to ensure this happens in 1 pass and not 2 (uncommon but otherwise possible without this step). 275 | 276 | // so long story short we need to apply all the caches (aside from obj deletion), plus a tiny bit of extra logic. 277 | // we know this is safe because it's as if we took the graph snapshot later on and just routed to a subset of it for unrooting. 278 | 279 | // apply the obj add cache - also clear their marks (the ones in the obj list are already cleared) 280 | for (info *i : objs_add_cache) 281 | { 282 | i->marked = false; 283 | objs.add(i); 284 | } 285 | objs_add_cache.clear(); 286 | 287 | // apply cached root actions 288 | for (auto i : roots_add_cache) roots.insert(i); 289 | roots_add_cache.clear(); 290 | 291 | // apply cached unroot actions 292 | for (auto i : roots_remove_cache) roots.erase(i); 293 | roots_remove_cache.clear(); 294 | 295 | // apply handle repoint actions 296 | for (auto i : handle_repoint_cache) *i.first = i.second; 297 | handle_repoint_cache.clear(); 298 | 299 | // now that that's all done... 300 | 301 | // add the pointed-at objects of all remaining (valid) roots to a set of root objects. 302 | // we only include the non-null targets for convenience. 
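// for reference, the handle A / handle B scenario described above can be written out with the
// public GC::ptr / GC::make API. sketch only - it assumes the GC::make<T>(args...) form that the
// README describes as working like std::make_shared:
#if 0
#include <vector>
#include "GarbageCollection.h"
void reallocation_scenario_sketch()
{
    std::vector<GC::ptr<int>> v;
    v.push_back(GC::make<int>(1)); // rooted handle A now refers to the object
    v.reserve(v.capacity() + 1);   // force a reallocation: a new rooted handle B is copy/move
                                   // constructed from A, then A is destroyed
    // if this races with a collection, A's unroot and B's root are both cached - applying both
    // caches (as the code above does) keeps the object rooted the whole time
}
#endif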
303 | for (auto root : roots) 304 | if (*root) root_objs.insert(*root); 305 | } 306 | 307 | // ----------------------------------------------------------- 308 | 309 | #if DRAGAZO_GARBAGE_COLLECT_MSG 310 | std::size_t collect_count = 0; // number of objects that we scheduled for deletion 311 | #endif 312 | 313 | // -- mark and sweep -- // 314 | 315 | // perform a mark sweep from each root object 316 | for (info *i : root_objs) i->mark_sweep(); 317 | 318 | // -- clean anything not marked -- // 319 | 320 | // for each item in the gc database 321 | for (info *i = objs.front(), *next; i; i = next) 322 | { 323 | next = i->next; 324 | 325 | // if it hasn't been marked, mark it for deletion 326 | if (!i->marked) 327 | { 328 | // mark it for deletion 329 | objs.remove(i); 330 | del_list.add(i); 331 | 332 | #if DRAGAZO_GARBAGE_COLLECT_MSG 333 | ++collect_count; 334 | #endif 335 | } 336 | } 337 | 338 | #if DRAGAZO_GARBAGE_COLLECT_MSG 339 | std::cerr << "collecting - deleting: " << collect_count << '\n'; 340 | #endif 341 | 342 | // ----------------------------------------------------------- 343 | 344 | // we've now divided the old obj list into two partitions: 345 | // the reachable objects are still in the obj list. 346 | // the unreachable objects are now in the del list. 347 | // ref count deletion caching is still in effect. 348 | 349 | // destroy unreachable objects 350 | for (info *i = del_list.front(); i; i = i->next) i->destroy(); 351 | 352 | // now we've destroyed the unreachable objects but there may be cached deletions from ref count logic. 353 | // we'll now resume immediate ref count deletions. 354 | // we can't resume immediate ref count deletions prior to destroying the unreachable objs because it could double delete. 355 | // e.g. could drop an unreachable obj ref count to 0 and insta-delete on its own before we get to it. 356 | // even if we made a check for double delete, it would still deallocate the info object as well and would cause even more headache. 357 | // we know this usage is safe because there's no way a reachable object could ref count delete an unreachable object. 358 | 359 | // resume immediate ref count deletions. 360 | { 361 | std::lock_guard internal_lock(internal_mutex); 362 | 363 | // stop caching ref count deletion actions (i.e. resume immediate ref count deletions) 364 | cache_ref_count_del_actions = false; 365 | 366 | // if an unreachable object is in the ref count del cache purge it (to avoid the double delete issue for unreachable objs). 367 | // we know this'll work because the unreachable objects are unreachable from reachable objects (hence the name). 368 | // thus, since we already called unreachable destructors, there will be no further ref count logic for unreachables. 369 | 370 | // purge unreachable objects from the ref count del cache (to avoid double deletions - see above). 371 | for (info *i = del_list.front(); i; i = i->next) ref_count_del_cache.erase(i); 372 | 373 | // after the double-deletion purge, remove remaining ref count del cache objects from the obj list. 374 | // we do this now because enabling immediate ref count del logic means the obj list can be modified by any holder of the mutex. 375 | for (auto i : ref_count_del_cache) objs.remove(i); 376 | } 377 | 378 | // we now have lock-free exclusive ownership of the ref count del cache. 379 | 380 | // deallocate memory 381 | // done after calling ALL unreachable dtors so that the dtors can access the info objects safely. 
382 | // this is because we might be deleting objects whose reference count is not zero. 383 | // which means they could potentially hold live gc references to other objects in del list and try to refer to their info objects. 384 | for (info *i = del_list.front(), *next; i; i = next) 385 | { 386 | next = i->next; // dealloc() will deallocate the info object as well, so grab the next object now 387 | i->dealloc(); 388 | } 389 | 390 | // clear the del list (we already deallocated the resources above) 391 | del_list.unsafe_clear(); 392 | assert(del_list.empty()); // just to make sure 393 | 394 | // remember, we still have lock-free exclusive ownership of the ref count del cache from above. 395 | // process the cached ref count deletions. 396 | for (auto i : ref_count_del_cache) 397 | { 398 | // we don't need to do all destructors before all deallocators like we did above. 399 | // this is because we know the ref count for these is zero (because they were cached ref count deletion). 400 | // this means we don't have the same risks as above (i.e. live references being forcibly severed). 401 | 402 | i->destroy(); 403 | i->dealloc(); 404 | } 405 | ref_count_del_cache.clear(); 406 | 407 | // end the collection action 408 | // must be after dtors/deallocs to ensure that if they call collect() it'll no-op (otherwise very slow). 409 | // additionally, must be after those to ensure the caches are fully emptied as the last atomic step. 410 | // also, if this came before dtors, the reference count system could fall to 0 and result in double dtor. 411 | { 412 | std::lock_guard internal_lock(internal_mutex); 413 | 414 | // mark that there is no longer a collector thread 415 | collector_thread = std::thread::id(); 416 | 417 | // apply all the cached obj add actions that occurred during the collection action 418 | for (auto i : objs_add_cache) objs.add(i); 419 | objs_add_cache.clear(); 420 | 421 | // apply cached root actions 422 | for (auto i : roots_add_cache) roots.insert(i); 423 | roots_add_cache.clear(); 424 | 425 | // apply cached unroot actions 426 | for (auto i : roots_remove_cache) roots.erase(i); 427 | roots_remove_cache.clear(); 428 | 429 | // apply all the cached handle repoint actions 430 | for (auto i : handle_repoint_cache) *i.first = i.second; 431 | handle_repoint_cache.clear(); 432 | } 433 | 434 | // return that we did the collection 435 | return true; 436 | } 437 | void GC::disjoint_module::blocking_collect() 438 | { 439 | while (!collect()); 440 | } 441 | 442 | bool GC::disjoint_module::this_is_collector_thread() 443 | { 444 | std::lock_guard internal_lock(internal_mutex); 445 | return collector_thread == std::this_thread::get_id(); 446 | } 447 | 448 | void GC::disjoint_module::schedule_handle_create_null(smart_handle &handle) 449 | { 450 | std::lock_guard internal_lock(internal_mutex); 451 | 452 | // point it at null 453 | handle.raw = nullptr; 454 | 455 | // root it 456 | __schedule_handle_root(handle); 457 | } 458 | void GC::disjoint_module::schedule_handle_create_bind_new_obj(smart_handle &handle, info *new_obj) 459 | { 460 | std::lock_guard internal_lock(internal_mutex); 461 | 462 | // point it at the object 463 | handle.raw = new_obj; 464 | 465 | // root it 466 | __schedule_handle_root(handle); 467 | 468 | // -- add the object -- // 469 | 470 | // set its reference count to 1 471 | new_obj->ref_count = 1; 472 | 473 | // if there's no collector thread, we MUST apply the change immediately 474 | if (collector_thread == std::thread::id()) 475 | { 476 | // if this branch was selected, 
the caches should be empty 477 | assert(objs_add_cache.empty()); 478 | 479 | objs.add(new_obj); 480 | } 481 | // otherwise we need to cache the request 482 | else objs_add_cache.insert(new_obj); 483 | } 484 | void GC::disjoint_module::schedule_handle_create_alias(smart_handle &handle, const smart_handle &src_handle) 485 | { 486 | std::lock_guard internal_lock(internal_mutex); 487 | 488 | // get the target 489 | info *target = __get_current_target(src_handle); 490 | 491 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_SAFETY_CHECKS 492 | 493 | // if we're going to repoint outside the disjunction of the handle, that's a disjunction violation 494 | if (target && handle.disjunction != target->disjunction) 495 | { 496 | throw GC::disjunction_error("attempt to repoint GC::ptr outside of the current disjunction"); 497 | } 498 | 499 | #endif 500 | 501 | // point it at the source handle's current target 502 | handle.raw = target; 503 | 504 | // increment the target reference count 505 | if (handle.raw) ++handle.raw->ref_count; 506 | 507 | // root it 508 | __schedule_handle_root(handle); 509 | } 510 | 511 | void GC::disjoint_module::schedule_handle_destroy(const smart_handle &handle) 512 | { 513 | std::unique_lock internal_lock(internal_mutex); 514 | 515 | // get the old target 516 | info *old_target = __get_current_target(handle); 517 | 518 | // unroot the handle 519 | __schedule_handle_unroot(handle); 520 | 521 | // purge the handle from the repoint cache so we don't dereference undefined memory. 522 | // the const cast is ok because we won't be modifying it - just for lookup. 523 | handle_repoint_cache.erase(const_cast(&handle.raw)); 524 | 525 | // dec the reference count 526 | __MUST_BE_LAST_ref_count_dec(old_target, std::move(internal_lock)); 527 | } 528 | 529 | void GC::disjoint_module::schedule_handle_unroot(const smart_handle &handle) 530 | { 531 | std::lock_guard internal_lock(internal_mutex); 532 | 533 | // unroot it 534 | __schedule_handle_unroot(handle); 535 | } 536 | 537 | void GC::disjoint_module::schedule_handle_repoint_null(smart_handle &handle) 538 | { 539 | std::unique_lock internal_lock(internal_mutex); 540 | 541 | // get the old target 542 | info *old_target = __get_current_target(handle); 543 | 544 | // repoint handle to null 545 | __raw_schedule_handle_repoint(handle, nullptr); 546 | 547 | // decrement old target reference count 548 | __MUST_BE_LAST_ref_count_dec(old_target, std::move(internal_lock)); 549 | } 550 | void GC::disjoint_module::schedule_handle_repoint(smart_handle &handle, const smart_handle &new_value) 551 | { 552 | std::unique_lock internal_lock(internal_mutex); 553 | 554 | // get the old/new targets 555 | info *old_target = __get_current_target(handle); 556 | info *new_target = __get_current_target(new_value); 557 | 558 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_SAFETY_CHECKS 559 | 560 | // if we're going to repoint outside the disjunction of the handle, that's a disjunction violation 561 | if (new_target && handle.disjunction != new_target->disjunction) 562 | { 563 | throw GC::disjunction_error("attempt to repoint GC::ptr outside of the current disjunction"); 564 | } 565 | 566 | #endif 567 | 568 | // only do the remaining logic if it's an actual repoint 569 | if (old_target != new_target) 570 | { 571 | // repoint handle to the new target 572 | __raw_schedule_handle_repoint(handle, new_target); 573 | 574 | // increment new target reference count 575 | if (new_target) ++new_target->ref_count; 576 | 577 | // decrement old target reference count 578 | 
__MUST_BE_LAST_ref_count_dec(old_target, std::move(internal_lock)); 579 | } 580 | } 581 | void GC::disjoint_module::schedule_handle_repoint_swap(smart_handle &handle_a, smart_handle &handle_b) 582 | { 583 | std::lock_guard internal_lock(internal_mutex); 584 | 585 | // get their current repoint targets 586 | info *target_a = __get_current_target(handle_a); 587 | info *target_b = __get_current_target(handle_b); 588 | 589 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_SAFETY_CHECKS 590 | 591 | // if we're going to repoint outside the disjunction of either handle, that's a disjunction violation 592 | if ((target_b && handle_a.disjunction != target_b->disjunction) || (target_a && handle_b.disjunction != target_a->disjunction)) 593 | { 594 | throw GC::disjunction_error("attempt to repoint GC::ptr outside of the current disjunction"); 595 | } 596 | 597 | #endif 598 | 599 | // only perform the swap if they point to different things 600 | if (target_a != target_b) 601 | { 602 | // schedule repoint actions to swap them 603 | __raw_schedule_handle_repoint(handle_a, target_b); 604 | __raw_schedule_handle_repoint(handle_b, target_a); 605 | 606 | // there's no need for reference counting logic in a swap operation 607 | } 608 | } 609 | 610 | std::size_t GC::disjoint_module::begin_ignore_collect() 611 | { 612 | std::lock_guard internal_lock(internal_mutex); 613 | return ignore_collect_count++; 614 | } 615 | void GC::disjoint_module::end_ignore_collect() 616 | { 617 | std::lock_guard internal_lock(internal_mutex); 618 | assert(ignore_collect_count != 0); 619 | --ignore_collect_count; 620 | } 621 | 622 | void GC::disjoint_module::__schedule_handle_root(const smart_handle &handle) 623 | { 624 | // if there's no collector thread, we MUST apply the change immediately 625 | if (collector_thread == std::thread::id()) 626 | { 627 | // if this branch was selected, the caches should be empty 628 | assert(roots_add_cache.empty()); 629 | assert(roots_remove_cache.empty()); 630 | 631 | roots.insert(&handle.raw_handle()); 632 | } 633 | // otherwise we need to cache the request 634 | else 635 | { 636 | roots_add_cache.insert(&handle.raw); 637 | roots_remove_cache.erase(&handle.raw); // ensure the sets are disjoint 638 | } 639 | } 640 | void GC::disjoint_module::__schedule_handle_unroot(const smart_handle &handle) 641 | { 642 | // if there's no collector thread, we MUST apply the change immediately 643 | if (collector_thread == std::thread::id()) 644 | { 645 | // if this branch was selected, the caches should be empty 646 | assert(roots_add_cache.empty()); 647 | assert(roots_remove_cache.empty()); 648 | 649 | roots.erase(&handle.raw); 650 | } 651 | // otherwise we need to cache the request 652 | else 653 | { 654 | roots_remove_cache.insert(&handle.raw); 655 | roots_add_cache.erase(&handle.raw); // ensure the sets are disjoint 656 | } 657 | } 658 | 659 | void GC::disjoint_module::__raw_schedule_handle_repoint(smart_handle &handle, info *target) 660 | { 661 | // if there's no collector thread, we MUST apply the change immediately 662 | if (collector_thread == std::thread::id()) 663 | { 664 | // if this branch was selected, the caches should be empty 665 | assert(handle_repoint_cache.empty()); 666 | 667 | // immediately repoint handle to target 668 | handle.raw = target; 669 | } 670 | // otherwise we need to cache the request 671 | else handle_repoint_cache[&handle.raw] = target; 672 | } 673 | 674 | GC::info *GC::disjoint_module::__get_current_target(const smart_handle &handle) 675 | { 676 | // find new_value's repoint target 
from the cache. 677 | // const cast is safe because we won't be modifying it (just for lookup in the repoint cache). 678 | auto new_value_iter = handle_repoint_cache.find(const_cast(&handle.raw)); 679 | 680 | // get the target - if it's in the repoint cache, get the repoint target, otherwise use it raw. 681 | // this works regardless of if we're in a collect action or not (if we're in a collect action the cache is empty). 682 | return new_value_iter != handle_repoint_cache.end() ? new_value_iter->second : handle.raw; 683 | } 684 | 685 | void GC::disjoint_module::__MUST_BE_LAST_ref_count_dec(info *target, std::unique_lock internal_lock) 686 | { 687 | // decrement the reference count 688 | // if it falls to zero we need to perform ref count deletion logic 689 | if (target && --target->ref_count == 0) 690 | { 691 | // if it's in the obj add cache we can delete it immediately regardless of what's going on. 692 | // this is because it being in the obj add cache means it's not in the obj list, and is thus not under gc consideration. 693 | if (objs_add_cache.find(target) != objs_add_cache.end()) 694 | { 695 | // remove it from the obj add cache 696 | objs_add_cache.erase(target); 697 | 698 | // unlock the mutex so we can call arbitrary code 699 | internal_lock.unlock(); 700 | 701 | target->destroy(); 702 | target->dealloc(); 703 | } 704 | // otherwise we know it exists and isn't in the add cache, therefore it's in the obj list. 705 | // if we're not suppoed to cache ref count deletions, handle it immediately 706 | else if (!cache_ref_count_del_actions) 707 | { 708 | // remove it from the obj list 709 | objs.remove(target); 710 | 711 | // unlock the mutex so we can call arbitrary code 712 | internal_lock.unlock(); 713 | 714 | target->destroy(); 715 | target->dealloc(); 716 | } 717 | // otherwise we're supposed to cache the ref count deletion action. 718 | // this also implies we're in a collection action. 719 | else 720 | { 721 | assert(collector_thread != std::thread::id()); 722 | 723 | ref_count_del_cache.insert(target); 724 | } 725 | } 726 | } 727 | 728 | // ------------------------------- // 729 | 730 | // -- special disjunction stuff -- // 731 | 732 | // ------------------------------- // 733 | 734 | GC::primary_disjunction_t GC::primary_disjunction; 735 | GC::inherit_disjunction_t GC::inherit_disjunction; 736 | GC::new_disjunction_t GC::new_disjunction; 737 | 738 | GC::disjoint_module *GC::disjoint_module::local_detour = nullptr; 739 | 740 | const GC::shared_disjoint_handle &GC::disjoint_module::primary_handle() 741 | { 742 | // not thread_local because the primary disjunction must exist for the entire program runtime. 743 | // the default value creates that actual collection module. 744 | static struct primary_handle_t 745 | { 746 | shared_disjoint_handle m; 747 | 748 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_HANDLE_LOGGING 749 | struct _ 750 | { 751 | _() { std::cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ctor primary handle\n"; } 752 | ~_() { std::cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! dtor primary handle\n"; } 753 | } __; 754 | #endif 755 | 756 | primary_handle_t() 757 | { 758 | disjoint_module_container::get().create_new_disjunction(m); 759 | } 760 | 761 | ~primary_handle_t() 762 | { 763 | // because this happens at static dtor time, all thread_local objects have been destroyed already - including the local handle. 764 | // thus accesses to the local handle will result in und memory accesses. 
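// for reference, __MUST_BE_LAST_ref_count_dec above follows a general pattern: take ownership of
// the std::unique_lock by value, finish the bookkeeping that needs the mutex, then unlock before
// running arbitrary user code (destructors). a distilled, self-contained sketch with invented names:
#if 0
#include <mutex>
#include <utility>
std::mutex m;
void run_user_code() {} // stand-in for destroy()/dealloc()
void finish_then_call_out(std::unique_lock<std::mutex> lk)
{
    // ... final bookkeeping that still requires the mutex ...
    lk.unlock();     // release before calling out - the callee may itself need the mutex
    run_user_code(); // arbitrary code now runs with no lock held, so it cannot deadlock on m
}
void caller()
{
    std::unique_lock<std::mutex> lk(m);
    // ... work under the lock ...
    finish_then_call_out(std::move(lk)); // must be the caller's last use of the lock
}
#endif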
765 | // set up the local detour to bypass the local handle and instead go to the primary module - which is still alive. 766 | local_detour = m.get(); 767 | 768 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_HANDLE_LOGGING 769 | std::cerr << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! primary mid dtor - roots: " << m->roots.size() << '\n'; 770 | #endif 771 | } 772 | } primary_handle; 773 | 774 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_HANDLE_LOGGING 775 | std::cerr << " !!!! primary handle access\n"; 776 | #endif 777 | 778 | return primary_handle.m; 779 | } 780 | GC::shared_disjoint_handle &GC::disjoint_module::local_handle() 781 | { 782 | // thread_local because this is a thread-specific owning handle. 783 | // the default value points the current thread to use the primary disjunction. 784 | thread_local struct local_handle_t 785 | { 786 | shared_disjoint_handle m = primary_handle(); 787 | 788 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_HANDLE_LOGGING 789 | struct _ 790 | { 791 | _() { std::cerr << "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ctor local handle " << std::this_thread::get_id() << '\n'; } 792 | ~_() { std::cerr << "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ dtor local handle " << std::this_thread::get_id() << '\n'; } 793 | } __; 794 | #endif 795 | } local_handle; 796 | 797 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_HANDLE_LOGGING 798 | std::cerr << " ~~~~ local handle access " << std::this_thread::get_id() << '\n'; 799 | #endif 800 | 801 | // if local detour is non-null it means we're in the static dtors (from primary() dtor handler), which means the thread_local handle has already been destroyed. 802 | // this would be und access of a destroyed object, but would theoretically only happen if a static dtor tried to make a thread for some reason. 803 | assert(local_detour == nullptr); 804 | 805 | return local_handle.m; 806 | } 807 | 808 | GC::disjoint_module *GC::disjoint_module::primary() 809 | { 810 | return primary_handle().get(); 811 | } 812 | GC::disjoint_module *GC::disjoint_module::local() 813 | { 814 | // get the local detour 815 | disjoint_module *detour = local_detour; 816 | 817 | // if we're taking a detour, use that, otherwise the handle is alive and we should read that instead 818 | return detour ? 
detour : local_handle().get(); 819 | } 820 | 821 | void GC::disjoint_module_container::create_new_disjunction(shared_disjoint_handle &dest) 822 | { 823 | // create a new disjoint module handle data block 824 | auto m = std::make_unique(); 825 | // and construct its module in-place 826 | new (m->get()) disjoint_module; 827 | 828 | // repoint dest to the new disjunction 829 | dest.reset(m.release()); 830 | 831 | // add it to the disjunction database 832 | { 833 | std::lock_guard internal_lock(internal_mutex); 834 | 835 | // if we're not collecting, we need to put it immediately in the disjunction list 836 | if (!collecting) disjunctions.emplace_back(dest); 837 | // otherwise we need to cache the add action 838 | else disjunction_add_cache.emplace_back(dest); 839 | } 840 | } 841 | 842 | void GC::disjoint_module_container::BACKGROUND_COLLECTOR_ONLY___collect(bool collect) 843 | { 844 | { 845 | std::lock_guard internal_lock(internal_mutex); 846 | 847 | // we should not already be collecting (this is background collector only) 848 | assert(!collecting); 849 | 850 | // enter collecting mode 851 | collecting = true; 852 | 853 | // the add cache should be empty (just came out of a non-collecting phase) 854 | assert(disjunction_add_cache.empty()); 855 | } 856 | 857 | // if performing a real collection 858 | if (collect) 859 | { 860 | // for each stored disjunction: 861 | // (the EXPLICIT std::list::iterator ensures it is indeed a LIST). 862 | // (otherwise we need to constantly update the end iterator after each erasure). 863 | for (auto i = disjunctions.begin(), end = disjunctions.end(); i != end; ) 864 | { 865 | // lock and use this disjunction as the local disjunction - then get the raw handle 866 | disjoint_module *const raw_handle = (disjoint_module::local_handle() = *i).get(); 867 | 868 | // if it's still allive, perform a collection on it 869 | if (raw_handle) 870 | { 871 | // perform the collection 872 | raw_handle->collect(); 873 | ++i; 874 | 875 | // afterwards unlink the handle - we don't want to keep them alive longer than they need to be 876 | disjoint_module::local_handle() = nullptr; 877 | } 878 | // otherwise it's invalid (dangling) - erase it 879 | else i = disjunctions.erase(i); 880 | } 881 | } 882 | // otherwise just performing a cull 883 | else 884 | { 885 | // for each stored disjunction: 886 | // (the EXPLICIT std::list::iterator ensures it is indeed a LIST). 887 | // (otherwise we need to constantly update the end iterator after each erasure). 
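// for reference, the generic shape of the erase-while-iterating idiom used by both branches of
// this function - std::list::erase invalidates only the erased element's iterator and returns the
// iterator that follows it. self-contained sketch; the helper name is invented:
#if 0
#include <list>
template<typename T, typename Pred>
void erase_if_list(std::list<T> &lst, Pred is_dead)
{
    for (auto it = lst.begin(); it != lst.end(); )
    {
        if (is_dead(*it)) it = lst.erase(it); // erase returns the iterator after the removed element
        else ++it;
    }
}
#endif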
888 | for (auto i = disjunctions.begin(), end = disjunctions.end(); i != end; ) 889 | { 890 | // if this is a dangling pointer, erase it 891 | if (i->expired()) i = disjunctions.erase(i); 892 | else ++i; 893 | } 894 | } 895 | 896 | { 897 | std::lock_guard internal_lock(internal_mutex); 898 | 899 | // exit collecting mode 900 | collecting = false; 901 | 902 | // apply all the cached disjunction insertions 903 | disjunctions.splice(disjunctions.begin(), disjunction_add_cache); 904 | disjunction_add_cache.clear(); // just to be sure 905 | } 906 | } 907 | 908 | // ----------------------------------- // 909 | 910 | // -- disjunction handle data stuff -- // 911 | 912 | // ----------------------------------- // 913 | 914 | GC::handle_data::tag_t GC::handle_data::tag_add(tag_t v, std::memory_order order) 915 | { 916 | const auto prev = tag.fetch_add(v, order); 917 | 918 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_HANDLE_DATA_UND_SAFETY 919 | 920 | const auto cur = prev + v; // compute current value from previous 921 | 922 | // make sure we didn't overflow any of the fields 923 | assert((cur & strong_mask) >= (prev & strong_mask)); 924 | assert((cur & weak_mask) >= (prev & weak_mask)); 925 | assert((cur & lock_mask) >= (prev & lock_mask)); 926 | 927 | #endif 928 | 929 | return prev; 930 | } 931 | GC::handle_data::tag_t GC::handle_data::tag_sub(tag_t v, std::memory_order order) 932 | { 933 | const auto prev = tag.fetch_sub(v, order); 934 | 935 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_HANDLE_DATA_UND_SAFETY 936 | 937 | const auto cur = prev - v; // compute current value from previous 938 | 939 | // make sure we didn't overflow any of the fields 940 | assert((cur & strong_mask) <= (prev & strong_mask)); 941 | assert((cur & weak_mask) <= (prev & weak_mask)); 942 | assert((cur & lock_mask) <= (prev & lock_mask)); 943 | 944 | #endif 945 | 946 | return prev; 947 | } 948 | 949 | // ------------------------------- // 950 | 951 | // -- shared disjunction handle -- // 952 | 953 | // ------------------------------- // 954 | 955 | void GC::shared_disjoint_handle::reset(handle_data *other) 956 | { 957 | // handle redundant assignment as no-op 958 | if (data == other) return; 959 | 960 | // if we pointed at something 961 | if (data) 962 | { 963 | // drop a strong reference and get the previous tag 964 | auto prev = data->tag_sub(handle_data::strong_1, std::memory_order_acq_rel); 965 | 966 | // if we were the last strong reference, there are no longer any strong references - destroy the object 967 | // we include the lock strong refs because those locks succeeded - i.e. our very existence as a non-lock strong owner proves those locks succeeded. 
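// for reference, the tag manipulated by tag_add/tag_sub above packs several reference counts into
// one atomic integer so a single fetch_add/fetch_sub can adjust any of them and return a consistent
// snapshot. minimal sketch of the technique - the field widths below are invented for illustration
// and are not the library's actual layout:
#if 0
#include <atomic>
#include <cstdint>
using tag_t = std::uint64_t;
constexpr tag_t strong_1 = tag_t(1);        // strong count in bits 0-20
constexpr tag_t weak_1   = tag_t(1) << 21;  // weak count in bits 21-41
constexpr tag_t lock_1   = tag_t(1) << 42;  // lock count in bits 42-62
constexpr tag_t strong_mask = weak_1 - 1;
constexpr tag_t weak_mask   = (lock_1 - 1) & ~strong_mask;
std::atomic<tag_t> tag{0};
void packed_tag_sketch()
{
    tag.fetch_add(strong_1 | weak_1, std::memory_order_acq_rel);   // bump strong and weak in one atomic op
    const tag_t prev = tag.fetch_sub(strong_1, std::memory_order_acq_rel);
    const bool last_strong = (prev & strong_mask) == strong_1;     // did we just drop the final strong ref?
    (void)last_strong; (void)weak_mask; (void)lock_1;
}
#endif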
968 | if ((prev & handle_data::strong_mask) == handle_data::strong_1) 969 | { 970 | __module->blocking_collect(); // perform one final collection to make sure everything's collected 971 | __module->~disjoint_module(); // then destroy the module itself - its dtor asserts that all objects were collected 972 | 973 | // if there were also no more weak references, delete the handle 974 | if ((prev & handle_data::weak_mask) == 0) delete data; 975 | // otherwise we're not deleting it but the weak ref count might have fallen to zero in the meantime and could be waiting - alert that we're done destroying 976 | else data->destroyed_flag.store(true, std::memory_order_release); 977 | } 978 | } 979 | 980 | // repoint and test for null - must come after destruction logic to ensure the collection can potentially alias this handle 981 | data = other; 982 | if (data) 983 | { 984 | __module = data->get(); // cache the module pointer 985 | data->tag_add(handle_data::strong_1, std::memory_order_acq_rel); // bump up the strong ref count 986 | } 987 | else __module = nullptr; // cache the module pointer (in this case null) 988 | } 989 | 990 | void GC::shared_disjoint_handle::lock(handle_data *other) 991 | { 992 | // we need to start out null 993 | reset(); 994 | 995 | // if the relock target is non-null 996 | if (other) 997 | { 998 | // bump up the strong and lock counts 999 | auto prev = other->tag_add(handle_data::lock_1 | handle_data::strong_1, std::memory_order_acq_rel); 1000 | 1001 | // if there was at least 1 non-lock strong reference, the lock is successful and the object is still alive. 1002 | // we exclude lock strong refs because otherwise 2 locks back-to-back from 2 threads could trick the latter into thinking it succeeded. 1003 | if (handle_data::non_lock_strongs(prev) != 0) 1004 | { 1005 | // unmark the lock ref (success keeps only the strong ref because we're a non-lock strong owner type) 1006 | other->tag_sub(handle_data::lock_1, std::memory_order_acq_rel); 1007 | 1008 | // we now own a reference - do the raw repoint 1009 | data = other; 1010 | __module = other->get(); 1011 | } 1012 | // otherwise the object is expired 1013 | else 1014 | { 1015 | // unmark the lock ref and strong ref (failure to lock a strong ref) 1016 | other->tag_sub(handle_data::lock_1 | handle_data::strong_1, std::memory_order_acq_rel); 1017 | } 1018 | } 1019 | } 1020 | 1021 | GC::shared_disjoint_handle::shared_disjoint_handle(std::nullptr_t) noexcept : __module(nullptr), data(nullptr) {} 1022 | GC::shared_disjoint_handle::~shared_disjoint_handle() { reset(); } 1023 | 1024 | GC::shared_disjoint_handle::shared_disjoint_handle(const shared_disjoint_handle &other) 1025 | { 1026 | // alias the same disjunction 1027 | data = other.data; 1028 | __module = other.__module; 1029 | 1030 | // bump up the strong ref count to account for our aliasing 1031 | if (data) data->tag_add(handle_data::strong_1, std::memory_order_acq_rel); 1032 | } 1033 | GC::shared_disjoint_handle::shared_disjoint_handle(shared_disjoint_handle &&other) noexcept 1034 | { 1035 | // steal disjunction ownership - ref count change is net zero 1036 | data = std::exchange(other.data, nullptr); 1037 | __module = std::exchange(other.__module, nullptr); 1038 | } 1039 | 1040 | GC::shared_disjoint_handle &GC::shared_disjoint_handle::operator=(const shared_disjoint_handle &other) 1041 | { 1042 | reset(other.data); // internally performs self-assignment safety stuff 1043 | return *this; 1044 | } 1045 | GC::shared_disjoint_handle 
&GC::shared_disjoint_handle::operator=(shared_disjoint_handle &&other) 1046 | { 1047 | if (&other != this) 1048 | { 1049 | // repoint to null with the ref count logic 1050 | reset(); 1051 | 1052 | // then steal disjunction ownership - ref count change is net zero 1053 | data = std::exchange(other.data, nullptr); 1054 | __module = std::exchange(other.__module, nullptr); 1055 | } 1056 | 1057 | return *this; 1058 | } 1059 | 1060 | GC::shared_disjoint_handle &GC::shared_disjoint_handle::operator=(std::nullptr_t) 1061 | { 1062 | reset(); 1063 | return *this; 1064 | } 1065 | 1066 | GC::shared_disjoint_handle &GC::shared_disjoint_handle::operator=(const weak_disjoint_handle &other) 1067 | { 1068 | lock(other.data); 1069 | return *this; 1070 | } 1071 | 1072 | // ----------------------------- // 1073 | 1074 | // -- weak disjunction handle -- // 1075 | 1076 | // ----------------------------- // 1077 | 1078 | void GC::weak_disjoint_handle::reset(handle_data *other) 1079 | { 1080 | // handle redundant assignment as no-op 1081 | if (data == other) return; 1082 | 1083 | // if we pointed at something 1084 | if (data) 1085 | { 1086 | // drop a weak ref and get the previous tag 1087 | auto prev = data->tag_sub(handle_data::weak_1, std::memory_order_acq_rel); 1088 | 1089 | // if we were the last weak ref and there were no strong refs, the object is already destroyed and needs to be deleted. 1090 | // being the last weak ref implies there are no locks at the moment because without unsynchronized read/write to the same variable from several threads that's impossible. 1091 | // therefore we don't need to bother about strong vs. non-lock strong references in this context because strong == non-lock strong 1092 | if ((prev & handle_data::weak_mask) == handle_data::weak_1 && (prev & handle_data::strong_mask) == 0) 1093 | { 1094 | // in this case the strong count fell to zero, so the object is potentially being destroyed by the strong ref count logic. 1095 | // however, that strong ref count logic won't delete the data block because our existence proves there was a weak reference prior to the strong ref dec logic. 
1096 | // therefore we just need to wait until the object is destroyed so we can delete the data block 1097 | while (!data->destroyed_flag.load(std::memory_order_acquire)); 1098 | delete data; 1099 | } 1100 | } 1101 | 1102 | // repoint - if non-null bump up weak ref count 1103 | data = other; 1104 | if (data) data->tag_add(handle_data::weak_1, std::memory_order_acq_rel); 1105 | } 1106 | 1107 | GC::weak_disjoint_handle::weak_disjoint_handle(std::nullptr_t) noexcept : data(nullptr) {} 1108 | GC::weak_disjoint_handle::~weak_disjoint_handle() { reset(); } 1109 | 1110 | GC::weak_disjoint_handle::weak_disjoint_handle(const weak_disjoint_handle &other) 1111 | { 1112 | data = other.data; // alias the same disjunction 1113 | if (data) data->tag_add(handle_data::weak_1, std::memory_order_acq_rel); // bump up the weak ref count to account for our aliasing 1114 | } 1115 | GC::weak_disjoint_handle::weak_disjoint_handle(weak_disjoint_handle &&other) noexcept 1116 | { 1117 | data = std::exchange(other.data, nullptr); // steal disjunction ownership - ref count change is net zero 1118 | } 1119 | 1120 | GC::weak_disjoint_handle &GC::weak_disjoint_handle::operator=(const weak_disjoint_handle &other) 1121 | { 1122 | reset(other.data); // internally performs self-assignment safety 1123 | return *this; 1124 | } 1125 | GC::weak_disjoint_handle &GC::weak_disjoint_handle::operator=(weak_disjoint_handle &&other) 1126 | { 1127 | if (&other != this) 1128 | { 1129 | reset(); // repoint to null with the ref count logic 1130 | data = std::exchange(other.data, nullptr); // then unsafe repoint to other and disconnect other - the ref count change is net zero 1131 | } 1132 | return *this; 1133 | } 1134 | 1135 | GC::weak_disjoint_handle &GC::weak_disjoint_handle::operator=(std::nullptr_t) 1136 | { 1137 | reset(); 1138 | return *this; 1139 | } 1140 | 1141 | GC::weak_disjoint_handle::weak_disjoint_handle(const shared_disjoint_handle &other) 1142 | { 1143 | data = other.data; // alias the same disjunction 1144 | if (data) data->tag_add(handle_data::weak_1, std::memory_order_acq_rel); // bump up the weak ref count to account for our aliasing 1145 | } 1146 | GC::weak_disjoint_handle &GC::weak_disjoint_handle::operator=(const shared_disjoint_handle &other) 1147 | { 1148 | reset(other.data); // perform the repoint action, accounting for ref counts 1149 | return *this; 1150 | } 1151 | 1152 | bool GC::weak_disjoint_handle::expired() const noexcept 1153 | { 1154 | // get the current tag 1155 | auto tag = data ? data->tag.load(std::memory_order_acq_rel) : 0; 1156 | 1157 | // this weak ptr is expired if there are no strong refs remaining. 1158 | // we include lock strong refs because otherwise it's possible for a series of atomic steps to result in a successful lock but intermediately no non-lock strong refs. 1159 | // due to locking, however, this approach could report false negatives (but never false positives). 
1160 | return (tag & handle_data::strong_mask) == 0; 1161 | } 1162 | 1163 | // ---------------- // 1164 | 1165 | // -- collection -- // 1166 | 1167 | // ---------------- // 1168 | 1169 | void GC::collect() 1170 | { 1171 | disjoint_module::local()->collect(); 1172 | } 1173 | 1174 | // ------------------------------ // 1175 | 1176 | // -- utility router functions -- // 1177 | 1178 | // ------------------------------ // 1179 | 1180 | void GC::router_unroot(const smart_handle &arc) 1181 | { 1182 | arc.disjunction->schedule_handle_unroot(arc); 1183 | } 1184 | 1185 | // --------------------- // 1186 | 1187 | // -- auto collection -- // 1188 | 1189 | // --------------------- // 1190 | 1191 | GC::strategies GC::strategy() { return _strategy; } 1192 | void GC::strategy(strategies new_strategy) { _strategy = new_strategy; } 1193 | 1194 | GC::sleep_time_t GC::sleep_time() { return _sleep_time; } 1195 | void GC::sleep_time(sleep_time_t new_sleep_time) { _sleep_time = new_sleep_time; } 1196 | 1197 | void GC::start_timed_collect() 1198 | { 1199 | static struct _ 1200 | { 1201 | _() 1202 | { 1203 | std::thread([] 1204 | { 1205 | // begin sever ties to the default disjunction. 1206 | // this is because we're going to be a detached thread and we don't want to impede object deletion. 1207 | disjoint_module::local_handle() = nullptr; 1208 | 1209 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_HANDLE_LOGGING 1210 | std::cerr << "start timed collect thread: " << std::this_thread::get_id() << '\n'; 1211 | #endif 1212 | 1213 | // try the operation 1214 | try 1215 | { 1216 | // we'll run forever 1217 | while (true) 1218 | { 1219 | // sleep the sleep time 1220 | std::this_thread::sleep_for(sleep_time()); 1221 | 1222 | // if we're using timed strategy 1223 | if ((int)strategy() & (int)strategies::timed) 1224 | { 1225 | // run a collect pass for all the dynamic disjunctions 1226 | disjoint_module_container::get().BACKGROUND_COLLECTOR_ONLY___collect(true); 1227 | } 1228 | // otherwise perform any other relevant logic 1229 | else 1230 | { 1231 | // even if we don't perform a dynamic disjunction collection, we need to cull dangling references 1232 | disjoint_module_container::get().BACKGROUND_COLLECTOR_ONLY___collect(false); 1233 | } 1234 | } 1235 | } 1236 | // if we ever hit an error, something terrible happened 1237 | catch (...) 
1238 | { 1239 | // print error message and terminate with a nonzero code 1240 | std::cerr << "CRITICAL ERROR: garbage collection threw an exception\n"; 1241 | std::abort(); 1242 | } 1243 | }).detach(); 1244 | } 1245 | } __; 1246 | } 1247 | -------------------------------------------------------------------------------- /GarbageCollection.sln: -------------------------------------------------------------------------------- 1 |  2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 15 4 | VisualStudioVersion = 15.0.28010.2041 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "GarbageCollection", "GarbageCollection.vcxproj", "{C988FE99-0C66-4013-8C36-E3DB13639AC1}" 7 | EndProject 8 | Global 9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 10 | Debug|x64 = Debug|x64 11 | Debug|x86 = Debug|x86 12 | Release|x64 = Release|x64 13 | Release|x86 = Release|x86 14 | EndGlobalSection 15 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 16 | {C988FE99-0C66-4013-8C36-E3DB13639AC1}.Debug|x64.ActiveCfg = Debug|x64 17 | {C988FE99-0C66-4013-8C36-E3DB13639AC1}.Debug|x64.Build.0 = Debug|x64 18 | {C988FE99-0C66-4013-8C36-E3DB13639AC1}.Debug|x86.ActiveCfg = Debug|Win32 19 | {C988FE99-0C66-4013-8C36-E3DB13639AC1}.Debug|x86.Build.0 = Debug|Win32 20 | {C988FE99-0C66-4013-8C36-E3DB13639AC1}.Release|x64.ActiveCfg = Release|x64 21 | {C988FE99-0C66-4013-8C36-E3DB13639AC1}.Release|x64.Build.0 = Release|x64 22 | {C988FE99-0C66-4013-8C36-E3DB13639AC1}.Release|x86.ActiveCfg = Release|Win32 23 | {C988FE99-0C66-4013-8C36-E3DB13639AC1}.Release|x86.Build.0 = Release|Win32 24 | EndGlobalSection 25 | GlobalSection(SolutionProperties) = preSolution 26 | HideSolutionNode = FALSE 27 | EndGlobalSection 28 | GlobalSection(ExtensibilityGlobals) = postSolution 29 | SolutionGuid = {3ADA6DE3-9693-41B0-B606-F8393F8A2260} 30 | EndGlobalSection 31 | EndGlobal 32 | -------------------------------------------------------------------------------- /GarbageCollection.vcxproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Debug 6 | Win32 7 | 8 | 9 | Release 10 | Win32 11 | 12 | 13 | Debug 14 | x64 15 | 16 | 17 | Release 18 | x64 19 | 20 | 21 | 22 | 15.0 23 | {C988FE99-0C66-4013-8C36-E3DB13639AC1} 24 | GarbageCollection 25 | 10.0.17134.0 26 | 27 | 28 | 29 | Application 30 | true 31 | v141 32 | MultiByte 33 | 34 | 35 | Application 36 | false 37 | v141 38 | true 39 | MultiByte 40 | 41 | 42 | Application 43 | true 44 | v141 45 | MultiByte 46 | 47 | 48 | Application 49 | false 50 | v141 51 | true 52 | MultiByte 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | Level3 76 | Disabled 77 | true 78 | true 79 | stdcpp17 80 | 81 | 82 | 83 | 84 | Level3 85 | Disabled 86 | true 87 | true 88 | stdcpp17 89 | 90 | 91 | 92 | 93 | Level3 94 | MaxSpeed 95 | true 96 | true 97 | true 98 | true 99 | true 100 | stdcpp17 101 | 102 | 103 | true 104 | true 105 | 106 | 107 | 108 | 109 | Level3 110 | MaxSpeed 111 | true 112 | true 113 | true 114 | true 115 | true 116 | stdcpp17 117 | 118 | 119 | true 120 | true 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | -------------------------------------------------------------------------------- /GarbageCollection.vcxproj.filters: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF} 6 | 
cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx 7 | 8 | 9 | {93995380-89BD-4b04-88EB-625FBE52EBFB} 10 | h;hh;hpp;hxx;hm;inl;inc;ipp;xsd 11 | 12 | 13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} 14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms 15 | 16 | 17 | 18 | 19 | Source Files 20 | 21 | 22 | Source Files 23 | 24 | 25 | 26 | 27 | Header Files 28 | 29 | 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 dragazo 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cpp-gc 2 | 3 | One big complaint I've seen from C++ initiates is that the language doesn't have automatic garbage collection. Although on most days I'd argue that's actually a feature, let's face it: sometimes it's just inconvenient. I mean sure, there are standard C++ utilities to help out, but as soon as cycles are involved even the mighty `std::shared_ptr` ceases to function. You could always refactor your entire data structure to use `std::weak_ptr`, but for anything as or more complicated than a baked potato that means doing all the work manually anyway. 4 | 5 | `cpp-gc` is a **self-managed**, **thread-safe**, **non-blocking** garbage collection library written in **standard C++14**. 6 | 7 | With `cpp-gc` in place, all you'd need to do to fix the above example is change all your `std::shared_ptr` to `GC::ptr`. The rest of the logic will take care of itself automatically *(with one exception - see below)*. 8 | 9 | Contents: 10 | 11 | * [How It Works](#how-it-works) 12 | * [GC Strategy](#gc-strategy) 13 | * [Guarantees](#guarantees) 14 | * [Formal Definitions](#formal-definitions) 15 | * [Router Functions](#router-functions) 16 | * [Built-in Router Functions](#built-in-router-functions) 17 | * [Example Structs and Router Functions](#example-structs-and-router-functions) 18 | * [Disjunctions](#disjunctions) 19 | * [Undefined Behavior](#undefined-behavior) 20 | * [Usage Examples](#usage-examples) 21 | * [Best Practices](#best-practices) 22 | 23 | ## How it Works 24 | 25 | `cpp-gc` is a very powerful library with a lot of built-in features. 
However, there are only a few things that you *really* need to know about in terms of functions and types: 26 | 27 | * `GC` - Static class containing types and functions that help you **manage your memory conveniently**. 28 | * `GC::ptr` - The shining star of `cpp-gc` - represents an **autonomous garbage-collected pointer**. 29 | * `GC::atomic_ptr` - An **atomic** version of `GC::ptr` that's safe to read/write from several threads. Equivalent to `std::atomic>`. 30 | * `GC::make()` - Constructs a new dynamic object and puts it in gc control. Used like `std::make_shared`. 31 | * `GC::adopt()` - Adopts a pre-existing object into gc control. Like the `T*` constructor of `std::shared_ptr`. 32 | * `GC::alias()` - Aliases a sub-object of an object under gc control. Like the aliasing constructor of `std::shared_ptr` 33 | * `GC::collect()` - Triggers a full garbage collection pass *(see below)*. 34 | 35 | When you allocate an object via `GC::make` or bind a pre-existing object with `GC::adopt` it creates a new garbage-collected object with a reference count of 1. Just like `std::shared_ptr`, it will automatically manage the reference count and delete the object **immediately** when the reference count hits zero *(except in one case - see [Guarantees](#guarantees))*. What does this mean? Well this means if `std::shared_ptr` worked for you before, `GC::ptr` will function almost identically *(though a bit slower due to having extra things to manage)*. 36 | 37 | `GC::collect()` triggers a full garbage collection pass, which accounts for cycles using the typical mark-and-sweep algorithm. This is rather slow compared to the other method `cpp-gc` uses to manage non-cyclic references, but is required if you do in fact have cycles. So when should you call it? Probably never. I'll explain: 38 | 39 | ## GC Strategy 40 | 41 | `cpp-gc` has several "strategy" options for automatically deciding when to perform a full garbage collect pass. This is controlled by a bitfield enum called `GC::strategies`. 42 | 43 | The available strategy options are: 44 | * `manual` - No automatic collection (except non-cyclic dependencies, which are always handled automatically once the reference count hits zero). 45 | * `timed` - Collect from a background thread on a regular basis. 46 | * `allocfail` - Collect every time a call to `GC::make()` or `GC::adopt()` fails to allocate space. 47 | 48 | `GC::strategy()` allows you to read/write the strategy to use. 49 | 50 | `GC::sleep_time()` allows you to change the sleep time duration for timed collection. 51 | 52 | The default strategy is `timed | allocfail`, with the time set to 60 seconds. 53 | 54 | Typically, if you want to use non-default settings, you should set them up as soon as possible on program start and not modify them again. 55 | 56 | ## Guarantees 57 | 58 | The following guarantees are made for all objects under gc control assuming all objects present have `cpp-gc`-compliant router functions (see below) and are not destroyed by external code: 59 | 60 | * Adding an object to gc control (i.e. `GC::make()` or `GC::adopt()`) offers the strong exception guarantee and is O(1). 61 | * Once under gc control, the object shall not be relocated - i.e. raw pointers to said object will never be invalidated. 62 | * The allocating form of gc object insertion (i.e. `GC::make()`) shall allocate a block of memory suitably-aligned for type `T` even if `T` is an over-aligned type. 63 | * Invoking a garbage collection (i.e. 
`GC::collect()`) while another garbage collection is running in any thread is non-blocking and indeed no-op. 64 | * A reference count shall be maintained for each object under gc control. When this reference count reaches zero the object is immediately deleted unless it is currently under collection consideration by an active call to `GC::collect()`, in which case the object is at least guaranteed to be destroyed before the end of said call to `GC::collect()`. 65 | 66 | Given the same assumptions of objects under gc control, the following (non-)guarantees are made by cpp-gc: 67 | 68 | * The thread that destroys an object under gc control is undefined. If your object requires the same thread that made it to destroy it (e.g. `std::unique_lock`), it should not be used directly by cpp-gc. 69 | 70 | ## Formal Definitions 71 | 72 | **This section is extremely-important. If you plan to use `cpp-gc` in any capacity, read this section in its entirety.** 73 | 74 | A type `T` is defined to be "gc" if it owns an object that is itself considered to be gc. 75 | By definition, `GC::ptr`, `GC::atomic_ptr`, and `std::atomic` are gc types. 76 | Ownership means said "owned" object's lifetime is entirely controlled by the "owner". 77 | An object may only have one owner at any point in time - shared ownership is considered non-owning. 78 | Thus global variables and static member variables are never considered to be owned objects. 79 | At any point in time, the owned object is considered to be part of its owner (i.e. as if it were a by-value member). 80 | 81 | The simplest form of ownership is a by-value member. 82 | Another common category of owned object is a by-value container (e.g. the contents of `std::vector`, `std::list`, `std::set`, etc.). 83 | Another case is a uniquely-owning pointer or reference - e.g. pointed-to object of `std::unique_ptr`, or any other (potentially-smart) pointer/reference to an object you know you own uniquely. 84 | Of course, these cases can be mixed - e.g. a by-value member `std::unique_ptr` which is a uniquely-owning pointer to a by-value container `std::vector` of a gc type `T`. 85 | 86 | It is important to remember that a container type like `std::vector` is a gc type if `T` is a gc type (because it thus contains or "owns" gc objects). 87 | 88 | Object reachability traversals are performed by router functions. 89 | For a gc type `T`, its router functions route a function-like object to its owned gc objects recursively. 90 | Because object ownership cannot be cyclic, this will never degrade into infinite recursion. 91 | 92 | Routing to anything you don't own is undefined behavior. 93 | Routing to the same object twice is likewise undefined behavior. 94 | Thus, although it is legal to route to the "contents" of an owned object (i.e. owned object of an owned object), it is typically dangerous to do so. 95 | In general, you should just route to all your by-value members - it is their responsibility to properly route the message the rest of the way. 96 | Thus, if you own a `std::vector` where `T` is a gc type, you should just route to the vector itself, which has its own router functions to route the message to its contents. 97 | 98 | If you are the owner of a gc type object, it is undefined behavior not to route to it (except in the special case of a mutable router function - see below). 99 | 100 | For a gc type `T`, a "mutable" owned object is defined to be an owned object for which you could legally route to its contents but said contents can be changed after construction. 
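For illustration, a minimal sketch contrasting the two kinds of owned gc objects (`Example` is a hypothetical type, not part of `cpp-gc`):

```cpp
struct Example
{
    // a by-value GC::ptr is an owned gc object, but re-pointing it is not considered
    // a mutating action (you never route to its pointed-to contents), so it is "normal"
    GC::ptr<int> value;

    // a by-value container of gc pointers is a "mutable" owned gc object:
    // inserting or erasing elements changes what is routed-to through it
    // (as discussed below, such mutation must be synchronized with the router function,
    //  or the GC::vector wrapper can be used instead)
    std::vector<GC::ptr<int>> children;
};
```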
101 | e.g. `std::vector`, `std::list`, `std::set`, and `std::unique_ptr` are all examples of "mutable" gc types because you own them (and their contents), but their contents can change. 102 | An "immutable" or "normal" owned object is defined as any owned object that is not "mutable". 103 | 104 | More formally, consider an owned object x: 105 | Suppose you examine the set of all objects routed-to through x recursively. 106 | x is a "mutable" owned gc object iff for any two such router invocation sets taken over the lifetime of x the two sets are different. 107 | 108 | It should be noted that re-pointing a `GC::ptr`, `GC::atomic_ptr`, or `std::atomic` is not a mutating action for the purposes of classifying a "mutable" owned gc object. 109 | This is due to the fact that you would never route to its pointed-to contents due to it being a shared resource. 110 | 111 | The following is critical and easily forgotten: 112 | All mutating actions in a mutable gc object (e.g. adding items to a `std::vector>`) must occur in a manner mutually exclusive with the object's router function. 113 | This is because any thread may at any point make routing requests to any number of objects under gc management in any order and for any purpose. 114 | Thus, if you have e.g. a `std::vector>`, you should also have a mutex to guard it on insertion/deletion/reordering of elements and for the router function. 115 | Additionally, if your router function locks one or more mutexes, performing any actions that may self-route (e.g. a full garbage collection via `GC::collect()`) may deadlock if any of the locks are still held when the call is made. 116 | This can be fixed by either unlocking the mutex(es) prior to performing the self-routing action or by switching to a recursive mutex. 117 | This would likely need to be encapsulated by methods of your class to ensure external code can't violate this requirement (it is undefined behavior to violate this). 118 | 119 | On the bright side, `cpp-gc` has wrappers for all standard containers that internally apply all of this logic without the need to remember it. 120 | So, you could use a `GC::vector>` instead of a `std::vector>` and avoid the need to be careful or encapsulate anything. 121 | 122 | The following requirements pertain to router functions: 123 | For a normal router function (i.e. `GC::router_fn`) this must at least route to all owned gc objects. 124 | For a mutable router function (i.e. `GC::mutable_router_fn`) this must at least route to all owned "mutable" gc objects. 125 | The mutable router function system is entirely a method for optimization and can safely be ignored if desired. 126 | 127 | ## Router Functions 128 | 129 | **This section is extremely-important. If you plan to use `cpp-gc` in any capacity, read this section in its entirety.** 130 | 131 | Other languages that implement garbage collection have it built right into the language and the compiler handles all the nasty bits for you. For instance, one piece of information a garbage collector needs to know is the set of all outgoing garbage-collected pointers from an object. 132 | 133 | However, something like this is impossible in C++. After all, C++ allows you to do very low-level things like create a buffer as `alignas(T) char buffer[sizeof(T)]` and then refer to an object that you conditionally may or may not have constructed at any point in runtime via `reinterpret_cast(buffer)`. 
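Concretely, the kind of code meant here looks like the following (a minimal sketch, unrelated to `cpp-gc` itself):

```cpp
#include <new>

struct Thing { int x; };

alignas(Thing) char buffer[sizeof(Thing)]; // raw storage - no Thing constructed yet
bool constructed = false;

void make_thing() { new (buffer) Thing{42}; constructed = true; }

int peek_thing()
{
    // only meaningful if a Thing was actually constructed at some point at runtime
    return constructed ? reinterpret_cast<Thing*>(buffer)->x : 0;
}
```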
Garbage-collected languages like Java and C# would curl up into a little ball at the sight of this, but in C++ it's rather common *(in library code)*. 134 | 135 | And so, if you want to create a proper garbage-collected type for use in `cpp-gc` you must do a tiny bit of extra work to specify such things explicitly. So how do you do this? 136 | 137 | The following struct represents the router function set for objects of type `T`. 138 | The `T` in `router` must not be cv-qualified. 139 | Router functions must be static, named "route", return void, and take two args: a reference to (possibly cv-qualified) `T` and a by-value router function object (i.e. `GC::router_fn` or `GC::mutable_router_fn`). 140 | If you don't care about the efficiency mechanism of mutable router functions, you can define the function type as a template type paramter, but it must be deducible. 141 | The default implementation is no-op, which is suitable for any non-gc type. 142 | This should not be used directly for routing to owned objects - use the helper functions `GC::route()` and `GC::route_range()` instead. 143 | 144 | ```cpp 145 | template 146 | struct router 147 | { 148 | template 149 | static void route(const T &obj, F func) {} 150 | }; 151 | ``` 152 | 153 | This is the means by which `cpp-gc` polls your object for outgoing arcs. Here's a short example of how it works - consider the following type definition and its correct `cpp-gc` router specialization: 154 | 155 | ```cpp 156 | struct MyType 157 | { 158 | GC::ptr foo; 159 | GC::ptr bar; 160 | 161 | int some_data; 162 | double some_other_data; 163 | 164 | std::vector flags; 165 | }; 166 | template<> struct GC::router 167 | { 168 | // when we want to route to MyType 169 | template 170 | static void route(const MyType &obj, F func) 171 | { 172 | // also route to its children (only ones that may own GC::ptr objects are required) 173 | GC::route(obj.foo, func); 174 | GC::route(obj.bar, func); 175 | } 176 | }; 177 | ``` 178 | 179 | So here's what's happening: `cpp-gc` will send a message *(the 'func' object)* to your type. Your type will route that to all its children. Recursively, this will eventually reach leaf types *(which have no children)* via `GC::route()` or `GC::route_range()`. Because object ownership cannot be cyclic, this will never degrade into an infinite loop. 180 | 181 | Additionally, you only need to route to children that may own (directly or indirectly) `GC::ptr` objects. Routing to anything else is a glorified no-op that may be slow if your optimizer isn't smart enough to elide it. However, because optimizers are generally pretty clever, you may wish to route to some objects just for future-proofing. Feel free. 182 | 183 | The following section summarizes built-in specializations of `GC::router`: 184 | 185 | ## Built-in Router Functions 186 | 187 | The following types have well-formed `GC::router` specializations pre-defined for your convenience. As mentioned in the above section, you should not route to one of these types and also to its contents, as this would result in routing to the same object twice, which is undefined bahavior. 
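For example, a type that owns a container of gc pointers should route to the container itself and let the container's pre-defined router reach the elements - routing to both the container and its individual elements would hit the same objects twice. Here is a minimal sketch (`Widget` is a hypothetical type; the `GC::vector` wrapper is used so that the synchronization described in [Formal Definitions](#formal-definitions) is handled internally), with the full list of pre-defined specializations following below:

```cpp
struct Widget
{
    GC::vector<GC::ptr<int>> parts; // wrapper container - has its own router and internal locking
};
template<> struct GC::router<Widget>
{
    template<typename F>
    static void route(const Widget &w, F func)
    {
        GC::route(w.parts, func); // route to the container only - never also to its elements
    }
};
```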
188 | 189 | * `T[N]` 190 | * `std::array` 191 | * `std::atomic>` 192 | * `std::deque` 193 | * `std::forward_list` 194 | * `std::list` 195 | * `std::map` 196 | * `std::multimap` 197 | * `std::multiset` 198 | * `std::pair` 199 | * `std::priority_queue` 200 | * `std::queue` 201 | * `std::set` 202 | * `std::stack` 203 | * `std::tuple` 204 | * `std::unique_ptr` 205 | * `std::unordered_map` 206 | * `std::unordered_multimap` 207 | * `std::unordered_multiset` 208 | * `std::unordered_set` 209 | * `std::vector` 210 | 211 | Additionally, the equivalent wrappers for the above standard library containers defined in the `GC` class likewise have well-formed router functions (which perform their own synchronization logic - see [Formal Definitions](#formal-definitions). 212 | 213 | The following types have ill-formed `GC::router` specializations pre-defined for safety. This is typically because there is no way to route to said type's contents or to hint that routing to such an object won't have the desired effect. It is a compile error to use any of these, which should help limit confusion on usage. 214 | 215 | * `T[]` 216 | * `std::atomic` (where T is not a `GC::ptr`) 217 | 218 | ## Example Structs and Router Functions 219 | 220 | This section will consist of several examples of possible structs/classes you might write and their accompanying router specializations, including all necessary safety features in the best practices section. Only the relevant pieces of code are included. Everything else is assumed to be exactly the same as normal C++ class writing with no tricks involved. 221 | 222 | This example demonstrates the most common use case: having `GC::ptr` objects by value in the object. Note that this type doesn't contain any "mutable" gc objects, so we can optionally make the mutable router function no-op. 223 | 224 | ```cpp 225 | struct TreeNode 226 | { 227 | // left and right sub-trees 228 | GC::ptr left; 229 | GC::ptr right; 230 | 231 | double value; 232 | 233 | enum op_t { val, add, sub, mul, div } op; 234 | }; 235 | template<> struct GC::router 236 | { 237 | // the "normal" router function 238 | static void route(const TreeNode &node, GC::router_fn func) 239 | { 240 | // route to our GC::ptr instances 241 | GC::route(node.left, func); 242 | GC::route(node.right, func); 243 | // no need to route to anything else 244 | } 245 | // the "mutable" router function 246 | static void route(const TreeNode &node, GC::mutable_router_fn) {} 247 | }; 248 | ``` 249 | 250 | Now we'll use the tree type we just created to make a garbage-collected symbol table. This demonstrates the (less-frequent) use case of having a mutable container of `GC::ptr` objects. This requires special considerations in terms of thread safety, as mentioned in the above section on router functions. 251 | 252 | Note that in this case we have "mutable" gc children. Thus, we'll elect to merge the router functions together by making it a template. 253 | 254 | ```cpp 255 | class SymbolTable 256 | { 257 | private: 258 | std::unordered_map> symbols; 259 | 260 | // we need to route to the contents of symbols, but symbols is a mutable collection. 
261 | // we therefore need insert/delete to be synchronous with respect to the router: 262 | mutable std::mutex symbols_mutex; 263 | 264 | // make the particular router class a friend so it can use our private data 265 | friend struct GC::router; 266 | 267 | public: 268 | void update(std::string name, GC::ptr new_value) 269 | { 270 | // modification of the mutable collection of GC::ptr and router must be mutually exclusive 271 | std::lock_guard lock(symbols_mutex); 272 | 273 | symbols[name] = new_value; 274 | } 275 | }; 276 | template<> struct GC::router 277 | { 278 | // serves as both the "normal" and the "mutable" router function 279 | template 280 | static void route(const SymbolTable &table, F func) 281 | { 282 | // modification of the mutable collection of GC::ptr and router must be mutually exclusive 283 | std::lock_guard lock(table.symbols_mutex); 284 | 285 | GC::route(table.symbols, func); 286 | } 287 | }; 288 | ``` 289 | 290 | Of course, the above seems rather bloated. Having to wrap all that mutex logic is super annoying. Fortunately, *(mentioned above)* `cpp-gc` defines wrappers for all the standard library containers that apply this mutex logic internally and offer the same interface to the user. 291 | 292 | So we could *(and should)* have written the above as this: 293 | 294 | ```cpp 295 | class SymbolTable 296 | { 297 | public: 298 | 299 | // like std::unordered_map but performs the router mutex logic internally 300 | GC::unordered_map> symbols; 301 | 302 | void update(std::string name, GC::ptr new_value) 303 | { 304 | symbols[name] = new_value; 305 | } 306 | }; 307 | template<> struct GC::router 308 | { 309 | // serves as both the "normal" and the "mutable" router function 310 | template 311 | static void route(const SymbolTable &table, F func) 312 | { 313 | GC::route(table.symbols, func); 314 | } 315 | }; 316 | 317 | Note that even though we used the `cpp-gc` wrapper type `GC::unordered_map` it's still considered a "mutable" gc object, and thus we still need to route to it in the mutable router. All it does for us is make usage easier and cleaner by not having to explicitly lock mutexes. 318 | 319 | The next example demonstrates a rather uncommon use case: having a memory buffer that may at any time either contain nothing or a constructed object of a type we need to route to. This requires a bit more effort on your part. This is uncommon because said buffer could just be replaced with a `GC::ptr` object to avoid the headache, with null being the no-object state. Never-the-less, it is shown here as an example in case you need such behavior. 320 | 321 | ```cpp 322 | class MaybeTreeNode 323 | { 324 | private: 325 | // the buffer for the object 326 | alignas(TreeNode) char buf[sizeof(TreeNode)]; 327 | bool contains_tree_node = false; 328 | 329 | // because we'll be constructing destucting it on the fly, 330 | // buf is a mutable container of a type we need to route to. 331 | // thus we need to synchronize "re-pointing" it and the router function. 
332 | mutable std::mutex buf_mutex; 333 | 334 | // friend the particular router class so it can access our private data 335 | friend struct GC::router; 336 | 337 | public: 338 | void construct() 339 | { 340 | if (contains_tree_node) throw std::runtime_error("baka"); 341 | 342 | // we need to synchronize with the router 343 | std::lock_guard lock(buf_mutex); 344 | 345 | // construct the object 346 | new (buf) TreeNode; 347 | contains_tree_node = true; 348 | } 349 | void destruct() 350 | { 351 | if (!contains_tree_node) throw std::runtime_error("baka"); 352 | 353 | // we need to synchronize with the router 354 | std::lock_guard lock(buf_mutex); 355 | 356 | // destroy the object 357 | reinterpret_cast(buf)->~TreeNode(); 358 | contains_tree_node = false; 359 | } 360 | }; 361 | template<> struct GC::router 362 | { 363 | // serves as both the "normal" and "mutable" router functions via templating 364 | template 365 | static void route(const MaybeTreeNode &maybe, F func) 366 | { 367 | // this must be synchronized with constructing/destucting the buffer object. 368 | std::lock_guard lock(maybe.buf_mutex); 369 | 370 | // because TreeNode contains GC::ptr objects, we need to route to it, 371 | // but only if the MaybeTreeNode actually has it constructed in the buffer. 372 | if (maybe.contains_tree_node) GC::route(*reinterpret_cast(maybe.buf), func); 373 | } 374 | }; 375 | ``` 376 | 377 | ## Disjunctions 378 | 379 | One problem with having garbage collection in a multithreaded environment - at least in a non-blocking manner like `cpp-gc` uses - is that a centralized in-memory database of gc objects, roots, etc. needs to be accessed frequently and potentially by several threads. If for one reason or another your program makes calls to such utilities in rapid succession from several threads simultaneously it can seriously hurt performance due to all the mutex locking. However, `cpp-gc` has a feature specifically-designed to remedy this. 380 | 381 | The centralized gc database mentioned above can actually be split into several disjoint systems. The primary thread of program execution (the one that first calls `main()`) is assigned to the primary disjunction. Upon creation of a new thread, be it a `pthread`, `std::thread`, or anything else, said thread is assigned to a single disjunction, which can never be changed again during the thread's lifetime. 382 | 383 | The name "disjunction" or "disjoint" is meaningful and **very important: two threads can share gc objects if and only if they are in the same disjunction**. Violating this restriction is undefined behavior, and can easily lead to undefined memory accesses, segmentation faults, or access violations. Thankfully, many cases of violating these constraints are safely-checked at runtime, in which case they result in an exception of type `GC::disjunction_error` instead of invoking undefined behavior. However it is still possible to violate these restrictions through raw pointers, references, or global variables. So if you use disjunctions you had better be very cautious about using shared memory. 384 | 385 | By default all threads created are assigned to the primary disjunction. The wrapper class `GC::thread` has an identical interface to `std::thread` except the constructor, which takes an extra first parameter whose type determines what disjunction to put the new thread in. 
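For instance (a minimal sketch; `worker` is a hypothetical function):

```cpp
#include "GarbageCollection.h"

void worker(); // some function you want to run in its own disjunction

void spawn_worker()
{
    // identical usage to std::thread apart from the extra first argument, which selects
    // the disjunction; this thread gets a new disjunction, so it must not share gc
    // objects with the calling thread
    GC::thread(GC::new_disjunction, worker).detach();
}
```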
These options are: `GC::primary_disjunction_t` which puts the new thread in the primary disjunction, `GC::inherit_disjunction_t` which puts the new thread in the same disjunction as the calling thread, or `GC::new_disjunction_t` which puts the new thread in a new disjunction. 386 | 387 | If you don't want to bother with the complexity of the disjunction system, just pretend it doesn't exist. The default behavior of putting all threads in the primary disjunction will never cause errors - at worst it will just be slower in threaded contexts, depending on how prolifically you use `cpp-gc` in said threads. Even if you do use disjunctions, I would only recommend using them for separating specific threads that you know have a high degree of contention for accessing the gc system. 388 | 389 | Additionally, keep in mind there is some overhead associated with creating and destroying disjunctions. Short-lived new disjunctions might actually result in poorer performance than just using the primary or inherit options. If you decide to make a new disjunction I suggest you do a before/after speed test and make sure you're not shooting yourself in the foot with a combination of poorer performance and more inter-thread restrictions. 390 | 391 | ## Undefined Behavior 392 | 393 | In this section, we'll cover all the cases that result in undefined behavior and summarize the logic behind these decisions. 394 | 395 | * Dereferencing/Indexing a null `GC::ptr` - obvious. 396 | * Not routing to all your owned objects - messes up reachability traversal and can result in prematurely-deleted objects. 397 | * Routing to the same object twice during the same router event - depending on what the router event is trying to do, this could cause all sorts of problems. 398 | * Not making your router function mutually explusive with re-pointing or adding/removing/etc things you would route to - explained in immense detail above. 399 | * Accessing the pointed-to object of a `ptr` in the destructor of its (potentially-indirect) owner - you and the pointed-to object might have been scheduled for garbage collection together and the order of destruction by the garbage collector is undefined, so it might already have been destroyed. 400 | * Using `GC::adopt(T *obj)` where obj is not an instance of `T` - e.g. obj must not be pointer to base. This is because `GC::adopt()` needs the true type of the object to get its router functions. Thus if `T` is not the true type it would be using the wrong router functions and result in undefined behavior. 401 | * Accessing an object in disjunction A from a thread in disjunction B - this is explained in detail in the above section on disjunctions - due to the way in which disjunctions are managed, they must not share objects. 402 | 403 | ## Usage Examples 404 | 405 | For our example, we'll make a doubly-linked list that supports garbage collection. We'll begin by defining our node type `ListNode`. 406 | 407 | ```cpp 408 | struct ListNode 409 | { 410 | GC::ptr prev; 411 | GC::ptr next; 412 | 413 | // show a message that says we called ctor 414 | ListNode() { std::cerr << "i'm alive!!\n"; } 415 | 416 | // show a message that says we called dtor 417 | ~ListNode() { std::cerr << "i died!!\n"; } 418 | }; 419 | ``` 420 | 421 | Because this type owns `GC::ptr` instances, we need to specialize `GC::router` for our type. 
422 | 423 | ```cpp 424 | template<> struct GC::router 425 | { 426 | // "normal" router function 427 | static void route(const ListNode &node, router_fn func) 428 | { 429 | // call GC::route() for each of our GC::ptr values 430 | GC::route(node.prev, func); 431 | GC::route(node.next, func); 432 | } 433 | // "mutable" router function 434 | static void route(const ListNode &node, mutable_router_fn) 435 | { 436 | // we don't have any mutable children, so this can be no-op 437 | } 438 | }; 439 | ``` 440 | 441 | That's all the setup we need - from here on it's smooth sailing. Let's construct the doubly-linked list. 442 | 443 | ```cpp 444 | // creates a linked list that thus contains cycles 445 | void foo() 446 | { 447 | // create the first node 448 | GC::ptr root = GC::make(); 449 | 450 | // we'll make 10 links in the chain 451 | GC::ptr *prev = &root; 452 | for (int i = 0; i < 10; ++i) 453 | { 454 | (*prev)->next = GC::make(); 455 | (*prev)->next->prev = *prev; 456 | 457 | prev = &(*prev)->next; 458 | } 459 | } 460 | ``` 461 | 462 | If you run this, you'll find none of the objects are deallocated immediately. This is because we created a bunch of cycles due to making a *doubly*-linked list. As stated above, `cpp-gc` cleans this up via `GC::collect()`. Let's do that now: 463 | 464 | ```cpp 465 | // the function that called foo() 466 | void bar() 467 | { 468 | // we need to call foo() 469 | foo(); 470 | 471 | std::cerr << "\n\ncalling collect():\n\n"; 472 | 473 | // the default collect settings will clean this up automatically after a while 474 | // but let's say we need this to happen immediately for some reason... 475 | GC::collect(); 476 | } 477 | ``` 478 | 479 | As soon as `GC::collect()` is executed, all the cycles will be dealt with and you should get a lot of messages from destructors. It's that easy. But remember, the default auto-collect strategy is time based. You typically won't ever need to call `GC::collect()` explicitly. 480 | 481 | ## Best Practices 482 | 483 | Here we'll go through a couple of tips for getting the maximum performance out of `cpp-gc` while minimizing the chance for error: 484 | 485 | 1. **Performance** - Don't call `GC::collect()` explicitly. If you start calling `GC::collect()` explicitly, there's a pretty good chance you could be calling it in rapid succession. This will do little more than cripple your performance. The only time you should ever call it explicitly is if you for some reason **need** the objects to be destroyed immediately *(which is unlikely)*. 486 | 487 | 1. **Performance** - When possible, use raw pointers. Let's say you have a `GC::ptr>` that you need to pass to a function. Does the function really need to **own** the value or does it just need access to it? In the vast majority of cases, you'll find you only need access to the object. In these cases, you're much better off performance-wise to have the function take a raw pointer instead. This also has the effect of being less restrictive (i.e. you don't need to pass the object as a specific type of smart pointer). *(this same rule applies to other smart pointers like `std::shared_ptr` as well)*. 488 | 489 | 1. **Performance** - If you find you only use a `GC::ptr` instance to point to another object for normal pointer logic (and if you know that reference isn't isn't the only reference to said object) you should use `GC::ptr*` or `GC::ptr&` instead. 
This still lets you refer to the `GC::ptr` object (and what it points to) but doesn't require unnecessary increments/decrements on each and every assignment to/from it. This is demonstrated in the example above, where the end-of-list pointer was a raw pointer to a gc pointer. 490 | 491 | 1. **Performance** - Whenever possible (and reasonable - read on), gc allocate objects together. There's a significant spatial overhead associated with each gc allocation (around 8 pointers' worth per allocation). Thus if you need e.g. 1024 dynamic objects, instead of making 1024 allocations, it might be beneficial to allocate an array of 1024 objects and then alias them from the array individually. This can potentially save a lot of space. The downside of course is that they all alias the same array, so none of the objects in the array (the array itself, really) will be deleted while any of the aliases is still reachable. Another common case: if you need a dynamic `T` and a dynamic `U`, gc allocate e.g. `std::pair` and alias the components. If the objects are related and you know the aliasing problem isn't going to be an issue, I suggest you batch-allocate. 492 | 493 | 1. **Safety** - As mentioned in the section on router functions, if your type owns an object that you would route to but that can be re-pointed or modified in some way (e.g. `std::vector>`, `std::unique_ptr>` etc.), re-pointing or adding/removing etc. must be atomic with respect to the router function routing to its contents. Because of this, you'll generally need to use a mutex to synchronize access to the object's contents. To make sure no one else messes up this safety, such an object should be made private and given atomic accessors if necessary. 494 | 495 | 1. **Safety** - Continuing off the last point, do not create a lone (i.e. not in a struct/class) `GC::ptr` where `T` is a mutable container of `GC::ptr`. Modifying a lone instance of it could be rather dangerous due to not having a built-in mutex and encupsulation with atomic accessors. 496 | 497 | 1. **Safety** - If you're unsure if an owned object is "mutable" or not, assume it is. A false positive is slightly inefficient, but a false negative is undefined behavior. 498 | -------------------------------------------------------------------------------- /makefile: -------------------------------------------------------------------------------- 1 | test-opt: 2 | g++ -o test.exe -Wall -std=c++17 -pthread *.cpp -O4 3 | 4 | test: 5 | g++ -o test.exe -Wall -std=c++17 -pthread *.cpp -O0 6 | 7 | sanitize-opt: 8 | clang++ -o test.exe -Wall -std=c++17 -pthread *.cpp -O3 -fsanitize=undefined -fsanitize=address 9 | 10 | sanitize: 11 | clang++ -o test.exe -Wall -std=c++17 -pthread *.cpp -O0 -fsanitize=undefined -fsanitize=address 12 | 13 | clean: 14 | rm -rf *.o 15 | rm -f *.exe 16 | -------------------------------------------------------------------------------- /test.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include "GarbageCollection.h" 15 | 16 | 17 | std::mutex __io_mutex; 18 | // prints stuff to cerr in a thread-safe fashion 19 | template 20 | void sync_err_print(Args &&...args) 21 | { 22 | std::lock_guard io_lock(__io_mutex); 23 | (std::cout << ... 
<< std::forward(args)); 24 | } 25 | 26 | 27 | struct alignas(16) sse_t { char d[16]; }; 28 | 29 | struct ptr_set 30 | { 31 | GC::ptr a, b, c, d, e, f, g, h; 32 | int val; 33 | 34 | std::shared_ptr thingy; 35 | 36 | std::tuple, int> tuple_thing; 37 | 38 | GC::ptr> mapydoo1; 39 | GC::ptr> mapydoo2; 40 | 41 | GC::ptr doodle_dud_0; 42 | GC::ptr doodle_dud_1; 43 | 44 | ptr_set() : val(0) 45 | { 46 | auto ptr = new std::unordered_map; 47 | ptr->emplace("hello", 67); 48 | ptr->emplace("world", 4765); 49 | 50 | mapydoo1 = GC::adopt(ptr); 51 | mapydoo2 = GC::adopt>(nullptr); 52 | 53 | doodle_dud_0 = GC::adopt(new int[1024], 1024); 54 | doodle_dud_0 = GC::adopt(new int[2048], 2048); 55 | } 56 | }; 57 | template<> struct GC::router 58 | { 59 | static void route(const ptr_set &set, GC::router_fn func) 60 | { 61 | GC::route(set.a, func); 62 | GC::route(set.b, func); 63 | GC::route(set.c, func); 64 | GC::route(set.d, func); 65 | GC::route(set.e, func); 66 | GC::route(set.f, func); 67 | GC::route(set.g, func); 68 | GC::route(set.h, func); 69 | 70 | GC::route(set.val, func); 71 | 72 | GC::route(set.thingy, func); 73 | 74 | GC::route(set.tuple_thing, func); 75 | 76 | GC::route(set.mapydoo1, func); 77 | GC::route(set.mapydoo2, func); 78 | 79 | GC::route(set.doodle_dud_0, func); 80 | GC::route(set.doodle_dud_1, func); 81 | 82 | //GC::route(set.a, set.a); 83 | } 84 | static void route(const ptr_set &set, GC::mutable_router_fn func) {} // no mutable things to route to 85 | }; 86 | 87 | struct ListNode 88 | { 89 | GC::ptr prev; 90 | GC::ptr next; 91 | 92 | ptr_set set; 93 | 94 | // show a message that says we called ctor 95 | ListNode() { std::this_thread::sleep_for(std::chrono::microseconds(4)); } 96 | 97 | // show a message that says we called dtor 98 | ~ListNode() { std::this_thread::sleep_for(std::chrono::microseconds(4)); } 99 | }; 100 | template<> struct GC::router 101 | { 102 | static void route(const ListNode &node, GC::router_fn func) 103 | { 104 | GC::route(node.prev, func); 105 | GC::route(node.next, func); 106 | 107 | GC::route(node.set, func); 108 | } 109 | static void route(const ListNode &node, GC::mutable_router_fn func) // this is correct 110 | //static void route(ListNode node, GC::mutable_router_fn func) // this is wrong - by-value T could cause deadlocking 111 | { 112 | // only routing to set for future-proofing 113 | GC::route(node.set, func); 114 | } 115 | }; 116 | 117 | // creates a linked list that has a cycle 118 | void foo() 119 | { 120 | { 121 | //GC::collect(); 122 | 123 | // create the first node 124 | GC::ptr root = GC::make(); 125 | 126 | // we'll make 10 links in the chain 127 | GC::ptr *prev = &root; 128 | for (int i = 0; i < 10; ++i) 129 | { 130 | (*prev)->next = GC::make(); 131 | (*prev)->next->prev = *prev; 132 | 133 | prev = &(*prev)->next; 134 | } 135 | 136 | // then we'll merege the ends into a cycle 137 | root->prev = *prev; 138 | (*prev)->next = root; 139 | 140 | using std::swap; 141 | 142 | root.swap(*prev); 143 | swap(root, *prev); 144 | 145 | //GC::collect(); 146 | //std::cerr << "\n\n"; 147 | } 148 | } 149 | 150 | template 151 | struct wrap 152 | { 153 | GC::ptr ptr; 154 | }; 155 | template struct GC::router> 156 | { 157 | template static void route(const wrap &w, F fn) 158 | { 159 | GC::route(w.ptr, fn); 160 | } 161 | }; 162 | 163 | struct virtual_type 164 | { 165 | virtual ~virtual_type() {} 166 | }; 167 | struct non_virtual_type 168 | { 169 | 170 | }; 171 | 172 | struct base1 173 | { 174 | int a; 175 | 176 | virtual ~base1() {} 177 | }; 178 | struct base2 179 | { 
180 | int b; 181 | 182 | virtual ~base2() {} 183 | }; 184 | struct derived : base1, base2 185 | { 186 | 187 | }; 188 | 189 | 190 | 191 | 192 | struct TreeNode 193 | { 194 | // left and right sub-trees 195 | GC::ptr left; 196 | GC::ptr right; 197 | 198 | double value; 199 | 200 | enum op_t { val, add, sub, mul, div } op; 201 | }; 202 | template<> struct GC::router 203 | { 204 | template static void route(const TreeNode &node, F func) 205 | { 206 | // route to our GC::ptr instances 207 | GC::route(node.left, func); 208 | GC::route(node.right, func); 209 | // no need to route to anything else 210 | } 211 | }; 212 | 213 | 214 | class SymbolTable 215 | { 216 | private: 217 | std::unordered_map> symbols; 218 | 219 | // we need to route to the contents of symbols, but symbols is a mutable collection. 220 | // we therefore need insert/delete to be synchronous with respect to the router: 221 | mutable std::mutex symbols_mutex; 222 | 223 | // make the particular router class a friend so it can use our private data 224 | friend struct GC::router; 225 | 226 | public: 227 | 228 | // wrapped symbols table type that is safe to modify directly 229 | GC::unordered_map> better_symbols; 230 | 231 | public: 232 | 233 | SymbolTable() = default; 234 | 235 | SymbolTable(const SymbolTable &other) : symbols(other.symbols), better_symbols(other.better_symbols) {} 236 | 237 | SymbolTable &operator=(const SymbolTable &other) 238 | { 239 | if (this != &other) 240 | { 241 | { 242 | std::lock_guard lock(symbols_mutex); 243 | symbols = other.symbols; 244 | } 245 | better_symbols = other.better_symbols; 246 | } 247 | return *this; 248 | } 249 | SymbolTable &operator=(SymbolTable &&other) 250 | { 251 | if (this != &other) 252 | { 253 | { 254 | std::scoped_lock locks(symbols_mutex, other.symbols_mutex); 255 | symbols = std::move(other.symbols); 256 | } 257 | better_symbols = std::move(other.better_symbols); 258 | } 259 | return *this; 260 | } 261 | 262 | public: 263 | void update(std::string name, GC::ptr new_value) 264 | { 265 | { 266 | // modification of the mutable collection of GC::ptr and router must be mutually exclusive 267 | std::lock_guard lock(symbols_mutex); 268 | symbols[name] = new_value; 269 | } 270 | } 271 | void clear() 272 | { 273 | { 274 | std::lock_guard lock(symbols_mutex); 275 | symbols.clear(); 276 | } 277 | better_symbols.clear(); 278 | } 279 | }; 280 | template<> struct GC::router 281 | { 282 | template static void route(const SymbolTable &table, F func) 283 | { 284 | { 285 | // modification of the mutable collection of GC::ptr and router must be mutually exclusive 286 | std::lock_guard lock(table.symbols_mutex); 287 | GC::route(table.symbols, func); 288 | } 289 | GC::route(table.better_symbols, func); 290 | } 291 | }; 292 | 293 | 294 | 295 | 296 | class MaybeTreeNode 297 | { 298 | private: 299 | // the buffer for the object 300 | alignas(TreeNode) char buf[sizeof(TreeNode)]; 301 | bool contains_tree_node = false; 302 | 303 | // because we'll be constructing destucting it on the fly, 304 | // buf is a mutable container of a type we need to route to. 305 | // thus we need to synchronize "re-pointing" it and the router function. 
306 | mutable std::mutex buf_mutex; 307 | 308 | // friend the particular router class so it can access our private data 309 | friend struct GC::router; 310 | 311 | public: 312 | void construct() 313 | { 314 | if (contains_tree_node) throw std::runtime_error("baka"); 315 | 316 | // we need to synchronize with the router 317 | std::lock_guard lock(buf_mutex); 318 | 319 | // construct the object 320 | new (buf) TreeNode; 321 | contains_tree_node = true; 322 | } 323 | void destruct() 324 | { 325 | if (!contains_tree_node) throw std::runtime_error("baka"); 326 | 327 | // we need to synchronize with the router 328 | std::lock_guard lock(buf_mutex); 329 | 330 | // destroy the object 331 | reinterpret_cast(buf)->~TreeNode(); 332 | contains_tree_node = false; 333 | } 334 | }; 335 | template<> struct GC::router 336 | { 337 | template static void route(const MaybeTreeNode &maybe, F func) 338 | { 339 | // this must be synchronized with constructing/destucting the buffer object. 340 | std::lock_guard lock(maybe.buf_mutex); 341 | 342 | // because TreeNode contains GC::ptr objects, we need to route to it, 343 | // but only if the MaybeTreeNode actually has it constructed in the buffer. 344 | if (maybe.contains_tree_node) GC::route(*reinterpret_cast(maybe.buf), func); 345 | } 346 | }; 347 | 348 | 349 | #define COMMA , 350 | 351 | 352 | struct alert_t 353 | { 354 | alert_t() { std::cerr << "ctor\n"; } 355 | ~alert_t() { std::cerr << "dtor\n"; } 356 | }; 357 | 358 | 359 | 360 | 361 | 362 | struct atomic_container 363 | { 364 | GC::atomic_ptr atomic_1; 365 | GC::atomic_ptr atomic_2; 366 | GC::atomic_ptr atomic_3; 367 | GC::atomic_ptr atomic_4; 368 | }; 369 | template<> 370 | struct GC::router 371 | { 372 | template static void route(const atomic_container &atomic, F func) 373 | { 374 | GC::route(atomic.atomic_1, func); 375 | GC::route(atomic.atomic_2, func); 376 | GC::route(atomic.atomic_3, func); 377 | GC::route(atomic.atomic_4, func); 378 | } 379 | }; 380 | 381 | 382 | GC::vector> global_vec_ptr; 383 | GC::atomic_ptr global_atomic_ptr; 384 | 385 | thread_local GC::vector> thread_local_vec_ptr; 386 | 387 | template 388 | std::string tostr(T &&v) 389 | { 390 | std::ostringstream ostr; 391 | ostr << std::forward(v); 392 | return ostr.str(); 393 | } 394 | 395 | 396 | 397 | struct self_ptr 398 | { 399 | std::unique_ptr> p; 400 | 401 | self_ptr() { std::cerr << "self_ptr ctor\n"; } 402 | ~self_ptr() { std::cerr << "self_ptr dtor\n"; } 403 | }; 404 | template<> 405 | struct GC::router 406 | { 407 | template 408 | static void route(const self_ptr &p, F func) 409 | { 410 | // we should really be using a mutex here for the std::unique_ptr but this is just a test and i know it's safe ... 
411 | GC::route(p.p, func); 412 | } 413 | }; 414 | 415 | struct gc_self_ptr 416 | { 417 | GC::unique_ptr> p; 418 | 419 | gc_self_ptr() { std::cerr << "gc_self_ptr ctor\n"; } 420 | ~gc_self_ptr() { std::cerr << "gc_self_ptr dtor\n"; } 421 | }; 422 | template<> 423 | struct GC::router 424 | { 425 | template 426 | static void route(const gc_self_ptr &p, F func) 427 | { 428 | GC::route(p.p, func); 429 | } 430 | }; 431 | 432 | 433 | struct bool_alerter 434 | { 435 | std::atomic &flag; 436 | 437 | explicit bool_alerter(std::atomic &d) : flag(d) { flag = false; } 438 | ~bool_alerter() { flag = true; } 439 | }; 440 | 441 | struct bool_alerter_self_ptr 442 | { 443 | bool_alerter alerter; 444 | GC::ptr self_p; 445 | 446 | explicit bool_alerter_self_ptr(std::atomic &d) : alerter(d) {} 447 | }; 448 | template<> 449 | struct GC::router< bool_alerter_self_ptr> 450 | { 451 | template 452 | static void route(const bool_alerter_self_ptr &obj, F func) 453 | { 454 | GC::route(obj.self_p, func); 455 | } 456 | }; 457 | 458 | // runs statement and asserts that it throws the right type of exception 459 | #define assert_throws(statement, exception) \ 460 | try { statement; std::cerr << "did not throw\n"; assert(false); } \ 461 | catch (const exception&) {} \ 462 | catch (...) { std::cerr << "threw wrong type\n"; assert(false); } 463 | 464 | // runs statement and asserts that it throws (anything) 465 | #define assert_throws_any(statement) \ 466 | try { statement; std::cerr << "did no throw\n"; assert(false); } \ 467 | catch (...) {} 468 | 469 | // runs statement and asserts that it throws (anything) 470 | #define assert_throws_disjunction(statement) \ 471 | try { statement; std::cerr << "did no throw\n"; assert(false); } \ 472 | catch (const GC::disjunction_error&) {} \ 473 | catch (...) { std::cerr << "threw wrong type\n"; assert(false); } 474 | 475 | // runs statement and asserts that it doesn't throw (anything) 476 | #define assert_nothrow(statement) \ 477 | try { statement; } \ 478 | catch (...) { std::cerr << "threw an exception\n"; assert(false); } 479 | 480 | 481 | 482 | // wraps type T but doesn't allow copy/move 483 | template 484 | class stationary 485 | { 486 | public: 487 | 488 | T value; 489 | 490 | template 491 | stationary(Args &&...args) : value(std::forward(args)...) 
{} 492 | 493 | stationary(const stationary&) = delete; 494 | stationary &operator=(const stationary&) = delete; 495 | }; 496 | template 497 | struct GC::router> 498 | { 499 | static constexpr bool is_trivial = GC::has_trivial_router::value; 500 | 501 | template 502 | static void route(const stationary &stat, F func) { GC::route(stat.value, func); } 503 | }; 504 | 505 | 506 | 507 | 508 | 509 | 510 | void spooky_scary_dont_do_this() 511 | { 512 | GC::collect(); 513 | #if DRAGAZO_GARBAGE_COLLECT_USE_IGNORE_COLLECT_IN_WRAPPERS 514 | std::thread([] { GC::collect(); }).join(); 515 | #endif 516 | } 517 | 518 | struct ctor_collect_t 519 | { 520 | ctor_collect_t() { spooky_scary_dont_do_this(); } 521 | }; 522 | template<> 523 | struct GC::router 524 | { 525 | template 526 | static void route(const ctor_collect_t &v, F f) {} 527 | }; 528 | 529 | struct dtor_collect_t 530 | { 531 | ~dtor_collect_t() { spooky_scary_dont_do_this(); } 532 | }; 533 | template<> 534 | struct GC::router 535 | { 536 | template 537 | static void route(const dtor_collect_t &v, F f) {} 538 | }; 539 | 540 | struct ctor_dtor_collect_t 541 | { 542 | ctor_dtor_collect_t() { spooky_scary_dont_do_this(); } 543 | ~ctor_dtor_collect_t() { spooky_scary_dont_do_this(); } 544 | }; 545 | template<> 546 | struct GC::router 547 | { 548 | template 549 | static void route(const ctor_dtor_collect_t &v, F f) {} 550 | }; 551 | 552 | 553 | struct cpy_mov_intrin 554 | { 555 | bool src = false; 556 | 557 | cpy_mov_intrin() { std::cerr << "ctor\n"; } 558 | ~cpy_mov_intrin() { std::cerr << (src ? "SRC dtor\n" : "dtor\n"); } 559 | 560 | cpy_mov_intrin(const cpy_mov_intrin &other) { std::cerr << "cpy ctor\n"; } 561 | cpy_mov_intrin(cpy_mov_intrin &&other) { std::cerr << "mov ctor\n"; } 562 | 563 | cpy_mov_intrin &operator=(const cpy_mov_intrin &other) { std::cerr << "cpy asgn\n"; return *this; } 564 | cpy_mov_intrin &operator=(cpy_mov_intrin &&other) { std::cerr << "mov asgn\n"; return *this; } 565 | }; 566 | void intrin_printer(cpy_mov_intrin, cpy_mov_intrin) 567 | { 568 | std::cerr << "in printer\n"; 569 | } 570 | 571 | void vector_printer(std::vector vec) 572 | { 573 | for (int i : vec) std::cerr << i << ' '; 574 | std::cerr << '\n'; 575 | } 576 | 577 | 578 | void print_str(const GC::ptr &p) 579 | { 580 | if (p) std::cerr << *p << '\n'; 581 | } 582 | 583 | 584 | struct access_gc_at_ctor_t 585 | { 586 | GC::ptr p; 587 | 588 | access_gc_at_ctor_t() { std::cerr << "@@@@ ctor gc accessor\n"; } 589 | ~access_gc_at_ctor_t() { std::cerr << "@@@@ dtor gc accessor\n"; GC::collect(); std::cerr << " -- safe\n"; } 590 | }; 591 | template<> 592 | struct GC::router 593 | { 594 | template 595 | static void route(const access_gc_at_ctor_t &obj, F func) 596 | { 597 | GC::route(obj.p, func); 598 | } 599 | }; 600 | GC::ptr access_gc_at_ctor; 601 | 602 | 603 | struct router_allocator 604 | { 605 | mutable GC::ptr p; 606 | GC::ptr self; 607 | }; 608 | template<> 609 | struct GC::router 610 | { 611 | template 612 | static void route(const router_allocator &r, F func) 613 | { 614 | // simulate router locking a mutex and a mutator in the class locking and allocating 615 | r.p = GC::make(45); 616 | 617 | GC::route(r.p, func); 618 | GC::route(r.self, func); 619 | } 620 | }; 621 | 622 | struct thread_func_t 623 | { 624 | int v; 625 | void foo() {} 626 | }; 627 | 628 | 629 | // begins a timer in a new scope - requires a matching timer end point. 
630 | #define TIMER_BEGIN() { const auto __timer_begin = std::chrono::high_resolution_clock::now(); 631 | // ends the timer in the current scope - should not be used if there is no timer in the current scope. 632 | // units is the (unquoted) name of a std::chrono time unit. name is the name of the timer (a c-style string). 633 | #define TIMER_END(units, name) const auto __timer_end = std::chrono::high_resolution_clock::now(); \ 634 | std::cerr << "\ntimer elapsed - " name ": " << std::chrono::duration_cast(__timer_end - __timer_begin).count() << " " #units "\n"; } 635 | 636 | // represents a trivial gc type - used for forcing wrapped type conversions 637 | struct gc_t {}; 638 | template<> struct GC::router { static void route(const gc_t&) {} }; 639 | constexpr bool operator==(const gc_t&, const gc_t&) { return true; } 640 | constexpr bool operator!=(const gc_t&, const gc_t&) { return true; } 641 | constexpr bool operator<(const gc_t&, const gc_t&) { return true; } 642 | constexpr bool operator<=(const gc_t&, const gc_t&) { return true; } 643 | constexpr bool operator>(const gc_t&, const gc_t&) { return true; } 644 | constexpr bool operator>=(const gc_t&, const gc_t&) { return true; } 645 | 646 | int main() try 647 | { 648 | std::cerr << "\nstart main: " << std::this_thread::get_id() << "\n\n"; 649 | struct _end_logger_t 650 | { 651 | ~_end_logger_t() { std::cerr << "\nend main: " << std::this_thread::get_id() << "\n\n"; } 652 | } _end_logger; 653 | 654 | TIMER_BEGIN(); 655 | 656 | // -- tests that require no background collector to work -- // 657 | 658 | // these tests make the assumption that no other thread is currently performing a collection action. 659 | // these aren't considered binding tests - i.e. these test conditions aren't actually defined to work properly. 660 | // they're merely to make sure that, given no interference, the desired behavior is taking effect. 661 | 662 | GC::strategy(GC::strategies::manual); 663 | GC::sleep_time(std::chrono::milliseconds(1)); 664 | 665 | // make sure that ref count decrements to zero result in the object being deleted. 666 | // additionally, make sure that if a collection is happening in another thread the ref count guarantee is satisfied. 667 | // i.e. if the ref count decs to zero during a collection the object will be destroyed at least by the end of collection. 668 | // this is in the no-background collect section so we can join the parallel collector thread before the assertion. 669 | { 670 | std::atomic flag; 671 | for (int i = 0; i < 4096; ++i) 672 | { 673 | std::thread test_thread([]() 674 | { 675 | try { GC::collect(); } 676 | catch (...) 
{ std::cerr << "\n\nFLAG TESTER EXCEPTION!!\n\n"; assert(false); } 677 | }); 678 | 679 | { 680 | GC::ptr a = GC::make(flag); 681 | } 682 | 683 | test_thread.join(); 684 | assert(flag); 685 | } 686 | } 687 | 688 | // -- all other tests -- // 689 | 690 | GC::strategy(GC::strategies::timed); 691 | 692 | GC::thread(GC::new_disjunction, [] { 693 | GC::ptr p_router_allocator = GC::make(); 694 | p_router_allocator->self = p_router_allocator; 695 | 696 | std::this_thread::sleep_for(std::chrono::seconds(2)); 697 | }).detach(); 698 | 699 | access_gc_at_ctor = GC::make(); 700 | access_gc_at_ctor->p = access_gc_at_ctor; 701 | 702 | { 703 | thread_func_t tt; 704 | 705 | std::invoke(&thread_func_t::foo, tt); 706 | 707 | std::thread(&thread_func_t::foo, tt).join(); 708 | GC::thread(GC::inherit_disjunction, &thread_func_t::foo, tt).join(); 709 | } 710 | 711 | #if DRAGAZO_GARBAGE_COLLECT_DISJUNCTION_SAFETY_CHECKS 712 | 713 | GC::thread(GC::new_disjunction, [] 714 | { 715 | try 716 | { 717 | std::cerr << "\nstarting disjunction exception checks\n"; 718 | 719 | auto ptr_a = GC::make(); 720 | auto ptr_b = GC::make(); 721 | 722 | // ------------------------------------------------- 723 | 724 | std::cerr << "starting asgn test\n"; 725 | 726 | GC::thread(GC::primary_disjunction, [](GC::ptr &a, GC::ptr &b) 727 | { 728 | assert_nothrow(a = b); 729 | }, std::ref(ptr_a), std::ref(ptr_b)).join(); 730 | GC::thread(GC::inherit_disjunction, [](GC::ptr &a, GC::ptr &b) 731 | { 732 | assert_nothrow(a = b); 733 | }, std::ref(ptr_a), std::ref(ptr_b)).join(); 734 | GC::thread(GC::new_disjunction, [](GC::ptr &a, GC::ptr &b) 735 | { 736 | assert_nothrow(a = b); 737 | }, std::ref(ptr_a), std::ref(ptr_b)).join(); 738 | 739 | // ------------------------------------------------- 740 | 741 | std::cerr << "starting asgn new obj test\n"; 742 | 743 | GC::thread(GC::primary_disjunction, [](GC::ptr &a) 744 | { 745 | assert_throws_disjunction(a = GC::make()); 746 | }, std::ref(ptr_a)).join(); 747 | GC::thread(GC::inherit_disjunction, [](GC::ptr &a) 748 | { 749 | assert_nothrow(a = GC::make()); 750 | }, std::ref(ptr_a)).join(); 751 | GC::thread(GC::new_disjunction, [](GC::ptr &a) 752 | { 753 | assert_throws_disjunction(a = GC::make()); 754 | }, std::ref(ptr_a)).join(); 755 | 756 | // -------------------------------------------------- 757 | 758 | std::cerr << "starting swap test a\n"; 759 | 760 | GC::thread(GC::primary_disjunction, [](GC::ptr &a) 761 | { 762 | auto b = GC::make(); 763 | assert_throws_disjunction(a.swap(b)); 764 | }, std::ref(ptr_a)).join(); 765 | GC::thread(GC::inherit_disjunction, [](GC::ptr &a) 766 | { 767 | auto b = GC::make(); 768 | assert_nothrow(a.swap(b)); 769 | }, std::ref(ptr_a)).join(); 770 | GC::thread(GC::new_disjunction, [](GC::ptr &a) 771 | { 772 | auto b = GC::make(); 773 | assert_throws_disjunction(a.swap(b)); 774 | }, std::ref(ptr_a)).join(); 775 | 776 | // -------------------------------------------------- 777 | 778 | std::cerr << "starting swap test b\n"; 779 | 780 | GC::thread(GC::primary_disjunction, [](GC::ptr &a) 781 | { 782 | auto b = GC::make(); 783 | assert_throws_disjunction(b.swap(a)); 784 | }, std::ref(ptr_a)).join(); 785 | GC::thread(GC::inherit_disjunction, [](GC::ptr &a) 786 | { 787 | auto b = GC::make(); 788 | assert_nothrow(b.swap(a)); 789 | }, std::ref(ptr_a)).join(); 790 | GC::thread(GC::new_disjunction, [](GC::ptr &a) 791 | { 792 | auto b = GC::make(); 793 | assert_throws_disjunction(b.swap(a)); 794 | }, std::ref(ptr_a)).join(); 795 | 796 | // 
-------------------------------------------------- 797 | 798 | std::cerr << "starting ctor alias test\n"; 799 | 800 | GC::thread(GC::primary_disjunction, [](GC::ptr &a) 801 | { 802 | assert_throws_disjunction(GC::ptr temp(a)); 803 | }, std::ref(ptr_a)).join(); 804 | GC::thread(GC::inherit_disjunction, [](GC::ptr &a) 805 | { 806 | assert_nothrow(GC::ptr temp(a)); 807 | }, std::ref(ptr_a)).join(); 808 | GC::thread(GC::new_disjunction, [](GC::ptr &a) 809 | { 810 | assert_throws_disjunction(GC::ptr temp(a)); 811 | }, std::ref(ptr_a)).join(); 812 | 813 | // -------------------------------------------------- 814 | 815 | std::cerr << "starting value to reference thread pass - expecting 3:\n"; 816 | 817 | GC::thread(GC::primary_disjunction, [](const GC::ptr &a) 818 | { 819 | assert_nothrow(print_str(a)); 820 | }, GC::make(" -- primary disj")).join(); 821 | GC::thread(GC::inherit_disjunction, [](const GC::ptr &a) 822 | { 823 | assert_nothrow(print_str(a)); 824 | }, GC::make(" -- inherit disj")).join(); 825 | GC::thread(GC::new_disjunction, [](const GC::ptr &a) 826 | { 827 | assert_nothrow(print_str(a)); 828 | }, GC::make(" -- new disj")).join(); 829 | 830 | // -------------------------------------------------- 831 | 832 | std::cerr << "starting value to value thread pass - expecting 1:\n"; 833 | 834 | GC::thread(GC::inherit_disjunction, [](GC::ptr a) 835 | { 836 | assert_nothrow(print_str(a)); 837 | }, GC::make(" -- inherit disj")).join(); 838 | } 839 | catch (...) 840 | { 841 | std::cerr << "\n\nAN EXCEPTION SHOULD NOT HAVE GOTTEN HERE!!\n\n"; 842 | assert(false); 843 | } 844 | }).join(); 845 | 846 | #endif 847 | 848 | { 849 | std::cerr << "starting disjunction deletion test\n"; 850 | std::atomic disjunction_deletion_flag; 851 | 852 | for (int i = 0; i < 16; ++i) 853 | { 854 | GC::thread(GC::new_disjunction, [](std::atomic &flag) 855 | { 856 | try 857 | { 858 | auto p = GC::make(flag); 859 | p->self_p = p; 860 | } 861 | catch (...) 
{ std::cerr << "DISJUNCTION DEL TEST EXCEPTION!!\n"; assert(false); } 862 | }, std::ref(disjunction_deletion_flag)).join(); 863 | 864 | assert(disjunction_deletion_flag); 865 | } 866 | } 867 | 868 | // ----------------------------------------------------------------------------------------------------- 869 | 870 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 871 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 872 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 873 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 874 | static_assert(GC::has_trivial_router>::value, "trivial assumption failure"); 875 | static_assert(GC::has_trivial_router>::value, "trivial assumption failure"); 876 | 877 | static_assert(GC::has_trivial_router>::value, "trivial assumption failure"); 878 | static_assert(!GC::has_trivial_router>::value, "trivial assumption failure"); 879 | 880 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 881 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 882 | static_assert(!GC::has_trivial_router::value, "trivial assumption failure"); 883 | 884 | static_assert(GC::has_trivial_router>::value, "trivial assumption failure"); 885 | static_assert(GC::has_trivial_router, 12>>::value, "trivial assumption failure"); 886 | 887 | static_assert(GC::has_trivial_router>::value, "trivial assumption failure"); 888 | static_assert(GC::has_trivial_router>::value, "trivial assumption failure"); 889 | static_assert(GC::has_trivial_router>::value, "trivial assumption failure"); 890 | static_assert(GC::has_trivial_router>::value, "trivial assumption failure"); 891 | 892 | static_assert(!GC::has_trivial_router::value, "trivial assumption failure"); 893 | static_assert(!GC::has_trivial_router>::value, "trivial assumption failure"); 894 | static_assert(!GC::has_trivial_router>::value, "trivial assumption failure"); 895 | static_assert(!GC::has_trivial_router>>::value, "trivial assumption failure"); 896 | static_assert(!GC::has_trivial_router>>::value, "trivial assumption failure"); 897 | static_assert(!GC::has_trivial_router>>::value, "trivial assumption failure"); 898 | 899 | // ----------------------------------------------------------------------------------------------------- 900 | 901 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 902 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 903 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 904 | 905 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 906 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 907 | static_assert(GC::has_trivial_router::value, "trivial assumption failure"); 908 | 909 | static_assert(!GC::has_trivial_router::value, "trivial assumption failure"); 910 | static_assert(!GC::has_trivial_router::value, "trivial assumption failure"); 911 | static_assert(!GC::has_trivial_router::value, "trivial assumption failure"); 912 | 913 | static_assert(!GC::has_trivial_router::value, "trivial assumption failure"); 914 | static_assert(!GC::has_trivial_router::value, "trivial assumption failure"); 915 | static_assert(!GC::has_trivial_router::value, "trivial assumption failure"); 916 | 917 | // ----------------------------------------------------------------------------------------------------- 918 | 919 | 
static_assert(GC::all_have_trivial_routers<>::value, "trivial assumption failure"); 920 | 921 | // ----------------------------------------------------------------------------------------------------- 922 | 923 | static_assert(std::is_same, std::vector>::value, "smart wrapper opt check"); 924 | static_assert(std::is_same, std::vector>::value, "smart wrapper opt check"); 925 | static_assert(std::is_same, std::unique_ptr>::value, "smart wrapper opt check"); 926 | static_assert(std::is_same, std::unique_ptr>::value, "smart wrapper opt check"); 927 | 928 | static_assert(std::is_same, std::stack>::value, "smart wrapper opt check"); 929 | static_assert(std::is_same>, std::queue>>::value, "smart wrapper opt check"); 930 | static_assert(std::is_same>, std::priority_queue>>::value, "smart wrapper opt check"); 931 | 932 | static_assert(!std::is_same, std::unique_ptr>::value, "smart wrapper opt check"); 933 | static_assert(!std::is_same, std::list>::value, "smart wrapper opt check"); 934 | 935 | static_assert(!std::is_same, std::stack>::value, "smart wrapper opt check"); 936 | static_assert(!std::is_same>, std::queue>>::value, "smart wrapper opt check"); 937 | static_assert(!std::is_same>, std::priority_queue>>::value, "smart wrapper opt check"); 938 | 939 | // ----------------------------------------------------------------------------------------------------- 940 | 941 | static_assert(std::is_same>, GC::make_wrapped_t>>::value, "wrapped const test"); 942 | static_assert(std::is_same>, GC::make_wrapped_t>>::value, "wrapped const test"); 943 | static_assert(std::is_same>, GC::make_wrapped_t>>::value, "wrapped const test"); 944 | static_assert(std::is_same>, const GC::make_wrapped_t>>::value, "wrapped const test"); 945 | static_assert(std::is_same>, const volatile GC::make_wrapped_t>>::value, "wrapped const test"); 946 | static_assert(std::is_same>, const volatile GC::make_wrapped_t>>::value, "wrapped const test"); 947 | static_assert(std::is_same>, GC::make_wrapped_t>>::value, "wrapped const test"); 948 | 949 | // ----------------------------------------------------------------------------------------------------- 950 | 951 | static_assert(std::is_same>::value, "unwrapped primitive test"); 952 | static_assert(std::is_same>::value, "unwrapped primitive test"); 953 | static_assert(std::is_same>::value, "unwrapped primitive test"); 954 | static_assert(std::is_same>::value, "unwrapped primitive test"); 955 | 956 | static_assert(std::is_same>::value, "wrapped primitive test"); 957 | static_assert(std::is_same>::value, "wrapped primitive test"); 958 | static_assert(std::is_same>::value, "wrapped primitive test"); 959 | static_assert(std::is_same>::value, "wrapped primitive test"); 960 | 961 | static_assert(std::is_same>::value, "unwrapped primitive test"); 962 | static_assert(std::is_same>::value, "unwrapped primitive test"); 963 | static_assert(std::is_same>::value, "unwrapped primitive test"); 964 | static_assert(std::is_same>::value, "unwrapped primitive test"); 965 | 966 | static_assert(std::is_same>::value, "wrapped primitive test"); 967 | static_assert(std::is_same>::value, "wrapped primitive test"); 968 | static_assert(std::is_same>::value, "wrapped primitive test"); 969 | static_assert(std::is_same>::value, "wrapped primitive test"); 970 | 971 | static_assert(std::is_same, GC::make_unwrapped_t>>::value, "unwrapped trivial test"); 972 | static_assert(std::is_same, GC::make_unwrapped_t>>::value, "unwrapped trivial test"); 973 | static_assert(std::is_same, GC::make_unwrapped_t>>::value, "unwrapped 
trivial test"); 974 | static_assert(std::is_same, GC::make_unwrapped_t>>::value, "unwrapped trivial test"); 975 | 976 | static_assert(std::is_same, GC::make_wrapped_t>>::value, "wrapped trivial test"); 977 | static_assert(std::is_same, GC::make_wrapped_t>>::value, "wrapped trivial test"); 978 | static_assert(std::is_same, GC::make_wrapped_t>>::value, "wrapped trivial test"); 979 | static_assert(std::is_same, GC::make_wrapped_t>>::value, "wrapped trivial test"); 980 | 981 | // ----------------------------------------------------------------------------------------------------- 982 | 983 | static_assert(GC::has_trivial_router>::value, "trivial test"); 984 | static_assert(GC::has_trivial_router>::value, "trivial test"); 985 | static_assert(GC::has_trivial_router>::value, "trivial test"); 986 | static_assert(GC::has_trivial_router>::value, "trivial test"); 987 | 988 | static_assert(!GC::has_trivial_router::value, "trivial test"); 989 | static_assert(!GC::has_trivial_router>::value, "trivial test"); 990 | static_assert(!GC::has_trivial_router>::value, "trivial test"); 991 | static_assert(!GC::has_trivial_router>::value, "trivial test"); 992 | static_assert(!GC::has_trivial_router>::value, "trivial test"); 993 | static_assert(!GC::has_trivial_router>::value, "trivial test"); 994 | static_assert(!GC::has_trivial_router>::value, "trivial test"); 995 | 996 | static_assert(std::is_same, GC::make_wrapped_t>>::value, "wrapped variant equivalence"); 997 | static_assert(std::is_same, GC::make_unwrapped_t>>::value, "wrapped variant equivalence"); 998 | static_assert(!std::is_same, GC::make_wrapped_t>>::value, "wrapped variant equivalence"); 999 | static_assert(std::is_same, GC::make_unwrapped_t>>::value, "wrapped variant equivalence"); 1000 | 1001 | static_assert(std::is_same, GC::variant>::value, "wrapped variant equivalence"); 1002 | static_assert(!std::is_same, GC::variant>::value, "wrapped variant equivalence"); 1003 | static_assert(!std::is_same, GC::variant>::value, "wrapped variant equivalence"); 1004 | 1005 | // ----------------------------------------------------------------------------------------------------- 1006 | 1007 | static_assert(GC::has_trivial_router>::value, "trivial test"); 1008 | static_assert(GC::has_trivial_router>::value, "trivial test"); 1009 | static_assert(GC::has_trivial_router>::value, "trivial test"); 1010 | static_assert(GC::has_trivial_router>::value, "trivial test"); 1011 | static_assert(GC::has_trivial_router>::value, "trivial test"); 1012 | 1013 | static_assert(!GC::has_trivial_router::value, "trivial test"); 1014 | static_assert(!GC::has_trivial_router>::value, "trivial test"); 1015 | static_assert(!GC::has_trivial_router>::value, "trivial test"); 1016 | 1017 | static_assert(std::is_same, GC::optional>::value, "wrapped optional equivalence"); 1018 | static_assert(std::is_same, GC::optional>::value, "wrapped optional equivalence"); 1019 | static_assert(std::is_same, GC::optional>::value, "wrapped optional equivalence"); 1020 | static_assert(!std::is_same, GC::optional>::value, "wrapped optional equivalence"); 1021 | 1022 | // ----------------------------------------------------------------------------------------------------- 1023 | 1024 | std::cerr << "\n-------- ctors --------\n"; 1025 | { 1026 | GC::ptr sp = GC::make(); 1027 | GC::ptr gcsp = GC::make(); 1028 | 1029 | sp->p = std::make_unique(sp); 1030 | gcsp->p = std::make_unique(gcsp); 1031 | } 1032 | std::cerr << "-------- collect 1 --------\n"; 1033 | GC::collect(); 1034 | std::cerr << "-------- collect 2 - 
SHOULD BE EMPTY --------\n"; 1035 | GC::collect(); 1036 | std::cerr << "-------- end --------\n\n"; 1037 | 1038 | 1039 | #if DRAGAZO_GARBAGE_COLLECT_EXTRA_UND_CHECKS 1040 | { 1041 | GC::ptr sink; // sink location to avoid invoking nodiscard 1042 | 1043 | derived *d = new derived; 1044 | base1 *b1 = new base1; 1045 | base2 *b2 = new base2; 1046 | 1047 | assert_nothrow(sink = GC::adopt(d)); 1048 | assert_nothrow(sink = GC::adopt(b1)); 1049 | assert_nothrow(sink = GC::adopt(b2)); 1050 | 1051 | base1 *d_b1 = new derived; 1052 | base2 *d_b2 = new derived; 1053 | 1054 | assert_throws(sink = GC::adopt(d_b1), std::invalid_argument); 1055 | assert_throws(sink = GC::adopt(d_b2), std::invalid_argument); 1056 | } 1057 | #endif 1058 | 1059 | 1060 | GC::ptr> stationary_test = GC::make>(65); 1061 | stationary_test->value = 75; 1062 | 1063 | 1064 | GC::ptr atomic_container_obj = GC::make(); 1065 | 1066 | atomic_container_obj->atomic_1 = GC::make(1.1); 1067 | atomic_container_obj->atomic_2 = GC::make(2.2); 1068 | atomic_container_obj->atomic_3 = GC::make(3.3); 1069 | atomic_container_obj->atomic_4 = GC::make(4.4); 1070 | 1071 | GC::collect(); 1072 | 1073 | global_atomic_ptr = nullptr; 1074 | for (int i = 0; i < 8; ++i) global_vec_ptr.emplace_back(); 1075 | 1076 | for (int i = 0; i < 8; ++i) thread_local_vec_ptr.emplace_back(); 1077 | 1078 | GC::thread(GC::new_disjunction, [] 1079 | { 1080 | for (int i = 0; i < 8; ++i) thread_local_vec_ptr.emplace_back(); 1081 | }).detach(); 1082 | 1083 | GC::ptr arr_ptr_new = GC::make(); 1084 | assert(arr_ptr_new != nullptr); 1085 | 1086 | GC::collect(); 1087 | 1088 | std::cerr << "------ SHOULD BE SELF-CONTAINED ------\n"; 1089 | { 1090 | GC::ptr holder; 1091 | 1092 | { 1093 | auto arr = GC::make(2); 1094 | holder = arr.alias(2); 1095 | } 1096 | std::cerr << "destroyed array - element dtors should follow:\n"; 1097 | } 1098 | std::cerr << "----------------- end ----------------\n\n"; 1099 | 1100 | { // -- array-form GC::ptr tests -- // 1101 | 1102 | static_assert(std::is_same>().get())>::value, "array-form ptr type error"); 1103 | static_assert(std::is_same>().get())>::value, "array-form ptr type error"); 1104 | static_assert(std::is_same>().get())>::value, "array-form ptr type error"); 1105 | static_assert(std::is_same>().get())>::value, "array-form ptr type error"); 1106 | 1107 | GC::ptr arr_test_0 = GC::make(); 1108 | GC::ptr arr_test_1 = GC::make(); 1109 | GC::ptr arr_test_2 = GC::make(12); 1110 | GC::ptr arr_test_3 = GC::make(14); 1111 | GC::ptr arr_test_4 = GC::make(16); 1112 | 1113 | static_assert(std::is_same::value, "array-form ptr error"); 1114 | static_assert(std::is_same::value, "array-form ptr error"); 1115 | static_assert(std::is_same::value, "array-form ptr error"); 1116 | static_assert(std::is_same::value, "array-form ptr error"); 1117 | static_assert(std::is_same::value, "array-form ptr error"); 1118 | 1119 | GC::ptr sink; 1120 | GC::ptr csink; 1121 | GC::ptr vsink; 1122 | GC::ptr cvsink; 1123 | 1124 | GC::ptr arr_test_5; 1125 | 1126 | GC::ptr arr_sub_0_0 = GC::alias(*arr_test_0 + 4, arr_test_0); 1127 | 1128 | assert(arr_sub_0_0 != nullptr); 1129 | 1130 | GC::ptr arr_sub_1_0 = GC::alias(*arr_test_1 + 2, arr_test_0); 1131 | GC::ptr arr_sub_1_1 = GC::alias(&(*arr_test_1)[2], arr_test_0); 1132 | 1133 | assert(arr_sub_1_0 != nullptr); 1134 | assert(arr_sub_1_0 == arr_sub_1_1); 1135 | 1136 | GC::ptr arr_sub_1_2 = GC::alias(*(*arr_test_1 + 2) + 3, arr_test_1); 1137 | GC::ptr arr_sub_1_3 = GC::alias((*arr_test_1)[2] + 3, arr_test_1); 1138 | GC::ptr arr_sub_1_4 = 
GC::alias(&(*arr_test_1)[2][3], arr_test_1); 1139 | 1140 | assert(arr_sub_1_2 != nullptr); 1141 | assert(arr_sub_1_2 == arr_sub_1_3 && arr_sub_1_3 == arr_sub_1_4); 1142 | 1143 | GC::ptr sc_test_0; 1144 | 1145 | sink = GC::reinterpretCast(arr_test_0); 1146 | vsink = GC::reinterpretCast(arr_test_0); 1147 | cvsink = GC::constCast(arr_test_0); 1148 | csink = GC::constCast(arr_test_4); 1149 | cvsink = GC::staticCast(arr_test_4); 1150 | csink = GC::staticCast(sc_test_0); 1151 | } 1152 | 1153 | GC::ptr> stack_ptr = GC::make>(); 1154 | GC::ptr> queue_ptr = GC::make>(); 1155 | GC::ptr> priority_queue_ptr = GC::make>(); 1156 | 1157 | assert(stack_ptr == stack_ptr.get()); 1158 | assert(queue_ptr == queue_ptr.get()); 1159 | assert(priority_queue_ptr == priority_queue_ptr.get()); 1160 | 1161 | for (int i = 0; i < 8; ++i) 1162 | { 1163 | stack_ptr->push(i); 1164 | queue_ptr->push(i); 1165 | priority_queue_ptr->push(i); 1166 | } 1167 | 1168 | GC::unique_ptr gc_uint(new int(77)); 1169 | 1170 | GC::ptr non_const_arr = GC::make(16); 1171 | GC::ptr const_arr = non_const_arr; 1172 | 1173 | assert(GC::constCast(non_const_arr) == const_arr); 1174 | assert(GC::constCast(const_arr) == non_const_arr); 1175 | 1176 | assert(gc_uint != nullptr); 1177 | 1178 | GC::ptr void_p_test_1 = non_const_arr; 1179 | GC::ptr void_p_test_2 = non_const_arr; 1180 | GC::ptr void_p_test_3 = non_const_arr; 1181 | GC::ptr void_p_test_4 = non_const_arr; 1182 | 1183 | static_assert(std::is_same::element_type, void>::value, "ptr elem type wrong"); 1184 | static_assert(std::is_same::value, "ptr get type wrong"); 1185 | assert(void_p_test_1.get() == non_const_arr.get()); 1186 | 1187 | assert(GC::reinterpretCast(void_p_test_1) == non_const_arr); 1188 | 1189 | GC::ptr void_p_test_5 = const_arr; 1190 | GC::ptr void_p_test_6 = const_arr; 1191 | 1192 | GC::vector gc_vec; 1193 | gc_vec.emplace_back(17); 1194 | 1195 | assert(gc_vec == gc_vec); 1196 | 1197 | std::allocator int_alloc; 1198 | 1199 | GC::vector gc_vec2(std::move(gc_vec), int_alloc); 1200 | 1201 | GC::vector gc_float_vec = { 1, 2, 3, 4 }; 1202 | GC::deque gc_float_deq = { 1, 2, 3, 4 }; 1203 | 1204 | gc_float_deq.push_front(178); 1205 | gc_vec2.pop_back(); 1206 | 1207 | GC::ptr> ptr_unique_ptr = GC::make>(new int(69)); 1208 | **ptr_unique_ptr = 36; 1209 | *ptr_unique_ptr = std::make_unique(345); 1210 | 1211 | int *pint = ptr_unique_ptr.get()->get(); 1212 | *pint = 123; 1213 | 1214 | assert(**ptr_unique_ptr == 123); 1215 | 1216 | auto gc_flist = new GC::forward_list(gc_float_deq.begin(), gc_float_deq.end()); 1217 | 1218 | GC::ptr> ptr_gc_flist = GC::adopt>(gc_flist); 1219 | 1220 | ptr_gc_flist->push_front(111222333.444f); 1221 | 1222 | GC::ptr> ptr_gc_list = GC::make>(); 1223 | ptr_gc_list->emplace_front(17.f); 1224 | ptr_gc_list->push_back(12.f); 1225 | 1226 | GC::ptr> pset = GC::make>(gc_float_vec.begin(), gc_float_vec.end()); 1227 | 1228 | pset->insert(3.14159f); 1229 | 1230 | GC::ptr> puset = GC::make>(pset->begin(), pset->end()); 1231 | 1232 | puset->insert(2.71828f); 1233 | 1234 | GC::ptr> pmset = GC::make>(puset->begin(), puset->end()); 1235 | 1236 | pmset->insert(std::sqrt(2.0f)); 1237 | 1238 | GC::ptr> pumset = GC::make>(pmset->begin(), pmset->end()); 1239 | 1240 | pumset->insert(std::sqrt(10.0f)); 1241 | 1242 | { // -- variant tests -- // 1243 | 1244 | std::variant std_variant = 5.6f; 1245 | GC::variant gc_variant(12); 1246 | std_variant = 6; 1247 | 1248 | auto std_variant_2 = std_variant; 1249 | auto std_variant_3 = std::move(std_variant); 1250 | 1251 | gc_variant = 
std_variant; 1252 | gc_variant = std::move(std_variant); 1253 | 1254 | std_variant = 5; 1255 | gc_variant = std_variant; 1256 | 1257 | assert(std::get<0>(std_variant) == 5); 1258 | assert(std::get<0>(gc_variant) == 5); 1259 | 1260 | assert(std::get(std_variant) == 5); 1261 | assert(std::get(gc_variant) == 5); 1262 | 1263 | assert(gc_variant.get<0>() == 5); 1264 | assert(gc_variant.get() == 5); 1265 | 1266 | assert(std::get_if<0>(&gc_variant) != nullptr); 1267 | assert(*std::get_if<0>(&gc_variant) == 5); 1268 | assert(std::get_if<1>(&gc_variant) == nullptr); 1269 | assert(std::get_if<2>(&gc_variant) == nullptr); 1270 | 1271 | assert(std::get_if(&gc_variant) != nullptr); 1272 | assert(*std::get_if(&gc_variant) == 5); 1273 | assert(std::get_if(&gc_variant) == nullptr); 1274 | assert(std::get_if(&gc_variant) == nullptr); 1275 | 1276 | std::get(gc_variant) = std::get<0>(gc_variant); 1277 | std::get<0>(std_variant) = std::get(std::move(gc_variant)); 1278 | std::get<0>(gc_variant) = std::get(std::move(std_variant)); 1279 | 1280 | assert(std::get_if<0>((decltype(std_variant)*)nullptr) == nullptr); 1281 | assert(std::get_if((decltype(std_variant)*)nullptr) == nullptr); 1282 | 1283 | assert(std::get_if<0>((decltype(gc_variant)*)nullptr) == nullptr); 1284 | assert(std::get_if((decltype(gc_variant)*)nullptr) == nullptr); 1285 | 1286 | assert(std::holds_alternative(std_variant)); 1287 | assert(std::holds_alternative(gc_variant)); 1288 | 1289 | assert(!std::holds_alternative(std_variant)); 1290 | assert(!std::holds_alternative(gc_variant)); 1291 | 1292 | assert(!std::holds_alternative(std_variant)); 1293 | assert(!std::holds_alternative(gc_variant)); 1294 | 1295 | static_assert(!GC::has_trivial_router::value, "trivial router error"); 1296 | static_assert(std::is_same, GC::variant>::value, "wrapped variant error"); 1297 | static_assert(!std::is_same, GC::variant>::value, "wrapped variant error"); 1298 | 1299 | GC::variant var_cmp_1(std::in_place_index<0>, 42); 1300 | GC::variant var_cmp_2(std::in_place_index<1>, 42); 1301 | 1302 | assert(var_cmp_1.index() == 0); 1303 | assert(var_cmp_2.index() == 1); 1304 | 1305 | assert(std::get<0>(var_cmp_1) == 42); 1306 | assert(std::get<1>(var_cmp_2) == 42); 1307 | 1308 | assert(var_cmp_1 != var_cmp_2); 1309 | assert(var_cmp_1 < var_cmp_2); 1310 | assert(var_cmp_1 <= var_cmp_2); 1311 | 1312 | assert(var_cmp_2 != var_cmp_1); 1313 | assert(var_cmp_2 > var_cmp_1); 1314 | assert(var_cmp_2 >= var_cmp_1); 1315 | 1316 | static_assert(std::variant_size_v> == 3, "variant size error"); 1317 | static_assert(std::variant_size_v> == 3, "variant size error"); 1318 | static_assert(std::variant_size_v> == 3, "variant size error"); 1319 | static_assert(std::variant_size_v> == 3, "variant size error"); 1320 | 1321 | static_assert(std::variant_size_v> == 3, "variant size error"); 1322 | static_assert(std::variant_size_v> == 3, "variant size error"); 1323 | static_assert(std::variant_size_v> == 3, "variant size error"); 1324 | static_assert(std::variant_size_v> == 3, "variant size error"); 1325 | 1326 | static_assert(std::is_same>>::value, "variant alternative error"); 1327 | static_assert(std::is_same>>::value, "variant alternative error"); 1328 | static_assert(std::is_same>>::value, "variant alternative error"); 1329 | 1330 | static_assert(std::is_same>>::value, "variant alternative error"); 1331 | static_assert(std::is_same>>::value, "variant alternative error"); 1332 | static_assert(std::is_same>>::value, "variant alternative error"); 1333 | 1334 | 
static_assert(std::is_same>>::value, "variant alternative error"); 1335 | static_assert(std::is_same>>::value, "variant alternative error"); 1336 | static_assert(std::is_same>>::value, "variant alternative error"); 1337 | 1338 | static_assert(std::is_same>>::value, "variant alternative error"); 1339 | static_assert(std::is_same>>::value, "variant alternative error"); 1340 | static_assert(std::is_same>>::value, "variant alternative error"); 1341 | 1342 | __gc_variant mgf; 1343 | std::hash variant_hasher; 1344 | variant_hasher(mgf); 1345 | 1346 | const decltype(std_variant) &gc_variant_ref = gc_variant; 1347 | assert(std::holds_alternative(gc_variant_ref)); 1348 | 1349 | auto dyn_gc_variant = GC::make>(); 1350 | assert(dyn_gc_variant->index() == 0); 1351 | *dyn_gc_variant = *dyn_gc_variant; 1352 | *dyn_gc_variant = std::move(*dyn_gc_variant); 1353 | } 1354 | 1355 | { // -- optional tests -- // 1356 | auto sym_1 = GC::make>(); 1357 | auto sym_2 = GC::make>(); 1358 | 1359 | assert(!sym_1->has_value()); 1360 | assert(!sym_2->has_value()); 1361 | 1362 | sym_1->emplace(); 1363 | sym_2->emplace(); 1364 | 1365 | assert(sym_1->has_value()); 1366 | assert(sym_2->has_value()); 1367 | 1368 | sym_1->reset(); 1369 | sym_2->reset(); 1370 | 1371 | assert(!sym_1->has_value()); 1372 | assert(!sym_2->has_value()); 1373 | 1374 | auto sym_3 = GC::make>(16); 1375 | auto sym_4 = GC::make>(32); 1376 | 1377 | assert(sym_3->has_value()); 1378 | assert(sym_4->has_value()); 1379 | 1380 | assert(sym_3->value() == 16); 1381 | assert(sym_4->value() == 32); 1382 | 1383 | assert(*sym_3 < *sym_4); 1384 | assert(*sym_3 <= *sym_4); 1385 | assert(*sym_4 > *sym_3); 1386 | assert(*sym_4 >= *sym_3); 1387 | assert(*sym_4 != *sym_3); 1388 | assert(!(*sym_4 == *sym_3)); 1389 | 1390 | *sym_3 = 5; 1391 | *sym_4 = 5; 1392 | 1393 | assert(sym_3->has_value()); 1394 | assert(sym_4->has_value()); 1395 | 1396 | assert(sym_3->value() == 5); 1397 | assert(sym_4->value() == 5); 1398 | 1399 | assert(!(*sym_3 < *sym_4)); 1400 | assert(*sym_3 <= *sym_4); 1401 | assert(!(*sym_4 > *sym_3)); 1402 | assert(*sym_4 >= *sym_3); 1403 | assert(*sym_4 == *sym_3); 1404 | assert(!(*sym_4 != *sym_3)); 1405 | 1406 | *sym_3 = std::nullopt; 1407 | *sym_4 = std::nullopt; 1408 | 1409 | assert(!sym_1->has_value()); 1410 | assert(!sym_2->has_value()); 1411 | } 1412 | 1413 | GC::ptr> pmap = GC::make>(); 1414 | pmap->emplace(0, "zero"); 1415 | pmap->emplace(1, "one"); 1416 | pmap->emplace(2, "two"); 1417 | pmap->emplace(3, "three"); 1418 | pmap->emplace(4, "four"); 1419 | pmap->emplace(5, "five"); 1420 | pmap->emplace(6, "six"); 1421 | pmap->emplace(7, "seven"); 1422 | pmap->emplace(8, "eight"); 1423 | pmap->emplace(8, "eight-repeat"); 1424 | pmap->emplace(9, "nine"); 1425 | pmap->emplace(10, "ten"); 1426 | 1427 | GC::ptr> pmmap = GC::make>(); 1428 | pmmap->emplace(0, "zero"); 1429 | pmmap->emplace(1, "one"); 1430 | pmmap->emplace(2, "two"); 1431 | pmmap->emplace(3, "three"); 1432 | pmmap->emplace(4, "four"); 1433 | pmmap->emplace(5, "five"); 1434 | pmmap->emplace(6, "six"); 1435 | pmmap->emplace(7, "seven"); 1436 | pmmap->emplace(8, "eight"); 1437 | pmmap->emplace(8, "eight-repeat"); 1438 | pmmap->emplace(9, "nine"); 1439 | pmmap->emplace(10, "ten"); 1440 | 1441 | GC::ptr> pumap = GC::make>(); 1442 | pumap->emplace(0, "zero"); 1443 | pumap->emplace(1, "one"); 1444 | pumap->emplace(2, "two"); 1445 | pumap->emplace(3, "three"); 1446 | pumap->emplace(4, "four"); 1447 | pumap->emplace(5, "five"); 1448 | pumap->emplace(6, "six"); 1449 | pumap->emplace(7, "seven"); 1450 | 
pumap->emplace(8, "eight"); 1451 | pumap->emplace(8, "eight-repeat"); 1452 | pumap->emplace(9, "nine"); 1453 | pumap->emplace(10, "ten"); 1454 | 1455 | GC::ptr> pummap = GC::make>(); 1456 | pummap->emplace(0, "zero"); 1457 | pummap->emplace(1, "one"); 1458 | pummap->emplace(2, "two"); 1459 | pummap->emplace(3, "three"); 1460 | pummap->emplace(4, "four"); 1461 | pummap->emplace(5, "five"); 1462 | pummap->emplace(6, "six"); 1463 | pummap->emplace(7, "seven"); 1464 | pummap->emplace(8, "eight"); 1465 | pummap->emplace(8, "eight-repeat"); 1466 | pummap->emplace(9, "nine"); 1467 | pummap->emplace(10, "ten"); 1468 | 1469 | { 1470 | auto i1 = GC::make(); 1471 | auto i2 = GC::make(); 1472 | auto i3 = GC::make(); 1473 | 1474 | assert(i1 && i2 && i3); 1475 | 1476 | i1->left = i1; 1477 | 1478 | GC::collect(); 1479 | 1480 | GC::ptr ci1 = i1; 1481 | GC::ptr ci2 = i2; 1482 | GC::ptr ci3 = i3; 1483 | 1484 | GC::ptr _ci1 = GC::constCast(i1); 1485 | GC::ptr _ci2 = GC::constCast(i2); 1486 | GC::ptr _ci3 = GC::constCast(i3); 1487 | 1488 | GC::ptr nci1 = GC::constCast(ci1); 1489 | GC::ptr nci2 = GC::constCast(ci2); 1490 | GC::ptr nci3 = GC::constCast(ci3); 1491 | } 1492 | GC::collect(); 1493 | 1494 | GC::ptr>> merp = GC::make>>(); 1495 | 1496 | GC::ptr node_no_val; 1497 | 1498 | GC::ptr ptr_set_test = GC::make(); 1499 | GC::ptr node_test = GC::make(); 1500 | 1501 | merp->emplace(1, GC::make()); 1502 | merp->emplace(2, GC::make()); 1503 | merp->emplace(3, GC::make()); 1504 | merp->emplace(4, GC::make()); 1505 | 1506 | GC::ptr not_arr = GC::make(); 1507 | GC::ptr yes_arr = GC::make(16); 1508 | 1509 | (**yes_arr)[1][5] = 7; 1510 | yes_arr[2][6][1][5] = 7; 1511 | ****yes_arr = 7; 1512 | 1513 | auto bound_array = GC::make(); 1514 | auto unbound_array = GC::make(5); 1515 | 1516 | (*bound_array)[0] = 12; 1517 | unbound_array[0] = 12; 1518 | 1519 | auto v_test1 = std::make_unique>(16); 1520 | auto v_test2 = std::make_shared>(16); 1521 | auto v_test3 = GC::make>(16); 1522 | 1523 | assert(v_test1->size() == 16); 1524 | assert(v_test2->size() == 16); 1525 | assert(v_test3->size() == 16); 1526 | 1527 | GC::atomic_ptr atomic_test_0; 1528 | GC::atomic_ptr atomic_test_1; 1529 | 1530 | atomic_test_0 = GC::make(2.718f); 1531 | atomic_test_1 = GC::make(3.141f); 1532 | 1533 | using std::swap; 1534 | atomic_test_0.swap(atomic_test_1); 1535 | swap(atomic_test_0, atomic_test_1); 1536 | 1537 | GC::collect(); 1538 | 1539 | GC::ptr arr_of_ptr[16]; 1540 | 1541 | assert(std::is_default_constructible>::value); 1542 | 1543 | { 1544 | GC::ptr[][2][2]> arr_ptr = GC::make[][2][2]>(2); 1545 | 1546 | for (int i = 0; i < 2; ++i) 1547 | for (int j = 0; j < 2; ++j) 1548 | for (int k = 0; k < 2; ++k) 1549 | arr_ptr[i][j][k] = GC::make((i + j)*j + i * j*k + k * i + k + 1); 1550 | 1551 | for (int i = 0; i < 2; ++i) 1552 | for (int j = 0; j < 2; ++j) 1553 | for (int k = 0; k < 2; ++k) 1554 | { 1555 | assert(*arr_ptr[i][j][k] == (i + j)*j + i * j*k + k * i + k + 1); 1556 | } 1557 | 1558 | GC::collect(); 1559 | } 1560 | 1561 | auto sse_t_obj = GC::make(); 1562 | 1563 | GC::ptr ip = GC::make(46); 1564 | GC::ptr ip_self = ip; 1565 | 1566 | GC::ptr const_ip = GC::make(47); 1567 | 1568 | GC::ptr const_2 = ip; 1569 | GC::ptr const_self = const_2; 1570 | 1571 | GC::ptr>> p = GC::make>>(); 1572 | p->ptr = GC::make>(); 1573 | p->ptr->ptr = GC::make(); 1574 | 1575 | ListNode n; 1576 | 1577 | GC::ptr _null_ctor_test1{}; 1578 | GC::ptr null_ctor_test1; 1579 | GC::ptr _null_ctor_test2(nullptr); 1580 | GC::ptr null_ctor_test2 = nullptr; 1581 | 1582 | 
std::shared_ptr _otest1; 1583 | std::shared_ptr otest1 = nullptr; 1584 | std::unique_ptr _otest2; 1585 | std::unique_ptr otest2 = nullptr; 1586 | 1587 | GC::ptr dp = GC::make(); 1588 | GC::ptr bp1 = GC::make(); 1589 | GC::ptr bp2 = GC::make(); 1590 | 1591 | assert(dp && bp1 && bp2); 1592 | 1593 | auto interp_1 = GC::reinterpretCast(dp); 1594 | auto interp_2 = GC::reinterpretCast(dp); 1595 | auto interp_3 = GC::reinterpretCast(dp); 1596 | auto interp_4 = GC::reinterpretCast(dp); 1597 | auto interp_5 = GC::reinterpretCast(dp); 1598 | 1599 | assert(interp_1.get() == (void*)dp.get()); 1600 | assert(interp_2.get() == (void*)dp.get()); 1601 | assert(interp_3.get() == (void*)dp.get()); 1602 | assert(interp_4.get() == (void*)dp.get()); 1603 | assert(interp_5.get() == (void*)dp.get()); 1604 | 1605 | auto dyn_cast1 = GC::dynamicCast(bp1); 1606 | auto dyn_cast2 = GC::dynamicCast(bp1); 1607 | 1608 | assert(dyn_cast1 == nullptr); 1609 | assert(dyn_cast2 == nullptr); 1610 | 1611 | dp->a = 123; 1612 | dp->b = 456; 1613 | 1614 | GC::ptr dp_as_b1 = dp; 1615 | GC::ptr dp_as_b2 = dp; 1616 | 1617 | GC::ptr _dp_as_b1 = GC::staticCast(dp); 1618 | GC::ptr _dp_as_b2 = GC::staticCast(dp); 1619 | 1620 | assert(dp_as_b1 == _dp_as_b1); 1621 | assert(dp_as_b2 == _dp_as_b2); 1622 | 1623 | assert(dynamic_cast(dp_as_b1.get()) == dp.get()); 1624 | 1625 | GC::ptr back_to_derived_1 = GC::dynamicCast(dp_as_b1); 1626 | GC::ptr back_to_derived_2 = GC::dynamicCast(dp_as_b2); 1627 | 1628 | assert(back_to_derived_1 == back_to_derived_2); 1629 | assert(back_to_derived_1 == dp); 1630 | 1631 | assert(dp_as_b1->a == dp->a); 1632 | assert(dp_as_b2->b == dp->b); 1633 | 1634 | std::unique_ptr raw_up(new int[16]); 1635 | GC::unique_ptr gc_up(new int[16]); 1636 | 1637 | raw_up.reset(); 1638 | gc_up.reset(); 1639 | 1640 | raw_up.reset(new int[16]); 1641 | gc_up.reset(new int[16]); 1642 | 1643 | { 1644 | std::thread t1([]() 1645 | { 1646 | try 1647 | { 1648 | GC::ptr table = GC::make(); 1649 | 1650 | for (int pass = 0; pass < 1024; ++pass) 1651 | { 1652 | for (int i = 0; i < 128; ++i) 1653 | { 1654 | GC::ptr tree = GC::make(); 1655 | tree->left = GC::make(); 1656 | tree->right = GC::make(); 1657 | 1658 | std::string key = tostr(i); 1659 | 1660 | table->update(key, tree); // the one with explicit locks 1661 | table->better_symbols[key] = tree; // the one with implicit locks (wrapper type) 1662 | } 1663 | table->clear(); 1664 | } 1665 | 1666 | GC::ptr pi; 1667 | 1668 | for (int i = 0; i < 8192; ++i) 1669 | { 1670 | GC::ptr temp = GC::make(i); 1671 | pi = temp; 1672 | } 1673 | } 1674 | catch (...) 
{ std::cerr << "\n\nSYMTAB TEST EXCEPTION!!\n\n"; assert(false); } 1675 | }); 1676 | 1677 | GC::collect(); 1678 | 1679 | t1.join(); 1680 | } 1681 | 1682 | try 1683 | { 1684 | std::cerr << "beginning ctor collect vec\n"; 1685 | auto ctor_vec = GC::make>(); 1686 | ctor_vec->reserve(20); 1687 | for (int i = 0; i < 10; ++i) ctor_vec->emplace_back(); 1688 | 1689 | std::cerr << "beginning dtor collect vec\n"; 1690 | auto dtor_vec = GC::make>((std::size_t)10); 1691 | while (!dtor_vec->empty()) dtor_vec->pop_back(); 1692 | 1693 | std::cerr << "beginning ctor/dtor collect vec\n"; 1694 | auto ctor_dtor_vec = GC::make>(); 1695 | ctor_dtor_vec->reserve(20); 1696 | for (int i = 0; i < 10; ++i) ctor_dtor_vec->emplace_back(); 1697 | std::cerr << "beginning cleanup\n"; 1698 | while (!ctor_dtor_vec->empty()) ctor_dtor_vec->pop_back(); 1699 | 1700 | std::cerr << "completed ctor/dtor deadlock tests\n\n"; 1701 | } 1702 | catch (const std::exception &ex) 1703 | { 1704 | std::cerr << "CTOR/DTOR VEC TEST EXCEPTION!!\n" << ex.what() << "\n\n"; 1705 | assert(false); 1706 | } 1707 | 1708 | GC::collect(); 1709 | 1710 | { 1711 | TIMER_BEGIN(); 1712 | 1713 | GC::thread threads[4]; 1714 | 1715 | //for (auto &i : threads) i = GC::thread(GC::primary_disjunction, []() 1716 | //for (auto &i : threads) i = GC::thread(GC::inherit_disjunction, []() 1717 | for (auto &i : threads) i = GC::thread(GC::new_disjunction, []() 1718 | { 1719 | try 1720 | { 1721 | GC::ptr x, y, z, w; 1722 | 1723 | for (std::size_t i = 0; i < 1000000; ++i) 1724 | { 1725 | // make 3 unique objects 1726 | x = GC::make(i + 0); 1727 | y = GC::make(i + 1); 1728 | z = GC::make(i + 2); 1729 | 1730 | assert(*x == i + 0); 1731 | assert(*y == i + 1); 1732 | assert(*z == i + 2); 1733 | 1734 | // rotate them 3 times 1735 | for (std::size_t j = 1; j <= 3; ++j) 1736 | { 1737 | w = x; x = y; y = z; z = w; 1738 | assert(*x == i + ((0 + j) % 3)); 1739 | assert(*y == i + ((1 + j) % 3)); 1740 | assert(*z == i + ((2 + j) % 3)); 1741 | } 1742 | } 1743 | } 1744 | catch (...) { std::cerr << "\n\nINTERFERENCE EXCEPTION!!\n\n"; assert(false); } 1745 | 1746 | sync_err_print("finished interference thread with no errors\n"); 1747 | }); 1748 | 1749 | for (auto &i : threads) i.join(); 1750 | 1751 | TIMER_END(milliseconds, "interference test"); 1752 | } 1753 | 1754 | TIMER_END(milliseconds, "all tests"); 1755 | 1756 | return 0; 1757 | } 1758 | catch (const std::exception &ex) 1759 | { 1760 | std::cerr << "\nEXCEPTION!!\n\n" << ex.what() << "\n\n\n"; 1761 | assert(false); 1762 | } 1763 | --------------------------------------------------------------------------------
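For reference, a minimal self-contained sketch of how the TIMER_BEGIN / TIMER_END macro pair defined near the top of test.cpp is meant to be used: TIMER_BEGIN opens a scope and records the start time, and TIMER_END closes that scope and reports the elapsed time to std::cerr. The macro pair is reproduced from the file; the timed busy loop is only an illustration.

#include <chrono>
#include <iostream>

// opens a scope and records the start time
#define TIMER_BEGIN() { const auto __timer_begin = std::chrono::high_resolution_clock::now();
// closes the scope and reports elapsed time; units is an unquoted std::chrono unit, name is a c-style string
#define TIMER_END(units, name) const auto __timer_end = std::chrono::high_resolution_clock::now(); \
    std::cerr << "\ntimer elapsed - " name ": " << std::chrono::duration_cast<std::chrono::units>(__timer_end - __timer_begin).count() << " " #units "\n"; }

int main()
{
    TIMER_BEGIN();

    long long sum = 0;
    for (long long i = 0; i < 10000000; ++i) sum += i; // arbitrary work to time

    std::cerr << "sum = " << sum << "\n";
    TIMER_END(milliseconds, "busy loop");
}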
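The ref-count test at the start of main() relies on a small helper type whose destructor flips an atomic flag so the test can observe exactly when the managed object is destroyed. A sketch of that pattern is shown below; the type name dtor_flag is hypothetical, GC::make<T>(args...) is assumed (from its usage in the tests) to forward its arguments to T's constructor and return a GC::ptr<T>, and the library may additionally require a GC::router specialization for dtor_flag, which is omitted here.

#include <atomic>
#include <cassert>
#include <thread>

#include "GarbageCollection.h"

// hypothetical helper type (not from test.cpp): its destructor raises an atomic flag,
// letting the caller observe when the managed object was destroyed
struct dtor_flag
{
    std::atomic<bool> &flag;
    explicit dtor_flag(std::atomic<bool> &f) : flag(f) {}
    ~dtor_flag() { flag = true; }
};

void ref_count_guarantee_sketch()
{
    std::atomic<bool> flag(false);

    // run a collection concurrently - the guarantee under test is that dropping the last
    // reference destroys the object no later than the end of that collection pass
    std::thread collector([] { GC::collect(); });

    {
        GC::ptr<dtor_flag> p = GC::make<dtor_flag>(flag);
    } // last GC::ptr to the object dies here

    collector.join();
    assert(flag); // by now the destructor must have run
}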
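The variant block checks that GC::variant supports the same access forms as std::variant. A standard-library-only sketch of those access forms (indexed get, typed get, get_if on a possibly-null pointer, and holds_alternative) is given below, using an assumed alternative set of int/float/double.

#include <cassert>
#include <variant>

int main()
{
    std::variant<int, float, double> v = 5; // holds the int alternative (index 0)

    assert(std::get<0>(v) == 5);   // access by index
    assert(std::get<int>(v) == 5); // access by type

    assert(std::get_if<0>(&v) != nullptr);     // get_if returns a pointer to the alternative...
    assert(*std::get_if<int>(&v) == 5);
    assert(std::get_if<float>(&v) == nullptr); // ...or nullptr if that alternative isn't held
    assert(std::get_if<int>((std::variant<int, float, double>*)nullptr) == nullptr); // null input -> null output

    assert(std::holds_alternative<int>(v));
    assert(!std::holds_alternative<double>(v));
}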
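Similarly, the optional block exercises the usual std::optional behaviors through the GC::optional wrapper. A minimal sketch of those behaviors with plain std::optional (emplace/reset, has_value/value, value comparisons, and std::nullopt assignment):

#include <cassert>
#include <optional>

int main()
{
    std::optional<int> a, b;
    assert(!a.has_value() && !b.has_value()); // default-constructed optionals are empty

    a.emplace(16);
    b.emplace(32);
    assert(a.has_value() && b.has_value());
    assert(a.value() == 16 && b.value() == 32);
    assert(a < b && a <= b && b > a && b >= a && a != b); // comparisons order by contained value

    a = 5;
    b = 5;
    assert(a == b && !(a < b) && a <= b);

    a = std::nullopt; // either form empties the optional again
    b.reset();
    assert(!a.has_value() && !b.has_value());
}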
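The final timed "interference" block rotates three heap objects among four pointers in several threads while collections run, asserting after each rotation that every slot holds the expected value. The bookkeeping behind those asserts is easiest to see in isolation; in the sketch below std::shared_ptr stands in for the library's pointer type purely to show the rotation arithmetic (single-threaded, no collector involved).

#include <cassert>
#include <cstddef>
#include <memory>

int main()
{
    // three distinct objects are rotated through x, y, z;
    // after j rotations, slot k holds the value that started (k + j) mod 3 slots ahead
    std::shared_ptr<std::size_t> x, y, z, w;

    for (std::size_t i = 0; i < 1000; ++i)
    {
        x = std::make_shared<std::size_t>(i + 0);
        y = std::make_shared<std::size_t>(i + 1);
        z = std::make_shared<std::size_t>(i + 2);

        for (std::size_t j = 1; j <= 3; ++j)
        {
            w = x; x = y; y = z; z = w; // rotate the three objects one slot
            assert(*x == i + ((0 + j) % 3));
            assert(*y == i + ((1 + j) % 3));
            assert(*z == i + ((2 + j) % 3));
        }
    }
}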