├── .gitignore
├── CMakeLists.txt
├── LICENSE
├── README.md
├── include
│   └── lazer.h
└── src
    ├── main.c
    └── runtime
        ├── atom.c
        ├── atom.h
        ├── hash.c
        ├── hash.h
        ├── os
        │   ├── heap.c
        │   ├── heap.h
        │   ├── lock.c
        │   ├── lock.h
        │   ├── memory.c
        │   └── memory.h
        └── system.h

/.gitignore:
--------------------------------------------------------------------------------
1 | /build*
2 | /.history
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.0)
2 | project(lazer C)
3 |
4 | file(GLOB_RECURSE sources src/*.c)
5 |
6 | add_executable(lazer ${sources})
7 | target_include_directories(lazer PRIVATE src/ include/)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 protty
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Lazer [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://github.com/kprotty/lazer/blob/master/LICENSE)
2 |
3 | Imagine Elixir, but over-engineered and focused on memory usage. It is safe to say that this is more of a research project than a solution to a practical problem; therefore, it should not be used in production. Heavily inspired by the Erlang BEAM as well as Ponylang.
4 |
5 | ## (makeshift) Compiling
6 | ```
7 | mkdir build
8 | cd build
9 | cmake -GNinja ..
10 | ninja
11 | ```
--------------------------------------------------------------------------------
/include/lazer.h:
--------------------------------------------------------------------------------
1 | #ifndef LAZER_H
2 | #define LAZER_H
3 |
4 | #include 
5 | #include 
6 | #include 
7 |
8 | #endif // LAZER_H
--------------------------------------------------------------------------------
/src/main.c:
--------------------------------------------------------------------------------
1 | int main(int argc, const char* argv[]) {
2 |     return 0;
3 | }
--------------------------------------------------------------------------------
/src/runtime/atom.c:
--------------------------------------------------------------------------------
1 | #include "atom.h"
2 | #include <string.h>
3 | #include "os/heap.h"
4 |
5 |
6 |
7 | typedef struct {
8 |     uint32_t displacement;
9 |     uint32_t atom_ptr;
10 | } atom_cell_t;
11 |
12 | typedef struct {
13 |     lzr_atom_t* heap;
14 |     size_t commit_at;
15 | } atom_heap_t;
16 |
17 | struct lzr_atom_table_t {
18 |     size_t size;
19 |     size_t mask;
20 |     size_t capacity;
21 |     atom_cell_t* cells;
22 |     atom_cell_t* remap;
23 |     atom_heap_t atom_heap;
24 | };
25 |
26 | #define ATOM_CELL_LOAD_FACTOR 90
27 | #define ATOM_CELL_COMMIT_DEFAULT 1024
28 | #define ATOM_HEAP_COMMIT_SIZE (1 * 1024 * 1024)
29 |
30 | #define MIN(x, y) (((x) < (y)) ? (x) : (y))
31 | #define MAX(x, y) (((x) > (y)) ? (x) : (y))
32 | #define NEXT_POW_2(value) (1ULL << (64 - __builtin_clzl((value) - 1)))
33 | #define ALIGN(value, alignment) ((value) + ((alignment) - (((value) % (alignment)))))
34 |
35 | void atom_heap_init(atom_heap_t* self, size_t max_atoms) {
36 |     const size_t heap_size = ALIGN(MAX_ATOM_SIZE * max_atoms, LZR_HEAP_CHUNK_SIZE);
37 |     self->heap = (lzr_atom_t*) lzr_heap_reserve(heap_size / LZR_HEAP_CHUNK_SIZE);
38 |
39 |     self->commit_at = ((size_t) self->heap) + ATOM_HEAP_COMMIT_SIZE;
40 |     lzr_memory_commit((void*) self->heap, ATOM_HEAP_COMMIT_SIZE);
41 | }
42 |
43 | lzr_atom_t* atom_heap_alloc(atom_heap_t* self, uint32_t hash, const char* text, size_t len) {
44 |     const size_t atom_size = ALIGN(sizeof(lzr_atom_t) + 1 + len, 8);
45 |     if (((size_t) self->heap) + atom_size > self->commit_at) {
46 |         lzr_memory_commit((void*) self->commit_at, ATOM_HEAP_COMMIT_SIZE);
47 |         self->commit_at += ATOM_HEAP_COMMIT_SIZE;
48 |     }
49 |
50 |     lzr_atom_t* atom = (lzr_atom_t*) self->heap;
51 |     atom->data = 0;
52 |     atom->actor = 0;
53 |     atom->hash = hash;
54 |     *lzr_atom_len_ptr(atom) = (len & 0xff);
55 |     memcpy(lzr_atom_text_ptr(atom), text, lzr_atom_len(atom));
56 |
57 |     self->heap = (lzr_atom_t*) (((size_t) self->heap) + atom_size);
58 |     return atom;
59 | }
60 |
61 | bool table_compare_eq(uint32_t atom_ptr, uint32_t hash, const char* text, size_t len) {
62 |     lzr_atom_t* atom = (lzr_atom_t*) LZR_PTR_UNZIP(atom_ptr);
63 |
64 |     return hash == atom->hash &&
65 |         len == lzr_atom_len(atom) &&
66 |         memcmp(lzr_atom_text_ptr(atom), text, len) == 0;
67 | }
68 |
69 | atom_cell_t* table_find(lzr_atom_table_t* self, uint32_t hash, const char* text, size_t len) {
70 |     uint32_t displacement = 0;
71 |     size_t index = hash & self->mask;
72 |
73 |     while (true) {
74 |         atom_cell_t* cell = &self->cells[index];
75 |         if (cell->atom_ptr == 0 || displacement > cell->displacement)
76 |             return NULL;
77 |         if (table_compare_eq(cell->atom_ptr, hash, text, len))
78 |             return cell;
79 |
80 |         displacement++;
81 |         index = (index + 1) & self->mask;
82 |     }
83 | }
84 |
85 | atom_cell_t* table_insert(lzr_atom_table_t* self, uint32_t atom_ptr) {
86 |     size_t index = ((lzr_atom_t*) LZR_PTR_UNZIP(atom_ptr))->hash & self->mask;
87 |     atom_cell_t *inserted_cell = NULL;
88 |     atom_cell_t current_cell;
89 |
90 |     current_cell.displacement = 0;
91 |     current_cell.atom_ptr = atom_ptr;
92 |
93 |     while (true) {
94 |         atom_cell_t* cell = &self->cells[index];
95 |         if (cell->atom_ptr == 0) {
96 |             self->size++;
97 |             *cell = current_cell;
98 |             return inserted_cell != NULL ? inserted_cell : cell;
99 |
100 |         } else if (cell->displacement < current_cell.displacement) {
101 |             if (current_cell.atom_ptr == atom_ptr)
102 |                 inserted_cell = cell;
103 |             atom_cell_t temp = current_cell;
104 |             current_cell = *cell;
105 |             *cell = temp;
106 |         }
107 |
108 |         current_cell.displacement++;
109 |         index = (index + 1) & self->mask;
110 |     }
111 | }
112 |
113 | void table_grow(lzr_atom_table_t* self) {
114 |     atom_cell_t* old_cells = self->cells;
115 |     size_t old_capacity = self->mask + 1;
116 |     size_t new_capacity = old_capacity << 1;
117 |     assert(new_capacity <= self->capacity);
118 |
119 |     self->size = 0;
120 |     self->cells = self->remap;
121 |     self->mask = new_capacity - 1;
122 |     lzr_memory_commit((void*) self->cells, new_capacity * sizeof(atom_cell_t));
123 |
124 |     for (size_t cell_index = 0; cell_index < old_capacity; cell_index++) {
125 |         uint32_t atom_ptr = old_cells[cell_index].atom_ptr;
126 |         if (atom_ptr != 0)
127 |             table_insert(self, atom_ptr);
128 |     }
129 |
130 |     lzr_memory_decommit((void*) self->remap, old_capacity * sizeof(atom_cell_t));
131 |     self->remap = old_cells;
132 | }
133 |
134 | void lzr_atom_table_init(lzr_atom_table_t* self, size_t max_atoms) {
135 |     // allocate the cell tables for both the main cells and the remap cells
136 |     // the remap table is used as a copying-GC-like space when resizing
137 |     const size_t cell_size = NEXT_POW_2(MAX(max_atoms, ATOM_CELL_COMMIT_DEFAULT));
138 |     const size_t cell_heap_size = ALIGN(cell_size, LZR_HEAP_CHUNK_SIZE) / LZR_HEAP_CHUNK_SIZE;
139 |     self->cells = (atom_cell_t*) lzr_heap_reserve(cell_heap_size);
140 |     self->remap = (atom_cell_t*) lzr_heap_reserve(cell_heap_size);
141 |
142 |     const size_t cells_to_commit = MIN(cell_size, ATOM_CELL_COMMIT_DEFAULT * sizeof(atom_cell_t));
143 |     lzr_memory_commit((void*) self->cells, cells_to_commit);
144 |
145 |     atom_heap_init(&self->atom_heap, max_atoms);
146 |     self->mask = cells_to_commit - 1;
147 |     self->capacity = cell_size;
148 |     self->size = 0;
149 | }
150 |
151 | lzr_atom_t* lzr_atom_table_find(lzr_atom_table_t* self, const char* key, size_t key_len) {
152 |     // TODO: Add read-lock here
153 |
154 |     uint32_t hash = lzr_hash_bytes(key, key_len);
155 |     atom_cell_t* cell = table_find(self, hash, key, key_len);
156 |     return (lzr_atom_t*) (cell != NULL ? LZR_PTR_UNZIP(cell->atom_ptr) : NULL);
157 | }
158 |
159 | lzr_atom_t* lzr_atom_table_upsert(lzr_atom_table_t* self, const char* key, size_t key_len) {
160 |     // TODO: Add write-lock here
161 |
162 |     uint32_t hash = lzr_hash_bytes(key, key_len);
163 |     atom_cell_t* cell = table_find(self, hash, key, key_len);
164 |
165 |     if (cell == NULL) {
166 |         if (++self->size >= (ATOM_CELL_LOAD_FACTOR * self->capacity) / 100)
167 |             table_grow(self);
168 |         lzr_atom_t* atom = atom_heap_alloc(&self->atom_heap, hash, key, key_len);
169 |         cell = table_insert(self, LZR_PTR_ZIP(atom));
170 |     }
171 |
172 |     return (lzr_atom_t*) LZR_PTR_UNZIP(cell->atom_ptr);
173 | }
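
The two TODO comments above ask for read/write locking. As a rough illustration of one way to wire that up with pieces that already exist in this repo, the sketch below serializes upsert calls behind the runtime's `lzr_spinlock_t`. The global `table_lock` and the wrapper function are hypothetical and not part of the original code; a real implementation would presumably want a proper reader/writer lock instead of a single spinlock.

```c
// Sketch only: serialize atom-table access with the existing spinlock until
// the planned read/write locks land. `table_lock` and the wrapper are hypothetical.
#include "atom.h"
#include "os/lock.h"

static lzr_spinlock_t table_lock; // call lzr_spinlock_init(&table_lock) once at startup

lzr_atom_t* atom_table_upsert_locked(lzr_atom_table_t* table, const char* key, size_t len) {
    lzr_spinlock_lock(&table_lock);
    lzr_atom_t* atom = lzr_atom_table_upsert(table, key, len);
    lzr_spinlock_unlock(&table_lock);
    return atom;
}
```
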
--------------------------------------------------------------------------------
/src/runtime/atom.h:
--------------------------------------------------------------------------------
1 | #ifndef LZR_ATOM_H
2 | #define LZR_ATOM_H
3 |
4 | #include "hash.h"
5 |
6 | typedef struct {
7 |     uint32_t hash;
8 |     uint32_t data;
9 |     uint32_t actor;
10 | } lzr_atom_t;
11 |
12 | #define MAX_ATOM_TEXT 255
13 | #define MAX_ATOM_SIZE (sizeof(lzr_atom_t) + sizeof(uint8_t) + MAX_ATOM_TEXT)
14 |
15 | #define lzr_atom_len(atom) (*lzr_atom_len_ptr(atom))
16 | #define lzr_atom_len_ptr(atom) ((uint8_t*) ((atom) + 1))
17 | #define lzr_atom_text_ptr(atom) (((char*) ((atom) + 1)) + 1)
18 |
19 | typedef struct lzr_atom_table_t lzr_atom_table_t;
20 |
21 | void lzr_atom_table_init(lzr_atom_table_t* self, size_t max_atoms);
22 |
23 | lzr_atom_t* lzr_atom_table_find(lzr_atom_table_t* self, const char* key, size_t key_len);
24 |
25 | lzr_atom_t* lzr_atom_table_upsert(lzr_atom_table_t* self, const char* key, size_t key_len);
26 |
27 | #endif // LZR_ATOM_H
--------------------------------------------------------------------------------
/src/runtime/hash.c:
--------------------------------------------------------------------------------
1 | #include "hash.h"
2 |
3 | uint32_t fx_hash(const char* bytes, size_t len);
4 | uint32_t fnv1a_hash(const char* bytes, size_t len);
5 |
6 | uint32_t lzr_hash_bytes(const char* bytes, size_t len) {
7 |     if (len >= 512)
8 |         return fx_hash(bytes, len);
9 |     return fnv1a_hash(bytes, len);
10 | }
11 |
12 | uint32_t fnv1a_hash(const char* bytes, size_t len) {
13 |     uint32_t hash = 0x811c9dc5;
14 |     while (len--)
15 |         hash = (hash ^ ((uint8_t) *bytes++)) * 0x1000193;
16 |     return hash;
17 | }
18 |
19 | uint32_t fx_hash(const char* bytes, size_t len) {
20 |     #define fx_rotl(type, value, shift) \
21 |         ((value << shift) | (value >> (sizeof(type) * 8 - shift)))
22 |     #define fx_hash_word(type, word) \
23 |         ((fx_rotl(type, hash, 5) ^ (word)) * 0x517cc1b727220a95ULL)
24 |
25 |     size_t hash = 0;
26 |     while (len >= sizeof(size_t)) {
27 |         hash = fx_hash_word(size_t, *((size_t*) bytes));
28 |         bytes += sizeof(size_t);
29 |         len -= sizeof(size_t);
30 |     }
31 |
32 |     while (len--)
33 |         hash = fx_hash_word(size_t, (*bytes++));
34 |
35 |     return (uint32_t) (hash >> 32);
36 |     #undef fx_rotl
37 |     #undef fx_hash_word
38 | }
--------------------------------------------------------------------------------
/src/runtime/hash.h:
--------------------------------------------------------------------------------
1 | #ifndef LZR_HASH_H
2 | #define LZR_HASH_H
3 |
4 | #include "system.h"
5 |
6 | uint32_t lzr_hash_bytes(const char* bytes, size_t len);
7 |
8 | #endif // LZR_HASH_H
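
A small sanity check that follows directly from the code above: any input shorter than 512 bytes takes the FNV-1a path, and a zero-length input returns the untouched FNV-1a offset basis. The test harness itself (the `main` function and the compile setup) is illustrative only.

```c
#include <assert.h>
#include "hash.h"

int main(void) {
    // len < 512 takes the FNV-1a path; an empty input returns the offset basis unchanged
    assert(lzr_hash_bytes("", 0) == 0x811c9dc5u);
    // hashing is deterministic for identical input
    assert(lzr_hash_bytes("atom", 4) == lzr_hash_bytes("atom", 4));
    return 0;
}
```
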
--------------------------------------------------------------------------------
/src/runtime/os/heap.c:
--------------------------------------------------------------------------------
1 | #include "heap.h"
2 | #include "lock.h"
3 |
4 | #define HEAP_SIZE (LZR_HEAP_END - LZR_HEAP_BEGIN)
5 | #define HEAP_PTR(offset) (LZR_HEAP_BEGIN + ((offset) * LZR_HEAP_CHUNK_SIZE))
6 |
7 | typedef struct {
8 |     uint16_t head;
9 |     uint16_t tail;
10 | } freelist_t;
11 |
12 | typedef struct {
13 |     uint16_t prev;
14 |     uint16_t next;
15 |     uint16_t size;
16 |     bool allocated: 1;
17 |     uint16_t predecessor: 14;
18 | } chunk_info_t;
19 |
20 | typedef struct {
21 |     lzr_spinlock_t lock;
22 |     uint16_t top_heap;
23 |     uint16_t top_chunk;
24 |     freelist_t free_list;
25 |     chunk_info_t chunks[HEAP_SIZE / LZR_HEAP_CHUNK_SIZE];
26 | } heap_t;
27 |
28 | static uint32_t heap_offset = 0; // counts reserved chunks, not bytes
29 | static bool heap_committed = false;
30 | static bool heap_initialized = false;
31 |
32 | // memory map the entire 32gb heap into the address space (uncommitted)
33 | void lzr_heap_init() {
34 |     assert(heap_initialized == false);
35 |     lzr_memory_map((void*) HEAP_PTR(0), HEAP_SIZE, false);
36 |     heap_initialized = true;
37 | }
38 |
39 | // reserve a specific amount of the heap for static data.
40 | // needs to be done before lzr_heap_commit()
41 | void* lzr_heap_reserve(uint16_t num_chunks) {
42 |     assert(heap_committed == false);
43 |     void* address = (void*) HEAP_PTR(heap_offset);
44 |     heap_offset += num_chunks;
45 |     return address;
46 | }
47 |
48 | // locks down the heap against future lzr_heap_reserve's and initializes it.
49 | // any further allocations need to be done through lzr_heap_alloc/lzr_heap_free
50 | void lzr_heap_commit() {
51 |     heap_t* heap = (heap_t*) HEAP_PTR(heap_offset);
52 |
53 |     assert(heap_committed == false);
54 |     lzr_memory_commit((void*) heap, sizeof(heap_t));
55 |     heap_committed = true;
56 |
57 |     heap->top_heap = 1;
58 |     heap->top_chunk = 0;
59 |     heap->free_list.head = 0;
60 |     heap->free_list.tail = 0;
61 |     lzr_spinlock_init(&heap->lock);
62 |
63 |     heap->chunks[0].prev = 0;
64 |     heap->chunks[0].next = 0;
65 |     heap->chunks[0].size = 0;
66 |     heap->chunks[0].allocated = true;
67 |     heap->chunks[0].predecessor = 0;
68 | }
69 |
70 | void freelist_append(heap_t* heap, uint16_t chunk) {
71 |     heap->chunks[chunk].next = 0;
72 |
73 |     if (heap->free_list.tail == 0) {
74 |         heap->chunks[chunk].prev = 0;
75 |         heap->free_list.tail = heap->free_list.head = chunk;
76 |     } else {
77 |         heap->chunks[heap->free_list.tail].next = chunk;
78 |         heap->chunks[chunk].prev = heap->free_list.tail;
79 |         heap->free_list.tail = chunk;
80 |     }
81 | }
82 |
83 | void freelist_remove(heap_t* heap, uint16_t chunk) {
84 |     chunk_info_t* chunk_info = &heap->chunks[chunk];
85 |
86 |     if (heap->free_list.head == chunk)
87 |         heap->free_list.head = chunk_info->next;
88 |     if (heap->free_list.tail == chunk)
89 |         heap->free_list.tail = chunk_info->prev;
90 |     if (chunk_info->prev != 0)
91 |         heap->chunks[chunk_info->prev].next = chunk_info->next;
92 |     if (chunk_info->next != 0)
93 |         heap->chunks[chunk_info->next].prev = chunk_info->prev;
94 | }
95 |
96 | void* lzr_heap_alloc(uint16_t num_chunks) {
97 |     heap_t* heap = (heap_t*) HEAP_PTR(heap_offset);
98 |     lzr_spinlock_lock(&heap->lock);
99 |     void* address;
100 |
101 |     // using Best-Fit search, try and find a free chunk in the free list
102 |     uint16_t free_chunk = 0;
103 |     for (uint16_t chunk = heap->free_list.head; chunk != 0; chunk = heap->chunks[chunk].next)
104 |         if (heap->chunks[chunk].size >= num_chunks)
105 |             if (free_chunk == 0 || heap->chunks[free_chunk].size > heap->chunks[chunk].size)
106 |                 free_chunk = chunk;
107 |
108 |     // a free chunk was found, use it
109 |     if (free_chunk != 0) {
110 |         chunk_info_t* free_chunk_info = &heap->chunks[free_chunk];
111 |         free_chunk_info->allocated = true;
112 |         free_chunk_info->next = 0;
113 |
114 |         // remove it from the free list and add back the remainder if it was bigger than needed
115 |         freelist_remove(heap, free_chunk);
116 |         if (free_chunk_info->size > num_chunks) {
117 |             uint16_t left_over = free_chunk + num_chunks;
118 |             heap->chunks[left_over].size = free_chunk_info->size - num_chunks;
119 |             heap->chunks[left_over].predecessor = free_chunk;
120 |             heap->chunks[left_over].allocated = false;
121 |             freelist_append(heap, left_over);
122 |         }
123 |
124 |         free_chunk_info->size = num_chunks;
125 |         address = (void*) HEAP_PTR(heap_offset + free_chunk);
126 |
127 |     // no free chunk was found, bump-allocate from the top of the heap
128 |     } else {
129 |         uint16_t top = heap->top_heap;
130 |         heap->top_heap += num_chunks;
131 |         assert(heap->top_heap > top);
132 |
133 |         // register a newly made chunk
134 |         heap->chunks[top].predecessor = heap->top_chunk;
135 |         heap->chunks[top].size = num_chunks;
136 |         heap->chunks[top].allocated = true;
137 |
138 |         heap->top_chunk = top;
139 |         address = (void*) HEAP_PTR(heap_offset + top);
140 |     }
141 |
142 |     lzr_spinlock_unlock(&heap->lock);
143 |     return address;
144 | }
145 |
146 | void lzr_heap_free(void* ptr) {
147 |     // assert that the ptr is in the heap & aligned to LZR_HEAP_CHUNK_SIZE (it's a valid chunk)
148 |     assert((size_t) ptr > LZR_HEAP_BEGIN && (size_t) ptr < LZR_HEAP_END);
149 |     assert(((size_t) ptr % LZR_HEAP_CHUNK_SIZE) == 0);
150 |     heap_t* heap = (heap_t*) HEAP_PTR(heap_offset);
151 |
152 |     // get the chunk info and make sure that it was allocated before freeing
153 |     uint16_t ptr_chunk = (((size_t) ptr - LZR_HEAP_BEGIN) / LZR_HEAP_CHUNK_SIZE) - heap_offset;
154 |     chunk_info_t* ptr_chunk_info = &heap->chunks[ptr_chunk];
155 |
156 |     lzr_spinlock_lock(&heap->lock);
157 |     assert(ptr_chunk_info->allocated == true);
158 |     ptr_chunk_info->allocated = false;
159 |
160 |     // coalesce free chunks from left to right (removing them from the free list)
161 |     if (ptr_chunk != heap->top_chunk) {
162 |         uint16_t chunk = ptr_chunk + ptr_chunk_info->size;
163 |         while (!heap->chunks[chunk].allocated) {
164 |             freelist_remove(heap, chunk);
165 |             ptr_chunk_info->size += heap->chunks[chunk].size;
166 |             chunk += heap->chunks[chunk].size;
167 |             if (chunk >= heap->top_heap)
168 |                 break;
169 |         }
170 |     }
171 |
172 |     // coalesce free chunks right to left following the chain of predecessors
173 |     while (ptr_chunk_info->predecessor != 0) {
174 |         chunk_info_t* predecessor = &heap->chunks[ptr_chunk_info->predecessor];
175 |         if (predecessor->allocated)
176 |             break;
177 |         freelist_remove(heap, ptr_chunk_info->predecessor);
178 |         predecessor->size += ptr_chunk_info->size;
179 |         ptr_chunk = ptr_chunk_info->predecessor;
180 |         ptr_chunk_info = predecessor;
181 |     }
182 |
183 |     // the freed chunk is on the top of the heap:
184 |     // move the heap top back down to the freed chunk and discard it
185 |     if (ptr_chunk + ptr_chunk_info->size >= heap->top_heap) {
186 |         heap->top_chunk = ptr_chunk_info->predecessor;
187 |         heap->top_heap = ptr_chunk;
188 |
189 |     // the freed chunk is somewhere else in the heap:
190 |     // add it to the free list ("add it to the list Helvian")
191 |     } else {
192 |         freelist_append(heap, ptr_chunk);
193 |     }
194 |
195 |     lzr_spinlock_unlock(&heap->lock);
196 | }
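
Tying the allocator above together, here is a hypothetical bootstrap sequence that follows the reserve → commit → alloc/free lifecycle described in heap.h below, and checks that a chunk pointer survives an `LZR_PTR_ZIP`/`LZR_PTR_UNZIP` round trip. The sizes and variable names are made up for illustration; they are not part of the runtime.

```c
#include <assert.h>
#include "heap.h"

int main(void) {
    lzr_heap_init();                                    // map the whole (uncommitted) heap range
    void* atom_space = lzr_heap_reserve(1);             // static reservation, before committing
    lzr_heap_commit();                                  // lock down reservations, enable alloc/free

    void* chunk = lzr_heap_alloc(2);                    // 2 chunks = 4mb of address space
    lzr_memory_commit(chunk, 2 * LZR_HEAP_CHUNK_SIZE);  // the caller commits what it allocates

    assert(LZR_PTR_UNZIP(LZR_PTR_ZIP(chunk)) == chunk); // 8-byte aligned pointers round-trip

    lzr_memory_decommit(chunk, 2 * LZR_HEAP_CHUNK_SIZE);
    lzr_heap_free(chunk);
    (void) atom_space;
    return 0;
}
```
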
--------------------------------------------------------------------------------
/src/runtime/os/heap.h:
--------------------------------------------------------------------------------
1 | #ifndef LZR_HEAP_H
2 | #define LZR_HEAP_H
3 |
4 | #include "memory.h"
5 |
6 | /*
7 |     lazer memory maps a fixed 32gb heap and allocates chunks off of it at a 2mb granularity.
8 |     This is because 32gb worth of address space is 35 bits. When addresses are aligned to 8 bytes,
9 |     the bottom 3 bits are always zero, so a pointer in this space can be compressed down to
10 |     35 - 3 = 32 bits and fit nicely inside a uint32_t. This helps in saving memory later.
11 | */
12 | #define LZR_HEAP_BEGIN (1ULL << 35)
13 | #define LZR_HEAP_END (1ULL << 36)
14 | #define LZR_HEAP_CHUNK_SIZE (2 * 1024 * 1024)
15 |
16 | // compression and decompression of 8-byte aligned heap pointers
17 | #define LZR_PTR_ZIP(ptr) ((uint32_t) ((((size_t) (ptr)) - LZR_HEAP_BEGIN) >> 3))
18 | #define LZR_PTR_UNZIP(ptr) ((void*) ((((size_t) (ptr)) << 3) + LZR_HEAP_BEGIN))
19 |
20 | // the heap needs to be mapped into memory first, otherwise alloc/free will segfault
21 | void lzr_heap_init();
22 |
23 | // used for reserving static memory that won't be freed; must be done before committing the heap
24 | void* lzr_heap_reserve(uint16_t num_chunks);
25 |
26 | // no more reserving. lock it down and make it safe to use alloc/free
27 | void lzr_heap_commit();
28 |
29 | // allocate `num_chunks` worth of chunks (2mb each).
30 | // The caller is responsible for committing it.
31 | void* lzr_heap_alloc(uint16_t num_chunks);
32 |
33 | // free a 2mb aligned pointer in the lazer heap address range.
34 | // again, the caller is responsible for decommitting it.
35 | void lzr_heap_free(void* ptr);
36 |
37 | #endif // LZR_HEAP_H
--------------------------------------------------------------------------------
/src/runtime/os/lock.c:
--------------------------------------------------------------------------------
1 | #include "lock.h"
2 |
3 | #if defined(LZR_WINDOWS)
4 | #include <windows.h>
5 | #define cpu_yield() Sleep(1)
6 | #define cpu_relax() YieldProcessor()
7 |
8 | #elif defined(LZR_LINUX)
9 | #include <sched.h>
10 | #include <immintrin.h>
11 | #define cpu_relax() _mm_pause()
12 | #define cpu_yield() sched_yield()
13 | #endif
14 |
15 | void lzr_spinlock_init(lzr_spinlock_t* self) {
16 |     atomic_flag_clear_explicit(self, memory_order_relaxed);
17 | }
18 |
19 | void lzr_spinlock_lock(lzr_spinlock_t* self) {
20 |     size_t contended = 0;
21 |     while (atomic_flag_test_and_set_explicit(self, memory_order_relaxed)) {
22 |         if (++contended > 20) {
23 |             cpu_yield();
24 |         } else {
25 |             cpu_relax();
26 |         }
27 |     }
28 |
29 |     atomic_thread_fence(memory_order_acquire);
30 | }
31 |
32 | void lzr_spinlock_unlock(lzr_spinlock_t* self) {
33 |     atomic_thread_fence(memory_order_release);
34 |     atomic_flag_clear_explicit(self, memory_order_relaxed);
35 | }
--------------------------------------------------------------------------------
/src/runtime/os/lock.h:
--------------------------------------------------------------------------------
1 | #ifndef LZR_LOCK_H
2 | #define LZR_LOCK_H
3 |
4 | #include "../system.h"
5 | #include <stdatomic.h>
6 |
7 | typedef atomic_flag lzr_spinlock_t;
8 |
9 | void lzr_spinlock_init(lzr_spinlock_t* self);
10 |
11 | void lzr_spinlock_lock(lzr_spinlock_t* self);
12 |
13 | void lzr_spinlock_unlock(lzr_spinlock_t* self);
14 |
15 | #endif // LZR_LOCK_H
16 |
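
For completeness, a minimal usage sketch of the spinlock API (the counter and function names are invented): `lzr_spinlock_lock` acquires and `lzr_spinlock_unlock` releases, so writes made inside the critical section are visible to the next thread that takes the lock.

```c
#include "lock.h"

static lzr_spinlock_t counter_lock; // initialize once with lzr_spinlock_init(&counter_lock)
static size_t counter = 0;

void counter_increment(void) {
    lzr_spinlock_lock(&counter_lock);
    counter++;                        // protected by the spinlock
    lzr_spinlock_unlock(&counter_lock);
}
```
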
--------------------------------------------------------------------------------
/src/runtime/os/memory.c:
--------------------------------------------------------------------------------
1 | #include "memory.h"
2 |
3 | #if defined(LZR_WINDOWS)
4 | #include <windows.h>
5 |
6 | void* lzr_memory_map(void* addr, size_t bytes, bool commit) {
7 |     DWORD alloc_type = MEM_RESERVE | (commit ? MEM_COMMIT : 0);
8 |     DWORD protect = commit ? PAGE_READWRITE : PAGE_NOACCESS;
9 |
10 |     void* address = VirtualAlloc(addr, bytes, alloc_type, protect);
11 |     assert(address != NULL);
12 |     return address;
13 | }
14 |
15 | void lzr_memory_unmap(void* addr, size_t bytes) {
16 |     BOOL unmapped = VirtualFree(addr, 0, MEM_RELEASE);
17 |     assert(unmapped == TRUE);
18 | }
19 |
20 | void lzr_memory_commit(void* addr, size_t bytes) {
21 |     void* address = VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE);
22 |     assert(address == addr);
23 | }
24 |
25 | void lzr_memory_decommit(void* addr, size_t bytes) {
26 |     BOOL decommitted = VirtualFree(addr, bytes, MEM_DECOMMIT);
27 |     assert(decommitted == TRUE);
28 | }
29 |
30 | #else
31 | #include <sys/mman.h>
32 |
33 | void* lzr_memory_map(void* addr, size_t bytes, bool commit) {
34 |     int protect = PROT_READ | PROT_WRITE;
35 |     int flags = MAP_PRIVATE | MAP_ANONYMOUS | (addr != NULL ? MAP_FIXED : 0);
36 |
37 |     addr = mmap(addr, bytes, protect, flags, -1, 0);
38 |     assert(addr != MAP_FAILED);
39 |     return addr;
40 | }
41 |
42 | void lzr_memory_unmap(void* addr, size_t bytes) {
43 |     int unmapped = munmap(addr, bytes);
44 |     assert(unmapped == 0);
45 | }
46 |
47 | void lzr_memory_commit(void* addr, size_t bytes) {
48 |     // linux over-commits memory by default
49 | }
50 |
51 | void lzr_memory_decommit(void* addr, size_t bytes) {
52 |     int decommitted = madvise(addr, bytes, MADV_DONTNEED);
53 |     assert(decommitted == 0);
54 | }
55 |
56 | #endif
--------------------------------------------------------------------------------
/src/runtime/os/memory.h:
--------------------------------------------------------------------------------
1 | #ifndef LZR_MEMORY_H
2 | #define LZR_MEMORY_H
3 |
4 | #include "../system.h"
5 |
6 | // virtually map memory (TODO: add support for executable memory down the road)
7 | void* lzr_memory_map(void* addr, size_t bytes, bool commit);
8 |
9 | // tbh i don't think this will ever be used, but it's here if need be.
10 | void lzr_memory_unmap(void* addr, size_t bytes);
11 |
12 | // ensure that the virtual address range is mapped to physical memory
13 | void lzr_memory_commit(void* addr, size_t bytes);
14 |
15 | // discard the mapping of physical memory to the virtual address range
16 | void lzr_memory_decommit(void* addr, size_t bytes);
17 |
18 | #endif // LZR_MEMORY_H
--------------------------------------------------------------------------------
/src/runtime/system.h:
--------------------------------------------------------------------------------
1 | #ifndef LZR_SYSTEM_H
2 | #define LZR_SYSTEM_H
3 |
4 | #include 
5 | #include 
6 |
7 | #if defined(_WIN32) || defined(__WIN32__) || defined(_WIN64)
8 | #define LZR_WINDOWS
9 | #define WIN32_LEAN_AND_MEAN
10 | #elif defined(__linux__)
11 | #define LZR_LINUX
12 | #define _GNU_SOURCE
13 | #else
14 | #error "Operating system not supported"
15 | #endif
16 |
17 | #if defined(_MSC_VER) && !defined(__clang__)
18 | #error "MSVC is frowned upon in these parts"
19 | #endif
20 |
21 | #if defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
22 | #define LZR_X86
23 | #elif defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM64)
24 | #define LZR_ARM
25 | #else
26 | #error "Architecture not supported"
27 | #endif
28 |
29 | #endif // LZR_SYSTEM_H
--------------------------------------------------------------------------------
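
Finally, a hypothetical smoke test for the portability layer in memory.h: reserve a small range, commit it, touch it, then decommit and unmap it. The 64kb size and the include path are illustrative, not part of the repo.

```c
#include <string.h>
#include "runtime/os/memory.h"

int main(void) {
    const size_t size = 64 * 1024;
    char* pages = (char*) lzr_memory_map(NULL, size, false); // reserve address space only
    lzr_memory_commit(pages, size);                          // back the range with physical memory
    memset(pages, 0xAB, size);                               // safe to touch once committed
    lzr_memory_decommit(pages, size);                        // contents may now be discarded
    lzr_memory_unmap(pages, size);
    return 0;
}
```
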