├── .gitignore
├── LockFreeAllocatorTester
├── LockFreeAllocatorTester
│ ├── en.lproj
│ │ └── InfoPlist.strings
│ ├── Default.png
│ ├── Default@2x.png
│ ├── Default-568h@2x.png
│ ├── XAMAppDelegate.h
│ ├── main.m
│ ├── LockFreeAllocatorTester-Prefix.pch
│ ├── LockFreeAllocatorTester-Info.plist
│ └── XAMAppDelegate.m
└── LockFreeAllocatorTester.xcodeproj
│   ├── project.xcworkspace
│   │   └── contents.xcworkspacedata
│   └── project.pbxproj
├── alloc.h
├── delayed-free.h
├── metadata.h
├── mono-mmap.h
├── sgen-gc.h
├── mono-mmap.c
├── test-queue.h
├── fake-glib.h
├── Makefile
├── states.org
├── test-queue.c
├── lock-free-array-queue.h
├── hazard-pointer.h
├── sgen-gc.c
├── lock-free-alloc.h
├── lock-free-queue.h
├── mono-linked-list-set.h
├── mono-membar.h
├── lock-free-array-queue.c
├── mono-linked-list-set.c
├── lock-free-queue.c
├── hazard-pointer.c
├── test.c
├── lock-free-alloc.c
└── atomic.h
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | test
3 | *.o
4 | *.xcuserdatad/
5 |
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester/en.lproj/InfoPlist.strings:
--------------------------------------------------------------------------------
1 | /* Localized versions of Info.plist keys */
2 |
3 |
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester/Default.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/schani/michael-alloc/HEAD/LockFreeAllocatorTester/LockFreeAllocatorTester/Default.png
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester/Default@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/schani/michael-alloc/HEAD/LockFreeAllocatorTester/LockFreeAllocatorTester/Default@2x.png
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester/Default-568h@2x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/schani/michael-alloc/HEAD/LockFreeAllocatorTester/LockFreeAllocatorTester/Default-568h@2x.png
--------------------------------------------------------------------------------
/alloc.h:
--------------------------------------------------------------------------------
1 | /*
2 | * lock-free-alloc.h: Lock free allocator.
3 | *
4 | * (C) Copyright 2011 Novell, Inc
5 | */
6 |
7 | #ifndef __MONO_LOCKFREEALLOC_H__
8 | #define __MONO_LOCKFREEALLOC_H__
9 |
10 |
11 | #endif
12 |
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester.xcodeproj/project.xcworkspace/contents.xcworkspacedata:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <Workspace
3 |    version = "1.0">
4 |    <FileRef
5 |       location = "self:/LockFreeAllocatorTester.xcodeproj">
6 |    </FileRef>
7 | </Workspace>
8 |
--------------------------------------------------------------------------------
/delayed-free.h:
--------------------------------------------------------------------------------
1 | #ifndef __MONO_UTILS_DELAYED_FREE_H__
2 | #define __MONO_UTILS_DELAYED_FREE_H__
3 |
4 | #include <pthread.h>
5 |
6 | #define CRITICAL_SECTION pthread_mutex_t
7 | #define EnterCriticalSection pthread_mutex_lock
8 | #define LeaveCriticalSection pthread_mutex_unlock
9 |
10 | #endif
11 |
--------------------------------------------------------------------------------
/metadata.h:
--------------------------------------------------------------------------------
1 | #ifndef __MONO_METADATA_H__
2 | #define __MONO_METADATA_H__
3 |
4 | #include <unistd.h>
5 |
6 | #ifndef MONO_ZERO_LEN_ARRAY
7 | #ifdef __GNUC__
8 | #define MONO_ZERO_LEN_ARRAY 0
9 | #else
10 | #define MONO_ZERO_LEN_ARRAY 1
11 | #endif
12 | #endif
13 |
14 | #define mono_pagesize getpagesize
15 |
16 | #endif
17 |
--------------------------------------------------------------------------------
/mono-mmap.h:
--------------------------------------------------------------------------------
1 | #ifndef __MONO_UTILS_MMAP_H__
2 | #define __MONO_UTILS_MMAP_H__
3 |
4 | #include <sys/mman.h>
5 |
6 | #define mono_mprotect mprotect
7 | #define MONO_MMAP_NONE PROT_NONE
8 | #define MONO_MMAP_READ PROT_READ
9 | #define MONO_MMAP_WRITE PROT_WRITE
10 |
11 | void* mono_valloc (void *addr, size_t len, int prot);
12 |
13 | void mono_vfree (void *addr, size_t len);
14 |
15 | #endif
16 |
--------------------------------------------------------------------------------
/sgen-gc.h:
--------------------------------------------------------------------------------
1 | #ifndef __MONO_METADATA_SGEN_GC_H__
2 | #define __MONO_METADATA_SGEN_GC_H__
3 |
4 | #include "fake-glib.h"
5 |
6 | typedef unsigned long mword;
7 |
8 | void* mono_sgen_alloc_os_memory (size_t size, int activate);
9 | void mono_sgen_free_os_memory (void *addr, size_t size);
10 |
11 | void* mono_sgen_alloc_os_memory_aligned (mword size, mword alignment, gboolean activate);
12 |
13 | #endif
14 |
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester/XAMAppDelegate.h:
--------------------------------------------------------------------------------
1 | //
2 | // XAMAppDelegate.h
3 | // LockFreeAllocatorTester
4 | //
5 | // Created by Mark Probst on 9/11/13.
6 | // Copyright (c) 2013 Mark Probst. All rights reserved.
7 | //
8 |
9 | #import <UIKit/UIKit.h>
10 |
11 | @interface XAMAppDelegate : UIResponder <UIApplicationDelegate>
12 |
13 | @property (retain, nonatomic) UIWindow *window;
14 |
15 | @end
16 |
--------------------------------------------------------------------------------
/mono-mmap.c:
--------------------------------------------------------------------------------
1 | #include <sys/mman.h> /* NOTE(review): fprintf below also needs <stdio.h>; likely stripped in extraction */
2 |
3 | #include "mono-mmap.h"
4 |
5 | void*
6 | mono_valloc (void *addr, size_t len, int prot) /* map len bytes of anonymous memory; addr is only a placement hint */
7 | {
8 | addr = mmap (addr, len, prot, MAP_ANON | MAP_PRIVATE, -1, 0);
9 | if (addr == (void*)-1) { /* (void*)-1 == MAP_FAILED */
10 | fprintf (stderr, "mmap error: %m\n"); /* %m (errno text) is a glibc/BSD printf extension */
11 | return NULL; /* callers must handle NULL, not MAP_FAILED */
12 | }
13 | return addr;
14 | }
15 |
16 | void
17 | mono_vfree (void *addr, size_t len) /* release a mapping created by mono_valloc */
18 | {
19 | munmap (addr, len); /* NOTE(review): munmap return value ignored — failure is silent */
20 | }
21 |
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester/main.m:
--------------------------------------------------------------------------------
1 | //
2 | // main.m
3 | // LockFreeAllocatorTester
4 | //
5 | // Created by Mark Probst on 9/11/13.
6 | // Copyright (c) 2013 Mark Probst. All rights reserved.
7 | //
8 |
9 | #import <UIKit/UIKit.h>
10 |
11 | #import "XAMAppDelegate.h"
12 |
13 | int main(int argc, char *argv[])
14 | {
15 | return UIApplicationMain(argc, argv, nil, NSStringFromClass([XAMAppDelegate class]));
16 | }
17 |
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester/LockFreeAllocatorTester-Prefix.pch:
--------------------------------------------------------------------------------
1 | //
2 | // Prefix header for all source files of the 'LockFreeAllocatorTester' target in the 'LockFreeAllocatorTester' project
3 | //
4 |
5 | #import <Availability.h>
6 |
7 | #ifndef __IPHONE_3_0
8 | #warning "This project uses features only available in iOS SDK 3.0 and later."
9 | #endif
10 |
11 | #ifdef __OBJC__
12 | #import <UIKit/UIKit.h>
13 | #import <Foundation/Foundation.h>
14 | #endif
15 |
--------------------------------------------------------------------------------
/test-queue.h:
--------------------------------------------------------------------------------
1 | #ifndef __MONO_UTILS_QUEUE_H__
2 | #define __MONO_UTILS_QUEUE_H__
3 |
4 | #define MONO_LOCK_FREE_QUEUE_SIZE 16
5 |
6 | #define QUEUE_DEBUG 1
7 |
8 | typedef struct _MonoLockFreeQueueNode MonoLockFreeQueueNode;
9 |
10 | struct _MonoLockFreeQueueNode {
11 | #ifdef QUEUE_DEBUG
12 | gint32 in_queue;
13 | #endif
14 | };
15 |
16 | typedef struct {
17 | int index;
18 | MonoLockFreeQueueNode *entries [MONO_LOCK_FREE_QUEUE_SIZE];
19 | } MonoLockFreeQueue;
20 |
21 | void mono_lock_free_queue_init (MonoLockFreeQueue *q);
22 |
23 | void mono_lock_free_queue_enqueue (MonoLockFreeQueue *q, MonoLockFreeQueueNode *node);
24 |
25 | MonoLockFreeQueueNode* mono_lock_free_queue_dequeue (MonoLockFreeQueue *q);
26 |
27 | #endif
28 |
--------------------------------------------------------------------------------
/fake-glib.h:
--------------------------------------------------------------------------------
1 | #ifndef __FAKE_GLIB_H__
2 | #define __FAKE_GLIB_H__
3 |
4 | #include <assert.h>
5 | #include <stdio.h>
6 | #include <stdlib.h>
7 | #include <string.h>
8 |
9 | #define g_assert assert
10 | #define g_assert_not_reached() (assert(0))
11 |
12 | #define g_print printf
13 | #define g_warning printf
14 |
15 | static inline void* g_malloc0 (size_t s) { /* glib-style zeroed allocation */
16 | void *p = malloc (s); /* NOTE(review): result unchecked — memset on NULL is UB if malloc fails */
17 | memset (p, 0, s);
18 | return p;
19 | }
20 | #define g_free free
21 |
22 | typedef void* gpointer;
23 | typedef unsigned char guint8;
24 | typedef int gboolean;
25 | typedef int gint32;
26 | typedef unsigned int guint32;
27 | typedef unsigned long gulong;
28 |
29 | #ifdef __x86_64__
30 | typedef long gint64;
31 | typedef unsigned long guint64;
32 | #endif
33 |
34 | #define TRUE 1
35 | #define FALSE 0
36 |
37 | #endif
38 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | #TEST = -DTEST_DELAYED_FREE
2 | #TEST = -DTEST_QUEUE
3 | #TEST = -DTEST_ALLOC
4 | TEST = -DTEST_LLS
5 |
6 | ALLOC = lock-free-alloc
7 |
8 | QUEUE = lock-free-queue
9 | #QUEUE = test-queue
10 |
11 | OPT = -O0
12 |
13 | CFLAGS = $(TEST) $(OPT) -g -Wall -DMONO_INTERNAL= -Dlock_free_allocator_test_main=main #-DFAILSAFE_DELAYED_FREE
14 |
15 | all : test
16 |
17 | %.o : %.c
18 | gcc $(CFLAGS) -c $<
19 |
20 | hazard-pointer.o : hazard-pointer.c
21 | gcc $(CFLAGS) -c $<
22 |
23 | lock-free-array-queue.o : lock-free-array-queue.c
24 | gcc $(CFLAGS) -c $<
25 |
26 | lock-free-queue.o : lock-free-queue.c
27 | gcc $(CFLAGS) -c $<
28 |
29 | mono-linked-list-set.o : mono-linked-list-set.c
30 | gcc $(CFLAGS) -c $<
31 |
32 | test.o : test.c
33 | gcc $(CFLAGS) -c $<
34 |
35 | test : hazard-pointer.o lock-free-array-queue.o $(QUEUE).o $(ALLOC).o mono-mmap.o sgen-gc.o mono-linked-list-set.o test.o
36 | gcc $(OPT) -g -Wall -o test hazard-pointer.o lock-free-array-queue.o $(QUEUE).o $(ALLOC).o mono-mmap.o sgen-gc.o mono-linked-list-set.o test.o -lpthread
37 |
38 | clean :
39 | rm -f *.o test
40 |
--------------------------------------------------------------------------------
/states.org:
--------------------------------------------------------------------------------
1 | states: null, active, partial, full, empty
2 | location: NULL, NONE, AVAIL, ACTIVE, PARTIAL, PARTIAL_QUEUE
3 |
4 | * desc_alloc
5 | ** location
6 | AVAIL -> out
7 | NULL -> out
8 | * alloc_from_new_sb
9 | ** state
10 | null -> active
11 | ** location
12 | desc_alloc -> ACTIVE
13 | desc_alloc -> desc_retire
14 | * desc_retire
15 | ** location
16 | in -> AVAIL
17 | * alloc_from_active
18 | ** state
19 | active -> full
20 | ** location
21 | ACTIVE -> ACTIVE
22 | ACTIVE -> NONE
23 | ACTIVE -> update_active
24 | * update_active
25 | ** state
26 | active -> partial
27 | partial -> partial
28 | full -> partial
29 | ** location
30 | in -> ACTIVE
31 | in -> heap_put_partial
32 | * heap_put_partial
33 | ** location
34 | in -> PARTIAL
35 | PARTIAL -> list_put_partial
36 | * list_put_partial
37 | ** location
38 | in -> PARTIAL_QUEUE
39 | * alloc_from_partial
40 | ** state
41 | partial -> active
42 | partial -> full
43 | ** location
44 | heap_get_partial -> desc_retire
45 | heap_get_partial -> update_active
46 | * list_get_partial
47 | ** location
48 | PARTIAL_QUEUE -> out
49 | PARTIAL_QUEUE -> desc_retire
50 | * heap_get_partial
51 | ** location
52 | PARTIAL -> out
53 | list_get_partial -> out
54 | * mono_lock_free_free
55 | ** state
56 | full -> partial
57 | active -> empty
58 | partial -> empty
59 | full -> empty
60 |
--------------------------------------------------------------------------------
/test-queue.c:
--------------------------------------------------------------------------------
1 | #include "atomic.h"
2 |
3 | #include "test-queue.h"
4 |
5 | void
6 | mono_lock_free_queue_init (MonoLockFreeQueue *q) /* clear every slot; must not run concurrently with enqueue/dequeue */
7 | {
8 | int i;
9 | for (i = 0; i < MONO_LOCK_FREE_QUEUE_SIZE; ++i)
10 | q->entries [i] = NULL;
11 | q->index = 0;
12 | }
13 |
14 | void
15 | mono_lock_free_queue_enqueue (MonoLockFreeQueue *q, MonoLockFreeQueueNode *node) /* test stub: claim any free slot via CAS; spins forever if all slots stay full */
16 | {
17 | int i, j;
18 |
19 | g_assert (!node->in_queue); /* QUEUE_DEBUG bookkeeping (enabled unconditionally in test-queue.h) */
20 | node->in_queue = TRUE;
21 |
22 | j = 0;
23 | for (i = q->index; ; i = (i + 1) % MONO_LOCK_FREE_QUEUE_SIZE) { /* q->index is never advanced here, so every scan starts at the same slot */
24 | if (!q->entries [i]) {
25 | if (InterlockedCompareExchangePointer ((gpointer volatile*)&q->entries [i], node, NULL) == NULL) { /* won the slot */
26 | if (j > MONO_LOCK_FREE_QUEUE_SIZE) /* diagnostic: more probes than slots means heavy contention */
27 | g_print ("queue iterations: %d\n", j);
28 | return;
29 | }
30 | }
31 | ++j;
32 | }
33 | }
34 |
35 | MonoLockFreeQueueNode*
36 | mono_lock_free_queue_dequeue (MonoLockFreeQueue *q) /* scan once for any occupied slot; returns NULL when none claimed. No FIFO ordering is implied. */
37 | {
38 | int index = q->index;
39 | int i;
40 |
41 | for (i = (index + 1) % MONO_LOCK_FREE_QUEUE_SIZE; i != index; i = (i + 1) % MONO_LOCK_FREE_QUEUE_SIZE) { /* NOTE(review): slot `index` itself is never examined by this loop */
42 | MonoLockFreeQueueNode *node = q->entries [i];
43 | if (node) {
44 | if (InterlockedCompareExchangePointer ((gpointer volatile*)&q->entries [i], NULL, node) == node) { /* won the race for this node */
45 | g_assert (node->in_queue);
46 | node->in_queue = FALSE;
47 | return node;
48 | }
49 | }
50 | }
51 |
52 | return NULL;
53 | }
54 |
--------------------------------------------------------------------------------
/lock-free-array-queue.h:
--------------------------------------------------------------------------------
1 | /*
2 | * lock-free-array-queue.h: A lock-free somewhat-queue that doesn't
3 | * require hazard pointers.
4 | *
5 | * (C) Copyright 2011 Xamarin Inc.
6 | */
7 | #ifndef __MONO_LOCK_FREE_ARRAY_QUEUE_H__
8 | #define __MONO_LOCK_FREE_ARRAY_QUEUE_H__
9 |
10 | #include "fake-glib.h"
11 |
12 | typedef struct _MonoLockFreeArrayChunk MonoLockFreeArrayChunk;
13 |
14 | typedef struct {
15 | size_t entry_size;
16 | MonoLockFreeArrayChunk *chunk_list;
17 | } MonoLockFreeArray;
18 |
19 | typedef struct {
20 | MonoLockFreeArray array;
21 | gint32 num_used_entries;
22 | } MonoLockFreeArrayQueue;
23 |
24 | #define MONO_LOCK_FREE_ARRAY_INIT(entry_size) { (entry_size), NULL }
25 | #define MONO_LOCK_FREE_ARRAY_QUEUE_INIT(entry_size) { MONO_LOCK_FREE_ARRAY_INIT ((entry_size) + sizeof (gpointer)), 0 }
26 |
27 | gpointer mono_lock_free_array_nth (MonoLockFreeArray *arr, int index) MONO_INTERNAL;
28 |
29 | typedef gpointer (*MonoLockFreeArrayIterateFunc) (int index, gpointer entry_ptr, gpointer user_data);
30 | gpointer mono_lock_free_array_iterate (MonoLockFreeArray *arr, MonoLockFreeArrayIterateFunc func, gpointer user_data) MONO_INTERNAL;
31 |
32 | void mono_lock_free_array_cleanup (MonoLockFreeArray *arr) MONO_INTERNAL;
33 |
34 | void mono_lock_free_array_queue_push (MonoLockFreeArrayQueue *q, gpointer entry_data_ptr) MONO_INTERNAL;
35 | gboolean mono_lock_free_array_queue_pop (MonoLockFreeArrayQueue *q, gpointer entry_data_ptr) MONO_INTERNAL;
36 |
37 | void mono_lock_free_array_queue_cleanup (MonoLockFreeArrayQueue *q) MONO_INTERNAL;
38 |
39 | #endif
40 |
--------------------------------------------------------------------------------
/hazard-pointer.h:
--------------------------------------------------------------------------------
1 | /*
2 | * hazard-pointer.h: Hazard pointer related code.
3 | *
4 | * (C) Copyright 2011 Novell, Inc
5 | */
6 | #ifndef __MONO_HAZARD_POINTER_H__
7 | #define __MONO_HAZARD_POINTER_H__
8 |
9 | #include "fake-glib.h"
10 |
11 | #include "mono-membar.h"
12 |
13 | #define HAZARD_POINTER_COUNT 3
14 |
15 | typedef struct {
16 | gpointer hazard_pointers [HAZARD_POINTER_COUNT];
17 | } MonoThreadHazardPointers;
18 |
19 | typedef void (*MonoHazardousFreeFunc) (gpointer p);
20 |
21 | void mono_thread_hazardous_free_or_queue (gpointer p, MonoHazardousFreeFunc free_func,
22 | gboolean free_func_might_lock, gboolean lock_free_context) MONO_INTERNAL;
23 | void mono_thread_hazardous_try_free_all (void) MONO_INTERNAL;
24 | MonoThreadHazardPointers* mono_hazard_pointer_get (void) MONO_INTERNAL;
25 | gpointer get_hazardous_pointer (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index) MONO_INTERNAL;
26 |
27 | #define mono_hazard_pointer_set(hp,i,v) \
28 | do { g_assert ((i) >= 0 && (i) < HAZARD_POINTER_COUNT); \
29 | (hp)->hazard_pointers [(i)] = (v); \
30 | mono_memory_write_barrier (); \
31 | } while (0)
32 |
33 | #define mono_hazard_pointer_get_val(hp,i) \
34 | ((hp)->hazard_pointers [(i)])
35 |
36 | #define mono_hazard_pointer_clear(hp,i) \
37 | do { g_assert ((i) >= 0 && (i) < HAZARD_POINTER_COUNT); \
38 | (hp)->hazard_pointers [(i)] = NULL; \
39 | } while (0)
40 |
41 | void mono_thread_attach (void);
42 |
43 | void mono_thread_smr_init (void) MONO_INTERNAL;
44 | void mono_thread_smr_cleanup (void) MONO_INTERNAL;
45 |
46 | void mono_thread_hazardous_print_stats (void) MONO_INTERNAL;
47 |
48 | #endif /*__MONO_HAZARD_POINTER_H__*/
49 |
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester/LockFreeAllocatorTester-Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 | 	<key>CFBundleDevelopmentRegion</key>
6 | 	<string>en</string>
7 | 	<key>CFBundleDisplayName</key>
8 | 	<string>${PRODUCT_NAME}</string>
9 | 	<key>CFBundleExecutable</key>
10 | 	<string>${EXECUTABLE_NAME}</string>
11 | 	<key>CFBundleIdentifier</key>
12 | 	<string>Xamarin.${PRODUCT_NAME:rfc1034identifier}</string>
13 | 	<key>CFBundleInfoDictionaryVersion</key>
14 | 	<string>6.0</string>
15 | 	<key>CFBundleName</key>
16 | 	<string>${PRODUCT_NAME}</string>
17 | 	<key>CFBundlePackageType</key>
18 | 	<string>APPL</string>
19 | 	<key>CFBundleShortVersionString</key>
20 | 	<string>1.0</string>
21 | 	<key>CFBundleSignature</key>
22 | 	<string>????</string>
23 | 	<key>CFBundleVersion</key>
24 | 	<string>1.0</string>
25 | 	<key>LSRequiresIPhoneOS</key>
26 | 	<true/>
27 | 	<key>UIRequiredDeviceCapabilities</key>
28 | 	<array>
29 | 		<string>armv7</string>
30 | 	</array>
31 | 	<key>UISupportedInterfaceOrientations</key>
32 | 	<array>
33 | 		<string>UIInterfaceOrientationPortrait</string>
34 | 		<string>UIInterfaceOrientationLandscapeLeft</string>
35 | 		<string>UIInterfaceOrientationLandscapeRight</string>
36 | 	</array>
37 | 	<key>UISupportedInterfaceOrientations~ipad</key>
38 | 	<array>
39 | 		<string>UIInterfaceOrientationPortrait</string>
40 | 		<string>UIInterfaceOrientationPortraitUpsideDown</string>
41 | 		<string>UIInterfaceOrientationLandscapeLeft</string>
42 | 		<string>UIInterfaceOrientationLandscapeRight</string>
43 | 	</array>
44 | </dict>
45 | </plist>
46 |
--------------------------------------------------------------------------------
/sgen-gc.c:
--------------------------------------------------------------------------------
1 | #include <unistd.h>
2 |
3 | #include "mono-mmap.h"
4 |
5 | #include "sgen-gc.h"
6 |
7 | static size_t total_alloc = 0;
8 |
9 | /*
10 | * Allocate a big chunk of memory from the OS (usually 64KB to several megabytes).
11 | * This must not require any lock.
12 | */
13 | void*
14 | mono_sgen_alloc_os_memory (size_t size, int activate)
15 | {
16 | size_t pagesize = getpagesize ();
17 | void *ptr;
18 | unsigned long prot_flags = activate? MONO_MMAP_READ|MONO_MMAP_WRITE: MONO_MMAP_NONE; /* inactive memory is reserved but inaccessible */
19 |
20 | size += pagesize - 1; /* round size up to whole pages (assumes pagesize is a power of two) */
21 | size &= ~(pagesize - 1);
22 | ptr = mono_valloc (0, size, prot_flags); /* may return NULL on failure; not checked here */
23 | /* FIXME: CAS */
24 | total_alloc += size; /* unsynchronized update — racy under concurrent callers (see FIXME above) */
25 | return ptr;
26 | }
27 |
28 | /*
29 | * Free the memory returned by mono_sgen_alloc_os_memory (), returning it to the OS.
30 | */
31 | void
32 | mono_sgen_free_os_memory (void *addr, size_t size)
33 | {
34 | size_t pagesize = getpagesize ();
35 |
36 | mono_vfree (addr, size);
37 |
38 | size += pagesize - 1; /* mirror the page rounding done in mono_sgen_alloc_os_memory so the counter balances */
39 | size &= ~(pagesize - 1);
40 | /* FIXME: CAS */
41 | total_alloc -= size; /* unsynchronized, same race as the allocation side */
42 | }
43 |
44 | void*
45 | mono_sgen_alloc_os_memory_aligned (mword size, mword alignment, gboolean activate)
46 | {
47 | /* Allocate twice the memory to be able to put the block on an aligned address */
48 | char *mem = mono_sgen_alloc_os_memory (size + alignment, activate); /* over-allocate by `alignment`, then trim both ends below */
49 | char *aligned;
50 |
51 | g_assert (mem); /* OOM is fatal here */
52 |
53 | aligned = (char*)((mword)(mem + (alignment - 1)) & ~(alignment - 1)); /* round up to alignment (assumes power of two) */
54 | g_assert (aligned >= mem && aligned + size <= mem + size + alignment && !((mword)aligned & (alignment - 1)));
55 |
56 | if (aligned > mem) /* return the unused head pages to the OS */
57 | mono_sgen_free_os_memory (mem, aligned - mem);
58 | if (aligned + size < mem + size + alignment) /* return the unused tail pages */
59 | mono_sgen_free_os_memory (aligned + size, (mem + size + alignment) - (aligned + size));
60 |
61 | return aligned;
62 | }
63 |
--------------------------------------------------------------------------------
/lock-free-alloc.h:
--------------------------------------------------------------------------------
1 | /*
2 | * lock-free-alloc.h: Lock free allocator.
3 | *
4 | * (C) Copyright 2011 Novell, Inc
5 | *
6 | * Permission is hereby granted, free of charge, to any person obtaining
7 | * a copy of this software and associated documentation files (the
8 | * "Software"), to deal in the Software without restriction, including
9 | * without limitation the rights to use, copy, modify, merge, publish,
10 | * distribute, sublicense, and/or sell copies of the Software, and to
11 | * permit persons to whom the Software is furnished to do so, subject to
12 | * the following conditions:
13 | *
14 | * The above copyright notice and this permission notice shall be
15 | * included in all copies or substantial portions of the Software.
16 | *
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 | */
25 |
26 | #ifndef __MONO_LOCKFREEALLOC_H__
27 | #define __MONO_LOCKFREEALLOC_H__
28 |
29 | #include "fake-glib.h"
30 |
31 | #include "lock-free-queue.h"
32 |
33 | typedef struct {
34 | MonoLockFreeQueue partial;
35 | unsigned int slot_size;
36 | } MonoLockFreeAllocSizeClass;
37 |
38 | struct _MonoLockFreeAllocDescriptor;
39 |
40 | typedef struct {
41 | struct _MonoLockFreeAllocDescriptor *active;
42 | MonoLockFreeAllocSizeClass *sc;
43 | } MonoLockFreeAllocator;
44 |
45 | void mono_lock_free_allocator_init_size_class (MonoLockFreeAllocSizeClass *sc, unsigned int slot_size) MONO_INTERNAL;
46 | void mono_lock_free_allocator_init_allocator (MonoLockFreeAllocator *heap, MonoLockFreeAllocSizeClass *sc) MONO_INTERNAL;
47 |
48 | gpointer mono_lock_free_alloc (MonoLockFreeAllocator *heap) MONO_INTERNAL;
49 | void mono_lock_free_free (gpointer ptr) MONO_INTERNAL;
50 |
51 | gboolean mono_lock_free_allocator_check_consistency (MonoLockFreeAllocator *heap) MONO_INTERNAL;
52 |
53 | #endif
54 |
--------------------------------------------------------------------------------
/lock-free-queue.h:
--------------------------------------------------------------------------------
1 | /*
2 | * lock-free-queue.h: Lock free queue.
3 | *
4 | * (C) Copyright 2011 Novell, Inc
5 | *
6 | *
7 | * Permission is hereby granted, free of charge, to any person obtaining
8 | * a copy of this software and associated documentation files (the
9 | * "Software"), to deal in the Software without restriction, including
10 | * without limitation the rights to use, copy, modify, merge, publish,
11 | * distribute, sublicense, and/or sell copies of the Software, and to
12 | * permit persons to whom the Software is furnished to do so, subject to
13 | * the following conditions:
14 | *
15 | * The above copyright notice and this permission notice shall be
16 | * included in all copies or substantial portions of the Software.
17 | *
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
22 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 | */
26 |
27 |
28 | #ifndef __MONO_LOCKFREEQUEUE_H__
29 | #define __MONO_LOCKFREEQUEUE_H__
30 |
31 | //#define QUEUE_DEBUG 1
32 |
33 | typedef struct _MonoLockFreeQueueNode MonoLockFreeQueueNode;
34 |
35 | struct _MonoLockFreeQueueNode {
36 | MonoLockFreeQueueNode * volatile next;
37 | #ifdef QUEUE_DEBUG
38 | gint32 in_queue;
39 | #endif
40 | };
41 |
42 | typedef struct {
43 | MonoLockFreeQueueNode node;
44 | volatile gint32 in_use;
45 | } MonoLockFreeQueueDummy;
46 |
47 | #define MONO_LOCK_FREE_QUEUE_NUM_DUMMIES 2
48 |
49 | typedef struct {
50 | MonoLockFreeQueueNode * volatile head;
51 | MonoLockFreeQueueNode * volatile tail;
52 | MonoLockFreeQueueDummy dummies [MONO_LOCK_FREE_QUEUE_NUM_DUMMIES];
53 | volatile gint32 has_dummy;
54 | } MonoLockFreeQueue;
55 |
56 | void mono_lock_free_queue_init (MonoLockFreeQueue *q) MONO_INTERNAL;
57 |
58 | void mono_lock_free_queue_node_init (MonoLockFreeQueueNode *node, gboolean to_be_freed) MONO_INTERNAL;
59 | void mono_lock_free_queue_node_free (MonoLockFreeQueueNode *node) MONO_INTERNAL;
60 |
61 | void mono_lock_free_queue_enqueue (MonoLockFreeQueue *q, MonoLockFreeQueueNode *node) MONO_INTERNAL;
62 |
63 | MonoLockFreeQueueNode* mono_lock_free_queue_dequeue (MonoLockFreeQueue *q) MONO_INTERNAL;
64 |
65 | #endif
66 |
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester/XAMAppDelegate.m:
--------------------------------------------------------------------------------
1 | //
2 | // XAMAppDelegate.m
3 | // LockFreeAllocatorTester
4 | //
5 | // Created by Mark Probst on 9/11/13.
6 | // Copyright (c) 2013 Mark Probst. All rights reserved.
7 | //
8 |
9 | #import "XAMAppDelegate.h"
10 |
11 | extern int lock_free_allocator_test_main (void);
12 |
13 | @implementation XAMAppDelegate
14 |
15 | @synthesize window;
16 |
17 | - (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions
18 | {
19 | self.window = [[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]];
20 | // Override point for customization after application launch.
21 | self.window.backgroundColor = [UIColor whiteColor];
22 | [self.window makeKeyAndVisible];
23 |
24 | dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{ // run the allocator stress test off the main thread
25 | for (;;) { // loop forever: each iteration is one complete test run
26 | lock_free_allocator_test_main ();
27 | }
28 | });
29 |
30 | return YES;
31 | }
32 |
33 | - (void)applicationWillResignActive:(UIApplication *)application
34 | {
35 | // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state.
36 | // Use this method to pause ongoing tasks, disable timers, and throttle down OpenGL ES frame rates. Games should use this method to pause the game.
37 | }
38 |
39 | - (void)applicationDidEnterBackground:(UIApplication *)application
40 | {
41 | // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later.
42 | // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits.
43 | }
44 |
45 | - (void)applicationWillEnterForeground:(UIApplication *)application
46 | {
47 | // Called as part of the transition from the background to the inactive state; here you can undo many of the changes made on entering the background.
48 | }
49 |
50 | - (void)applicationDidBecomeActive:(UIApplication *)application
51 | {
52 | // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface.
53 | }
54 |
55 | - (void)applicationWillTerminate:(UIApplication *)application
56 | {
57 | // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:.
58 | }
59 |
60 | @end
61 |
--------------------------------------------------------------------------------
/mono-linked-list-set.h:
--------------------------------------------------------------------------------
1 | /*
2 | * mono-linked-list-set.h: A lock-free split ordered list.
3 | *
4 | * Author:
5 | * Rodrigo Kumpera (kumpera@gmail.com)
6 | *
7 | * (C) 2011 Novell, Inc
8 | */
9 |
10 | #ifndef __MONO_SPLIT_ORDERED_LIST_H__
11 | #define __MONO_SPLIT_ORDERED_LIST_H__
12 |
13 | #include <stdint.h>
14 |
15 | #include "hazard-pointer.h"
16 | #include "mono-membar.h"
17 |
18 | typedef struct _MonoLinkedListSetNode MonoLinkedListSetNode;
19 |
20 | struct _MonoLinkedListSetNode {
21 | /* next must be the first element in this struct! */
22 | MonoLinkedListSetNode *next;
23 | uintptr_t key;
24 | };
25 |
26 | typedef struct {
27 | MonoLinkedListSetNode *head;
28 | void (*free_node_func)(void *);
29 | } MonoLinkedListSet;
30 |
31 |
32 | static inline gpointer
33 | mono_lls_pointer_unmask (gpointer p) /* strip the two low tag bits to recover the real node pointer */
34 | {
35 | return (gpointer)((uintptr_t)p & ~(uintptr_t)0x3);
36 | }
37 |
38 | static inline uintptr_t
39 | mono_lls_pointer_get_mark (gpointer n) /* low bit set => node is logically deleted */
40 | {
41 | return (uintptr_t)n & 0x1;
42 | }
43 |
44 | /*
45 | Those are low level operations. prev, cur, next are returned in the hazard pointer table.
46 | You must manually clean the hazard pointer table after using them.
47 | */
48 |
49 | void
50 | mono_lls_init (MonoLinkedListSet *list, void (*free_node_func)(void *));
51 |
52 | gboolean
53 | mono_lls_find (MonoLinkedListSet *list, MonoThreadHazardPointers *hp, uintptr_t key) MONO_INTERNAL;
54 |
55 | gboolean
56 | mono_lls_insert (MonoLinkedListSet *list, MonoThreadHazardPointers *hp, MonoLinkedListSetNode *value) MONO_INTERNAL;
57 |
58 | gboolean
59 | mono_lls_remove (MonoLinkedListSet *list, MonoThreadHazardPointers *hp, MonoLinkedListSetNode *value) MONO_INTERNAL;
60 |
61 | gpointer
62 | get_hazardous_pointer_with_mask (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index) MONO_INTERNAL;
63 |
64 | /*
65 | Requires the world to be stoped
66 | */
67 | #define MONO_LLS_FOREACH(list, element, type) {\
68 | MonoLinkedListSetNode *__cur; \
69 | for (__cur = (list)->head; __cur; __cur = mono_lls_pointer_unmask (__cur->next)) \
70 | if (!mono_lls_pointer_get_mark (__cur->next)) { \
71 | (element) = (type)__cur; \
72 |
73 | #define MONO_LLS_END_FOREACH }}
74 |
75 | static inline MonoLinkedListSetNode*
76 | mono_lls_info_step (MonoLinkedListSetNode *val, MonoThreadHazardPointers *hp) /* iteration step: unmask the next pointer and publish it as hazard pointer 1 */
77 | {
78 | val = mono_lls_pointer_unmask (val);
79 | mono_hazard_pointer_set (hp, 1, val); /* includes a write barrier (see hazard-pointer.h) */
80 | return val;
81 | }
82 |
83 | /*
84 | Provides snapshot iteration
85 | */
86 | #define MONO_LLS_FOREACH_SAFE(list, element, type) {\
87 | MonoThreadHazardPointers *__hp = mono_hazard_pointer_get (); \
88 | MonoLinkedListSetNode *__cur, *__next; \
89 | for (__cur = mono_lls_pointer_unmask (get_hazardous_pointer ((gpointer volatile*)&(list)->head, __hp, 1)); \
90 | __cur; \
91 | __cur = mono_lls_info_step (__next, __hp)) { \
92 | __next = get_hazardous_pointer_with_mask ((gpointer volatile*)&__cur->next, __hp, 0); \
93 | if (!mono_lls_pointer_get_mark (__next)) { \
94 | (element) = (type)__cur;
95 |
96 | #define MONO_LLS_END_FOREACH_SAFE \
97 | } \
98 | } \
99 | mono_hazard_pointer_clear (__hp, 0); \
100 | mono_hazard_pointer_clear (__hp, 1); \
101 | }
102 |
103 | #endif /* __MONO_SPLIT_ORDERED_LIST_H__ */
104 |
--------------------------------------------------------------------------------
/mono-membar.h:
--------------------------------------------------------------------------------
1 | /*
2 | * mono-membar.h: Memory barrier inline functions
3 | *
4 | * Author:
5 | * Mark Probst (mark.probst@gmail.com)
6 | *
7 | * (C) 2007 Novell, Inc
8 | */
9 |
10 | #ifndef _MONO_UTILS_MONO_MEMBAR_H_
11 | #define _MONO_UTILS_MONO_MEMBAR_H_
12 |
13 | #include "fake-glib.h"
14 |
#ifdef __x86_64__
#ifndef _MSC_VER
/* x86-64 (GCC/Clang): explicit fence instructions. */
static inline void mono_memory_barrier (void)
{
	__asm__ __volatile__ ("mfence" : : : "memory");
}

static inline void mono_memory_read_barrier (void)
{
	__asm__ __volatile__ ("lfence" : : : "memory");
}

static inline void mono_memory_write_barrier (void)
{
	__asm__ __volatile__ ("sfence" : : : "memory");
}
#else
/* NOTE(review): the header name was lost here — this should almost
   certainly be <intrin.h>, which declares the _Read/_WriteBarrier
   intrinsics used below.  Confirm against the original source. */
#include

/* MSVC: compiler-only barriers (no fence instruction is emitted). */
static inline void mono_memory_barrier (void)
{
	_ReadWriteBarrier ();
}

static inline void mono_memory_read_barrier (void)
{
	_ReadBarrier ();
}

static inline void mono_memory_write_barrier (void)
{
	_WriteBarrier ();
}
#endif
#elif defined(__i386__)
#ifndef _MSC_VER
/* i386 (GCC/Clang): a locked RMW on the stack acts as a full barrier
   (mfence is not available on plain i386). */
static inline void mono_memory_barrier (void)
{
	__asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory");
}

static inline void mono_memory_read_barrier (void)
{
	mono_memory_barrier ();
}

static inline void mono_memory_write_barrier (void)
{
	mono_memory_barrier ();
}
#else
/* NOTE(review): the header name was lost here — this should almost
   certainly be <intrin.h>, which declares the _Read/_WriteBarrier
   intrinsics used below.  Confirm against the original source. */
#include

/* MSVC: compiler-only barriers (no fence instruction is emitted). */
static inline void mono_memory_barrier (void)
{
	_ReadWriteBarrier ();
}

static inline void mono_memory_read_barrier (void)
{
	_ReadBarrier ();
}

static inline void mono_memory_write_barrier (void)
{
	_WriteBarrier ();
}
#endif
#elif defined(sparc) || defined(__sparc__)
/* SPARC: membar with explicit ordering flags. */
static inline void mono_memory_barrier (void)
{
	__asm__ __volatile__ ("membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad" : : : "memory");
}

static inline void mono_memory_read_barrier (void)
{
	__asm__ __volatile__ ("membar #LoadLoad" : : : "memory");
}

static inline void mono_memory_write_barrier (void)
{
	__asm__ __volatile__ ("membar #StoreStore" : : : "memory");
}
#elif defined(__s390__)
/* s390: bcr 15,0 serializes; read/write barriers fall back to it. */
static inline void mono_memory_barrier (void)
{
	__asm__ __volatile__ ("bcr 15,0" : : : "memory");
}

static inline void mono_memory_read_barrier (void)
{
	mono_memory_barrier ();
}

static inline void mono_memory_write_barrier (void)
{
	mono_memory_barrier ();
}
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__)
/* PowerPC: full "sync"; "eieio" is sufficient for store ordering. */
static inline void mono_memory_barrier (void)
{
	__asm__ __volatile__ ("sync" : : : "memory");
}

static inline void mono_memory_read_barrier (void)
{
	mono_memory_barrier ();
}

static inline void mono_memory_write_barrier (void)
{
	__asm__ __volatile__ ("eieio" : : : "memory");
}

#elif defined(__arm__)
/* ARM: rely on the compiler's full-barrier builtin. */
static inline void mono_memory_barrier (void)
{
	__sync_synchronize ();
}

static inline void mono_memory_read_barrier (void)
{
	mono_memory_barrier ();
}

static inline void mono_memory_write_barrier (void)
{
	mono_memory_barrier ();
}
#elif defined(__ia64__)
/* Itanium: "mf" is a full memory fence. */
static inline void mono_memory_barrier (void)
{
	__asm__ __volatile__ ("mf" : : : "memory");
}

static inline void mono_memory_read_barrier (void)
{
	mono_memory_barrier ();
}

static inline void mono_memory_write_barrier (void)
{
	mono_memory_barrier ();
}
#elif defined(__alpha__)
/* Alpha: "mb" is a full memory barrier. */
static inline void mono_memory_barrier (void)
{
	__asm__ __volatile__ ("mb" : : : "memory");
}

static inline void mono_memory_read_barrier (void)
{
	mono_memory_barrier ();
}

static inline void mono_memory_write_barrier (void)
{
	mono_memory_barrier ();
}
#elif defined(__mips__)
/* NOTE(review): the empty asm below is only a compiler barrier; on SMP
   MIPS a hardware "sync" may be required for cross-CPU ordering —
   confirm before relying on this. */
static inline void mono_memory_barrier (void)
{
	__asm__ __volatile__ ("" : : : "memory");
}

static inline void mono_memory_read_barrier (void)
{
	mono_memory_barrier ();
}

static inline void mono_memory_write_barrier (void)
{
	mono_memory_barrier ();
}
#elif defined(MONO_CROSS_COMPILE)
/* Cross-compilation stubs: never executed at runtime. */
static inline void mono_memory_barrier (void)
{
}

static inline void mono_memory_read_barrier (void)
{
}

static inline void mono_memory_write_barrier (void)
{
}
#endif
202 |
203 | #endif /* _MONO_UTILS_MONO_MEMBAR_H_ */
204 |
--------------------------------------------------------------------------------
/lock-free-array-queue.c:
--------------------------------------------------------------------------------
1 | /*
2 | * lock-free-array-queue.c: A lock-free somewhat-queue that doesn't
3 | * require hazard pointers.
4 | *
5 | * (C) Copyright 2011 Xamarin Inc.
6 | */
7 |
8 | /*
9 | * The queue is a linked list of arrays (chunks). Chunks are never
10 | * removed from the list, only added to the end, in a lock-free manner.
11 | *
12 | * Adding or removing an entry in the queue is only possible at the
13 | * end. To do so, the thread first has to increment or decrement
14 | * q->num_used_entries. The entry thus added or removed now "belongs"
15 | * to that thread. It first CASes the state to BUSY, writes/reads the
16 | * entry data, and then sets the state to USED or FREE.
17 | */
18 |
19 | #include "metadata.h"
20 | #include "atomic.h"
21 | #include "mono-membar.h"
22 | #include "mono-mmap.h"
23 |
24 | #include "lock-free-array-queue.h"
25 |
/* A chunk in the array's chunk list: a page-sized block holding
 * num_entries fixed-size entries.  Chunks are only ever appended to the
 * list, never removed (see the comment at the top of the file). */
struct _MonoLockFreeArrayChunk {
	MonoLockFreeArrayChunk *next;
	gint32 num_entries;
	char entries [MONO_ZERO_LEN_ARRAY];
};

typedef MonoLockFreeArrayChunk Chunk;

/* Address of the @index'th entry of @chunk in array @arr. */
#define CHUNK_NTH(arr,chunk,index) ((chunk)->entries + (index) * (arr)->entry_size)
35 |
36 | static Chunk*
37 | alloc_chunk (MonoLockFreeArray *arr)
38 | {
39 | int size = mono_pagesize ();
40 | int num_entries = (size - (sizeof (Chunk) - arr->entry_size * MONO_ZERO_LEN_ARRAY)) / arr->entry_size;
41 | Chunk *chunk = mono_valloc (0, size, MONO_MMAP_READ | MONO_MMAP_WRITE);
42 | g_assert (chunk);
43 | chunk->num_entries = num_entries;
44 | return chunk;
45 | }
46 |
/* Release a page-sized chunk obtained from alloc_chunk. */
static void
free_chunk (Chunk *chunk)
{
	mono_vfree (chunk, mono_pagesize ());
}
52 |
/*
 * Return a pointer to the @index'th entry of @arr, allocating chunks on
 * demand.  Lock-free: chunks are only appended via CAS and never
 * removed, so concurrent callers are safe.  @index is an absolute index
 * across all chunks.
 */
gpointer
mono_lock_free_array_nth (MonoLockFreeArray *arr, int index)
{
	Chunk *chunk;

	g_assert (index >= 0);

	if (!arr->chunk_list) {
		chunk = alloc_chunk (arr);
		/* The chunk contents must be globally visible before the CAS
		   publishes it. */
		mono_memory_write_barrier ();
		if (InterlockedCompareExchangePointer ((volatile gpointer *)&arr->chunk_list, chunk, NULL) != NULL)
			/* Another thread installed the first chunk; discard ours. */
			free_chunk (chunk);
	}

	chunk = arr->chunk_list;
	g_assert (chunk);

	/* Walk the chunk list, consuming num_entries indices per chunk. */
	while (index >= chunk->num_entries) {
		Chunk *next = chunk->next;
		if (!next) {
			next = alloc_chunk (arr);
			mono_memory_write_barrier ();
			if (InterlockedCompareExchangePointer ((volatile gpointer *) &chunk->next, next, NULL) != NULL) {
				/* Lost the append race; use the winner's chunk. */
				free_chunk (next);
				next = chunk->next;
				g_assert (next);
			}
		}
		index -= chunk->num_entries;
		chunk = next;
	}

	return CHUNK_NTH (arr, chunk, index);
}
87 |
88 | gpointer
89 | mono_lock_free_array_iterate (MonoLockFreeArray *arr, MonoLockFreeArrayIterateFunc func, gpointer user_data)
90 | {
91 | Chunk *chunk;
92 | for (chunk = arr->chunk_list; chunk; chunk = chunk->next) {
93 | int i;
94 | for (i = 0; i < chunk->num_entries; ++i) {
95 | gpointer result = func (i, CHUNK_NTH (arr, chunk, i), user_data);
96 | if (result)
97 | return result;
98 | }
99 | }
100 | return NULL;
101 | }
102 |
103 | void
104 | mono_lock_free_array_cleanup (MonoLockFreeArray *arr)
105 | {
106 | Chunk *chunk;
107 |
108 | chunk = arr->chunk_list;
109 | arr->chunk_list = NULL;
110 | while (chunk) {
111 | Chunk *next = chunk->next;
112 | free_chunk (chunk);
113 | chunk = next;
114 | }
115 | }
116 |
/* Slot states.  A slot cycles FREE -> BUSY -> USED (push) and
 * USED -> BUSY -> FREE (pop); BUSY marks a slot whose data is currently
 * being written or read by the owning thread. */
enum {
	STATE_FREE,
	STATE_USED,
	STATE_BUSY
};

/* A queue slot: the state word followed by the caller's entry data. */
typedef struct {
	gint32 state;
	gpointer data [MONO_ZERO_LEN_ARRAY];
} Entry;

typedef MonoLockFreeArrayQueue Queue;

/* The queue's entry size, calculated from the array's. */
#define ENTRY_SIZE(q) ((q)->array.entry_size - sizeof (gpointer))
132 |
/*
 * Push the entry pointed to by @entry_data_ptr (ENTRY_SIZE bytes are
 * copied) onto the end of @q.  Lock-free.
 */
void
mono_lock_free_array_queue_push (MonoLockFreeArrayQueue *q, gpointer entry_data_ptr)
{
	int index, num_used;
	Entry *entry;

	/* Claim a slot: bump the used count, then take ownership of the
	 * slot by CASing its state FREE->BUSY.  If the slot isn't FREE
	 * (another thread still owns it), claim the next one. */
	do {
		index = InterlockedIncrement (&q->num_used_entries) - 1;
		entry = mono_lock_free_array_nth (&q->array, index);
	} while (InterlockedCompareExchange (&entry->state, STATE_BUSY, STATE_FREE) != STATE_FREE);

	mono_memory_write_barrier ();

	memcpy (entry->data, entry_data_ptr, ENTRY_SIZE (q));

	/* The data must be visible before the slot is marked USED. */
	mono_memory_write_barrier ();

	entry->state = STATE_USED;

	mono_memory_barrier ();

	/* Make sure num_used_entries covers our slot again, in case
	 * concurrent pops shrank it below index+1 in the meantime. */
	do {
		num_used = q->num_used_entries;
		if (num_used > index)
			break;
	} while (InterlockedCompareExchange (&q->num_used_entries, index + 1, num_used) != num_used);

	mono_memory_write_barrier ();
}
162 |
/*
 * Pop the last entry off @q into @entry_data_ptr (ENTRY_SIZE bytes are
 * copied).  Removal happens at the end, like the push — hence
 * "somewhat-queue" in the file comment.  Returns FALSE if empty.
 */
gboolean
mono_lock_free_array_queue_pop (MonoLockFreeArrayQueue *q, gpointer entry_data_ptr)
{
	int index;
	Entry *entry;

	do {
		/* Claim the last used slot by decrementing the count... */
		do {
			index = q->num_used_entries;
			if (index == 0)
				return FALSE;
		} while (InterlockedCompareExchange (&q->num_used_entries, index - 1, index) != index);

		/* ...then take ownership of it by CASing USED->BUSY.  If the
		 * pusher hasn't finished writing it yet, retry from scratch. */
		entry = mono_lock_free_array_nth (&q->array, index - 1);
	} while (InterlockedCompareExchange (&entry->state, STATE_BUSY, STATE_USED) != STATE_USED);

	/* The read of the data must happen after the CAS above. */
	mono_memory_barrier ();

	memcpy (entry_data_ptr, entry->data, ENTRY_SIZE (q));

	/* ...and before the slot is released below. */
	mono_memory_barrier ();

	entry->state = STATE_FREE;

	mono_memory_write_barrier ();

	return TRUE;
}
192 |
/*
 * Free all chunks and reset @q to empty.  Not thread-safe: must not
 * race with concurrent pushes or pops.
 */
void
mono_lock_free_array_queue_cleanup (MonoLockFreeArrayQueue *q)
{
	mono_lock_free_array_cleanup (&q->array);
	q->num_used_entries = 0;
}
199 |
--------------------------------------------------------------------------------
/mono-linked-list-set.c:
--------------------------------------------------------------------------------
1 | /*
2 | * mono-split-ordered-list.c: A lock-free split ordered list.
3 | *
4 | * Author:
5 | * Rodrigo Kumpera (kumpera@gmail.com)
6 | *
7 | * (C) 2011 Novell, Inc
8 | *
9 | * This is an implementation of Maged Michael's lock-free linked-list set.
10 | * For more details see:
11 | * "High Performance Dynamic Lock-Free Hash Tables and List-Based Sets"
12 | * Maged M. Michael 2002
13 | *
14 | * http://www.research.ibm.com/people/m/michael/spaa-2002.pdf
15 | */
16 |
17 | #include "mono-linked-list-set.h"
18 |
19 | /*atomics.*/
20 | #include "atomic.h"
21 |
22 | static inline gpointer
23 | mask (gpointer n, uintptr_t bit)
24 | {
25 | return (gpointer)(((uintptr_t)n) | bit);
26 | }
27 |
/*
 * Like get_hazardous_pointer, but the loaded pointer may carry the
 * deletion mark in its low bit: the hazard pointer is set to the
 * unmasked address, while the returned value keeps the mark.
 */
gpointer
get_hazardous_pointer_with_mask (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index)
{
	gpointer p;

	for (;;) {
		/* Get the pointer */
		p = *pp;
		/* If we don't have hazard pointers just return the
		   pointer. */
		if (!hp)
			return p;
		/* Make it hazardous */
		mono_hazard_pointer_set (hp, hazard_index, mono_lls_pointer_unmask (p));

		/* The store above must be visible before the re-read below. */
		mono_memory_barrier ();

		/* Check that it's still the same.  If not, try
		   again. */
		if (*pp != p) {
			mono_hazard_pointer_clear (hp, hazard_index);
			continue;
		}
		break;
	}

	return p;
}
56 |
57 | /*
58 | Initialize @list and will use @free_node_func to release memory.
59 | If @free_node_func is null the caller is responsible for releasing node memory.
60 | @free_node_func must be lock-free. That implies that it cannot use malloc/free.
61 | */
62 | void
63 | mono_lls_init (MonoLinkedListSet *list, void (*free_node_func)(void *))
64 | {
65 | list->head = NULL;
66 | list->free_node_func = free_node_func;
67 | }
68 |
69 | /*
70 | Search @list for element with key @key.
71 | The nodes next, cur and prev are returned in @hp.
72 | Returns true if a node with key @key was found.
73 | This function cannot be called from a signal nor within interrupt context*.
74 | XXX A variant that works within interrupted is possible if needed.
75 |
76 | * interrupt context is when the current thread is reposible for another thread
77 | been suspended at an arbritary point. This is a limitation of our SMR implementation.
78 | */
79 | gboolean
80 | mono_lls_find (MonoLinkedListSet *list, MonoThreadHazardPointers *hp, uintptr_t key)
81 | {
82 | MonoLinkedListSetNode *cur, *next;
83 | MonoLinkedListSetNode **prev;
84 | uintptr_t cur_key;
85 |
86 | try_again:
87 | prev = &list->head;
88 |
89 | /*
90 | * prev is not really a hazardous pointer, but we return prev
91 | * in hazard pointer 2, so we set it here. Note also that
92 | * prev is not a pointer to a node. We use here the fact that
93 | * the first element in a node is the next pointer, so it
94 | * works, but it's not pretty.
95 | */
96 | mono_hazard_pointer_set (hp, 2, prev);
97 |
98 | cur = get_hazardous_pointer_with_mask ((gpointer*)prev, hp, 1);
99 |
100 | while (1) {
101 | if (cur == NULL)
102 | return FALSE;
103 | next = get_hazardous_pointer_with_mask ((gpointer*)&cur->next, hp, 0);
104 | cur_key = cur->key;
105 |
106 | /*
107 | * We need to make sure that we dereference prev below
108 | * after reading cur->next above, so we need a read
109 | * barrier.
110 | */
111 | mono_memory_read_barrier ();
112 |
113 | if (*prev != cur)
114 | goto try_again;
115 |
116 | if (!mono_lls_pointer_get_mark (next)) {
117 | if (cur_key >= key)
118 | return cur_key == key;
119 |
120 | prev = &cur->next;
121 | mono_hazard_pointer_set (hp, 2, cur);
122 | } else {
123 | next = mono_lls_pointer_unmask (next);
124 | if (InterlockedCompareExchangePointer ((volatile gpointer*)prev, next, cur) == cur) {
125 | /* The hazard pointer must be cleared after the CAS. */
126 | mono_memory_write_barrier ();
127 | mono_hazard_pointer_clear (hp, 1);
128 | if (list->free_node_func)
129 | mono_thread_hazardous_free_or_queue (cur, list->free_node_func, FALSE, TRUE);
130 | } else
131 | goto try_again;
132 | }
133 | cur = mono_lls_pointer_unmask (next);
134 | mono_hazard_pointer_set (hp, 1, cur);
135 | }
136 | }
137 |
138 | /*
139 | Insert @value into @list.
140 | The nodes value, cur and prev are returned in @hp.
141 | Return true if @value was inserted by this call. If it returns FALSE, it's the caller
142 | resposibility to release memory.
143 | This function cannot be called from a signal nor with the world stopped.
144 | */
145 | gboolean
146 | mono_lls_insert (MonoLinkedListSet *list, MonoThreadHazardPointers *hp, MonoLinkedListSetNode *value)
147 | {
148 | MonoLinkedListSetNode *cur, **prev;
149 | /*We must do a store barrier before inserting
150 | to make sure all values in @node are globally visible.*/
151 | mono_memory_barrier ();
152 |
153 | while (1) {
154 | if (mono_lls_find (list, hp, value->key))
155 | return FALSE;
156 | cur = mono_hazard_pointer_get_val (hp, 1);
157 | prev = mono_hazard_pointer_get_val (hp, 2);
158 |
159 | value->next = cur;
160 | mono_hazard_pointer_set (hp, 0, value);
161 | /* The CAS must happen after setting the hazard pointer. */
162 | mono_memory_write_barrier ();
163 | if (InterlockedCompareExchangePointer ((volatile gpointer*)prev, value, cur) == cur)
164 | return TRUE;
165 | }
166 | }
167 |
168 | /*
169 | Search @list for element with key @key.
170 | The nodes next, cur and prev are returned in @hp
171 | Returns true if @value was removed by this call.
172 | This function cannot be called from a signal nor with the world stopped.
173 | */
174 | gboolean
175 | mono_lls_remove (MonoLinkedListSet *list, MonoThreadHazardPointers *hp, MonoLinkedListSetNode *value)
176 | {
177 | MonoLinkedListSetNode *cur, **prev, *next;
178 | while (1) {
179 | if (!mono_lls_find (list, hp, value->key))
180 | return FALSE;
181 |
182 | next = mono_hazard_pointer_get_val (hp, 0);
183 | cur = mono_hazard_pointer_get_val (hp, 1);
184 | prev = mono_hazard_pointer_get_val (hp, 2);
185 |
186 | g_assert (cur == value);
187 |
188 | if (InterlockedCompareExchangePointer ((volatile gpointer*)&cur->next, mask (next, 1), next) != next)
189 | continue;
190 | /* The second CAS must happen before the first. */
191 | mono_memory_write_barrier ();
192 | if (InterlockedCompareExchangePointer ((volatile gpointer*)prev, next, cur) == cur) {
193 | /* The CAS must happen before the hazard pointer clear. */
194 | mono_memory_write_barrier ();
195 | mono_hazard_pointer_clear (hp, 1);
196 | if (list->free_node_func)
197 | mono_thread_hazardous_free_or_queue (value, list->free_node_func, FALSE, TRUE);
198 | } else
199 | mono_lls_find (list, hp, value->key);
200 | return TRUE;
201 | }
202 | }
203 |
--------------------------------------------------------------------------------
/lock-free-queue.c:
--------------------------------------------------------------------------------
1 | /*
2 | * lock-free-queue.c: Lock free queue.
3 | *
4 | * (C) Copyright 2011 Novell, Inc
5 | *
6 | * Permission is hereby granted, free of charge, to any person obtaining
7 | * a copy of this software and associated documentation files (the
8 | * "Software"), to deal in the Software without restriction, including
9 | * without limitation the rights to use, copy, modify, merge, publish,
10 | * distribute, sublicense, and/or sell copies of the Software, and to
11 | * permit persons to whom the Software is furnished to do so, subject to
12 | * the following conditions:
13 | *
14 | * The above copyright notice and this permission notice shall be
15 | * included in all copies or substantial portions of the Software.
16 | *
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 | */
25 |
26 | /*
27 | * This is an implementation of a lock-free queue, as described in
28 | *
29 | * Simple, Fast, and Practical Non-Blocking and Blocking
30 | * Concurrent Queue Algorithms
31 | * Maged M. Michael, Michael L. Scott
32 | * 1995
33 | *
34 | * A few slight modifications have been made:
35 | *
36 | * We use hazard pointers to rule out the ABA problem, instead of the
37 | * counter as in the paper.
38 | *
39 | * Memory management of the queue entries is done by the caller, not
40 | * by the queue implementation. This implies that the dequeue
41 | * function must return the queue entry, not just the data.
42 | *
43 | * Therefore, the dummy entry must never be returned. We do this by
44 | * re-enqueuing a new dummy entry after we dequeue one and then
45 | * retrying the dequeue. We need more than one dummy because they
46 | * must be hazardly freed.
47 | */
48 |
49 | #include
50 | #include
51 | #include
52 |
53 | #include "mono-membar.h"
54 | #include "hazard-pointer.h"
55 | #include "atomic.h"
56 |
57 | #include "lock-free-queue.h"
58 |
59 | #define INVALID_NEXT ((void*)-1)
60 | #define END_MARKER ((void*)-2)
61 | #define FREE_NEXT ((void*)-3)
62 |
/*
 * Initialize @q.  Dummy 0 starts out as the queue's only node
 * (head == tail == &dummies[0].node, next == END_MARKER); the remaining
 * dummies are free for later re-enqueuing.
 */
void
mono_lock_free_queue_init (MonoLockFreeQueue *q)
{
	int i;
	for (i = 0; i < MONO_LOCK_FREE_QUEUE_NUM_DUMMIES; ++i) {
		q->dummies [i].node.next = (i == 0) ? END_MARKER : FREE_NEXT;
		q->dummies [i].in_use = i == 0 ? 1 : 0;
#ifdef QUEUE_DEBUG
		q->dummies [i].node.in_queue = i == 0 ? TRUE : FALSE;
#endif
	}

	q->head = q->tail = &q->dummies [0].node;
	q->has_dummy = 1;
}
78 |
79 | void
80 | mono_lock_free_queue_node_init (MonoLockFreeQueueNode *node, gboolean to_be_freed)
81 | {
82 | node->next = to_be_freed ? INVALID_NEXT : FREE_NEXT;
83 | #ifdef QUEUE_DEBUG
84 | node->in_queue = FALSE;
85 | #endif
86 | }
87 |
/*
 * Return a dequeued node (next == INVALID_NEXT) to the enqueueable
 * state (next == FREE_NEXT).  Must only be called once the node is no
 * longer hazardous.
 */
void
mono_lock_free_queue_node_free (MonoLockFreeQueueNode *node)
{
	g_assert (node->next == INVALID_NEXT);
#ifdef QUEUE_DEBUG
	g_assert (!node->in_queue);
#endif
	node->next = FREE_NEXT;
}
97 |
/*
 * Append @node to @q.  @node must be initialized (next == FREE_NEXT)
 * and not be in any queue.  Lock-free; uses hazard pointer slot 0.
 */
void
mono_lock_free_queue_enqueue (MonoLockFreeQueue *q, MonoLockFreeQueueNode *node)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoLockFreeQueueNode *tail;

#ifdef QUEUE_DEBUG
	g_assert (!node->in_queue);
	node->in_queue = TRUE;
	mono_memory_write_barrier ();
#endif

	g_assert (node->next == FREE_NEXT);
	node->next = END_MARKER;
	for (;;) {
		MonoLockFreeQueueNode *next;

		tail = get_hazardous_pointer ((gpointer volatile*)&q->tail, hp, 0);
		mono_memory_read_barrier ();
		/*
		 * We never dereference next so we don't need a
		 * hazardous load.
		 */
		next = tail->next;
		mono_memory_read_barrier ();

		/* Are tail and next consistent? */
		if (tail == q->tail) {
			g_assert (next != INVALID_NEXT && next != FREE_NEXT);
			g_assert (next != tail);

			if (next == END_MARKER) {
				/*
				 * Here we require that nodes that
				 * have been dequeued don't have
				 * next==END_MARKER.  If they did, we
				 * might append to a node that isn't
				 * in the queue anymore here.
				 */
				if (InterlockedCompareExchangePointer ((gpointer volatile*)&tail->next, node, END_MARKER) == END_MARKER)
					break;
			} else {
				/* Tail was lagging behind: try to advance it. */
				InterlockedCompareExchangePointer ((gpointer volatile*)&q->tail, next, tail);
			}
		}

		mono_memory_write_barrier ();
		mono_hazard_pointer_clear (hp, 0);
	}

	/* Try to advance tail to the newly appended node. */
	InterlockedCompareExchangePointer ((gpointer volatile*)&q->tail, node, tail);

	mono_memory_write_barrier ();
	mono_hazard_pointer_clear (hp, 0);
}
155 |
/* Hazard-free callback for a dequeued dummy (passed to
 * mono_thread_hazardous_free_or_queue): reset its node and mark the
 * dummy slot reusable. */
static void
free_dummy (gpointer _dummy)
{
	MonoLockFreeQueueDummy *dummy = _dummy;
	mono_lock_free_queue_node_free (&dummy->node);
	g_assert (dummy->in_use);
	/* The node reset must be visible before in_use is cleared. */
	mono_memory_write_barrier ();
	dummy->in_use = 0;
}
165 |
/*
 * Claim a free dummy node of @q via CAS on its in_use flag.
 * Returns NULL if all dummies are currently in use or being freed.
 */
static MonoLockFreeQueueDummy*
get_dummy (MonoLockFreeQueue *q)
{
	int i;
	for (i = 0; i < MONO_LOCK_FREE_QUEUE_NUM_DUMMIES; ++i) {
		MonoLockFreeQueueDummy *dummy = &q->dummies [i];

		/* Cheap pre-check before attempting the CAS. */
		if (dummy->in_use)
			continue;

		if (InterlockedCompareExchange (&dummy->in_use, 1, 0) == 0)
			return dummy;
	}
	return NULL;
}
181 |
182 | static gboolean
183 | is_dummy (MonoLockFreeQueue *q, MonoLockFreeQueueNode *n)
184 | {
185 | return n >= &q->dummies [0].node && n < &q->dummies [MONO_LOCK_FREE_QUEUE_NUM_DUMMIES].node;
186 | }
187 |
/*
 * If @q currently has no dummy node, try to enqueue a fresh one.
 * Returns TRUE only if this thread actually enqueued a dummy.
 */
static gboolean
try_reenqueue_dummy (MonoLockFreeQueue *q)
{
	MonoLockFreeQueueDummy *dummy;

	if (q->has_dummy)
		return FALSE;

	dummy = get_dummy (q);
	if (!dummy)
		return FALSE;

	/* Only one thread may win the right to enqueue the dummy; the
	 * loser returns its claimed dummy slot. */
	if (InterlockedCompareExchange (&q->has_dummy, 1, 0) != 0) {
		dummy->in_use = 0;
		return FALSE;
	}

	mono_lock_free_queue_enqueue (q, &dummy->node);

	return TRUE;
}
209 |
210 | MonoLockFreeQueueNode*
211 | mono_lock_free_queue_dequeue (MonoLockFreeQueue *q)
212 | {
213 | MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
214 | MonoLockFreeQueueNode *head;
215 |
216 | retry:
217 | for (;;) {
218 | MonoLockFreeQueueNode *tail, *next;
219 |
220 | head = get_hazardous_pointer ((gpointer volatile*)&q->head, hp, 0);
221 | tail = (MonoLockFreeQueueNode*)q->tail;
222 | mono_memory_read_barrier ();
223 | next = head->next;
224 | mono_memory_read_barrier ();
225 |
226 | /* Are head, tail and next consistent? */
227 | if (head == q->head) {
228 | g_assert (next != INVALID_NEXT && next != FREE_NEXT);
229 | g_assert (next != head);
230 |
231 | /* Is queue empty or tail behind? */
232 | if (head == tail) {
233 | if (next == END_MARKER) {
234 | /* Queue is empty */
235 | mono_hazard_pointer_clear (hp, 0);
236 |
237 | /*
238 | * We only continue if we
239 | * reenqueue the dummy
240 | * ourselves, so as not to
241 | * wait for threads that might
242 | * not actually run.
243 | */
244 | if (!is_dummy (q, head) && try_reenqueue_dummy (q))
245 | continue;
246 |
247 | return NULL;
248 | }
249 |
250 | /* Try to advance tail */
251 | InterlockedCompareExchangePointer ((gpointer volatile*)&q->tail, next, tail);
252 | } else {
253 | g_assert (next != END_MARKER);
254 | /* Try to dequeue head */
255 | if (InterlockedCompareExchangePointer ((gpointer volatile*)&q->head, next, head) == head)
256 | break;
257 | }
258 | }
259 |
260 | mono_memory_write_barrier ();
261 | mono_hazard_pointer_clear (hp, 0);
262 | }
263 |
264 | /*
265 | * The head is dequeued now, so we know it's this thread's
266 | * responsibility to free it - no other thread can.
267 | */
268 | mono_memory_write_barrier ();
269 | mono_hazard_pointer_clear (hp, 0);
270 |
271 | g_assert (head->next);
272 | /*
273 | * Setting next here isn't necessary for correctness, but we
274 | * do it to make sure that we catch dereferencing next in a
275 | * node that's not in the queue anymore.
276 | */
277 | head->next = INVALID_NEXT;
278 | #if QUEUE_DEBUG
279 | g_assert (head->in_queue);
280 | head->in_queue = FALSE;
281 | mono_memory_write_barrier ();
282 | #endif
283 |
284 | if (is_dummy (q, head)) {
285 | g_assert (q->has_dummy);
286 | q->has_dummy = 0;
287 | mono_memory_write_barrier ();
288 | mono_thread_hazardous_free_or_queue (head, free_dummy, FALSE, TRUE);
289 | if (try_reenqueue_dummy (q))
290 | goto retry;
291 | return NULL;
292 | }
293 |
294 | /* The caller must hazardously free the node. */
295 | return head;
296 | }
297 |
--------------------------------------------------------------------------------
/hazard-pointer.c:
--------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
6 |
7 | #include "mono-membar.h"
8 | #include "delayed-free.h"
9 | #include "mono-mmap.h"
10 | #include "lock-free-array-queue.h"
11 | #include "hazard-pointer.h"
12 |
#define mono_pagesize getpagesize

/* An entry in the delayed-free queue: a pointer waiting to be freed and
 * the function to free it with. */
typedef struct {
	gpointer p;
	MonoHazardousFreeFunc free_func;
	gboolean might_lock;
} DelayedFreeItem;

/* Stand-in for mono's runtime statistics counters. */
static struct {
	long long hazardous_pointer_count;
} mono_stats;

/* Minimal stand-in for mono's internal thread structure: only the
 * small id used to index the hazard table is needed here. */
typedef struct {
	int small_id;
} MonoInternalThread;

/* Small-id allocation state, protected by small_id_mutex. */
static CRITICAL_SECTION small_id_mutex;
static int small_id_table_size = 0;
static int small_id_next = 0;
static int highest_small_id = -1;
static MonoInternalThread **small_id_table = NULL;

/* The hazard table */
#if MONO_SMALL_CONFIG
#define HAZARD_TABLE_MAX_SIZE 256
#else
#define HAZARD_TABLE_MAX_SIZE 16384 /* There cannot be more threads than this number. */
#endif
static volatile int hazard_table_size = 0;
static MonoThreadHazardPointers * volatile hazard_table = NULL;

/* The table where we keep pointers to blocks to be freed but that
   have to wait because they're guarded by a hazard pointer. */
static MonoLockFreeArrayQueue delayed_free_queue = MONO_LOCK_FREE_ARRAY_QUEUE_INIT (sizeof (DelayedFreeItem));
47 |
/*
 * Stand-in for mono's GC-tracked fixed allocation: zeroed malloc.
 * @dummy takes the place of the root descriptor and must be NULL here.
 */
static void*
mono_gc_alloc_fixed (size_t size, void *dummy)
{
	g_assert (dummy == NULL);
	return g_malloc0 (size);
}
54 |
/*
 * Free memory obtained from mono_gc_alloc_fixed.
 * NOTE(review): this calls free() on memory allocated with g_malloc0();
 * that is only correct if g_malloc0 is malloc-based (as fake-glib
 * presumably is) — confirm, or use g_free for symmetry.
 */
static void
mono_gc_free_fixed (void *ptr)
{
	free (ptr);
}
60 |
61 | /*
62 | * Allocate a small thread id.
63 | *
64 | * FIXME: The biggest part of this function is very similar to
65 | * domain_id_alloc() in domain.c and should be merged.
66 | */
67 | static int
68 | small_id_alloc (MonoInternalThread *thread)
69 | {
70 | int id = -1, i;
71 |
72 | EnterCriticalSection (&small_id_mutex);
73 |
74 | if (!small_id_table) {
75 | small_id_table_size = 2;
76 | /*
77 | * Enabling this causes problems, because SGEN doesn't track/update the TLS slot holding
78 | * the current thread.
79 | */
80 | //small_id_table = mono_gc_alloc_fixed (small_id_table_size * sizeof (MonoInternalThread*), mono_gc_make_root_descr_all_refs (small_id_table_size));
81 | small_id_table = mono_gc_alloc_fixed (small_id_table_size * sizeof (MonoInternalThread*), NULL);
82 | }
83 | for (i = small_id_next; i < small_id_table_size; ++i) {
84 | if (!small_id_table [i]) {
85 | id = i;
86 | break;
87 | }
88 | }
89 | if (id == -1) {
90 | for (i = 0; i < small_id_next; ++i) {
91 | if (!small_id_table [i]) {
92 | id = i;
93 | break;
94 | }
95 | }
96 | }
97 | if (id == -1) {
98 | MonoInternalThread **new_table;
99 | int new_size = small_id_table_size * 2;
100 | if (new_size >= (1 << 16))
101 | g_assert_not_reached ();
102 | id = small_id_table_size;
103 | //new_table = mono_gc_alloc_fixed (new_size * sizeof (MonoInternalThread*), mono_gc_make_root_descr_all_refs (new_size));
104 | new_table = mono_gc_alloc_fixed (new_size * sizeof (MonoInternalThread*), NULL);
105 | memcpy (new_table, small_id_table, small_id_table_size * sizeof (void*));
106 | mono_gc_free_fixed (small_id_table);
107 | small_id_table = new_table;
108 | small_id_table_size = new_size;
109 | }
110 | thread->small_id = id;
111 | g_assert (small_id_table [id] == NULL);
112 | small_id_table [id] = thread;
113 | small_id_next++;
114 | if (small_id_next > small_id_table_size)
115 | small_id_next = 0;
116 |
117 | g_assert (id < HAZARD_TABLE_MAX_SIZE);
118 | if (id >= hazard_table_size) {
119 | #if MONO_SMALL_CONFIG
120 | hazard_table = g_malloc0 (sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE);
121 | hazard_table_size = HAZARD_TABLE_MAX_SIZE;
122 | #else
123 | gpointer page_addr;
124 | int pagesize = mono_pagesize ();
125 | int num_pages = (hazard_table_size * sizeof (MonoThreadHazardPointers) + pagesize - 1) / pagesize;
126 |
127 | if (hazard_table == NULL) {
128 | hazard_table = mono_valloc (NULL,
129 | sizeof (MonoThreadHazardPointers) * HAZARD_TABLE_MAX_SIZE,
130 | MONO_MMAP_NONE);
131 | }
132 |
133 | g_assert (hazard_table != NULL);
134 | page_addr = (guint8*)hazard_table + num_pages * pagesize;
135 |
136 | mono_mprotect (page_addr, pagesize, MONO_MMAP_READ | MONO_MMAP_WRITE);
137 |
138 | ++num_pages;
139 | hazard_table_size = num_pages * pagesize / sizeof (MonoThreadHazardPointers);
140 |
141 | #endif
142 | g_assert (id < hazard_table_size);
143 | for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
144 | hazard_table [id].hazard_pointers [i] = NULL;
145 | }
146 |
147 | if (id > highest_small_id) {
148 | highest_small_id = id;
149 | mono_memory_write_barrier ();
150 | }
151 |
152 | LeaveCriticalSection (&small_id_mutex);
153 |
154 | return id;
155 | }
156 |
/*
 * Return small id @id to the pool.
 * NOTE(review): unlike small_id_alloc this does not take
 * small_id_mutex — confirm that callers serialize it.
 */
static void
small_id_free (int id)
{
	g_assert (id >= 0 && id < small_id_table_size);
	g_assert (small_id_table [id] != NULL);

	small_id_table [id] = NULL;
}
165 |
166 | static gboolean
167 | is_pointer_hazardous (gpointer p)
168 | {
169 | int i, j;
170 | int highest = highest_small_id;
171 |
172 | g_assert (highest < hazard_table_size);
173 |
174 | for (i = 0; i <= highest; ++i) {
175 | for (j = 0; j < HAZARD_POINTER_COUNT; ++j) {
176 | if (hazard_table [i].hazard_pointers [j] == p)
177 | return TRUE;
178 | }
179 | }
180 |
181 | return FALSE;
182 | }
183 |
184 | static pthread_key_t this_internal_thread_key;
185 |
186 | static MonoInternalThread*
187 | mono_thread_internal_current (void)
188 | {
189 | MonoInternalThread *internal = pthread_getspecific (this_internal_thread_key);
190 | if (!internal) {
191 | internal = malloc (sizeof (MonoInternalThread));
192 | memset (internal, 0, sizeof (MonoInternalThread));
193 | pthread_setspecific (this_internal_thread_key, internal);
194 | }
195 | return internal;
196 | }
197 |
198 | MonoThreadHazardPointers*
199 | mono_hazard_pointer_get (void)
200 | {
201 | MonoInternalThread *current_thread = mono_thread_internal_current ();
202 |
203 | if (!(current_thread && current_thread->small_id >= 0)) {
204 | static MonoThreadHazardPointers emerg_hazard_table;
205 | g_warning ("Thread %p may have been prematurely finalized", current_thread);
206 | return &emerg_hazard_table;
207 | }
208 |
209 | return &hazard_table [current_thread->small_id];
210 | }
211 |
/* Can be called with hp==NULL, in which case it acts as an ordinary
   pointer fetch. It's used that way indirectly from
   mono_jit_info_table_add(), which doesn't have to care about hazards
   because it holds the respective domain lock. */
/*
 * Read *PP and publish the value in hazard slot HAZARD_INDEX so no
 * other thread frees it while the caller uses it.  The caller must
 * clear the slot when done with the pointer.
 */
gpointer
get_hazardous_pointer (gpointer volatile *pp, MonoThreadHazardPointers *hp, int hazard_index)
{
	gpointer p;

	for (;;) {
		/* Get the pointer */
		p = *pp;
		/* If we don't have hazard pointers just return the
		   pointer. */
		if (!hp)
			return p;
		/* Make it hazardous */
		mono_hazard_pointer_set (hp, hazard_index, p);

		/* The hazard-pointer store must be globally visible
		   before we re-read *pp below. */
		mono_memory_barrier ();

		/* Check that it's still the same. If not, try
		   again. */
		if (*pp != p) {
			mono_hazard_pointer_clear (hp, hazard_index);
			continue;
		}
		break;
	}

	return p;
}
244 |
245 | static gboolean
246 | try_free_delayed_free_item (gboolean lock_free_context)
247 | {
248 | DelayedFreeItem item;
249 | gboolean popped = mono_lock_free_array_queue_pop (&delayed_free_queue, &item);
250 |
251 | if (!popped)
252 | return FALSE;
253 |
254 | if ((lock_free_context && item.might_lock) || (is_pointer_hazardous (item.p))) {
255 | mono_lock_free_array_queue_push (&delayed_free_queue, &item);
256 | return FALSE;
257 | }
258 |
259 | item.free_func (item.p);
260 |
261 | return TRUE;
262 | }
263 |
264 | void
265 | mono_thread_hazardous_free_or_queue (gpointer p, MonoHazardousFreeFunc free_func,
266 | gboolean free_func_might_lock, gboolean lock_free_context)
267 | {
268 | int i;
269 |
270 | if (lock_free_context)
271 | g_assert (!free_func_might_lock);
272 | if (free_func_might_lock)
273 | g_assert (!lock_free_context);
274 |
275 | /* First try to free a few entries in the delayed free
276 | table. */
277 | for (i = 0; i < 3; ++i)
278 | try_free_delayed_free_item (lock_free_context);
279 |
280 | /* Now see if the pointer we're freeing is hazardous. If it
281 | isn't, free it. Otherwise put it in the delay list. */
282 | if (is_pointer_hazardous (p)) {
283 | DelayedFreeItem item = { p, free_func, free_func_might_lock };
284 |
285 | ++mono_stats.hazardous_pointer_count;
286 |
287 | mono_lock_free_array_queue_push (&delayed_free_queue, &item);
288 | } else {
289 | free_func (p);
290 | }
291 | }
292 |
293 | void
294 | mono_thread_hazardous_try_free_all (void)
295 | {
296 | while (try_free_delayed_free_item (FALSE))
297 | ;
298 | }
299 |
300 | void
301 | mono_thread_attach (void)
302 | {
303 | small_id_alloc (mono_thread_internal_current ());
304 | }
305 |
/* One-time initialization of the SMR (safe memory reclamation)
   machinery: the mutex protecting small-id allocation and the TLS key
   holding each thread's MonoInternalThread.  Call before any thread
   attaches. */
void
mono_thread_smr_init (void)
{
	pthread_mutex_init (&small_id_mutex, NULL);
	pthread_key_create (&this_internal_thread_key, NULL);
}
312 |
/* Print how many frees found their pointer hazardous (and hence were
   delayed), then release the delayed-free queue's memory. */
void
mono_thread_hazardous_print_stats (void)
{
	g_print ("hazardous pointers: %lld\n", mono_stats.hazardous_pointer_count);

	mono_lock_free_array_queue_cleanup (&delayed_free_queue);
}
320 |
--------------------------------------------------------------------------------
/test.c:
--------------------------------------------------------------------------------
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

#include "hazard-pointer.h"
#include "atomic.h"
#include "lock-free-alloc.h"
#include "mono-linked-list-set.h"
10 |
11 | #ifdef TEST_ALLOC
12 | #define USE_SMR
13 |
14 | #define ACTION_BUFFER_SIZE 16
15 |
16 | typedef struct {
17 | int action;
18 | int index;
19 | gpointer p;
20 | } ThreadAction;
21 |
22 | typedef struct {
23 | pthread_t thread;
24 | int increment;
25 | volatile gboolean have_attached;
26 |
27 | ThreadAction action_buffer [ACTION_BUFFER_SIZE];
28 | int next_action_index;
29 | } ThreadData;
30 |
31 | #endif
32 |
33 | #ifdef TEST_QUEUE
34 | #define USE_SMR
35 |
36 | typedef struct {
37 | pthread_t thread;
38 | int increment;
39 | volatile gboolean have_attached;
40 |
41 | gint32 next_enqueue_counter;
42 | gint32 last_dequeue_counter;
43 | } ThreadData;
44 | #endif
45 |
46 | #ifdef TEST_DELAYED_FREE
47 | typedef struct {
48 | pthread_t thread;
49 | int increment;
50 | volatile gboolean have_attached;
51 | } ThreadData;
52 | #endif
53 |
54 | #ifdef TEST_LLS
55 | #define USE_SMR
56 |
57 | typedef struct {
58 | pthread_t thread;
59 | int increment;
60 | volatile gboolean have_attached;
61 | } ThreadData;
62 | #endif
63 |
64 | #define NUM_THREADS 4
65 |
66 | static ThreadData thread_datas [NUM_THREADS];
67 |
68 | static void
69 | attach_and_wait_for_threads_to_attach (ThreadData *data)
70 | {
71 | int i;
72 |
73 | mono_thread_attach ();
74 | data->have_attached = TRUE;
75 |
76 | retry:
77 | for (i = 0; i < NUM_THREADS; ++i) {
78 | if (!thread_datas [i].have_attached) {
79 | usleep (5000);
80 | goto retry;
81 | }
82 | }
83 | }
84 |
85 | #ifdef TEST_ALLOC
86 |
87 | #define TEST_SIZE 64
88 |
89 | static MonoLockFreeAllocSizeClass test_sc;
90 | static MonoLockFreeAllocator test_heap;
91 |
/* Set up the single size class (TEST_SIZE bytes) and the allocator
   used by the TEST_ALLOC test. */
static void
init_heap (void)
{
	mono_lock_free_allocator_init_size_class (&test_sc, TEST_SIZE);
	mono_lock_free_allocator_init_allocator (&test_heap, &test_sc);
}
98 |
99 | enum {
100 | ACTION_NONE,
101 | ACTION_ALLOC,
102 | ACTION_FREE
103 | };
104 |
105 | #define NUM_ENTRIES 1024
106 | #define NUM_ITERATIONS 100000000
107 |
108 | static gpointer entries [NUM_ENTRIES];
109 |
110 | //static volatile guint64 atomic_test;
111 |
112 | static void
113 | log_action (ThreadData *data, int action, int index, gpointer p)
114 | {
115 | data->action_buffer [data->next_action_index].action = action;
116 | data->action_buffer [data->next_action_index].index = index;
117 | data->action_buffer [data->next_action_index].p = p;
118 |
119 | data->next_action_index = (data->next_action_index + 1) % ACTION_BUFFER_SIZE;
120 | }
121 |
122 | static void
123 | dump_action_logs (void)
124 | {
125 | int i, j;
126 |
127 | for (i = 0; i < NUM_THREADS; ++i) {
128 | g_print ("action log for thread %d:\n\n", i);
129 |
130 | j = thread_datas [i].next_action_index;
131 | do {
132 | ThreadAction *action = &thread_datas [i].action_buffer [j];
133 | switch (action->action) {
134 | case ACTION_NONE:
135 | break;
136 |
137 | case ACTION_ALLOC:
138 | g_print ("%6d %p alloc\n", action->index, action->p);
139 | break;
140 |
141 | case ACTION_FREE:
142 | g_print ("%6d %p free\n", action->index, action->p);
143 | break;
144 |
145 | default:
146 | g_assert_not_reached ();
147 | }
148 |
149 | j = (j + 1) % ACTION_BUFFER_SIZE;
150 | } while (j != thread_datas [i].next_action_index);
151 |
152 | g_print ("\n\n");
153 | }
154 | }
155 |
/*
 * TEST_ALLOC worker: walks the entry table with a per-thread stride.
 * For each slot it either claims and frees the object stored there,
 * or allocates a fresh object and tries to install it.  Each object's
 * first word carries index << 10 as a canary; -1 marks freed memory.
 */
static void*
thread_func (void *_data)
{
	ThreadData *data = _data;
	int increment = data->increment;
	int i, index;

	attach_and_wait_for_threads_to_attach (data);

	index = 0;
	for (i = 0; i < NUM_ITERATIONS; ++i) {
		gpointer p;
	retry:
		p = entries [index];
		if (p) {
			/* Claim the slot; if another thread got there
			   first, re-examine it. */
			if (InterlockedCompareExchangePointer ((gpointer * volatile)&entries [index], NULL, p) != p)
				goto retry;
			g_assert (*(int*)p == index << 10);
			*(int*)p = -1;
			mono_lock_free_free (p);

			log_action (data, ACTION_FREE, index, p);
		} else {
			p = mono_lock_free_alloc (&test_heap);

			/*
			int j;

			for (j = 0; j < NUM_ENTRIES; ++j)
				g_assert (entries [j] != p);
			*/

			*(int*)p = index << 10;

			log_action (data, ACTION_ALLOC, index, p);

			/* Lost the race to fill the slot - free our
			   object again and retry the slot. */
			if (InterlockedCompareExchangePointer ((gpointer * volatile)&entries [index], p, NULL) != NULL) {
				//g_print ("immediate free %p\n", p);
				*(int*)p = -1;
				mono_lock_free_free (p);

				log_action (data, ACTION_FREE, index, p);

				goto retry;
			}
		}

		index += increment;
		while (index >= NUM_ENTRIES)
			index -= NUM_ENTRIES;

		/*
		guint64 a = atomic_test;
		g_assert ((a & 0xffffffff) == (a >> 32));
		guint64 new_a = (index | ((guint64)index << 32));
		atomic64_cmpxchg ((volatile gint64*)&atomic_test, a, new_a);
		*/

		/* Progress report, 20 times over the whole run. */
		if (i % (NUM_ITERATIONS / 20) == 0)
			g_print ("thread %d: %d\n", increment, i);
	}

	return NULL;
}
220 |
/* Set up the heap for TEST_ALLOC.  The discarded allocation appears
   to be a warm-up forcing the first superblock into existence -
   NOTE(review): its result is never freed; confirm this is
   intentional. */
static void
test_init (void)
{
	init_heap ();
	mono_lock_free_alloc (&test_heap);
}
227 |
228 | static gboolean
229 | test_finish (void)
230 | {
231 | if (mono_lock_free_allocator_check_consistency (&test_heap)) {
232 | g_print ("heap consistent\n");
233 | return TRUE;
234 | }
235 | return FALSE;
236 | }
237 |
238 | #endif
239 |
240 | #ifdef TEST_QUEUE
241 |
242 | #define NUM_ENTRIES 16
243 | #define NUM_ITERATIONS 10000000
244 |
245 | typedef struct _TableEntry TableEntry;
246 |
247 | typedef struct {
248 | MonoLockFreeQueueNode node;
249 | TableEntry *table_entry;
250 | ThreadData *thread_data;
251 | gint32 counter;
252 | } QueueEntry;
253 |
254 | struct _TableEntry {
255 | gboolean mmap;
256 | QueueEntry *queue_entry;
257 | };
258 |
259 | static MonoLockFreeQueue queue;
260 | static TableEntry entries [NUM_ENTRIES];
261 |
262 | static QueueEntry*
263 | alloc_entry (TableEntry *e, ThreadData *thread_data)
264 | {
265 | QueueEntry *qe;
266 |
267 | if (e->mmap)
268 | qe = mmap (NULL, getpagesize (), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
269 | else
270 | qe = g_malloc0 (sizeof (QueueEntry));
271 |
272 | mono_lock_free_queue_node_init (&qe->node, FALSE);
273 |
274 | qe->thread_data = thread_data;
275 | qe->counter = thread_data->next_enqueue_counter++;
276 |
277 | return qe;
278 | }
279 |
280 | static void
281 | free_entry_memory (QueueEntry *qe, gboolean mmap)
282 | {
283 | if (mmap)
284 | munmap (qe, getpagesize ());
285 | else
286 | g_free (qe);
287 | }
288 |
289 | static ThreadData thread_datas [NUM_THREADS];
290 |
/* Hazard-deferred destructor for a queue entry: unhook it from its
   table entry, free the queue node and release the memory.  Invoked
   via mono_thread_hazardous_free_or_queue () once no thread holds a
   hazard pointer to E. */
static void
free_entry (void *data)
{
	QueueEntry *e = data;
	g_assert (e->table_entry->queue_entry == e);
	e->table_entry->queue_entry = NULL;
	mono_lock_free_queue_node_free (&e->node);
	free_entry_memory (e, e->table_entry->mmap);
}
300 |
301 | static void*
302 | thread_func (void *_data)
303 | {
304 | ThreadData *data = _data;
305 | int increment = data->increment;
306 | int index;
307 | int i;
308 |
309 | attach_and_wait_for_threads_to_attach (data);
310 |
311 | index = 0;
312 | for (i = 0; i < NUM_ITERATIONS; ++i) {
313 | TableEntry *e = &entries [index];
314 |
315 | if (e->queue_entry) {
316 | QueueEntry *qe = (QueueEntry*)mono_lock_free_queue_dequeue (&queue);
317 | if (qe) {
318 | if (qe->thread_data == data) {
319 | g_assert (qe->counter > data->last_dequeue_counter);
320 | data->last_dequeue_counter = qe->counter;
321 | }
322 |
323 | /*
324 | * Calling free_entry() directly here
325 | * effectively disables hazardous
326 | * pointers. The test will then crash
327 | * sooner or later.
328 | */
329 | mono_thread_hazardous_free_or_queue (qe, free_entry);
330 | //free_entry (qe);
331 | }
332 | } else {
333 | QueueEntry *qe = alloc_entry (e, data);
334 | qe->table_entry = e;
335 | if (InterlockedCompareExchangePointer ((gpointer volatile*)&e->queue_entry, qe, NULL) == NULL) {
336 | mono_lock_free_queue_enqueue (&queue, &qe->node);
337 | } else {
338 | qe->table_entry = NULL;
339 | free_entry_memory (qe, e->mmap);
340 | }
341 | }
342 |
343 | index += increment;
344 | while (index >= NUM_ENTRIES)
345 | index -= NUM_ENTRIES;
346 | }
347 |
348 | return NULL;
349 | }
350 |
/* Initialize the shared queue and reset each thread's last-dequeued
   counter.  The commented-out loop would make some table entries use
   mmap-backed allocation. */
static void
test_init (void)
{
	int i;

	mono_lock_free_queue_init (&queue);

	/*
	for (i = 0; i < NUM_ENTRIES; i += 97)
		entries [i].mmap = TRUE;
	*/

	for (i = 0; i < NUM_THREADS; ++i)
		thread_datas [i].last_dequeue_counter = -1;
}
366 |
367 | static gboolean
368 | test_finish (void)
369 | {
370 | QueueEntry *qe;
371 | int i;
372 |
373 | while ((qe = (QueueEntry*)mono_lock_free_queue_dequeue (&queue)))
374 | free_entry (qe);
375 |
376 | for (i = 0; i < NUM_ENTRIES; ++i)
377 | g_assert (!entries [i].queue_entry);
378 |
379 | return TRUE;
380 | }
381 |
382 | #endif
383 |
384 | #ifdef TEST_DELAYED_FREE
385 | #define NUM_ENTRIES 32768
386 | #define NUM_ITERATIONS 1000000
387 |
388 | static gint32 entries [NUM_ENTRIES];
389 |
390 | static void
391 | free_func (gpointer data)
392 | {
393 | int i = (long)data;
394 |
395 | if (InterlockedCompareExchange (&entries [i], 0, 1) != 1)
396 | g_assert_not_reached ();
397 | }
398 |
/*
 * TEST_DELAYED_FREE worker: walks the entry table with a per-thread
 * stride.  Each slot is a 0/1 flag: claim a free slot (0 -> 1) and
 * push a matching delayed-free item, or pop one item and run its
 * free function.
 */
static void*
thread_func (void *_data)
{
	ThreadData *data = _data;
	int increment = data->increment;
	int index, i;

	index = 0;
	for (i = 0; i < NUM_ITERATIONS; ++i) {
		if (InterlockedCompareExchange (&entries [index], 1, 0) == 0) {
			/* The entry index doubles as the item's payload
			   pointer. */
			MonoDelayedFreeItem item = { (gpointer)(long)index, free_func };
			mono_delayed_free_push (item);
		} else {
			MonoDelayedFreeItem item;
			if (mono_delayed_free_pop (&item))
				item.free_func (item.p);
		}

		index += increment;
		while (index >= NUM_ENTRIES)
			index -= NUM_ENTRIES;
	}

	return NULL;
}
424 |
/* Nothing to set up for the delayed-free test. */
static void
test_init (void)
{
}
429 |
430 | static gboolean
431 | test_finish (void)
432 | {
433 | int i;
434 | MonoDelayedFreeItem item;
435 |
436 | while (mono_delayed_free_pop (&item))
437 | item.free_func (item.p);
438 |
439 | for (i = 0; i < NUM_ENTRIES; ++i)
440 | g_assert (!entries [i]);
441 |
442 | return TRUE;
443 | }
444 | #endif
445 |
446 | #ifdef TEST_LLS
447 | enum {
448 | STATE_FREE,
449 | STATE_ALLOCING,
450 | STATE_FREEING,
451 | STATE_USED
452 | };
453 |
454 | #define NUM_ENTRIES 32
455 | #define NUM_ITERATIONS 1000000
456 |
457 | static gint32 entries [NUM_ENTRIES];
458 |
459 | static MonoLinkedListSet list;
460 |
461 | static void
462 | free_node_func (void *_node)
463 | {
464 | MonoLinkedListSetNode *node = _node;
465 | int index = node->key >> 2;
466 | g_assert (index >= 0 && index < NUM_ENTRIES);
467 | if (InterlockedCompareExchange (&entries [index], STATE_FREE, STATE_FREEING) != STATE_FREEING)
468 | g_assert_not_reached ();
469 | free (node);
470 | }
471 |
/*
 * TEST_LLS worker: each table entry cycles
 * FREE -> ALLOCING -> USED -> FREEING -> FREE.  Winning the
 * FREE->ALLOCING CAS gives this thread the exclusive right to insert
 * a node; winning USED->FREEING gives it the right to find and remove
 * that node.  Keys are index << 2 - NOTE(review): presumably the low
 * key bits are reserved by the list implementation; confirm against
 * mono-linked-list-set.h.
 */
static void*
thread_func (void *_data)
{
	ThreadData *data = _data;
	int increment = data->increment;
	MonoThreadHazardPointers *hp;
	int index, i;
	gboolean result;

	attach_and_wait_for_threads_to_attach (data);

	hp = mono_hazard_pointer_get ();

	index = 0;
	for (i = 0; i < NUM_ITERATIONS; ++i) {
		gint32 state = entries [index];

		if (state == STATE_FREE) {
			if (InterlockedCompareExchange (&entries [index], STATE_ALLOCING, STATE_FREE) == STATE_FREE) {
				MonoLinkedListSetNode *node = malloc (sizeof (MonoLinkedListSetNode));
				node->key = index << 2;

				result = mono_lls_insert (&list, hp, node);
				g_assert (result);

				if (InterlockedCompareExchange (&entries [index], STATE_USED, STATE_ALLOCING) != STATE_ALLOCING)
					g_assert_not_reached ();

				/* Release all hazard slots used by the
				   list operation. */
				mono_hazard_pointer_clear (hp, 0);
				mono_hazard_pointer_clear (hp, 1);
				mono_hazard_pointer_clear (hp, 2);
			}
		} else if (state == STATE_USED) {
			if (InterlockedCompareExchange (&entries [index], STATE_FREEING, STATE_USED) == STATE_USED) {
				MonoLinkedListSetNode *node;

				result = mono_lls_find (&list, hp, index << 2);
				g_assert (result);

				/* The found node is left in hazard
				   slot 1. */
				node = mono_hazard_pointer_get_val (hp, 1);
				g_assert (node->key == index << 2);

				mono_hazard_pointer_clear (hp, 0);
				mono_hazard_pointer_clear (hp, 1);
				mono_hazard_pointer_clear (hp, 2);

				result = mono_lls_remove (&list, hp, node);
				g_assert (result);

				mono_hazard_pointer_clear (hp, 0);
				mono_hazard_pointer_clear (hp, 1);
				mono_hazard_pointer_clear (hp, 2);
			}
		} else {
			/* Entry is mid-transition in another thread -
			   use the time to drain delayed frees. */
			mono_thread_hazardous_try_free_all ();
		}

		index += increment;
		while (index >= NUM_ENTRIES)
			index -= NUM_ENTRIES;
	}

	return NULL;
}
536 |
/* Initialize the linked-list set with free_node_func as the node
   destructor. */
static void
test_init (void)
{
	mono_lls_init (&list, free_node_func);
}
542 |
/* Walk the surviving list nodes - each must correspond to a USED
   entry, which is reset to FREE - then verify every entry ended up
   FREE. */
static gboolean
test_finish (void)
{
	MonoLinkedListSetNode *node;
	int i;

	MONO_LLS_FOREACH ((&list), node)
		int index = node->key >> 2;
		g_assert (index >= 0 && index < NUM_ENTRIES);
		g_assert (entries [index] == STATE_USED);
		entries [index] = STATE_FREE;
	MONO_LLS_END_FOREACH

	for (i = 0; i < NUM_ENTRIES; ++i)
		g_assert (entries [i] == STATE_FREE);

	return TRUE;
}
561 | #endif
562 |
563 | int
564 | lock_free_allocator_test_main (void)
565 | {
566 | int i;
567 | gboolean result;
568 |
569 | #ifdef USE_SMR
570 | mono_thread_smr_init ();
571 |
572 | mono_thread_attach ();
573 | #endif
574 |
575 | test_init ();
576 |
577 | thread_datas [0].increment = 1;
578 | if (NUM_THREADS >= 2)
579 | thread_datas [1].increment = 3;
580 | if (NUM_THREADS >= 3)
581 | thread_datas [2].increment = 5;
582 | if (NUM_THREADS >= 4)
583 | thread_datas [3].increment = 7;
584 |
585 | for (i = 0; i < NUM_THREADS; ++i)
586 | pthread_create (&thread_datas [i].thread, NULL, thread_func, &thread_datas [i]);
587 |
588 | for (i = 0; i < NUM_THREADS; ++i)
589 | pthread_join (thread_datas [i].thread, NULL);
590 |
591 | #ifdef USE_SMR
592 | mono_thread_hazardous_try_free_all ();
593 | #endif
594 |
595 | result = test_finish ();
596 |
597 | #ifdef USE_SMR
598 | mono_thread_hazardous_print_stats ();
599 | #endif
600 |
601 | return result ? 0 : 1;
602 | }
603 |
--------------------------------------------------------------------------------
/lock-free-alloc.c:
--------------------------------------------------------------------------------
1 | /*
2 | * lock-free-alloc.c: Lock free allocator.
3 | *
4 | * (C) Copyright 2011 Novell, Inc
5 | *
6 | * Permission is hereby granted, free of charge, to any person obtaining
7 | * a copy of this software and associated documentation files (the
8 | * "Software"), to deal in the Software without restriction, including
9 | * without limitation the rights to use, copy, modify, merge, publish,
10 | * distribute, sublicense, and/or sell copies of the Software, and to
11 | * permit persons to whom the Software is furnished to do so, subject to
12 | * the following conditions:
13 | *
14 | * The above copyright notice and this permission notice shall be
15 | * included in all copies or substantial portions of the Software.
16 | *
17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 | */
25 |
26 | /*
27 | * This is a simplified version of the lock-free allocator described in
28 | *
29 | * Scalable Lock-Free Dynamic Memory Allocation
30 | * Maged M. Michael, PLDI 2004
31 | *
32 | * I could not get Michael's allocator working bug free under heavy
33 | * stress tests. The paper doesn't provide correctness proof and after
34 | * failing to formalize the ownership of descriptors I devised this
35 | * simpler allocator.
36 | *
37 | * Allocation within superblocks proceeds exactly like in Michael's
38 | * allocator. The simplification is that a thread has to "acquire" a
39 | * descriptor before it can allocate from its superblock. While it owns
40 | * the descriptor no other thread can acquire and hence allocate from
41 | * it. A consequence of this is that the ABA problem cannot occur, so
42 | * we don't need the tag field and don't have to use 64 bit CAS.
43 | *
44 | * Descriptors are stored in two locations: The partial queue and the
45 | * active field. They can only be in at most one of those at one time.
46 | * If a thread wants to allocate, it needs to get a descriptor. It
47 | * tries the active descriptor first, CASing it to NULL. If that
48 | * doesn't work, it gets a descriptor out of the partial queue. Once it
49 | * has the descriptor it owns it because it is not referenced anymore.
50 | * It allocates a slot and then gives the descriptor back (unless it is
51 | * FULL).
52 | *
53 | * Note that it is still possible that a slot is freed while an
54 | * allocation is in progress from the same superblock. Ownership in
55 | * this case is not complicated, though. If the block was FULL and the
56 | * free set it to PARTIAL, the free now owns the block (because FULL
57 | * blocks are not referenced from partial and active) and has to give it
58 | * back. If the block was PARTIAL then the free doesn't own the block
59 | * (because it's either still referenced, or an alloc owns it). A
60 | * special case of this is that it has changed from PARTIAL to EMPTY and
61 | * now needs to be retired. Technically, the free wouldn't have to do
62 | * anything in this case because the first thing an alloc does when it
63 | * gets ownership of a descriptor is to check whether it is EMPTY and
64 | * retire it if that is the case. As an optimization, our free does try
65 | * to acquire the descriptor (by CASing the active field, which, if it
66 | * is lucky, points to that descriptor) and if it can do so, retire it.
67 | * If it can't, it tries to retire other descriptors from the partial
68 | * queue, so that we can be sure that even if no more allocations
69 | * happen, descriptors are still retired. This is analogous to what
70 | * Michael's allocator does.
71 | *
72 | * Another difference to Michael's allocator is not related to
73 | * concurrency, however: We don't point from slots to descriptors.
74 | * Instead we allocate superblocks aligned and point from the start of
75 | * the superblock to the descriptor, so we only need one word of
76 | * metadata per superblock.
77 | *
78 | * FIXME: Having more than one allocator per size class is probably
79 | * buggy because it was never tested.
80 | */
81 |
#include "fake-glib.h"
#include <stdlib.h>

#include "mono-mmap.h"
#include "mono-membar.h"
#include "hazard-pointer.h"
#include "atomic.h"
#include "lock-free-queue.h"
#include "sgen-gc.h"

#include "lock-free-alloc.h"
93 |
94 | //#define DESC_AVAIL_DUMMY
95 |
/* States a superblock (tracked in its descriptor's anchor) can be in. */
enum {
	STATE_FULL,	/* no free slots; not referenced by active/partial */
	STATE_PARTIAL,	/* some slots free */
	STATE_EMPTY	/* all slots free; waiting to be retired */
};

/*
 * Per-superblock allocation state, packed into one 32-bit word so it
 * can be replaced atomically with a single CAS (see set_anchor ()).
 */
typedef union {
	gint32 value;
	struct {
		guint32 avail : 15;	/* index of the first slot on the free list */
		guint32 count : 15;	/* number of free slots */
		guint32 state : 2;	/* STATE_* */
	} data;
} Anchor;

typedef struct _MonoLockFreeAllocDescriptor Descriptor;
struct _MonoLockFreeAllocDescriptor {
	MonoLockFreeQueueNode node;	/* link for the size class's partial queue */
	MonoLockFreeAllocator *heap;
	volatile Anchor anchor;	/* allocation state, updated via CAS */
	unsigned int slot_size;
	unsigned int max_count;	/* total number of slots in the superblock */
	gpointer sb;	/* usable area of the superblock */
#ifndef DESC_AVAIL_DUMMY
	Descriptor * volatile next;	/* link for the global free-descriptor list */
#endif
	gboolean in_use;	/* used for debugging only */
};

/* Number of descriptors allocated from the OS in one batch. */
#define NUM_DESC_BATCH 64

/*
 * Superblocks are SB_SIZE bytes, allocated SB_SIZE-aligned; the first
 * SB_HEADER_SIZE bytes hold a pointer back to the descriptor, so the
 * descriptor for any address is found by masking the low bits.
 */
#define SB_SIZE 16384
#define SB_HEADER_SIZE 16
#define SB_USABLE_SIZE (SB_SIZE - SB_HEADER_SIZE)

#define SB_HEADER_FOR_ADDR(a) ((gpointer)((gulong)(a) & ~(gulong)(SB_SIZE-1)))
#define DESCRIPTOR_FOR_ADDR(a) (*(Descriptor**)SB_HEADER_FOR_ADDR (a))
133 |
134 | static gpointer
135 | alloc_sb (Descriptor *desc)
136 | {
137 | gpointer sb_header = mono_sgen_alloc_os_memory_aligned (SB_SIZE, SB_SIZE, TRUE);
138 | g_assert (sb_header == SB_HEADER_FOR_ADDR (sb_header));
139 | DESCRIPTOR_FOR_ADDR (sb_header) = desc;
140 | //g_print ("sb %p for %p\n", sb_header, desc);
141 | return (char*)sb_header + SB_HEADER_SIZE;
142 | }
143 |
144 | static void
145 | free_sb (gpointer sb)
146 | {
147 | gpointer sb_header = SB_HEADER_FOR_ADDR (sb);
148 | g_assert ((char*)sb_header + SB_HEADER_SIZE == sb);
149 | mono_sgen_free_os_memory (sb_header, SB_SIZE);
150 | //g_print ("free sb %p\n", sb_header);
151 | }
152 |
153 | #ifndef DESC_AVAIL_DUMMY
154 | static Descriptor * volatile desc_avail;
155 |
/*
 * Get a fresh descriptor: pop one off the global free list or, if
 * that is empty, allocate a batch of NUM_DESC_BATCH from the OS, keep
 * the first and publish the rest as the new free list.  The list head
 * is read under hazard pointer slot 1 so that a concurrent
 * desc_enqueue_avail () cannot recycle it while we read its next
 * field.
 */
static Descriptor*
desc_alloc (void)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	Descriptor *desc;

	for (;;) {
		gboolean success;

		desc = get_hazardous_pointer ((gpointer * volatile)&desc_avail, hp, 1);
		if (desc) {
			/* Pop: the hazard pointer keeps desc alive, so
			   reading desc->next is safe. */
			Descriptor *next = desc->next;
			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, next, desc) == desc);
		} else {
			size_t desc_size = sizeof (Descriptor);
			Descriptor *d;
			int i;

			desc = mono_sgen_alloc_os_memory (desc_size * NUM_DESC_BATCH, TRUE);

			/* Organize into linked list. */
			d = desc;
			for (i = 0; i < NUM_DESC_BATCH; ++i) {
				Descriptor *next = (i == (NUM_DESC_BATCH - 1)) ? NULL : (Descriptor*)((char*)desc + ((i + 1) * desc_size));
				d->next = next;
				mono_lock_free_queue_node_init (&d->node, TRUE);
				d = next;
			}

			/* The next links must be visible before the
			   list is published. */
			mono_memory_write_barrier ();

			/* Publish all but the first descriptor (which we
			   keep); only succeeds if the list is still
			   empty, otherwise release the batch and retry. */
			success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc->next, NULL) == NULL);

			if (!success)
				mono_sgen_free_os_memory (desc, desc_size * NUM_DESC_BATCH);
		}

		mono_hazard_pointer_clear (hp, 1);

		if (success)
			break;
	}

	g_assert (!desc->in_use);
	desc->in_use = TRUE;

	return desc;
}
204 |
/*
 * Hazard-deferred callback: push a retired descriptor back onto the
 * global free list with a lock-free (Treiber-style) push.
 */
static void
desc_enqueue_avail (gpointer _desc)
{
	Descriptor *desc = _desc;
	Descriptor *old_head;

	g_assert (desc->anchor.data.state == STATE_EMPTY);
	g_assert (!desc->in_use);

	do {
		old_head = desc_avail;
		desc->next = old_head;
		/* The next link must be visible before the CAS
		   publishes the node. */
		mono_memory_write_barrier ();
	} while (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc, old_head) != old_head);
}
220 |
/* Retire an EMPTY descriptor owned by the caller: return its
   superblock to the OS now, but defer putting the descriptor back on
   the free list until no thread holds a hazardous reference to it. */
static void
desc_retire (Descriptor *desc)
{
	g_assert (desc->anchor.data.state == STATE_EMPTY);
	g_assert (desc->in_use);
	desc->in_use = FALSE;
	free_sb (desc->sb);
	mono_thread_hazardous_free_or_queue (desc, desc_enqueue_avail, FALSE, TRUE);
}
230 | #else
231 | MonoLockFreeQueue available_descs;
232 |
233 | static Descriptor*
234 | desc_alloc (void)
235 | {
236 | Descriptor *desc = (Descriptor*)mono_lock_free_queue_dequeue (&available_descs);
237 |
238 | if (desc)
239 | return desc;
240 |
241 | return calloc (1, sizeof (Descriptor));
242 | }
243 |
/* Dummy variant: free the superblock and recycle the descriptor via
   the lock-free queue. */
static void
desc_retire (Descriptor *desc)
{
	free_sb (desc->sb);
	mono_lock_free_queue_enqueue (&available_descs, &desc->node);
}
251 |
252 | static Descriptor*
253 | list_get_partial (MonoLockFreeAllocSizeClass *sc)
254 | {
255 | for (;;) {
256 | Descriptor *desc = (Descriptor*) mono_lock_free_queue_dequeue (&sc->partial);
257 | if (!desc)
258 | return NULL;
259 | if (desc->anchor.data.state != STATE_EMPTY)
260 | return desc;
261 | desc_retire (desc);
262 | }
263 | }
264 |
/* Hazard-deferred callback: put DESC on its size class's partial
   queue.  The queue node must be reset (freed) before it can be
   enqueued again. */
static void
desc_put_partial (gpointer _desc)
{
	Descriptor *desc = _desc;

	g_assert (desc->anchor.data.state != STATE_FULL);

	mono_lock_free_queue_node_free (&desc->node);
	mono_lock_free_queue_enqueue (&desc->heap->sc->partial, &desc->node);
}
275 |
/* Defer putting DESC on the partial queue until no thread holds a
   hazardous reference to it. */
static void
list_put_partial (Descriptor *desc)
{
	g_assert (desc->anchor.data.state != STATE_FULL);
	mono_thread_hazardous_free_or_queue (desc, desc_put_partial, FALSE, TRUE);
}
282 |
/*
 * Prune the partial queue: retire EMPTY descriptors so their memory
 * is reclaimed even if no further allocations happen.  Non-empty
 * descriptors are put back (via the hazard machinery); after
 * re-queuing two of them we stop, bounding the work per call.
 */
static void
list_remove_empty_desc (MonoLockFreeAllocSizeClass *sc)
{
	int num_non_empty = 0;
	for (;;) {
		Descriptor *desc = (Descriptor*) mono_lock_free_queue_dequeue (&sc->partial);
		if (!desc)
			return;
		/*
		 * We don't need to read atomically because we're the
		 * only thread that references this descriptor.
		 */
		if (desc->anchor.data.state == STATE_EMPTY) {
			desc_retire (desc);
		} else {
			g_assert (desc->heap->sc == sc);
			mono_thread_hazardous_free_or_queue (desc, desc_put_partial, FALSE, TRUE);
			if (++num_non_empty >= 2)
				return;
		}
	}
}
305 |
306 | static Descriptor*
307 | heap_get_partial (MonoLockFreeAllocator *heap)
308 | {
309 | return list_get_partial (heap->sc);
310 | }
311 |
/* Give DESC back to its size class's partial list (deferred via the
   hazard machinery). */
static void
heap_put_partial (Descriptor *desc)
{
	list_put_partial (desc);
}
317 |
318 | static gboolean
319 | set_anchor (Descriptor *desc, Anchor old_anchor, Anchor new_anchor)
320 | {
321 | if (old_anchor.data.state == STATE_EMPTY)
322 | g_assert (new_anchor.data.state == STATE_EMPTY);
323 |
324 | return InterlockedCompareExchange (&desc->anchor.value, new_anchor.value, old_anchor.value) == old_anchor.value;
325 | }
326 |
/*
 * Try to allocate one slot from the heap's active descriptor or, if
 * there is none, from a partial one.  Returns NULL if neither is
 * available.  Acquiring the descriptor (CASing active to NULL, or
 * dequeuing it from the partial list) makes the caller its sole
 * owner, which is what rules out ABA on the anchor CAS below.
 */
static gpointer
alloc_from_active_or_partial (MonoLockFreeAllocator *heap)
{
	Descriptor *desc;
	Anchor old_anchor, new_anchor;
	gpointer addr;

 retry:
	desc = heap->active;
	if (desc) {
		if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, NULL, desc) != desc)
			goto retry;
	} else {
		desc = heap_get_partial (heap);
		if (!desc)
			return NULL;
	}

	/* Now we own the desc. */

	do {
		unsigned int next;

		new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
		if (old_anchor.data.state == STATE_EMPTY) {
			/* We must free it because we own it. */
			desc_retire (desc);
			goto retry;
		}
		g_assert (old_anchor.data.state == STATE_PARTIAL);
		g_assert (old_anchor.data.count > 0);

		/* avail is the index of the free-list head slot. */
		addr = (char*)desc->sb + old_anchor.data.avail * desc->slot_size;

		mono_memory_read_barrier ();

		/* A free slot's first word holds the index of the next
		   free slot. */
		next = *(unsigned int*)addr;
		g_assert (next < SB_USABLE_SIZE / desc->slot_size);

		new_anchor.data.avail = next;
		--new_anchor.data.count;

		if (new_anchor.data.count == 0)
			new_anchor.data.state = STATE_FULL;
	} while (!set_anchor (desc, old_anchor, new_anchor));

	/* If the desc is partial we have to give it back. */
	if (new_anchor.data.state == STATE_PARTIAL) {
		if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, desc, NULL) != NULL)
			heap_put_partial (desc);
	}

	return addr;
}
381 |
382 | static gpointer
383 | alloc_from_new_sb (MonoLockFreeAllocator *heap)
384 | {
385 | unsigned int slot_size, count, i;
386 | Descriptor *desc = desc_alloc ();
387 |
388 | desc->sb = alloc_sb (desc);
389 |
390 | slot_size = desc->slot_size = heap->sc->slot_size;
391 | count = SB_USABLE_SIZE / slot_size;
392 |
393 | /* Organize blocks into linked list. */
394 | for (i = 1; i < count - 1; ++i)
395 | *(unsigned int*)((char*)desc->sb + i * slot_size) = i + 1;
396 |
397 | desc->heap = heap;
398 | /*
399 | * Setting avail to 1 because 0 is the block we're allocating
400 | * right away.
401 | */
402 | desc->anchor.data.avail = 1;
403 | desc->slot_size = heap->sc->slot_size;
404 | desc->max_count = count;
405 |
406 | desc->anchor.data.count = desc->max_count - 1;
407 | desc->anchor.data.state = STATE_PARTIAL;
408 |
409 | mono_memory_write_barrier ();
410 |
411 | /* Make it active or free it again. */
412 | if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, desc, NULL) == NULL) {
413 | return desc->sb;
414 | } else {
415 | desc->anchor.data.state = STATE_EMPTY;
416 | desc_retire (desc);
417 | return NULL;
418 | }
419 | }
420 |
421 | gpointer
422 | mono_lock_free_alloc (MonoLockFreeAllocator *heap)
423 | {
424 | gpointer addr;
425 |
426 | for (;;) {
427 |
428 | addr = alloc_from_active_or_partial (heap);
429 | if (addr)
430 | break;
431 |
432 | addr = alloc_from_new_sb (heap);
433 | if (addr)
434 | break;
435 | }
436 |
437 | return addr;
438 | }
439 |
/*
 * Frees a slot previously returned by mono_lock_free_alloc ().  The
 * owning descriptor is found via the superblock header; the slot is
 * pushed onto the descriptor's free-list, and depending on the state
 * transition the descriptor is retired (became empty) or handed back
 * as active/partial (was full).
 */
void
mono_lock_free_free (gpointer ptr)
{
	Anchor old_anchor, new_anchor;
	Descriptor *desc;
	gpointer sb;
	MonoLockFreeAllocator *heap = NULL;

	desc = DESCRIPTOR_FOR_ADDR (ptr);
	sb = desc->sb;
	/* The slot must lie inside the descriptor's own superblock. */
	g_assert (SB_HEADER_FOR_ADDR (ptr) == SB_HEADER_FOR_ADDR (sb));

	do {
		new_anchor = old_anchor = *(volatile Anchor*)&desc->anchor.value;
		/* Link the freed slot to the current free-list head ... */
		*(unsigned int*)ptr = old_anchor.data.avail;
		/* ... and make the freed slot the new head. */
		new_anchor.data.avail = ((char*)ptr - (char*)sb) / desc->slot_size;
		g_assert (new_anchor.data.avail < SB_USABLE_SIZE / desc->slot_size);

		if (old_anchor.data.state == STATE_FULL)
			new_anchor.data.state = STATE_PARTIAL;

		if (++new_anchor.data.count == desc->max_count) {
			/* All slots are free now — the descriptor can be retired. */
			heap = desc->heap;
			new_anchor.data.state = STATE_EMPTY;
		}
	} while (!set_anchor (desc, old_anchor, new_anchor));

	if (new_anchor.data.state == STATE_EMPTY) {
		g_assert (old_anchor.data.state != STATE_EMPTY);

		if (InterlockedCompareExchangePointer ((gpointer * volatile)&heap->active, NULL, desc) == desc) {
			/* We own it, so we free it. */
			desc_retire (desc);
		} else {
			/*
			 * Somebody else must free it, so we do some
			 * freeing for others.
			 */
			list_remove_empty_desc (heap->sc);
		}
	} else if (old_anchor.data.state == STATE_FULL) {
		/*
		 * Nobody owned it, now we do, so we need to give it
		 * back.
		 */

		g_assert (new_anchor.data.state == STATE_PARTIAL);

		if (InterlockedCompareExchangePointer ((gpointer * volatile)&desc->heap->active, desc, NULL) != NULL)
			heap_put_partial (desc);
	}
}
492 |
/*
 * Consistency-check helper: when the caller's local `print` is TRUE,
 * report the failed check via g_print () and continue; otherwise
 * abort via g_assert ().
 */
#define g_assert_OR_PRINT(c, format, ...)	do {				\
		if (!(c)) {						\
			if (print)					\
				g_print ((format), ## __VA_ARGS__);	\
			else						\
				g_assert (FALSE);			\
		}							\
	} while (0)
501 |
/*
 * Validates a descriptor's internal invariants: the state/count
 * combination and that the free-list is well-formed (indices in
 * range, no slot linked twice).  With print == TRUE problems are
 * reported via g_print () instead of aborting.
 */
static void
descriptor_check_consistency (Descriptor *desc, gboolean print)
{
	int count = desc->anchor.data.count;
	int max_count = SB_USABLE_SIZE / desc->slot_size;
#if _MSC_VER
	/* MSVC has no VLAs, so fall back to alloca. */
	gboolean* linked = alloca(max_count*sizeof(gboolean));
#else
	gboolean linked [max_count];
#endif
	int i, last;
	unsigned int index;

#ifndef DESC_AVAIL_DUMMY
	Descriptor *avail;

	/* A live descriptor must not be on the global available list. */
	for (avail = desc_avail; avail; avail = avail->next)
		g_assert_OR_PRINT (desc != avail, "descriptor is in the available list\n");
#endif

	g_assert_OR_PRINT (desc->slot_size == desc->heap->sc->slot_size, "slot size doesn't match size class\n");

	if (print)
		g_print ("descriptor %p is ", desc);

	/* state/count invariants: FULL <=> 0 free, EMPTY <=> all free. */
	switch (desc->anchor.data.state) {
	case STATE_FULL:
		if (print)
			g_print ("full\n");
		g_assert_OR_PRINT (count == 0, "count is not zero: %d\n", count);
		break;
	case STATE_PARTIAL:
		if (print)
			g_print ("partial\n");
		g_assert_OR_PRINT (count < max_count, "count too high: is %d but must be below %d\n", count, max_count);
		break;
	case STATE_EMPTY:
		if (print)
			g_print ("empty\n");
		g_assert_OR_PRINT (count == max_count, "count is wrong: is %d but should be %d\n", count, max_count);
		break;
	default:
		g_assert_OR_PRINT (FALSE, "invalid state\n");
	}

	/* Walk the free-list, marking each visited slot. */
	for (i = 0; i < max_count; ++i)
		linked [i] = FALSE;

	index = desc->anchor.data.avail;
	last = -1;
	for (i = 0; i < count; ++i) {
		gpointer addr = (char*)desc->sb + index * desc->slot_size;
		g_assert_OR_PRINT (index >= 0 && index < max_count,
				"index %d for %dth available slot, linked from %d, not in range [0 .. %d)\n",
				index, i, last, max_count);
		g_assert_OR_PRINT (!linked [index], "%dth available slot %d linked twice\n", i, index);
		/* In print mode the check above doesn't abort, so stop on a cycle. */
		if (linked [index])
			break;
		linked [index] = TRUE;
		last = index;
		index = *(unsigned int*)addr;
	}
}
565 |
566 | gboolean
567 | mono_lock_free_allocator_check_consistency (MonoLockFreeAllocator *heap)
568 | {
569 | Descriptor *active = heap->active;
570 | Descriptor *desc;
571 | if (active) {
572 | g_assert (active->anchor.data.state == STATE_PARTIAL);
573 | descriptor_check_consistency (active, FALSE);
574 | }
575 | while ((desc = (Descriptor*)mono_lock_free_queue_dequeue (&heap->sc->partial))) {
576 | g_assert (desc->anchor.data.state == STATE_PARTIAL || desc->anchor.data.state == STATE_EMPTY);
577 | descriptor_check_consistency (desc, FALSE);
578 | }
579 | return TRUE;
580 | }
581 |
582 | void
583 | mono_lock_free_allocator_init_size_class (MonoLockFreeAllocSizeClass *sc, unsigned int slot_size)
584 | {
585 | g_assert (slot_size <= SB_USABLE_SIZE / 2);
586 |
587 | mono_lock_free_queue_init (&sc->partial);
588 | sc->slot_size = slot_size;
589 | }
590 |
591 | void
592 | mono_lock_free_allocator_init_allocator (MonoLockFreeAllocator *heap, MonoLockFreeAllocSizeClass *sc)
593 | {
594 | heap->sc = sc;
595 | heap->active = NULL;
596 | }
597 |
--------------------------------------------------------------------------------
/LockFreeAllocatorTester/LockFreeAllocatorTester.xcodeproj/project.pbxproj:
--------------------------------------------------------------------------------
1 | // !$*UTF8*$!
2 | {
3 | archiveVersion = 1;
4 | classes = {
5 | };
6 | objectVersion = 46;
7 | objects = {
8 |
9 | /* Begin PBXBuildFile section */
10 | 8E9E685917E286EC00D29ABA /* lock-free-array-queue.c in Sources */ = {isa = PBXBuildFile; fileRef = 8E9E685717E286EC00D29ABA /* lock-free-array-queue.c */; };
11 | 8EA0D32717E1153900A63F1D /* UIKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8EA0D32617E1153900A63F1D /* UIKit.framework */; };
12 | 8EA0D32917E1153900A63F1D /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8EA0D32817E1153900A63F1D /* Foundation.framework */; };
13 | 8EA0D32B17E1153900A63F1D /* CoreGraphics.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8EA0D32A17E1153900A63F1D /* CoreGraphics.framework */; };
14 | 8EA0D33117E1153900A63F1D /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 8EA0D32F17E1153900A63F1D /* InfoPlist.strings */; };
15 | 8EA0D33317E1153900A63F1D /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 8EA0D33217E1153900A63F1D /* main.m */; };
16 | 8EA0D33717E1153900A63F1D /* XAMAppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 8EA0D33617E1153900A63F1D /* XAMAppDelegate.m */; };
17 | 8EA0D33917E1153900A63F1D /* Default.png in Resources */ = {isa = PBXBuildFile; fileRef = 8EA0D33817E1153900A63F1D /* Default.png */; };
18 | 8EA0D33B17E1153900A63F1D /* Default@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = 8EA0D33A17E1153900A63F1D /* Default@2x.png */; };
19 | 8EA0D33D17E1153900A63F1D /* Default-568h@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = 8EA0D33C17E1153900A63F1D /* Default-568h@2x.png */; };
20 | 8EA0D35917E1158100A63F1D /* hazard-pointer.c in Sources */ = {isa = PBXBuildFile; fileRef = 8EA0D34717E1158100A63F1D /* hazard-pointer.c */; };
21 | 8EA0D35A17E1158100A63F1D /* lock-free-alloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 8EA0D34917E1158100A63F1D /* lock-free-alloc.c */; };
22 | 8EA0D35B17E1158100A63F1D /* lock-free-queue.c in Sources */ = {isa = PBXBuildFile; fileRef = 8EA0D34B17E1158100A63F1D /* lock-free-queue.c */; };
23 | 8EA0D35C17E1158100A63F1D /* mono-linked-list-set.c in Sources */ = {isa = PBXBuildFile; fileRef = 8EA0D34E17E1158100A63F1D /* mono-linked-list-set.c */; };
24 | 8EA0D35D17E1158100A63F1D /* mono-mmap.c in Sources */ = {isa = PBXBuildFile; fileRef = 8EA0D35117E1158100A63F1D /* mono-mmap.c */; };
25 | 8EA0D35E17E1158100A63F1D /* sgen-gc.c in Sources */ = {isa = PBXBuildFile; fileRef = 8EA0D35317E1158100A63F1D /* sgen-gc.c */; };
26 | 8EA0D36017E1158100A63F1D /* test.c in Sources */ = {isa = PBXBuildFile; fileRef = 8EA0D35717E1158100A63F1D /* test.c */; };
27 | /* End PBXBuildFile section */
28 |
29 | /* Begin PBXFileReference section */
30 | 8E9E685717E286EC00D29ABA /* lock-free-array-queue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "lock-free-array-queue.c"; path = "../../lock-free-array-queue.c"; sourceTree = ""; };
31 | 8E9E685817E286EC00D29ABA /* lock-free-array-queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lock-free-array-queue.h"; path = "../../lock-free-array-queue.h"; sourceTree = ""; };
32 | 8EA0D32317E1153900A63F1D /* LockFreeAllocatorTester.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = LockFreeAllocatorTester.app; sourceTree = BUILT_PRODUCTS_DIR; };
33 | 8EA0D32617E1153900A63F1D /* UIKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = UIKit.framework; path = System/Library/Frameworks/UIKit.framework; sourceTree = SDKROOT; };
34 | 8EA0D32817E1153900A63F1D /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; };
35 | 8EA0D32A17E1153900A63F1D /* CoreGraphics.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreGraphics.framework; path = System/Library/Frameworks/CoreGraphics.framework; sourceTree = SDKROOT; };
36 | 8EA0D32E17E1153900A63F1D /* LockFreeAllocatorTester-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "LockFreeAllocatorTester-Info.plist"; sourceTree = ""; };
37 | 8EA0D33017E1153900A63F1D /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; };
38 | 8EA0D33217E1153900A63F1D /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; };
39 | 8EA0D33417E1153900A63F1D /* LockFreeAllocatorTester-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "LockFreeAllocatorTester-Prefix.pch"; sourceTree = ""; };
40 | 8EA0D33517E1153900A63F1D /* XAMAppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = XAMAppDelegate.h; sourceTree = ""; };
41 | 8EA0D33617E1153900A63F1D /* XAMAppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = XAMAppDelegate.m; sourceTree = ""; };
42 | 8EA0D33817E1153900A63F1D /* Default.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = Default.png; sourceTree = ""; };
43 | 8EA0D33A17E1153900A63F1D /* Default@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Default@2x.png"; sourceTree = ""; };
44 | 8EA0D33C17E1153900A63F1D /* Default-568h@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Default-568h@2x.png"; sourceTree = ""; };
45 | 8EA0D34317E1158100A63F1D /* alloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = alloc.h; path = ../../alloc.h; sourceTree = ""; };
46 | 8EA0D34417E1158100A63F1D /* atomic.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = atomic.h; path = ../../atomic.h; sourceTree = ""; };
47 | 8EA0D34617E1158100A63F1D /* delayed-free.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "delayed-free.h"; path = "../../delayed-free.h"; sourceTree = ""; };
48 | 8EA0D34717E1158100A63F1D /* hazard-pointer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "hazard-pointer.c"; path = "../../hazard-pointer.c"; sourceTree = ""; };
49 | 8EA0D34817E1158100A63F1D /* hazard-pointer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "hazard-pointer.h"; path = "../../hazard-pointer.h"; sourceTree = ""; };
50 | 8EA0D34917E1158100A63F1D /* lock-free-alloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "lock-free-alloc.c"; path = "../../lock-free-alloc.c"; sourceTree = ""; };
51 | 8EA0D34A17E1158100A63F1D /* lock-free-alloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lock-free-alloc.h"; path = "../../lock-free-alloc.h"; sourceTree = ""; };
52 | 8EA0D34B17E1158100A63F1D /* lock-free-queue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "lock-free-queue.c"; path = "../../lock-free-queue.c"; sourceTree = ""; };
53 | 8EA0D34C17E1158100A63F1D /* lock-free-queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lock-free-queue.h"; path = "../../lock-free-queue.h"; sourceTree = ""; };
54 | 8EA0D34D17E1158100A63F1D /* metadata.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = metadata.h; path = ../../metadata.h; sourceTree = ""; };
55 | 8EA0D34E17E1158100A63F1D /* mono-linked-list-set.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "mono-linked-list-set.c"; path = "../../mono-linked-list-set.c"; sourceTree = ""; };
56 | 8EA0D34F17E1158100A63F1D /* mono-linked-list-set.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "mono-linked-list-set.h"; path = "../../mono-linked-list-set.h"; sourceTree = ""; };
57 | 8EA0D35017E1158100A63F1D /* mono-membar.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "mono-membar.h"; path = "../../mono-membar.h"; sourceTree = ""; };
58 | 8EA0D35117E1158100A63F1D /* mono-mmap.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "mono-mmap.c"; path = "../../mono-mmap.c"; sourceTree = ""; };
59 | 8EA0D35217E1158100A63F1D /* mono-mmap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "mono-mmap.h"; path = "../../mono-mmap.h"; sourceTree = ""; };
60 | 8EA0D35317E1158100A63F1D /* sgen-gc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "sgen-gc.c"; path = "../../sgen-gc.c"; sourceTree = ""; };
61 | 8EA0D35417E1158100A63F1D /* sgen-gc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "sgen-gc.h"; path = "../../sgen-gc.h"; sourceTree = ""; };
62 | 8EA0D35717E1158100A63F1D /* test.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = test.c; path = ../../test.c; sourceTree = ""; };
63 | /* End PBXFileReference section */
64 |
65 | /* Begin PBXFrameworksBuildPhase section */
66 | 8EA0D32017E1153900A63F1D /* Frameworks */ = {
67 | isa = PBXFrameworksBuildPhase;
68 | buildActionMask = 2147483647;
69 | files = (
70 | 8EA0D32717E1153900A63F1D /* UIKit.framework in Frameworks */,
71 | 8EA0D32917E1153900A63F1D /* Foundation.framework in Frameworks */,
72 | 8EA0D32B17E1153900A63F1D /* CoreGraphics.framework in Frameworks */,
73 | );
74 | runOnlyForDeploymentPostprocessing = 0;
75 | };
76 | /* End PBXFrameworksBuildPhase section */
77 |
78 | /* Begin PBXGroup section */
79 | 8EA0D31A17E1153900A63F1D = {
80 | isa = PBXGroup;
81 | children = (
82 | 8EA0D32C17E1153900A63F1D /* LockFreeAllocatorTester */,
83 | 8EA0D32517E1153900A63F1D /* Frameworks */,
84 | 8EA0D32417E1153900A63F1D /* Products */,
85 | );
			sourceTree = "<group>";
87 | };
88 | 8EA0D32417E1153900A63F1D /* Products */ = {
89 | isa = PBXGroup;
90 | children = (
91 | 8EA0D32317E1153900A63F1D /* LockFreeAllocatorTester.app */,
92 | );
93 | name = Products;
			sourceTree = "<group>";
95 | };
96 | 8EA0D32517E1153900A63F1D /* Frameworks */ = {
97 | isa = PBXGroup;
98 | children = (
99 | 8EA0D32617E1153900A63F1D /* UIKit.framework */,
100 | 8EA0D32817E1153900A63F1D /* Foundation.framework */,
101 | 8EA0D32A17E1153900A63F1D /* CoreGraphics.framework */,
102 | );
103 | name = Frameworks;
			sourceTree = "<group>";
105 | };
106 | 8EA0D32C17E1153900A63F1D /* LockFreeAllocatorTester */ = {
107 | isa = PBXGroup;
108 | children = (
109 | 8E9E685717E286EC00D29ABA /* lock-free-array-queue.c */,
110 | 8E9E685817E286EC00D29ABA /* lock-free-array-queue.h */,
111 | 8EA0D34317E1158100A63F1D /* alloc.h */,
112 | 8EA0D34417E1158100A63F1D /* atomic.h */,
113 | 8EA0D34617E1158100A63F1D /* delayed-free.h */,
114 | 8EA0D34717E1158100A63F1D /* hazard-pointer.c */,
115 | 8EA0D34817E1158100A63F1D /* hazard-pointer.h */,
116 | 8EA0D34917E1158100A63F1D /* lock-free-alloc.c */,
117 | 8EA0D34A17E1158100A63F1D /* lock-free-alloc.h */,
118 | 8EA0D34B17E1158100A63F1D /* lock-free-queue.c */,
119 | 8EA0D34C17E1158100A63F1D /* lock-free-queue.h */,
120 | 8EA0D34D17E1158100A63F1D /* metadata.h */,
121 | 8EA0D34E17E1158100A63F1D /* mono-linked-list-set.c */,
122 | 8EA0D34F17E1158100A63F1D /* mono-linked-list-set.h */,
123 | 8EA0D35017E1158100A63F1D /* mono-membar.h */,
124 | 8EA0D35117E1158100A63F1D /* mono-mmap.c */,
125 | 8EA0D35217E1158100A63F1D /* mono-mmap.h */,
126 | 8EA0D35317E1158100A63F1D /* sgen-gc.c */,
127 | 8EA0D35417E1158100A63F1D /* sgen-gc.h */,
128 | 8EA0D35717E1158100A63F1D /* test.c */,
129 | 8EA0D33517E1153900A63F1D /* XAMAppDelegate.h */,
130 | 8EA0D33617E1153900A63F1D /* XAMAppDelegate.m */,
131 | 8EA0D32D17E1153900A63F1D /* Supporting Files */,
132 | );
133 | path = LockFreeAllocatorTester;
			sourceTree = "<group>";
135 | };
136 | 8EA0D32D17E1153900A63F1D /* Supporting Files */ = {
137 | isa = PBXGroup;
138 | children = (
139 | 8EA0D32E17E1153900A63F1D /* LockFreeAllocatorTester-Info.plist */,
140 | 8EA0D32F17E1153900A63F1D /* InfoPlist.strings */,
141 | 8EA0D33217E1153900A63F1D /* main.m */,
142 | 8EA0D33417E1153900A63F1D /* LockFreeAllocatorTester-Prefix.pch */,
143 | 8EA0D33817E1153900A63F1D /* Default.png */,
144 | 8EA0D33A17E1153900A63F1D /* Default@2x.png */,
145 | 8EA0D33C17E1153900A63F1D /* Default-568h@2x.png */,
146 | );
147 | name = "Supporting Files";
			sourceTree = "<group>";
149 | };
150 | /* End PBXGroup section */
151 |
152 | /* Begin PBXNativeTarget section */
153 | 8EA0D32217E1153900A63F1D /* LockFreeAllocatorTester */ = {
154 | isa = PBXNativeTarget;
155 | buildConfigurationList = 8EA0D34017E1153900A63F1D /* Build configuration list for PBXNativeTarget "LockFreeAllocatorTester" */;
156 | buildPhases = (
157 | 8EA0D31F17E1153900A63F1D /* Sources */,
158 | 8EA0D32017E1153900A63F1D /* Frameworks */,
159 | 8EA0D32117E1153900A63F1D /* Resources */,
160 | );
161 | buildRules = (
162 | );
163 | dependencies = (
164 | );
165 | name = LockFreeAllocatorTester;
166 | productName = LockFreeAllocatorTester;
167 | productReference = 8EA0D32317E1153900A63F1D /* LockFreeAllocatorTester.app */;
168 | productType = "com.apple.product-type.application";
169 | };
170 | /* End PBXNativeTarget section */
171 |
172 | /* Begin PBXProject section */
173 | 8EA0D31B17E1153900A63F1D /* Project object */ = {
174 | isa = PBXProject;
175 | attributes = {
176 | CLASSPREFIX = XAM;
177 | LastUpgradeCheck = 0460;
178 | ORGANIZATIONNAME = "Mark Probst";
179 | };
180 | buildConfigurationList = 8EA0D31E17E1153900A63F1D /* Build configuration list for PBXProject "LockFreeAllocatorTester" */;
181 | compatibilityVersion = "Xcode 3.2";
182 | developmentRegion = English;
183 | hasScannedForEncodings = 0;
184 | knownRegions = (
185 | en,
186 | );
187 | mainGroup = 8EA0D31A17E1153900A63F1D;
188 | productRefGroup = 8EA0D32417E1153900A63F1D /* Products */;
189 | projectDirPath = "";
190 | projectRoot = "";
191 | targets = (
192 | 8EA0D32217E1153900A63F1D /* LockFreeAllocatorTester */,
193 | );
194 | };
195 | /* End PBXProject section */
196 |
197 | /* Begin PBXResourcesBuildPhase section */
198 | 8EA0D32117E1153900A63F1D /* Resources */ = {
199 | isa = PBXResourcesBuildPhase;
200 | buildActionMask = 2147483647;
201 | files = (
202 | 8EA0D33117E1153900A63F1D /* InfoPlist.strings in Resources */,
203 | 8EA0D33917E1153900A63F1D /* Default.png in Resources */,
204 | 8EA0D33B17E1153900A63F1D /* Default@2x.png in Resources */,
205 | 8EA0D33D17E1153900A63F1D /* Default-568h@2x.png in Resources */,
206 | );
207 | runOnlyForDeploymentPostprocessing = 0;
208 | };
209 | /* End PBXResourcesBuildPhase section */
210 |
211 | /* Begin PBXSourcesBuildPhase section */
212 | 8EA0D31F17E1153900A63F1D /* Sources */ = {
213 | isa = PBXSourcesBuildPhase;
214 | buildActionMask = 2147483647;
215 | files = (
216 | 8EA0D33317E1153900A63F1D /* main.m in Sources */,
217 | 8EA0D33717E1153900A63F1D /* XAMAppDelegate.m in Sources */,
218 | 8EA0D35917E1158100A63F1D /* hazard-pointer.c in Sources */,
219 | 8EA0D35A17E1158100A63F1D /* lock-free-alloc.c in Sources */,
220 | 8EA0D35B17E1158100A63F1D /* lock-free-queue.c in Sources */,
221 | 8EA0D35C17E1158100A63F1D /* mono-linked-list-set.c in Sources */,
222 | 8EA0D35D17E1158100A63F1D /* mono-mmap.c in Sources */,
223 | 8EA0D35E17E1158100A63F1D /* sgen-gc.c in Sources */,
224 | 8EA0D36017E1158100A63F1D /* test.c in Sources */,
225 | 8E9E685917E286EC00D29ABA /* lock-free-array-queue.c in Sources */,
226 | );
227 | runOnlyForDeploymentPostprocessing = 0;
228 | };
229 | /* End PBXSourcesBuildPhase section */
230 |
231 | /* Begin PBXVariantGroup section */
232 | 8EA0D32F17E1153900A63F1D /* InfoPlist.strings */ = {
233 | isa = PBXVariantGroup;
234 | children = (
235 | 8EA0D33017E1153900A63F1D /* en */,
236 | );
237 | name = InfoPlist.strings;
			sourceTree = "<group>";
239 | };
240 | /* End PBXVariantGroup section */
241 |
242 | /* Begin XCBuildConfiguration section */
243 | 8EA0D33E17E1153900A63F1D /* Debug */ = {
244 | isa = XCBuildConfiguration;
245 | buildSettings = {
246 | ALWAYS_SEARCH_USER_PATHS = NO;
247 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
248 | CLANG_CXX_LIBRARY = "libc++";
249 | CLANG_ENABLE_OBJC_ARC = YES;
250 | CLANG_WARN_CONSTANT_CONVERSION = YES;
251 | CLANG_WARN_EMPTY_BODY = YES;
252 | CLANG_WARN_ENUM_CONVERSION = YES;
253 | CLANG_WARN_INT_CONVERSION = YES;
254 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
255 | "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
256 | COPY_PHASE_STRIP = NO;
257 | GCC_C_LANGUAGE_STANDARD = gnu99;
258 | GCC_DYNAMIC_NO_PIC = NO;
259 | GCC_OPTIMIZATION_LEVEL = 0;
260 | GCC_PREPROCESSOR_DEFINITIONS = (
261 | "DEBUG=1",
262 | "$(inherited)",
263 | );
264 | GCC_SYMBOLS_PRIVATE_EXTERN = NO;
265 | GCC_WARN_ABOUT_RETURN_TYPE = YES;
266 | GCC_WARN_UNINITIALIZED_AUTOS = YES;
267 | GCC_WARN_UNUSED_VARIABLE = YES;
268 | IPHONEOS_DEPLOYMENT_TARGET = 6.1;
269 | ONLY_ACTIVE_ARCH = YES;
270 | SDKROOT = iphoneos;
271 | TARGETED_DEVICE_FAMILY = "1,2";
272 | };
273 | name = Debug;
274 | };
275 | 8EA0D33F17E1153900A63F1D /* Release */ = {
276 | isa = XCBuildConfiguration;
277 | buildSettings = {
278 | ALWAYS_SEARCH_USER_PATHS = NO;
279 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
280 | CLANG_CXX_LIBRARY = "libc++";
281 | CLANG_ENABLE_OBJC_ARC = YES;
282 | CLANG_WARN_CONSTANT_CONVERSION = YES;
283 | CLANG_WARN_EMPTY_BODY = YES;
284 | CLANG_WARN_ENUM_CONVERSION = YES;
285 | CLANG_WARN_INT_CONVERSION = YES;
286 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
287 | "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
288 | COPY_PHASE_STRIP = YES;
289 | GCC_C_LANGUAGE_STANDARD = gnu99;
290 | GCC_WARN_ABOUT_RETURN_TYPE = YES;
291 | GCC_WARN_UNINITIALIZED_AUTOS = YES;
292 | GCC_WARN_UNUSED_VARIABLE = YES;
293 | IPHONEOS_DEPLOYMENT_TARGET = 6.1;
294 | OTHER_CFLAGS = "-DNS_BLOCK_ASSERTIONS=1";
295 | SDKROOT = iphoneos;
296 | TARGETED_DEVICE_FAMILY = "1,2";
297 | VALIDATE_PRODUCT = YES;
298 | };
299 | name = Release;
300 | };
301 | 8EA0D34117E1153900A63F1D /* Debug */ = {
302 | isa = XCBuildConfiguration;
303 | buildSettings = {
304 | GCC_PRECOMPILE_PREFIX_HEADER = YES;
305 | GCC_PREFIX_HEADER = "LockFreeAllocatorTester/LockFreeAllocatorTester-Prefix.pch";
306 | GCC_PREPROCESSOR_DEFINITIONS = (
307 | "DEBUG=1",
308 | "$(inherited)",
309 | "MONO_INTERNAL=",
310 | TEST_ALLOC,
311 | USE_GCC_ATOMIC_OPS,
312 | );
313 | GCC_VERSION = com.apple.compilers.llvmgcc42;
314 | INFOPLIST_FILE = "LockFreeAllocatorTester/LockFreeAllocatorTester-Info.plist";
315 | PRODUCT_NAME = "$(TARGET_NAME)";
316 | WRAPPER_EXTENSION = app;
317 | };
318 | name = Debug;
319 | };
320 | 8EA0D34217E1153900A63F1D /* Release */ = {
321 | isa = XCBuildConfiguration;
322 | buildSettings = {
323 | GCC_PRECOMPILE_PREFIX_HEADER = YES;
324 | GCC_PREFIX_HEADER = "LockFreeAllocatorTester/LockFreeAllocatorTester-Prefix.pch";
325 | GCC_PREPROCESSOR_DEFINITIONS = (
326 | "MONO_INTERNAL=",
327 | TEST_ALLOC,
328 | USE_GCC_ATOMIC_OPS,
329 | );
330 | GCC_VERSION = com.apple.compilers.llvmgcc42;
331 | INFOPLIST_FILE = "LockFreeAllocatorTester/LockFreeAllocatorTester-Info.plist";
332 | PRODUCT_NAME = "$(TARGET_NAME)";
333 | WRAPPER_EXTENSION = app;
334 | };
335 | name = Release;
336 | };
337 | /* End XCBuildConfiguration section */
338 |
339 | /* Begin XCConfigurationList section */
340 | 8EA0D31E17E1153900A63F1D /* Build configuration list for PBXProject "LockFreeAllocatorTester" */ = {
341 | isa = XCConfigurationList;
342 | buildConfigurations = (
343 | 8EA0D33E17E1153900A63F1D /* Debug */,
344 | 8EA0D33F17E1153900A63F1D /* Release */,
345 | );
346 | defaultConfigurationIsVisible = 0;
347 | defaultConfigurationName = Release;
348 | };
349 | 8EA0D34017E1153900A63F1D /* Build configuration list for PBXNativeTarget "LockFreeAllocatorTester" */ = {
350 | isa = XCConfigurationList;
351 | buildConfigurations = (
352 | 8EA0D34117E1153900A63F1D /* Debug */,
353 | 8EA0D34217E1153900A63F1D /* Release */,
354 | );
355 | defaultConfigurationIsVisible = 0;
356 | defaultConfigurationName = Release;
357 | };
358 | /* End XCConfigurationList section */
359 | };
360 | rootObject = 8EA0D31B17E1153900A63F1D /* Project object */;
361 | }
362 |
--------------------------------------------------------------------------------
/atomic.h:
--------------------------------------------------------------------------------
1 | /*
2 | * atomic.h: Atomic operations
3 | *
4 | * Author:
5 | * Dick Porter (dick@ximian.com)
6 | *
7 | * (C) 2002 Ximian, Inc.
8 | */
9 |
10 | #include "fake-glib.h"
11 |
12 | #ifndef _WAPI_ATOMIC_H_
13 | #define _WAPI_ATOMIC_H_
14 |
15 | #ifdef USE_GCC_ATOMIC_OPS
16 |
17 |
18 | static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
19 | gint32 exch, gint32 comp)
20 | {
21 | return __sync_val_compare_and_swap (dest, comp, exch);
22 | }
23 |
24 | static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
25 | {
26 | return __sync_val_compare_and_swap (dest, comp, exch);
27 | }
28 |
29 | static inline gint32 InterlockedIncrement(volatile gint32 *val)
30 | {
31 | return __sync_add_and_fetch (val, 1);
32 | }
33 |
34 | static inline gint32 InterlockedDecrement(volatile gint32 *val)
35 | {
36 | return __sync_add_and_fetch (val, -1);
37 | }
38 |
39 | static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
40 | {
41 | gint32 old_val;
42 | do {
43 | old_val = *val;
44 | } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
45 | return old_val;
46 | }
47 |
48 | static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
49 | gpointer new_val)
50 | {
51 | gpointer old_val;
52 | do {
53 | old_val = *val;
54 | } while (__sync_val_compare_and_swap (val, old_val, new_val) != old_val);
55 | return old_val;
56 | }
57 |
58 | static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
59 | {
60 | return __sync_fetch_and_add (val, add);
61 | }
62 |
63 | #else
64 |
#if defined(__NetBSD__)
/* sys/param.h provides __NetBSD_Version__. */
#include <sys/param.h>

/* Native atomic ops (atomic_cas_32 etc.) are available from NetBSD 5.0 on. */
#if __NetBSD_Version__ > 499004000
#include <sys/atomic.h>
#define HAVE_ATOMIC_OPS
#endif

#endif
74 |
75 | #include "fake-glib.h"
76 |
77 | #if defined(__NetBSD__) && defined(HAVE_ATOMIC_OPS)
78 |
79 | #define WAPI_ATOMIC_ASM
80 | static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
81 | gint32 exch, gint32 comp)
82 | {
83 | return atomic_cas_32((uint32_t*)dest, comp, exch);
84 | }
85 |
86 | static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
87 | {
88 | return atomic_cas_ptr(dest, comp, exch);
89 | }
90 |
91 | static inline gint32 InterlockedIncrement(volatile gint32 *val)
92 | {
93 | return atomic_inc_32_nv((uint32_t*)val);
94 | }
95 |
96 | static inline gint32 InterlockedDecrement(volatile gint32 *val)
97 | {
98 | return atomic_dec_32_nv((uint32_t*)val);
99 | }
100 |
101 | static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
102 | {
103 | return atomic_swap_32((uint32_t*)val, new_val);
104 | }
105 |
106 | static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
107 | gpointer new_val)
108 | {
109 | return atomic_swap_ptr(val, new_val);
110 | }
111 |
112 | static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
113 | {
114 | return atomic_add_32_nv((uint32_t*)val, add) - add;
115 | }
116 |
117 | #elif defined(__i386__) || defined(__x86_64__)
118 | #define WAPI_ATOMIC_ASM
119 |
120 | /*
121 | * NB: The *Pointer() functions here assume that
122 | * sizeof(pointer)==sizeof(gint32)
123 | *
124 | * NB2: These asm functions assume 486+ (some of the opcodes dont
125 | * exist on 386). If this becomes an issue, we can get configure to
126 | * fall back to the non-atomic C versions of these calls.
127 | */
128 |
/*
 * Atomic CAS: if *dest == comp, store exch into *dest.  Returns the
 * value *dest had before the operation (cmpxchg leaves it in eax).
 */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	gint32 old;

	__asm__ __volatile__ ("lock; cmpxchgl %2, %0"
			      : "=m" (*dest), "=a" (old)
			      : "r" (exch), "m" (*dest), "a" (comp));	
	return(old);
}
139 |
/*
 * Pointer-sized atomic CAS: cmpxchgq on x86-64, cmpxchgl elsewhere.
 * Returns the previous value of *dest.
 */
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("lock; "
#if defined(__x86_64__) && !defined(__native_client__)
			      "cmpxchgq"
#else
			      "cmpxchgl"
#endif
			      " %2, %0"
			      : "=m" (*dest), "=a" (old)
			      : "r" (exch), "m" (*dest), "a" (comp));

	return(old);
}
156 |
#ifdef __x86_64__
/* 64-bit CAS built on the pointer CAS (pointers are 64 bits here). */
static inline gint64 atomic64_cmpxchg(volatile gint64 *v, gint64 old, gint64 new)
{
	return (gint64) InterlockedCompareExchangePointer ((volatile gpointer*)v, (gpointer)new, (gpointer)old);
}

/* Plain load — NOTE(review): relies on aligned 64-bit loads being atomic on x86-64. */
static inline gint64 atomic64_read (const volatile gint64 *v)
{
	return *v;
}
#endif
168 |
/*
 * Atomically increments *val and returns the new value (xadd yields
 * the old value, hence the +1).
 */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;

	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (tmp), "=m" (*val)
			      : "0" (1), "m" (*val));

	return(tmp+1);
}
179 |
/* Atomically subtracts 1 from *val and returns the NEW value.
 * xadd with -1 leaves the old value in tmp, hence the -1 on return. */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (tmp), "=m" (*val)
			      : "0" (-1), "m" (*val));

	return(tmp-1);
}
190 |
191 | /*
192 | * See
193 | * http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
194 | * for the reasons for using cmpxchg and a loop here.
195 | *
196 | * That url is no longer valid, but it's still in the google cache at the
197 | * moment: http://www.google.com/search?q=cache:http://msdn.microsoft.com/library/en-us/dnmag00/html/win320700.asp?frame=true
198 | *
199 | * For the time being, http://msdn.microsoft.com/msdnmag/issues/0700/Win32/
200 | * might work. Bet it will change soon enough though.
201 | */
/* Atomically stores new_val into *val and returns the previous value.
 * Implemented as a cmpxchg retry loop (see the comment block above for
 * the rationale): EAX is seeded with a plain read of *val, and cmpxchg
 * reloads it with the current value on each failed attempt until the
 * swap lands. */
static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret;
	
	__asm__ __volatile__ ("1:; lock; cmpxchgl %2, %0; jne 1b"
			      : "=m" (*val), "=a" (ret)
			      : "r" (new_val), "m" (*val), "a" (*val));

	return(ret);
}
212 |
/* Atomically stores new_val into *val and returns the previous pointer.
 * Same cmpxchg retry-loop idiom as InterlockedExchange, using the
 * pointer-width cmpxchg (cmpxchgq on 64-bit, cmpxchgl otherwise). */
static inline gpointer InterlockedExchangePointer(volatile gpointer *val,
						  gpointer new_val)
{
	gpointer ret;
	
	__asm__ __volatile__ ("1:; lock; "
#if defined(__x86_64__)  && !defined(__native_client__)
			      "cmpxchgq"
#else
			      "cmpxchgl"
#endif
			      " %2, %0; jne 1b"
			      : "=m" (*val), "=a" (ret)
			      : "r" (new_val), "m" (*val), "a" (*val));

	return(ret);
}
230 |
/* Atomically adds `add` to *val and returns the PREVIOUS value
 * (xadd leaves the old value in ret). */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;
	
	__asm__ __volatile__ ("lock; xaddl %0, %1"
			      : "=r" (ret), "=m" (*val)
			      : "0" (add), "m" (*val));
	
	return(ret);
}
241 |
242 | #elif (defined(sparc) || defined (__sparc__)) && defined(__GNUC__)
243 | #define WAPI_ATOMIC_ASM
244 |
/* Atomic compare-and-swap: if *dest == comp, store exch; returns the
 * previous value of *dest. The CAS instruction is hand-encoded as
 * .word because older assemblers may not accept the cas mnemonic. */
G_GNUC_UNUSED 
static inline gint32 InterlockedCompareExchange(volatile gint32 *_dest, gint32 _exch, gint32 _comp)
{
	/* Operands pinned to the registers encoded in the .word below. */
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 comp asm("o4") = _comp;
	register gint32 exch asm("o5") = _exch;

	__asm__ __volatile__(
		/* cas [%%g1], %%o4, %%o5 */
		".word 0xdbe0500c"
		: "=r" (exch)
		: "0" (exch), "r" (dest), "r" (comp)
		: "memory");

	return exch;
}
261 |
/* Pointer-width compare-and-swap; returns the previous value of *dest.
 * Uses the 64-bit casx encoding on SPARCV9, 32-bit cas otherwise
 * (both hand-encoded as .word). */
G_GNUC_UNUSED 
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *_dest, gpointer _exch, gpointer _comp)
{
	register volatile gpointer *dest asm("g1") = _dest;
	register gpointer comp asm("o4") = _comp;
	register gpointer exch asm("o5") = _exch;

	__asm__ __volatile__(
#ifdef SPARCV9
		/* casx [%%g1], %%o4, %%o5 */
		".word 0xdbf0500c"
#else
		/* cas [%%g1], %%o4, %%o5 */
		".word 0xdbe0500c"
#endif
		: "=r" (exch)
		: "0" (exch), "r" (dest), "r" (comp)
		: "memory");

	return exch;
}
283 |
/* Atomically adds 1 to *_dest via a CAS retry loop and returns the NEW
 * value (the delay-slot add after the successful CAS produces old+1). */
G_GNUC_UNUSED
static inline gint32 InterlockedIncrement(volatile gint32 *_dest)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1:	ld	[%%g1], %%o4\n\t"
		"	add	%%o4, 1, %%o5\n\t"
		/*	cas	[%%g1], %%o4, %%o5 */
		"	.word	0xdbe0500c\n\t"
		"	cmp	%%o4, %%o5\n\t"
		"	bne	1b\n\t"
		"	 add	%%o5, 1, %%o5"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest)
		: "memory", "cc");

        return ret;
}
305 |
/* Atomically subtracts 1 from *_dest via a CAS retry loop and returns
 * the NEW value (delay-slot sub after the successful CAS gives old-1). */
G_GNUC_UNUSED
static inline gint32 InterlockedDecrement(volatile gint32 *_dest)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1:	ld	[%%g1], %%o4\n\t"
		"	sub	%%o4, 1, %%o5\n\t"
		/*	cas	[%%g1], %%o4, %%o5 */
		"	.word	0xdbe0500c\n\t"
		"	cmp	%%o4, %%o5\n\t"
		"	bne	1b\n\t"
		"	 sub	%%o5, 1, %%o5"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest)
		: "memory", "cc");

        return ret;
}
327 |
/* Atomically stores exch into *_dest and returns the previous value
 * (on CAS success o5 holds the old memory value; the delay slot is a
 * nop so that value is returned unchanged). */
G_GNUC_UNUSED
static inline gint32 InterlockedExchange(volatile gint32 *_dest, gint32 exch)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1:	ld	[%%g1], %%o4\n\t"
		"	mov	%3, %%o5\n\t"
		/*	cas	[%%g1], %%o4, %%o5 */
		"	.word	0xdbe0500c\n\t"
		"	cmp	%%o4, %%o5\n\t"
		"	bne	1b\n\t"
		"	 nop"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest), "r" (exch)
		: "memory", "cc");

	return ret;
}
349 |
/* Atomically stores exch into *_dest and returns the previous pointer.
 * Uses ldx/casx on SPARCV9 (64-bit pointers), ld/cas otherwise. */
G_GNUC_UNUSED
static inline gpointer InterlockedExchangePointer(volatile gpointer *_dest, gpointer exch)
{
	register volatile gpointer *dest asm("g1") = _dest;
	register gpointer tmp asm("o4");
	register gpointer ret asm("o5");

	__asm__ __volatile__(
#ifdef SPARCV9
		"1:	ldx	[%%g1], %%o4\n\t"
#else
		"1:	ld	[%%g1], %%o4\n\t"
#endif
		"	mov	%3, %%o5\n\t"
#ifdef SPARCV9
		/*	casx	[%%g1], %%o4, %%o5 */
		"	.word	0xdbf0500c\n\t"
#else
		/*	cas	[%%g1], %%o4, %%o5 */
		"	.word	0xdbe0500c\n\t"
#endif
		"	cmp	%%o4, %%o5\n\t"
		"	bne	1b\n\t"
		"	 nop"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest), "r" (exch)
		: "memory", "cc");

	return ret;
}
380 |
/* Atomically adds `add` to *_dest via a CAS retry loop.
 * NOTE(review): the branch delay slot re-adds `add` after a successful
 * CAS, so this returns old+add (the NEW value), whereas the x86
 * InterlockedExchangeAdd in this file returns the PREVIOUS value —
 * verify which semantics callers rely on. */
G_GNUC_UNUSED
static inline gint32 InterlockedExchangeAdd(volatile gint32 *_dest, gint32 add)
{
	register volatile gint32 *dest asm("g1") = _dest;
	register gint32 tmp asm("o4");
	register gint32 ret asm("o5");

	__asm__ __volatile__(
		"1:	ld	[%%g1], %%o4\n\t"
		"	add	%%o4, %3, %%o5\n\t"
		/*	cas	[%%g1], %%o4, %%o5 */
		"	.word	0xdbe0500c\n\t"
		"	cmp	%%o4, %%o5\n\t"
		"	bne	1b\n\t"
		"	 add	%%o5, %3, %%o5"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (dest), "r" (add)
		: "memory", "cc");

	return ret;
}
402 |
403 | #elif __s390__
404 |
405 | #define WAPI_ATOMIC_ASM
406 |
/* s390 compare-and-swap via CS: if *dest == comp, store exch.
 * Returns the previous value of *dest. */
static inline gint32
InterlockedCompareExchange(volatile gint32 *dest,
			   gint32 exch, gint32 comp)
{
	gint32 old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "\tLR\t%1,%3\n"
			      "\tCS\t%1,%2,0(1)\n"
			      : "+m" (*dest), "=&r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");	
	return(old);
}
421 |
422 | #ifndef __s390x__
/* 31-bit s390: pointer CAS via the 32-bit CS instruction.
 * Returns the previous value of *dest. */
static inline gpointer
InterlockedCompareExchangePointer(volatile gpointer *dest,
				  gpointer exch, gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "\tLR\t%1,%3\n"
			      "\tCS\t%1,%2,0(1)\n"
			      : "+m" (*dest), "=&r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");	
	return(old);
}
437 | # else
/* s390x: 64-bit pointer CAS via CSG. Returns the previous *dest. */
static inline gpointer 
InterlockedCompareExchangePointer(volatile gpointer *dest, 
				  gpointer exch, 
				  gpointer comp)
{
	gpointer old;

	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "\tLGR\t%1,%3\n"
			      "\tCSG\t%1,%2,0(1)\n"
			      : "+m" (*dest), "=&r" (old)
			      : "r" (exch), "r" (comp)
			      : "1", "cc");

	return(old);
}
454 | # endif
455 |
456 | # ifndef __s390x__
/* 31-bit s390: atomically adds 1 to *val via a CS retry loop.
 * Returns the NEW value (register 1 holds old+1 after success). */
static inline gint32 
InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAHI\t1,1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
474 | # else
/* s390x: atomically adds 1 to *val (64-bit register forms, 32-bit CS).
 * Returns the NEW value. */
static inline gint32 
InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tLGF\t%0,%1\n"
			      "\tLGFR\t1,%0\n"
			      "\tAGHI\t1,1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLGFR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
492 | # endif
493 |
494 | # ifndef __s390x__
/* 31-bit s390: atomically subtracts 1 from *val via a CS retry loop.
 * Returns the NEW value. */
static inline gint32 
InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAHI\t1,-1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
512 | # else
/* s390x: atomically subtracts 1 from *val. Returns the NEW value. */
static inline gint32 
InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp;
	
	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tLGF\t%0,%1\n"
			      "\tLGFR\t1,%0\n"
			      "\tAGHI\t1,-1\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b\n"
			      "\tLGFR\t%0,1"
			      : "=r" (tmp), "+m" (*val)
			      : : "1", "2", "cc");

	return(tmp);
}
530 | # endif
531 |
/* Atomically stores new_val into *val via a CS retry loop.
 * Returns the PREVIOUS value (the value that was successfully compared). */
static inline gint32 
InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret;
	
	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tL\t%1,%0\n"
			      "\tCS\t%1,%2,0(1)\n"
			      "\tJNZ\t0b"
			      : "+m" (*val), "=&r" (ret)
			      : "r" (new_val)
			      : "1", "cc");

	return(ret);
}
547 |
548 | # ifndef __s390x__
/* 31-bit s390: atomically stores new_val into *val (32-bit CS loop).
 * Returns the PREVIOUS pointer value. */
static inline gpointer
InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
	gpointer ret;
	
	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tL\t%1,%0\n"
			      "\tCS\t%1,%2,0(1)\n"
			      "\tJNZ\t0b"
			      : "+m" (*val), "=&r" (ret)
			      : "r" (new_val)
			      : "1", "cc");

	return(ret);
}
564 | # else
/* s390x: atomically stores new_val into *val (64-bit LG/CSG loop).
 * Returns the PREVIOUS pointer value. */
static inline gpointer
InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
	gpointer ret;
	
	__asm__ __volatile__ ("\tLA\t1,%0\n"
			      "0:\tLG\t%1,%0\n"
			      "\tCSG\t%1,%2,0(1)\n"
			      "\tJNZ\t0b"
			      : "+m" (*val), "=&r" (ret)
			      : "r" (new_val)
			      : "1", "cc");

	return(ret);
}
580 | # endif
581 |
582 | # ifndef __s390x__
/* 31-bit s390: atomically adds `add` to *val via a CS retry loop.
 * Returns the PREVIOUS value (ret holds the value that was compared). */
static inline gint32 
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tL\t%0,%1\n"
			      "\tLR\t1,%0\n"
			      "\tAR\t1,%2\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b"
			      : "=&r" (ret), "+m" (*val)
			      : "r" (add) 
			      : "1", "2", "cc");
	
	return(ret);
}
600 | # else
/* s390x: atomically adds `add` to *val (64-bit register forms).
 * Returns the PREVIOUS value. */
static inline gint32 
InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret;

	__asm__ __volatile__ ("\tLA\t2,%1\n"
			      "0:\tLGF\t%0,%1\n"
			      "\tLGFR\t1,%0\n"
			      "\tAGR\t1,%2\n"
			      "\tCS\t%0,1,0(2)\n"
			      "\tJNZ\t0b"
			      : "=&r" (ret), "+m" (*val)
			      : "r" (add) 
			      : "1", "2", "cc");
	
	return(ret);
}
618 | # endif
619 |
620 | #elif defined(__mono_ppc__)
621 | #define WAPI_ATOMIC_ASM
622 |
623 | #ifdef G_COMPILER_CODEWARRIOR
624 | static inline gint32 InterlockedIncrement(volatile register gint32 *val)
625 | {
626 | gint32 result = 0, tmp;
627 | register gint32 result = 0;
628 | register gint32 tmp;
629 |
630 | asm
631 | {
632 | @1:
633 | lwarx tmp, 0, val
634 | addi result, tmp, 1
635 | stwcx. result, 0, val
636 | bne- @1
637 | }
638 |
639 | return result;
640 | }
641 |
/* CodeWarrior PPC: atomically subtracts 1 from *val via an
 * lwarx/stwcx. reservation loop; returns the NEW value. */
static inline gint32 InterlockedDecrement(register volatile gint32 *val)
{
	register gint32 result = 0;
	register gint32 tmp;

	asm
	{
		@1:
			lwarx	tmp, 0, val
			addi	result, tmp, -1
			stwcx.	result, 0, val
			bne-	@1
	}

	return result;
}
658 | #define InterlockedCompareExchangePointer(dest,exch,comp) (void*)InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
659 |
/* CodeWarrior PPC compare-and-swap: if *dest == comp, store exch.
 * Returns the loaded (previous) value of *dest. */
static inline gint32 InterlockedCompareExchange(volatile register gint32 *dest, register gint32 exch, register gint32 comp)
{
	register gint32 tmp = 0;

	asm
	{
		@1:
			lwarx	tmp, 0, dest
			cmpw	tmp, comp
			bne-	@2
			stwcx.	exch, 0, dest
			bne-	@1
		@2:
	}

	return tmp;
}
/* CodeWarrior PPC: atomically stores exch into *dest; returns the
 * PREVIOUS value (tmp is the lwarx-loaded value). */
static inline gint32 InterlockedExchange(register volatile gint32 *dest, register gint32 exch)
{
	register gint32 tmp = 0;

	asm
	{
		@1:
			lwarx	tmp, 0, dest
			stwcx.	exch, 0, dest
			bne-	@1
	}

	return tmp;
}
691 | #define InterlockedExchangePointer(dest,exch) (void*)InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
692 | #else
693 |
694 | #if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
695 | #define LDREGX "ldarx"
696 | #define STREGCXD "stdcx."
697 | #define CMPREG "cmpd"
698 | #else
699 | #define LDREGX "lwarx"
700 | #define STREGCXD "stwcx."
701 | #define CMPREG "cmpw"
702 | #endif
703 |
/* PPC: atomically adds 1 to *val via lwarx/stwcx. reservation loop.
 * Returns the NEW value (result holds the loaded old value, hence +1). */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 result = 0, tmp;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "addi   %1, %0, 1\n\t"
                              "stwcx. %1, 0, %2\n\t"
			      "bne-   1b"
			      : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
	return result + 1;
}
716 |
/* PPC: atomically subtracts 1 from *val via lwarx/stwcx. loop.
 * Returns the NEW value (result is the loaded old value, hence -1). */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 result = 0, tmp;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "addi   %1, %0, -1\n\t"
                              "stwcx. %1, 0, %2\n\t"
			      "bne-   1b"
			      : "=&b" (result), "=&b" (tmp): "r" (val): "cc", "memory");
	return result - 1;
}
729 |
/* PPC pointer-width CAS: if *dest == comp, store exch; returns the
 * previous value. LDREGX/STREGCXD/CMPREG expand to the doubleword
 * (ppc64) or word (ppc32/ILP32) forms defined just above. */
static inline gpointer InterlockedCompareExchangePointer (volatile gpointer *dest,
						gpointer exch, gpointer comp)
{
	gpointer tmp = NULL;

	__asm__ __volatile__ ("\n1:\n\t"
			     LDREGX " %0, 0, %1\n\t"
			     CMPREG " %0, %2\n\t" 
			     "bne-    2f\n\t"
			     STREGCXD " %3, 0, %1\n\t"
			     "bne-    1b\n"
			     "2:"
			     : "=&r" (tmp)
			     : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
	return(tmp);
}
746 |
/* PPC 32-bit CAS: if *dest == comp, store exch; returns the previous
 * value of *dest (the lwarx-loaded value, whether or not the store hit). */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp) {
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			     "lwarx   %0, 0, %1\n\t"
			     "cmpw    %0, %2\n\t" 
			     "bne-    2f\n\t"
			     "stwcx.  %3, 0, %1\n\t"
			     "bne-    1b\n"
			     "2:"
			     : "=&r" (tmp)
			     : "b" (dest), "r" (comp), "r" (exch): "cc", "memory");
	return(tmp);
}
762 |
/* PPC: atomically stores exch into *dest; returns the PREVIOUS value
 * (tmp is the lwarx-loaded value from the successful iteration). */
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
	gint32 tmp = 0;

	__asm__ __volatile__ ("\n1:\n\t"
			      "lwarx  %0, 0, %2\n\t"
			      "stwcx. %3, 0, %2\n\t"
			      "bne    1b"
			      : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
	return(tmp);
}
774 |
/* PPC: atomically stores exch into *dest; returns the PREVIOUS pointer.
 * Uses the pointer-width LDREGX/STREGCXD forms. */
static inline gpointer InterlockedExchangePointer (volatile gpointer *dest, gpointer exch)
{
	gpointer tmp = NULL;

	__asm__ __volatile__ ("\n1:\n\t"
			      LDREGX " %0, 0, %2\n\t"
			      STREGCXD " %3, 0, %2\n\t"
			      "bne    1b"
			      : "=r" (tmp) : "0" (tmp), "b" (dest), "r" (exch): "cc", "memory");
	return(tmp);
}
786 |
/* PPC: atomically adds `add` to *dest; returns the PREVIOUS value
 * (result is the lwarx-loaded value before the add). */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
        gint32 result, tmp;
        __asm__ __volatile__ ("\n1:\n\t"
                              "lwarx  %0, 0, %2\n\t"
                              "add    %1, %0, %3\n\t"
                              "stwcx. %1, 0, %2\n\t"
                              "bne    1b"
                              : "=&r" (result), "=&r" (tmp)
                              : "r" (dest), "r" (add) : "cc", "memory");
        return(result);
}
799 |
800 | #undef LDREGX
801 | #undef STREGCXD
802 | #undef CMPREG
803 |
804 | #endif /* !G_COMPILER_CODEWARRIOR */
805 |
806 | #elif defined(__arm__)
807 | #define WAPI_ATOMIC_ASM
808 |
/* ARM compare-and-swap: if *dest == comp, store exch; returns the
 * previous value of *dest.
 * ARMv6+: ldrex/strex reservation loop.
 * Pre-v6: emulated with swp — note the swp stores exch into memory
 * before re-checking, then swaps the old value back on mismatch, so
 * *dest can briefly hold exch even when the compare ultimately fails. */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp)
{
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
	gint32 ret, tmp;
	__asm__ __volatile__ (	"1:\n"
				"mov	%0, #0\n"
				"ldrex %1, [%2]\n"
				"teq	%1, %3\n"
				"it eq\n"
				"strexeq %0, %4, [%2]\n"
				"teq %0, #0\n"
				"bne 1b\n"
				: "=&r" (tmp), "=&r" (ret)
				: "r" (dest), "r" (comp), "r" (exch)
				: "memory", "cc");

	return ret;
#else
	gint32 a, b;

	__asm__ __volatile__ (    "0:\n\t"
				  "ldr %1, [%2]\n\t"
				  "cmp %1, %4\n\t"
				  "mov %0, %1\n\t"
				  "bne 1f\n\t"
				  "swp %0, %3, [%2]\n\t"
				  "cmp %0, %1\n\t"
				  "swpne %3, %0, [%2]\n\t"
				  "bne 0b\n\t"
				  "1:"
				  : "=&r" (a), "=&r" (b)
				  : "r" (dest), "r" (exch), "r" (comp)
				  : "cc", "memory");

	return a;
#endif
}
846 |
/* ARM pointer compare-and-swap (32-bit pointers); same structure as
 * the gint32 version above: ldrex/strex on ARMv6+, swp emulation on
 * older cores (with the same transient-store caveat). Returns the
 * previous value of *dest. */
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
	gpointer ret, tmp;
	__asm__ __volatile__ (	"1:\n"
				"mov	%0, #0\n"
				"ldrex %1, [%2]\n"
				"teq	%1, %3\n"
				"it eq\n"
				"strexeq %0, %4, [%2]\n"
				"teq %0, #0\n"
				"bne 1b\n"
				: "=&r" (tmp), "=&r" (ret)
				: "r" (dest), "r" (comp), "r" (exch)
				: "memory", "cc");

	return ret;
#else
	gpointer a, b;

	__asm__ __volatile__ (    "0:\n\t"
				  "ldr %1, [%2]\n\t"
				  "cmp %1, %4\n\t"
				  "mov %0, %1\n\t"
				  "bne 1f\n\t"
				  "swpeq %0, %3, [%2]\n\t"
				  "cmp %0, %1\n\t"
				  "swpne %3, %0, [%2]\n\t"
				  "bne 0b\n\t"
				  "1:"
				  : "=&r" (a), "=&r" (b)
				  : "r" (dest), "r" (exch), "r" (comp)
				  : "cc", "memory");

	return a;
#endif
}
884 |
/* ARM: atomically adds 1 to *dest and returns the NEW value.
 * ARMv6+: ldrex/add/strex loop (ret holds old+1 before the store).
 * Pre-v6: swp-based emulation; returns b = old+1. */
static inline gint32 InterlockedIncrement(volatile gint32 *dest)
{
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
	gint32 ret, flag;
	__asm__ __volatile__ (	"1:\n"
				"ldrex %0, [%2]\n"
				"add %0, %0, %3\n"
				"strex %1, %0, [%2]\n"
				"teq %1, #0\n"
				"bne 1b\n"
				: "=&r" (ret), "=&r" (flag)
				: "r" (dest), "r" (1)
				: "memory", "cc");

	return ret;
#else
	gint32 a, b, c;

	__asm__ __volatile__ (  "0:\n\t"
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (1)
				: "cc", "memory");

	return b;
#endif
}
917 |
/* ARM: atomically subtracts 1 from *dest and returns the NEW value.
 * ARMv6+: ldrex/sub/strex loop. Pre-v6: swp-based emulation using
 * add with the constant -1. */
static inline gint32 InterlockedDecrement(volatile gint32 *dest)
{
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
	gint32 ret, flag;
	__asm__ __volatile__ (	"1:\n"
				"ldrex %0, [%2]\n"
				"sub %0, %0, %3\n"
				"strex %1, %0, [%2]\n"
				"teq %1, #0\n"
				"bne 1b\n"
				: "=&r" (ret), "=&r" (flag)
				: "r" (dest), "r" (1)
				: "memory", "cc");

	return ret;
#else
	gint32 a, b, c;

	__asm__ __volatile__ (  "0:\n\t"
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (-1)
				: "cc", "memory");

	return b;
#endif
}
950 |
/* ARM: atomically stores exch into *dest; returns the PREVIOUS value.
 * ARMv6+: ldrex/strex loop. Pre-v6: single swp instruction. */
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
	gint32 ret, flag;
	__asm__ __volatile__ (
			      "1:\n"
			      "ldrex %0, [%3]\n"
			      "strex %1, %2, [%3]\n"
			      "teq %1, #0\n"
			      "bne 1b\n"
			      : "=&r" (ret), "=&r" (flag)
			      : "r" (exch), "r" (dest)
			      : "memory", "cc");
	return ret;
#else
	gint32 a;

	__asm__ __volatile__ (  "swp %0, %2, [%1]"
				: "=&r" (a)
				: "r" (dest), "r" (exch));

	return a;
#endif
}
975 |
/* ARM: atomically stores exch into *dest; returns the PREVIOUS pointer.
 * ARMv6+: ldrex/strex loop. Pre-v6: single swp instruction. */
static inline gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch)
{
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
	gpointer ret, flag;
	__asm__ __volatile__ (
			      "1:\n"
			      "ldrex %0, [%3]\n"
			      "strex %1, %2, [%3]\n"
			      "teq %1, #0\n"
			      "bne 1b\n"
			      : "=&r" (ret), "=&r" (flag)
			      : "r" (exch), "r" (dest)
			      : "memory", "cc");
	return ret;
#else
	gpointer a;

	__asm__ __volatile__ (	"swp %0, %2, [%1]"
				: "=&r" (a)
				: "r" (dest), "r" (exch));

	return a;
#endif
}
1000 |
/* ARM: atomically adds `add` to *dest; returns the PREVIOUS value
 * (ret/a hold the loaded value before the add in both paths). */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
	gint32 ret, tmp, flag;
	__asm__ __volatile__ (	"1:\n"
				"ldrex %0, [%3]\n"
				"add %1, %0, %4\n"
				"strex %2, %1, [%3]\n"
				"teq %2, #0\n"
				"bne 1b\n"
				: "=&r" (ret), "=&r" (tmp), "=&r" (flag)
				: "r" (dest), "r" (add)
				: "memory", "cc");

	return ret;
#else
	int a, b, c;

	__asm__ __volatile__ (  "0:\n\t"
				"ldr %0, [%3]\n\t"
				"add %1, %0, %4\n\t"
				"swp %2, %1, [%3]\n\t"
				"cmp %0, %2\n\t"
				"swpne %1, %2, [%3]\n\t"
				"bne 0b"
				: "=&r" (a), "=&r" (b), "=&r" (c)
				: "r" (dest), "r" (add)
				: "cc", "memory");

	return a;
#endif
}
1033 |
1034 | #elif defined(__ia64__)
1035 | #define WAPI_ATOMIC_ASM
1036 |
/* The Intel compiler branches below use the _Interlocked* intrinsics,
 * which are declared in <ia64intrin.h>. The bare `#include` with no
 * header name was a preprocessor error. */
#ifdef __INTEL_COMPILER
#include <ia64intrin.h>
#endif
1040 |
/* ia64 compare-and-swap: if *dest == comp, store exch; returns the
 * previous value. The comparand is zero-extended into ar.ccv because
 * cmpxchg4 zero extends the value it reads from memory. */
static inline gint32 InterlockedCompareExchange(gint32 volatile *dest,
						gint32 exch, gint32 comp)
{
	gint32 old;
	guint64 real_comp;

#ifdef __INTEL_COMPILER
	old = _InterlockedCompareExchange (dest, exch, comp);
#else
	/* cmpxchg4 zero extends the value read from memory */
	real_comp = (guint64)(guint32)comp;
	asm volatile ("mov ar.ccv = %2 ;;\n\t"
				  "cmpxchg4.acq %0 = [%1], %3, ar.ccv\n\t"
				  : "=r" (old) : "r" (dest), "r" (real_comp), "r" (exch));
#endif

	return(old);
}
1059 |
/* ia64 pointer compare-and-swap via cmpxchg8.acq (64-bit pointers):
 * if *dest == comp, store exch; returns the previous pointer. */
static inline gpointer InterlockedCompareExchangePointer(gpointer volatile *dest,
						gpointer exch, gpointer comp)
{
	gpointer old;

#ifdef __INTEL_COMPILER
	old = _InterlockedCompareExchangePointer (dest, exch, comp);
#else
	asm volatile ("mov ar.ccv = %2 ;;\n\t"
				  "cmpxchg8.acq %0 = [%1], %3, ar.ccv\n\t"
				  : "=r" (old) : "r" (dest), "r" (comp), "r" (exch));
#endif

	return(old);
}
1075 |
1076 | static inline gint32 InterlockedIncrement(gint32 volatile *val)
1077 | {
1078 | #ifdef __INTEL_COMPILER
1079 | return _InterlockedIncrement (val);
1080 | #else
1081 | gint32 old;
1082 |
1083 | do {
1084 | old = *val;
1085 | } while (InterlockedCompareExchange (val, old + 1, old) != old);
1086 |
1087 | return old + 1;
1088 | #endif
1089 | }
1090 |
1091 | static inline gint32 InterlockedDecrement(gint32 volatile *val)
1092 | {
1093 | #ifdef __INTEL_COMPILER
1094 | return _InterlockedDecrement (val);
1095 | #else
1096 | gint32 old;
1097 |
1098 | do {
1099 | old = *val;
1100 | } while (InterlockedCompareExchange (val, old - 1, old) != old);
1101 |
1102 | return old - 1;
1103 | #endif
1104 | }
1105 |
1106 | static inline gint32 InterlockedExchange(gint32 volatile *dest, gint32 new_val)
1107 | {
1108 | #ifdef __INTEL_COMPILER
1109 | return _InterlockedExchange (dest, new_val);
1110 | #else
1111 | gint32 res;
1112 |
1113 | do {
1114 | res = *dest;
1115 | } while (InterlockedCompareExchange (dest, new_val, res) != res);
1116 |
1117 | return res;
1118 | #endif
1119 | }
1120 |
1121 | static inline gpointer InterlockedExchangePointer(gpointer volatile *dest, gpointer new_val)
1122 | {
1123 | #ifdef __INTEL_COMPILER
1124 | return (gpointer)_InterlockedExchange64 ((gint64*)dest, (gint64)new_val);
1125 | #else
1126 | gpointer res;
1127 |
1128 | do {
1129 | res = *dest;
1130 | } while (InterlockedCompareExchangePointer (dest, new_val, res) != res);
1131 |
1132 | return res;
1133 | #endif
1134 | }
1135 |
1136 | static inline gint32 InterlockedExchangeAdd(gint32 volatile *val, gint32 add)
1137 | {
1138 | gint32 old;
1139 |
1140 | #ifdef __INTEL_COMPILER
1141 | old = _InterlockedExchangeAdd (val, add);
1142 | #else
1143 | do {
1144 | old = *val;
1145 | } while (InterlockedCompareExchange (val, old + add, old) != old);
1146 |
1147 | return old;
1148 | #endif
1149 | }
1150 |
1151 | #elif defined(__alpha__)
1152 | #define WAPI_ATOMIC_ASM
1153 |
/* Alpha compare-and-swap via ldl_l/stl_c: if *dest == comp, store exch.
 * Returns the previous value of *dest. cmpeq/cmovne keep the old value
 * in place when the comparison fails; stl_c failure retries the loop.
 * comp/exch are widened to long for the 64-bit compare. */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp)
{
	gint32 old, temp, temp2;
	long compq = comp, exchq = exch;

	__asm__ __volatile__ (
		"1:	ldl_l	%2, %0\n"
		"	mov	%2, %1\n"
		"	cmpeq	%2, %5, %3\n"
		"	cmovne	%3, %4, %2\n"
		"	stl_c	%2, %0\n"
		"	beq	%2, 1b\n"
		: "=m" (*dest), "=&r" (old), "=&r" (temp), "=&r" (temp2)
		: "r" (exchq), "r" (compq), "m" (*dest));
	return(old);
}
1171 |
/* Alpha pointer compare-and-swap via ldq_l/stq_c (64-bit).
 * Same structure as the gint32 version; returns the previous *dest. */
static inline gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp)
{
	gpointer old, temp, temp2;

	__asm__ __volatile__ (
		"1:	ldq_l	%2, %0\n"
		"	mov	%2, %1\n"
		"	cmpeq	%2, %5, %3\n"
		"	cmovne	%3, %4, %2\n"
		"	stq_c	%2, %0\n"
		"	beq	%2, 1b\n"
		: "=m" (*dest), "=&r" (old), "=&r" (temp), "=&r" (temp2)
		: "r" (exch), "r" (comp), "m" (*dest));
	return(old);
}
1187 |
/* Alpha: atomically adds 1 to *val via ldl_l/stl_c retry loop.
 * Returns the NEW value (cur is snapshotted after the addl). */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 temp, cur;
	
	__asm__ __volatile__ (
		"1:	ldl_l	%0, %1\n"
		"	addl	%0, %3, %0\n"
		"	mov	%0, %2\n"
		"	stl_c	%0, %1\n"
		"	beq	%0, 1b\n"
		: "=&r" (temp), "=m" (*val), "=r" (cur)
		: "Ir" (1), "m" (*val));
	return(cur);
}
1202 |
/* Alpha: atomically subtracts 1 from *val via ldl_l/stl_c retry loop.
 * Returns the NEW value. */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 temp, cur;
	
	__asm__ __volatile__ (
		"1:	ldl_l	%0, %1\n"
		"	subl	%0, %3, %0\n"
		"	mov	%0, %2\n"
		"	stl_c	%0, %1\n"
		"	beq	%0, 1b\n"
		: "=&r" (temp), "=m" (*val), "=r" (cur)
		: "Ir" (1), "m" (*val));
	return(cur);
}
1217 |
/* Alpha: atomically stores new_val into *val; returns the PREVIOUS
 * value (ret is the ldl_l-loaded value of the successful iteration). */
static inline gint32 InterlockedExchange(volatile gint32 *val, gint32 new_val)
{
	gint32 ret, temp;

	__asm__ __volatile__ (
		"1:	ldl_l	%1, %0\n"
		"	mov	%3, %2\n"
		"	stl_c	%2, %0\n"
		"	beq	%2, 1b\n"
		: "=m" (*val), "=&r" (ret), "=&r" (temp)
		: "r" (new_val), "m" (*val));
	return(ret);
}
1231 |
/* Alpha: atomically stores new_val into *val (64-bit ldq_l/stq_c);
 * returns the PREVIOUS pointer. */
static inline gpointer InterlockedExchangePointer(volatile gpointer *val, gpointer new_val)
{
	gpointer ret, temp;

	__asm__ __volatile__ (
		"1:	ldq_l	%1, %0\n"
		"	mov	%3, %2\n"
		"	stq_c	%2, %0\n"
		"	beq	%2, 1b\n"
		: "=m" (*val), "=&r" (ret), "=&r" (temp)
		: "r" (new_val), "m" (*val));
	return(ret);
}
1245 |
/* Alpha: atomically adds `add` to *val; returns the PREVIOUS value
 * (ret is snapshotted before the addl). */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *val, gint32 add)
{
	gint32 ret, temp;
	
	__asm__ __volatile__ (
		"1:	ldl_l	%2, %0\n"
		"	mov	%2, %1\n"
		"	addl	%2, %3, %2\n"
		"	stl_c	%2, %0\n"
		"	beq	%2, 1b\n"
		: "=m" (*val), "=&r" (ret), "=&r" (temp)
		: "r" (add), "m" (*val));
	
	return(ret);
}
1261 |
1262 | #elif defined(__mips__)
1263 | #define WAPI_ATOMIC_ASM
1264 |
/* MIPS: atomically adds 1 to *val via ll/sc retry loop.
 * Returns the NEW value (result is the loaded old value, hence +1). */
static inline gint32 InterlockedIncrement(volatile gint32 *val)
{
	gint32 tmp, result = 0;

	__asm__ __volatile__ ("    .set    mips32\n"
			      "1:  ll      %0, %2\n"
			      "    addu    %1, %0, 1\n"
                              "    sc      %1, %2\n"
			      "    beqz    %1, 1b\n"
			      "    .set    mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*val)
			      : "m" (*val));
	return result + 1;
}
1279 |
/* MIPS: atomically subtracts 1 from *val via ll/sc retry loop.
 * Returns the NEW value. */
static inline gint32 InterlockedDecrement(volatile gint32 *val)
{
	gint32 tmp, result = 0;

	__asm__ __volatile__ ("    .set    mips32\n"
			      "1:  ll      %0, %2\n"
			      "    subu    %1, %0, 1\n"
                              "    sc      %1, %2\n"
			      "    beqz    %1, 1b\n"
			      "    .set    mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*val)
			      : "m" (*val));
	return result - 1;
}
1294 |
1295 | #define InterlockedCompareExchangePointer(dest,exch,comp) InterlockedCompareExchange((volatile gint32 *)(dest), (gint32)(exch), (gint32)(comp))
1296 |
/* MIPS compare-and-swap via ll/sc: if *dest == comp, store exch.
 * Returns the previous value of *dest (old is the ll-loaded value;
 * on mismatch the bne exits before the sc). */
static inline gint32 InterlockedCompareExchange(volatile gint32 *dest,
						gint32 exch, gint32 comp) {
	gint32 old, tmp;

	__asm__ __volatile__ ("    .set    mips32\n"
			      "1:  ll      %0, %2\n"
			      "    bne     %0, %5, 2f\n"
			      "    move    %1, %4\n"
                              "    sc      %1, %2\n"
			      "    beqz    %1, 1b\n"
			      "2:  .set    mips0\n"
			      : "=&r" (old), "=&r" (tmp), "=m" (*dest)
			      : "m" (*dest), "r" (exch), "r" (comp));
	return(old);
}
1312 |
/* MIPS: atomically stores exch into *dest; returns the PREVIOUS value
 * (result is the ll-loaded value of the successful iteration). */
static inline gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch)
{
	gint32 result, tmp;

	__asm__ __volatile__ ("    .set    mips32\n"
			      "1:  ll      %0, %2\n"
			      "    move    %1, %4\n"
                              "    sc      %1, %2\n"
			      "    beqz    %1, 1b\n"
			      "    .set    mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*dest)
			      : "m" (*dest), "r" (exch));
	return(result);
}
1327 | #define InterlockedExchangePointer(dest,exch) InterlockedExchange((volatile gint32 *)(dest), (gint32)(exch))
1328 |
/* MIPS: atomically adds `add` to *dest; returns the PREVIOUS value
 * (result is the ll-loaded value before the addu). */
static inline gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add)
{
	gint32 result, tmp;

	__asm__ __volatile__ ("    .set    mips32\n"
			      "1:  ll      %0, %2\n"
			      "    addu    %1, %0, %4\n"
                              "    sc      %1, %2\n"
			      "    beqz    %1, 1b\n"
			      "    .set    mips0\n"
			      : "=&r" (result), "=&r" (tmp), "=m" (*dest)
			      : "m" (*dest), "r" (add));
	return result;
}
1343 |
1344 | #else
1345 |
1346 | extern gint32 InterlockedCompareExchange(volatile gint32 *dest, gint32 exch, gint32 comp);
1347 | extern gpointer InterlockedCompareExchangePointer(volatile gpointer *dest, gpointer exch, gpointer comp);
1348 | extern gint32 InterlockedIncrement(volatile gint32 *dest);
1349 | extern gint32 InterlockedDecrement(volatile gint32 *dest);
1350 | extern gint32 InterlockedExchange(volatile gint32 *dest, gint32 exch);
1351 | extern gpointer InterlockedExchangePointer(volatile gpointer *dest, gpointer exch);
1352 | extern gint32 InterlockedExchangeAdd(volatile gint32 *dest, gint32 add);
1353 |
1354 | #if defined(__hppa__)
1355 | #define WAPI_ATOMIC_ASM
1356 | #endif
1357 |
1358 | #endif
1359 |
1360 | #endif
1361 |
1362 | #endif /* _WAPI_ATOMIC_H_ */
1363 |
--------------------------------------------------------------------------------