├── API_CHANGES
├── INTERNAL_CHANGES
├── Makefile
├── README
├── regex_src
│   ├── regexJIT.c
│   ├── regexJIT.h
│   └── regexMain.c
├── sljit_src
│   ├── sljitConfig.h
│   ├── sljitConfigInternal.h
│   ├── sljitExecAllocator.c
│   ├── sljitLir.c
│   ├── sljitLir.h
│   ├── sljitNativeARM_32.c
│   ├── sljitNativeARM_64.c
│   ├── sljitNativeARM_T2_32.c
│   ├── sljitNativeMIPS_32.c
│   ├── sljitNativeMIPS_64.c
│   ├── sljitNativeMIPS_common.c
│   ├── sljitNativePPC_32.c
│   ├── sljitNativePPC_64.c
│   ├── sljitNativePPC_common.c
│   ├── sljitNativeSPARC_32.c
│   ├── sljitNativeSPARC_common.c
│   ├── sljitNativeTILEGX-encoder.c
│   ├── sljitNativeTILEGX_64.c
│   ├── sljitNativeX86_32.c
│   ├── sljitNativeX86_64.c
│   ├── sljitNativeX86_common.c
│   └── sljitUtils.c
└── test_src
    ├── sljitMain.c
    └── sljitTest.c

/API_CHANGES:
--------------------------------------------------------------------------------
1 | This file is a short summary of the API changes:
2 |
3 | 19.09.2014 - Non-backward compatible
4 |    I, D and S prefixes are now used in conditional and floating
5 |    point operations, and an L prefix in long multiplication
6 |    and division (op0 opcodes).
7 |
8 | 11.08.2014 - Non-backward compatible
9 |    A currently unused options parameter is added to sljit_emit_enter
10 |    and sljit_set_context.
11 |
12 | 06.07.2014 - Non-backward compatible
13 |    SCRATCH registers are renamed to Rx and SAVED registers
14 |    are renamed to Sx. See the explanation of these registers
15 |    in sljitLir.h.
16 |
17 | 31.05.2014 - Non-backward compatible
18 |    SLJIT_TEMPORARY_EREGx registers are renamed to SLJIT_SCRATCH_EREGx;
19 |    they were missed when that renaming was done on 08.11.2012.
20 |
21 | 05.03.2014 - Backward compatible
22 |    sljit_set_target now also supports jumps which were not
23 |    created with the SLJIT_REWRITABLE_JUMP flag.
24 |    Reason: sljit_emit_ijump does not support conditional
25 |    jumps.
26 |
27 | 03.03.2014 - Non-backward compatible
28 |    SLJIT_MOV_UI cannot be combined with SLJIT_INT_OP.
29 |    Reason: the SLJIT_INT_OP flag is not recommended for direct
30 |    use, and SLJIT_IMOV has no sign bit.
31 |
32 | 29.01.2014 - Backward compatible
33 |    Bits assigned to the SLJIT_MEM and SLJIT_IMM flags are changed.
34 |    Reason: the most common cases now fit into one byte,
35 |    and more registers can be supported in the future.
36 |
37 | 08.11.2012 - Non-backward compatible
38 |    SLJIT_TEMPORARY_REGx registers are renamed to SLJIT_SCRATCH_REGx.
39 |
40 | 07.11.2012 - Non-backward compatible
41 |    sljit_emit_cond_value is renamed to sljit_emit_op_flags. An
42 |    extra source argument is added, which will be used in the future.
43 |
44 | 05.11.2012 - Backward compatible
45 |    sljit_emit_cond_value now supports the SLJIT_AND and SLJIT_INT_OP
46 |    flags, which makes this function complete.
47 |
48 | 01.11.2012 - Non-backward compatible
49 |    SLJIT_F* opcodes are renamed to SLJIT_*D to show that
50 |    they are double precision operators. Furthermore,
51 |    SLJIT_*S single precision opcodes are added.
52 |
53 | 01.11.2012 - Non-backward compatible
54 |    Register arguments of operations with the SLJIT_INT_OP flag
55 |    must be computed by another operation with the SLJIT_INT_OP flag,
56 |    the same way the SLJIT_SINGLE_OP flag works with floating point
57 |    numbers. See the description of SLJIT_INT_OP.
58 |
59 | 01.11.2012 - Backward compatible
60 |    All operations that support the SLJIT_INT_OP flag now have an
61 |    alternate name which includes SLJIT_INT_OP. These
62 |    names start with I.
63 |
64 | 31.10.2012 - Non-backward compatible
65 |    Renaming sljit_w to sljit_sw, sljit_i to sljit_si, sljit_h
66 |    to sljit_sh, and sljit_b to sljit_sb. Reason: their sign
67 |    bit is part of the type now.
68 |
69 | 20.10.2012 - Non-backward compatible
70 |    Renaming SLJIT_C_FLOAT_NAN to SLJIT_C_FLOAT_UNORDERED.
71 |    Reason: all architectures call these unordered comparisons.
72 |
--------------------------------------------------------------------------------
/INTERNAL_CHANGES:
--------------------------------------------------------------------------------
1 | This file is a short summary of the internal changes:
2 |
3 | 18.11.2012
4 |    Switching from stdcall to cdecl on x86-32. Fastcall is still the default
5 |    on GCC and MSVC. Now Intel C compilers are supported.
6 |
7 | 20.10.2012
8 |    Supporting Sparc-32 CPUs.
9 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | ifdef CROSS_COMPILER
2 | CC = $(CROSS_COMPILER)
3 | else
4 | ifndef CC
5 | # default compiler
6 | CC = gcc
7 | endif
8 | endif
9 |
10 | ifndef EXTRA_CPPFLAGS
11 | EXTRA_CPPFLAGS=
12 | endif
13 |
14 | ifndef EXTRA_LDFLAGS
15 | EXTRA_LDFLAGS=
16 | endif
17 |
18 | CPPFLAGS = $(EXTRA_CPPFLAGS) -DSLJIT_CONFIG_AUTO=1 -Isljit_src
19 | CFLAGS += -O2 -Wall
20 | REGEX_CFLAGS += $(CFLAGS) -fshort-wchar
21 | LDFLAGS = $(EXTRA_LDFLAGS)
22 |
23 | TARGET = sljit_test regex_test
24 |
25 | BINDIR = bin
26 | SRCDIR = sljit_src
27 | TESTDIR = test_src
28 | REGEXDIR = regex_src
29 |
30 | SLJIT_HEADERS = $(SRCDIR)/sljitLir.h $(SRCDIR)/sljitConfig.h $(SRCDIR)/sljitConfigInternal.h
31 |
32 | SLJIT_LIR_FILES = $(SRCDIR)/sljitLir.c $(SRCDIR)/sljitExecAllocator.c $(SRCDIR)/sljitUtils.c \
33 | 	$(SRCDIR)/sljitNativeARM_32.c $(SRCDIR)/sljitNativeARM_T2_32.c $(SRCDIR)/sljitNativeARM_64.c \
34 | 	$(SRCDIR)/sljitNativeMIPS_common.c $(SRCDIR)/sljitNativeMIPS_32.c $(SRCDIR)/sljitNativeMIPS_64.c \
35 | 	$(SRCDIR)/sljitNativePPC_common.c $(SRCDIR)/sljitNativePPC_32.c $(SRCDIR)/sljitNativePPC_64.c \
36 | 	$(SRCDIR)/sljitNativeSPARC_common.c $(SRCDIR)/sljitNativeSPARC_32.c \
37 | 	$(SRCDIR)/sljitNativeTILEGX_64.c \
38 | 	$(SRCDIR)/sljitNativeX86_common.c $(SRCDIR)/sljitNativeX86_32.c $(SRCDIR)/sljitNativeX86_64.c
39 |
40 | all: $(BINDIR) $(TARGET)
41 |
42 | $(BINDIR) :
43 | 	mkdir $(BINDIR)
44 |
45 | $(BINDIR)/sljitLir.o : $(BINDIR) $(SLJIT_LIR_FILES) $(SLJIT_HEADERS)
46 | 	$(CC) $(CPPFLAGS) $(CFLAGS) -c -o $@ $(SRCDIR)/sljitLir.c
47 |
48 | $(BINDIR)/sljitMain.o : $(TESTDIR)/sljitMain.c $(BINDIR) $(SLJIT_HEADERS)
49 | 	$(CC) $(CPPFLAGS) $(CFLAGS) -c -o $@ $(TESTDIR)/sljitMain.c
50 |
51 | $(BINDIR)/sljitTest.o : $(TESTDIR)/sljitTest.c $(BINDIR) $(SLJIT_HEADERS)
52 | 	$(CC) $(CPPFLAGS) $(CFLAGS) -c -o $@ $(TESTDIR)/sljitTest.c
53 |
54 | $(BINDIR)/regexMain.o : $(REGEXDIR)/regexMain.c $(BINDIR) $(SLJIT_HEADERS)
55 | 	$(CC) $(CPPFLAGS) $(CFLAGS) $(REGEX_CFLAGS) -c -o $@ $(REGEXDIR)/regexMain.c
56 |
57 | $(BINDIR)/regexJIT.o : $(REGEXDIR)/regexJIT.c $(BINDIR) $(SLJIT_HEADERS) $(REGEXDIR)/regexJIT.h
58 | 	$(CC) $(CPPFLAGS) $(CFLAGS) $(REGEX_CFLAGS) -c -o $@ $(REGEXDIR)/regexJIT.c
59 |
60 | clean:
61 | 	rm -f $(BINDIR)/*.o $(BINDIR)/sljit_test $(BINDIR)/regex_test
62 |
63 | sljit_test: $(BINDIR)/sljitMain.o $(BINDIR)/sljitTest.o $(BINDIR)/sljitLir.o
64 | 	$(CC) $(CFLAGS) $(LDFLAGS) $(BINDIR)/sljitMain.o $(BINDIR)/sljitTest.o $(BINDIR)/sljitLir.o -o $(BINDIR)/$@ -lm -lpthread
65 |
66 | regex_test: $(BINDIR)/regexMain.o $(BINDIR)/regexJIT.o $(BINDIR)/sljitLir.o
67 | 	$(CC) $(CFLAGS) $(LDFLAGS) $(BINDIR)/regexMain.o $(BINDIR)/regexJIT.o $(BINDIR)/sljitLir.o -o $(BINDIR)/$@ -lm -lpthread
68 |
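A hedged sketch, not part of the repository: an application that embeds sljit could be built through this Makefile by adding rules modelled on the ones above, reusing bin/sljitLir.o and the SLJIT_HEADERS dependency list. The myapp target name and the myapp.c source path are placeholders.

$(BINDIR)/myapp.o : myapp.c $(BINDIR) $(SLJIT_HEADERS)
	$(CC) $(CPPFLAGS) $(CFLAGS) -c -o $@ myapp.c

myapp: $(BINDIR)/myapp.o $(BINDIR)/sljitLir.o
	$(CC) $(CFLAGS) $(LDFLAGS) $(BINDIR)/myapp.o $(BINDIR)/sljitLir.o -o $(BINDIR)/$@ -lm -lpthread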
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
1 |
2 | SLJIT - Stack Less JIT Compiler
3 |
4 | Purpose:
5 |   A simple, machine independent JIT compiler, which is suitable for
6 |   translating interpreted byte code to machine code. The sljitLir.h
7 |   header describes the LIR (low-level intermediate representation) of SLJIT.
8 |
9 | Compatible:
10 |   Any modern C (C++) compiler. At least I hope so.
11 |
12 | Using sljit:
13 |   Copy the contents of the sljit_src directory into your project source directory.
14 |   Add the sljitLir.c source file to your build environment. All other files are
15 |   included by sljitLir.c (if required). Define the target machine by a SLJIT_CONFIG_*
16 |   selector. See sljitConfig.h for all possible values. For C++ compilers,
17 |   rename sljitLir.c to sljitLir.cpp.
18 |
19 | More info:
20 |   http://sljit.sourceforge.net/
21 |
22 | Contact:
23 |   hzmester@freemail.hu
24 |
25 | Special thanks:
26 |   Alexander Nasonov
27 |   Daniel Richard G.
28 |   Giuseppe D'Angelo
29 |   Jiong Wang (TileGX support)
30 |
--------------------------------------------------------------------------------
/regex_src/regexJIT.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * Stack-less Just-In-Time compiler
3 |  *
4 |  * Copyright 2009-2010 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
5 |  *
6 |  * Redistribution and use in source and binary forms, with or without modification, are
7 |  * permitted provided that the following conditions are met:
8 |  *
9 |  * 1. Redistributions of source code must retain the above copyright notice, this list of
10 |  *    conditions and the following disclaimer.
11 |  *
12 |  * 2. Redistributions in binary form must reproduce the above copyright notice, this list
13 |  *    of conditions and the following disclaimer in the documentation and/or other materials
14 |  *    provided with the distribution.
15 |  *
16 |  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
17 |  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 |  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
19 |  * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 |  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21 |  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 |  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 |  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 |  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 |  */
26 |
27 | #ifndef _REGEX_JIT_H_
28 | #define _REGEX_JIT_H_
29 |
30 | /* Character type config. */
31 | #define REGEX_USE_8BIT_CHARS
32 |
33 | #ifdef REGEX_USE_8BIT_CHARS
34 | typedef char regex_char_t;
35 | #else
36 | typedef wchar_t regex_char_t;
37 | #endif
38 |
39 | /* Error codes. */
40 | #define REGEX_NO_ERROR 0
41 | #define REGEX_MEMORY_ERROR 1
42 | #define REGEX_INVALID_REGEX 2
43 |
44 | /* Note: large, nested {a,b} iterations can blow up the memory consumption:
45 |    a{n,m} is replaced by aa...aaa?a?a?a?a? (n >= 0, m > 0)
46 |                          \__n__/\____m___/
47 |    a{n,} is replaced by aa...aaa+ (n > 0)
48 |                         \_n-1_/
49 | */
50 |
51 | /* The value returned by regex_compile. Can be used for multiple matching. */
52 | struct regex_machine;
53 |
54 | /* A matching state. */
55 | struct regex_match;
56 |
57 | /* Note: REGEX_MATCH_BEGIN and REGEX_MATCH_END do not change the parsing
58 |    (hence ^ and $ are parsed normally).
59 |    Force matching to start from the beginning of the string (same as ^). */
60 | #define REGEX_MATCH_BEGIN 0x01
61 | /* Force matching to continue until the last character (same as $). */
62 | #define REGEX_MATCH_END 0x02
63 | /* Changes . to [^\r\n]
64 |    Note: [...] and [^...] are NOT affected at all (as in other regex engines). */
65 | #define REGEX_NEWLINE 0x04
66 | /* Non-greedy matching. With the Thompson (non-recursive) algorithm,
67 |    it (usually) does not bring a significant speed gain. */
68 | #define REGEX_MATCH_NON_GREEDY 0x08
69 | /* Verbose. This define can be commented out, which disables all verbose features. */
70 | #define REGEX_MATCH_VERBOSE 0x10
71 |
72 | /* If an error occurs, the function returns NULL, and the error code is returned in the error variable.
73 |    You can pass NULL to error if you don't care about the error code.
74 |    The re_flags argument contains the default REGEX_MATCH flags. See above. */
75 | struct regex_machine* regex_compile(const regex_char_t *regex_string, int length, int re_flags, int *error);
76 | void regex_free_machine(struct regex_machine *machine);
77 |
78 | /* Create and initialize a match structure for a given machine. */
79 | struct regex_match* regex_begin_match(struct regex_machine *machine);
80 | void regex_reset_match(struct regex_match *match);
81 | void regex_free_match(struct regex_match *match);
82 |
83 | /* Pattern matching.
84 |    regex_continue_match does not support the REGEX_MATCH_VERBOSE flag. */
85 | void regex_continue_match(struct regex_match *match, const regex_char_t *input_string, int length);
86 | int regex_get_result(struct regex_match *match, int *end, int *id);
87 | /* Returns true if the best match has already been found. */
88 | int regex_is_match_finished(struct regex_match *match);
89 |
90 | /* Only exists if VERBOSE is defined in regexJIT.c.
91 |    Performs both the sanity check and the verbose output
92 |    (the latter only if REGEX_MATCH_VERBOSE was passed to regex_compile). */
93 | void regex_continue_match_debug(struct regex_match *match, const regex_char_t *input_string, int length);
94 |
95 | /* Misc. */
96 | const char* regex_get_platform_name(void);
97 |
98 | #endif
99 |
--------------------------------------------------------------------------------
/regex_src/regexMain.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Stack-less Just-In-Time compiler
3 |  *
4 |  * Copyright 2009-2010 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
5 |  *
6 |  * Redistribution and use in source and binary forms, with or without modification, are
7 |  * permitted provided that the following conditions are met:
8 |  *
9 |  * 1. Redistributions of source code must retain the above copyright notice, this list of
10 |  *    conditions and the following disclaimer.
11 |  *
12 |  * 2. Redistributions in binary form must reproduce the above copyright notice, this list
13 |  *    of conditions and the following disclaimer in the documentation and/or other materials
14 |  *    provided with the distribution.
15 |  *
16 |  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
17 |  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 |  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
19 |  * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 |  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21 |  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 |  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 |  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 |  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 |  */
26 |
27 | /* Must be the first one. Must not depend on any other include. */
28 | #include "regexJIT.h"
29 |
30 | #include <stdio.h>
31 |
32 | #if defined _WIN32 || defined _WIN64
33 | #define COLOR_RED
34 | #define COLOR_GREEN
35 | #define COLOR_ARCH
36 | #define COLOR_DEFAULT
37 | #else
38 | #define COLOR_RED "\33[31m"
39 | #define COLOR_GREEN "\33[32m"
40 | #define COLOR_ARCH "\33[33m"
41 | #define COLOR_DEFAULT "\33[0m"
42 | #endif
43 |
44 | #ifdef REGEX_USE_8BIT_CHARS
45 | #define S(str) str
46 | #else
47 | #define S(str) L##str
48 | #endif
49 |
50 | #ifdef REGEX_MATCH_VERBOSE
51 | void verbose_test(regex_char_t *pattern, regex_char_t *string)
52 | {
53 | 	int error;
54 | 	regex_char_t *ptr;
55 | 	struct regex_machine* machine;
56 | 	struct regex_match* match;
57 | 	int begin, end, id;
58 |
59 | 	ptr = pattern;
60 | 	while (*ptr)
61 | 		ptr++;
62 |
63 | 	printf("Start test '%s' matches to '%s'\n", pattern, string);
64 | 	machine = regex_compile(pattern, ptr - pattern, REGEX_MATCH_VERBOSE | REGEX_NEWLINE, &error);
65 |
66 | 	if (error) {
67 | 		printf("WARNING: Error %d\n", error);
68 | 		return;
69 | 	}
70 | 	if (!machine) {
71 | 		printf("ERROR: machine must exist. Report this bug, please\n");
72 | 		return;
73 | 	}
74 |
75 | 	match = regex_begin_match(machine);
76 | 	if (!match) {
77 | 		printf("WARNING: Not enough memory for matching\n");
78 | 		regex_free_machine(machine);
79 | 		return;
80 | 	}
81 |
82 | 	ptr = string;
83 | 	while (*ptr)
84 | 		ptr++;
85 |
86 | 	regex_continue_match_debug(match, string, ptr - string);
87 |
88 | 	begin = regex_get_result(match, &end, &id);
89 | 	printf("Match returns: %3d->%3d [%3d]\n", begin, end, id);
90 |
91 | 	regex_free_match(match);
92 | 	regex_free_machine(machine);
93 | }
94 | #endif
95 |
96 | struct test_case {
97 | 	int begin;	/* Expected begin. */
98 | 	int end;	/* Expected end. */
99 | 	int id;	/* Expected id. */
100 | 	int finished;	/* -1 : don't care, 0 : false, 1 : true. */
101 | 	int flags;	/* REGEX_MATCH_* */
102 | 	const regex_char_t *pattern;	/* NULL : use the previous pattern. */
103 | 	const regex_char_t *string;	/* NULL : end of tests. */
104 | };
105 |
106 | void run_tests(struct test_case* test, int verbose, int silent)
107 | {
108 | 	int error;
109 | 	const regex_char_t *ptr;
110 | 	struct regex_machine* machine = NULL;
111 | 	struct regex_match* match;
112 | 	int begin, end, id, finished;
113 | 	int success = 0, fail = 0;
114 |
115 | 	if (!verbose && !silent)
116 | 		printf("Pass -v to enable verbose, -s to disable this hint.\n\n");
117 |
118 | 	for ( ; test->string ; test++) {
119 | 		if (verbose)
120 | 			printf("test: '%s' '%s': ", test->pattern ?
test->pattern : "[[REUSE]]", test->string); 121 | fail++; 122 | 123 | if (test->pattern) { 124 | if (machine) 125 | regex_free_machine(machine); 126 | 127 | ptr = test->pattern; 128 | while (*ptr) 129 | ptr++; 130 | 131 | machine = regex_compile(test->pattern, ptr - test->pattern, test->flags, &error); 132 | 133 | if (error) { 134 | if (!verbose) 135 | printf("test: '%s' '%s': ", test->pattern ? test->pattern : "[[REUSE]]", test->string); 136 | printf("ABORT: Error %d\n", error); 137 | return; 138 | } 139 | if (!machine) { 140 | if (!verbose) 141 | printf("test: '%s' '%s': ", test->pattern ? test->pattern : "[[REUSE]]", test->string); 142 | printf("ABORT: machine must be exists. Report this bug, please\n"); 143 | return; 144 | } 145 | } 146 | else if (test->flags != 0) { 147 | if (!verbose) 148 | printf("test: '%s' '%s': ", test->pattern ? test->pattern : "[[REUSE]]", test->string); 149 | printf("ABORT: flag must be 0 if no pattern\n"); 150 | return; 151 | } 152 | 153 | ptr = test->string; 154 | while (*ptr) 155 | ptr++; 156 | 157 | match = regex_begin_match(machine); 158 | #ifdef REGEX_MATCH_VERBOSE 159 | if (!match) { 160 | if (!verbose) 161 | printf("test: '%s' '%s': ", test->pattern ? test->pattern : "[[REUSE]]", test->string); 162 | printf("ABORT: Not enough memory for matching\n"); 163 | regex_free_machine(machine); 164 | return; 165 | } 166 | regex_continue_match_debug(match, test->string, ptr - test->string); 167 | begin = regex_get_result(match, &end, &id); 168 | finished = regex_is_match_finished(match); 169 | 170 | if (begin != test->begin || end != test->end || id != test->id) { 171 | if (!verbose) 172 | printf("test: '%s' '%s': ", test->pattern ? test->pattern : "[[REUSE]]", test->string); 173 | printf("FAIL A: begin: %d != %d || end: %d != %d || id: %d != %d\n", test->begin, begin, test->end, end, test->id, id); 174 | continue; 175 | } 176 | if (test->finished != -1 && test->finished != !!finished) { 177 | if (!verbose) 178 | printf("test: '%s' '%s': ", test->pattern ? test->pattern : "[[REUSE]]", test->string); 179 | printf("FAIL A: finish check\n"); 180 | continue; 181 | } 182 | #endif 183 | 184 | regex_reset_match(match); 185 | regex_continue_match(match, test->string, ptr - test->string); 186 | begin = regex_get_result(match, &end, &id); 187 | finished = regex_is_match_finished(match); 188 | regex_free_match(match); 189 | 190 | if (begin != test->begin || end != test->end || id != test->id) { 191 | if (!verbose) 192 | printf("test: '%s' '%s': ", test->pattern ? test->pattern : "[[REUSE]]", test->string); 193 | printf("FAIL B: begin: %d != %d || end: %d != %d || id: %d != %d\n", test->begin, begin, test->end, end, test->id, id); 194 | continue; 195 | } 196 | if (test->finished != -1 && test->finished != !!finished) { 197 | if (!verbose) 198 | printf("test: '%s' '%s': ", test->pattern ? test->pattern : "[[REUSE]]", test->string); 199 | printf("FAIL B: finish check\n"); 200 | continue; 201 | } 202 | 203 | if (verbose) 204 | printf("SUCCESS\n"); 205 | fail--; 206 | success++; 207 | } 208 | if (machine) 209 | regex_free_machine(machine); 210 | 211 | printf("REGEX tests: "); 212 | if (fail == 0) 213 | printf("all tests are " COLOR_GREEN "PASSED" COLOR_DEFAULT " "); 214 | else 215 | printf(COLOR_RED "%d" COLOR_DEFAULT " (" COLOR_RED "%d%%" COLOR_DEFAULT ") tests are failed ", fail, fail * 100 / (success + fail)); 216 | printf("on " COLOR_ARCH "%s" COLOR_DEFAULT "\n", regex_get_platform_name()); 217 | } 218 | 219 | /* Testing. 
*/ 220 | 221 | static struct test_case tests[] = { 222 | { 3, 7, 0, -1, 0, 223 | S("text"), S("is textile") }, 224 | { 0, 10, 0, -1, 0, 225 | S("^(ab|c)*?d+(es)?"), S("abccabddeses") }, 226 | { -1, 0, 0, 1, 0, 227 | S("^a+"), S("saaaa") }, 228 | { 3, 6, 0, 0, 0, 229 | S("(a+|b+)$"), S("saabbb") }, 230 | { 1, 6, 0, 0, 0, 231 | S("(a+|b+){,2}$"), S("saabbb") }, 232 | { 1, 6, 0, 1, 0, 233 | S("(abcde|bc)(a+*|(b|c){2}+){0}"), S("babcdeaaaaaaaa") }, 234 | { 1, 6, 0, 1, 0, 235 | S("(abc(aa)?|(cab+){2})"), S("cabcaa") }, 236 | { -1, 0, 0, 1, 0, 237 | S("^(abc(aa)?|(cab+){2})$"), S("cabcaa") }, 238 | { 0, 3, 1, -1, 0, 239 | S("^(ab{001!})?c"), S("abcde") }, 240 | { 1, 15, 2, -1, 0, 241 | S("(c?(a|bb{2!}){2,3}()+d){2,3}"), S("ccabbadbbadcaadcaad") }, 242 | { 2, 9, 0, -1, 0, 243 | NULL, S("cacaadaadaa") }, 244 | { -1, 0, 0, -1, REGEX_MATCH_BEGIN, 245 | S("(((ab?c|d{1})))"), S("ad") }, 246 | { 0, 9, 3, -1, REGEX_MATCH_BEGIN, 247 | S("^((a{1!}|b{2!}|c{3!}){3,6}d)+"), S("cabadbacddaa") }, 248 | { 1, 6, 0, 0, REGEX_MATCH_END, 249 | S("(a+(bb|cc?)?){4,}"), S("maaaac") }, 250 | { 3, 12, 1, 0, REGEX_MATCH_END, 251 | S("(x+x+{02,03}(x+|{1!})){03,06}$"), S("aaaxxxxxxxxx") }, 252 | { 1, 2, 3, -1, 0, 253 | S("((c{1!})?|x+{2!}|{3!})(a|c)"), S("scs") }, 254 | { 1, 4, 2, 1, 0, 255 | NULL, S("sxxaxxxaccacca") }, 256 | { 0, 2, 1, 1, 0, 257 | NULL, S("ccdcdcdddddcdccccd") }, 258 | { 0, 3, 0, -1, REGEX_MATCH_NON_GREEDY, 259 | S("^a+a+a+"), S("aaaaaa") }, 260 | { 2, 5, 0, -1, REGEX_MATCH_NON_GREEDY, 261 | S("a+a+a+"), S("bbaaaaaa") }, 262 | { 1, 4, 0, 1, 0, 263 | S("baa|a+"), S("sbaaaaaa") }, 264 | { 0, 6, 0, 1, 0, 265 | S("baaa|baa|sbaaaa"), S("sbaaaaa") }, 266 | { 1, 4, 0, 1, REGEX_MATCH_NON_GREEDY, 267 | S("baaa|baa"), S("xbaaa") }, 268 | { 0, 0, 3, 1, 0, 269 | S("{3!}"), S("xx") }, 270 | { 0, 0, 1, 1, 0, 271 | S("{1!}(a{2!})*"), S("xx") }, 272 | { 0, 2, 2, 0, 0, 273 | NULL, S("aa") }, 274 | { 0, 0, 1, 1, REGEX_MATCH_NON_GREEDY, 275 | S("{1!}(a{2!})*"), S("aaxx") }, 276 | { 4, 12, 0, 1, 0, 277 | S("(.[]-]){3}[^]-]{2}"), S("ax-xs-[][]lmn") }, 278 | { 3, 7, 1, 1, 0, 279 | S("([ABC]|[abc]{1!}){3,5}"), S("AbSAabbx") }, 280 | { 0, 8, 3, 0, 0, 281 | S("^[x\\-y[\\]]+([[\\]]{3!})*$"), S("x-y[-][]") }, 282 | { 0, 9, 0, 0, 0, 283 | NULL, S("x-y[-][]x") }, 284 | { 2, 8, 0, 1, 0, 285 | S("<(/{1!})?[^>]+>"), S(" ") }, 286 | { 2, 9, 1, 1, 0, 287 | NULL, S(" ") }, 288 | { 2, 9, 0, 1, 0, 289 | S("[A-Z0-9a-z]+"), S("[(Iden9aA)]") }, 290 | { 1, 4, 0, 1, 0, 291 | S("[^x-y]+[a-c_]{2,3}"), S("x_a_y") }, 292 | { 4, 11, 0, 0, 0, 293 | NULL, S("ssaymmaa_ccl") }, 294 | { 3, 6, 0, 1, REGEX_NEWLINE, 295 | S(".a[^k]"), S("\na\nxa\ns") }, 296 | { 0, 2, 0, 1, REGEX_NEWLINE, 297 | S("^a+"), S("aa\n") }, 298 | { 1, 4, 0, 1, 0 /* =REGEX_NEWLINE */, 299 | NULL, S("\naaa\n") }, 300 | { 2, 3, 0, 1, 0 /* =REGEX_NEWLINE */, 301 | NULL, S("\n\na\n") }, 302 | { 0, 2, 0, 1, REGEX_NEWLINE, 303 | S("a+$"), S("aa\n") }, 304 | { 0, 3, 0, 0, 0 /* =REGEX_NEWLINE */, 305 | NULL, S("aaa") }, 306 | { 2, 4, 1, 1, REGEX_NEWLINE, 307 | S("^a(a{1!})*$"), S("\n\naa\n\n") }, 308 | { 0, 1, 0, 0, 0 /* REGEX_NEWLINE */, 309 | NULL, S("a") }, 310 | { -1, 0, 0, -1, 0 /* REGEX_NEWLINE */, 311 | NULL, S("ab\nba") }, 312 | { -1, 0, 0, 0, 0, 313 | NULL, NULL } 314 | }; 315 | 316 | int main(int argc, char* argv[]) 317 | { 318 | int has_arg = (argc >= 2 && argv[1][0] == '-' && argv[1][2] == '\0'); 319 | 320 | /* verbose_test("a((b)((c|d))|)c|"); */ 321 | /* verbose_test("Xa{009,0010}Xb{,7}Xc{5,}Xd{,}Xe{1,}Xf{,1}X"); */ 322 | /* verbose_test("{3!}({3})({0!}){,"); */ 323 | /* 
verbose_test("(s(ab){2,4}t){2,}*S(a*(b)(c()|)d+){3,4}{0,0}*M"); */ 324 | /* verbose_test("^a({2!})*b+(a|{1!}b)+d$"); */ 325 | /* verbose_test("((a|b|c)*(xy)+)+", "asbcxyxy"); */ 326 | 327 | run_tests(tests, has_arg && argv[1][1] == 'v', has_arg && argv[1][1] == 's'); 328 | return 0; 329 | } 330 | -------------------------------------------------------------------------------- /sljit_src/sljitConfig.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | #ifndef _SLJIT_CONFIG_H_ 28 | #define _SLJIT_CONFIG_H_ 29 | 30 | /* --------------------------------------------------------------------- */ 31 | /* Custom defines */ 32 | /* --------------------------------------------------------------------- */ 33 | 34 | /* Put your custom defines here. This empty section will never change 35 | which helps maintaining patches (with diff / patch utilities). */ 36 | 37 | /* --------------------------------------------------------------------- */ 38 | /* Architecture */ 39 | /* --------------------------------------------------------------------- */ 40 | 41 | /* Architecture selection. */ 42 | /* #define SLJIT_CONFIG_X86_32 1 */ 43 | /* #define SLJIT_CONFIG_X86_64 1 */ 44 | /* #define SLJIT_CONFIG_ARM_V5 1 */ 45 | /* #define SLJIT_CONFIG_ARM_V7 1 */ 46 | /* #define SLJIT_CONFIG_ARM_THUMB2 1 */ 47 | /* #define SLJIT_CONFIG_ARM_64 1 */ 48 | /* #define SLJIT_CONFIG_PPC_32 1 */ 49 | /* #define SLJIT_CONFIG_PPC_64 1 */ 50 | /* #define SLJIT_CONFIG_MIPS_32 1 */ 51 | /* #define SLJIT_CONFIG_MIPS_64 1 */ 52 | /* #define SLJIT_CONFIG_SPARC_32 1 */ 53 | /* #define SLJIT_CONFIG_TILEGX 1 */ 54 | 55 | /* #define SLJIT_CONFIG_AUTO 1 */ 56 | /* #define SLJIT_CONFIG_UNSUPPORTED 1 */ 57 | 58 | /* --------------------------------------------------------------------- */ 59 | /* Utilities */ 60 | /* --------------------------------------------------------------------- */ 61 | 62 | /* Useful for thread-safe compiling of global functions. 
*/ 63 | #ifndef SLJIT_UTIL_GLOBAL_LOCK 64 | /* Enabled by default */ 65 | #define SLJIT_UTIL_GLOBAL_LOCK 1 66 | #endif 67 | 68 | /* Implements a stack like data structure (by using mmap / VirtualAlloc). */ 69 | #ifndef SLJIT_UTIL_STACK 70 | /* Enabled by default */ 71 | #define SLJIT_UTIL_STACK 1 72 | #endif 73 | 74 | /* Single threaded application. Does not require any locks. */ 75 | #ifndef SLJIT_SINGLE_THREADED 76 | /* Disabled by default. */ 77 | #define SLJIT_SINGLE_THREADED 0 78 | #endif 79 | 80 | /* --------------------------------------------------------------------- */ 81 | /* Configuration */ 82 | /* --------------------------------------------------------------------- */ 83 | 84 | /* Executable code allocation: 85 | If SLJIT_EXECUTABLE_ALLOCATOR is not defined, the application should 86 | define both SLJIT_MALLOC_EXEC and SLJIT_FREE_EXEC. */ 87 | #ifndef SLJIT_EXECUTABLE_ALLOCATOR 88 | /* Enabled by default. */ 89 | #define SLJIT_EXECUTABLE_ALLOCATOR 1 90 | #endif 91 | 92 | /* Return with error when an invalid argument is passed. */ 93 | #ifndef SLJIT_ARGUMENT_CHECKS 94 | /* Disabled by default */ 95 | #define SLJIT_ARGUMENT_CHECKS 0 96 | #endif 97 | 98 | /* Debug checks (assertions, etc.). */ 99 | #ifndef SLJIT_DEBUG 100 | /* Enabled by default */ 101 | #define SLJIT_DEBUG 1 102 | #endif 103 | 104 | /* Verbose operations. */ 105 | #ifndef SLJIT_VERBOSE 106 | /* Enabled by default */ 107 | #define SLJIT_VERBOSE 1 108 | #endif 109 | 110 | /* 111 | SLJIT_IS_FPU_AVAILABLE 112 | The availability of the FPU can be controlled by SLJIT_IS_FPU_AVAILABLE. 113 | zero value - FPU is NOT present. 114 | nonzero value - FPU is present. 115 | */ 116 | 117 | /* For further configurations, see the beginning of sljitConfigInternal.h */ 118 | 119 | #endif 120 | -------------------------------------------------------------------------------- /sljit_src/sljitConfigInternal.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | */ 26 | 27 | #ifndef _SLJIT_CONFIG_INTERNAL_H_ 28 | #define _SLJIT_CONFIG_INTERNAL_H_ 29 | 30 | /* 31 | SLJIT defines the following architecture dependent types and macros: 32 | 33 | Macros for feature detection (boolean): 34 | SLJIT_32BIT_ARCHITECTURE : 32 bit architecture 35 | SLJIT_64BIT_ARCHITECTURE : 64 bit architecture 36 | SLJIT_LITTLE_ENDIAN : little endian architecture 37 | SLJIT_BIG_ENDIAN : big endian architecture 38 | SLJIT_UNALIGNED : allows unaligned memory accesses for non-fpu operations (only!) 39 | SLJIT_INDIRECT_CALL : see SLJIT_FUNC_OFFSET() for more information 40 | 41 | Constants: 42 | SLJIT_NUM_REGS : number of available regs 43 | SLJIT_NUM_SCRATCH_REGS : number of available scratch regs 44 | SLJIT_NUM_SAVED_REGS : number of available saved regs 45 | SLJIT_NUM_FLOAT_REGS : number of available floating point regs 46 | SLJIT_NUM_SCRATCH_FLOAT_REGS : number of available floating point scratch regs 47 | SLJIT_NUM_SAVED_FLOAT_REGS : number of available floating point saved regs 48 | SLJIT_WORD_SHIFT : the shift required to apply when accessing a long/unsigned long array by index 49 | SLJIT_DOUBLE_SHIFT : the shift required to apply when accessing 50 | a double precision floating point array by index 51 | SLJIT_SINGLE_SHIFT : the shift required to apply when accessing 52 | a single precision floating point array by index 53 | SLJIT_LOCALS_OFFSET : local space starting offset (SLJIT_SP + SLJIT_LOCALS_OFFSET) 54 | SLJIT_RETURN_ADDRESS_OFFSET : a return instruction always adds this offset to the return address 55 | 56 | Other macros: 57 | SLJIT_CALL : C calling convention define for both calling JIT form C and C callbacks for JIT 58 | SLJIT_W(number) : defining 64 bit constants on 64 bit architectures (compiler independent helper) 59 | */ 60 | 61 | /*****************/ 62 | /* Sanity check. 
*/ 63 | /*****************/ 64 | 65 | #if !((defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ 66 | || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ 67 | || (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) \ 68 | || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ 69 | || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ 70 | || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ 71 | || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ 72 | || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ 73 | || (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ 74 | || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \ 75 | || (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ 76 | || (defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX) \ 77 | || (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) \ 78 | || (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED)) 79 | #error "An architecture must be selected" 80 | #endif 81 | 82 | #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ 83 | + (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ 84 | + (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) \ 85 | + (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ 86 | + (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ 87 | + (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ 88 | + (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ 89 | + (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ 90 | + (defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX) \ 91 | + (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ 92 | + (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \ 93 | + (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ 94 | + (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) \ 95 | + (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) >= 2 96 | #error "Multiple architectures are selected" 97 | #endif 98 | 99 | /********************************************************/ 100 | /* Automatic CPU detection (requires compiler support). 
*/ 101 | /********************************************************/ 102 | 103 | #if (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) 104 | 105 | #ifndef _WIN32 106 | 107 | #if defined(__i386__) || defined(__i386) 108 | #define SLJIT_CONFIG_X86_32 1 109 | #elif defined(__x86_64__) 110 | #define SLJIT_CONFIG_X86_64 1 111 | #elif defined(__arm__) || defined(__ARM__) 112 | #ifdef __thumb2__ 113 | #define SLJIT_CONFIG_ARM_THUMB2 1 114 | #elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) 115 | #define SLJIT_CONFIG_ARM_V7 1 116 | #else 117 | #define SLJIT_CONFIG_ARM_V5 1 118 | #endif 119 | #elif defined (__aarch64__) 120 | #define SLJIT_CONFIG_ARM_64 1 121 | #elif defined(__ppc64__) || defined(__powerpc64__) || defined(_ARCH_PPC64) || (defined(_POWER) && defined(__64BIT__)) 122 | #define SLJIT_CONFIG_PPC_64 1 123 | #elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) || defined(_ARCH_PWR) || defined(_ARCH_PWR2) || defined(_POWER) 124 | #define SLJIT_CONFIG_PPC_32 1 125 | #elif defined(__mips__) && !defined(_LP64) 126 | #define SLJIT_CONFIG_MIPS_32 1 127 | #elif defined(__mips64) 128 | #define SLJIT_CONFIG_MIPS_64 1 129 | #elif defined(__sparc__) || defined(__sparc) 130 | #define SLJIT_CONFIG_SPARC_32 1 131 | #elif defined(__tilegx__) 132 | #define SLJIT_CONFIG_TILEGX 1 133 | #else 134 | /* Unsupported architecture */ 135 | #define SLJIT_CONFIG_UNSUPPORTED 1 136 | #endif 137 | 138 | #else /* !_WIN32 */ 139 | 140 | #if defined(_M_X64) || defined(__x86_64__) 141 | #define SLJIT_CONFIG_X86_64 1 142 | #elif defined(_ARM_) 143 | #define SLJIT_CONFIG_ARM_V5 1 144 | #else 145 | #define SLJIT_CONFIG_X86_32 1 146 | #endif 147 | 148 | #endif /* !WIN32 */ 149 | #endif /* SLJIT_CONFIG_AUTO */ 150 | 151 | #if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) 152 | #undef SLJIT_EXECUTABLE_ALLOCATOR 153 | #endif 154 | 155 | /******************************/ 156 | /* CPU family type detection. */ 157 | /******************************/ 158 | 159 | #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ 160 | || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) 161 | #define SLJIT_CONFIG_ARM_32 1 162 | #endif 163 | 164 | #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) 165 | #define SLJIT_CONFIG_X86 1 166 | #elif (defined SLJIT_CONFIG_ARM_32 && SLJIT_CONFIG_ARM_32) || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) 167 | #define SLJIT_CONFIG_ARM 1 168 | #elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) 169 | #define SLJIT_CONFIG_PPC 1 170 | #elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) 171 | #define SLJIT_CONFIG_MIPS 1 172 | #elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) || (defined SLJIT_CONFIG_SPARC_64 && SLJIT_CONFIG_SPARC_64) 173 | #define SLJIT_CONFIG_SPARC 1 174 | #endif 175 | 176 | /***************************/ 177 | /* Compiler helper macros. */ 178 | /***************************/ 179 | 180 | /*********************************/ 181 | /* Type of public API functions. */ 182 | /*********************************/ 183 | 184 | /****************************/ 185 | /* Instruction cache flush. 
*/ 186 | /****************************/ 187 | 188 | #ifndef SLJIT_CACHE_FLUSH 189 | 190 | #if (defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86) 191 | 192 | /* Not required to implement on archs with unified caches. */ 193 | #define SLJIT_CACHE_FLUSH(from, to) 194 | 195 | #elif defined __APPLE__ 196 | 197 | /* Supported by all macs since Mac OS 10.5. 198 | However, it does not work on non-jailbroken iOS devices, 199 | although the compilation is successful. */ 200 | 201 | #define SLJIT_CACHE_FLUSH(from, to) \ 202 | sys_icache_invalidate((char*)(from), (char*)(to) - (char*)(from)) 203 | 204 | #elif defined __ANDROID__ 205 | 206 | /* Android lacks __clear_cache; instead, cacheflush should be used. */ 207 | 208 | #define SLJIT_CACHE_FLUSH(from, to) \ 209 | cacheflush((long)(from), (long)(to), 0) 210 | 211 | #elif (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) 212 | 213 | /* The __clear_cache() implementation of GCC is a dummy function on PowerPC. */ 214 | #define SLJIT_CACHE_FLUSH(from, to) \ 215 | ppc_cache_flush((from), (to)) 216 | 217 | #elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) 218 | 219 | /* The __clear_cache() implementation of GCC is a dummy function on Sparc. */ 220 | #define SLJIT_CACHE_FLUSH(from, to) \ 221 | sparc_cache_flush((from), (to)) 222 | 223 | #else 224 | 225 | /* Calls __ARM_NR_cacheflush on ARM-Linux. */ 226 | #define SLJIT_CACHE_FLUSH(from, to) \ 227 | __clear_cache((char*)(from), (char*)(to)) 228 | 229 | #endif 230 | 231 | #endif /* !SLJIT_CACHE_FLUSH */ 232 | 233 | /* 234 | * Used to maintain sanity/brevity. Admittedly a BSDism. 235 | */ 236 | typedef unsigned char u_char; 237 | typedef signed char s_char; 238 | 239 | /******************************************************/ 240 | /* Byte/half/int/word/single/double type definitions. */ 241 | /******************************************************/ 242 | 243 | #if !defined(_LP64) && !defined(__LP64__) 244 | #error "long must be size of pointer" 245 | #endif 246 | 247 | #if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) 248 | /* Just to have something. */ 249 | #define SLJIT_WORD_SHIFT 0 250 | #elif !(defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ 251 | && !(defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ 252 | && !(defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ 253 | && !(defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) \ 254 | && !(defined SLJIT_CONFIG_TILEGX && SLJIT_CONFIG_TILEGX) 255 | #define SLJIT_32BIT_ARCHITECTURE 1 256 | #define SLJIT_WORD_SHIFT 2 257 | #else 258 | #define SLJIT_64BIT_ARCHITECTURE 1 259 | #define SLJIT_WORD_SHIFT 3 260 | #endif 261 | 262 | /* Shift for pointer sized data. */ 263 | #define SLJIT_POINTER_SHIFT SLJIT_WORD_SHIFT 264 | 265 | /* Shift for double precision sized data. */ 266 | #define SLJIT_DOUBLE_SHIFT 3 267 | #define SLJIT_SINGLE_SHIFT 2 268 | 269 | #ifndef SLJIT_W 270 | 271 | /* Defining long constants. */ 272 | #if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE) 273 | #define SLJIT_W(w) (w##ll) 274 | #else 275 | #define SLJIT_W(w) (w) 276 | #endif 277 | 278 | #endif /* !SLJIT_W */ 279 | 280 | /*************************/ 281 | /* Endianness detection. */ 282 | /*************************/ 283 | 284 | #if !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN) 285 | 286 | /* These macros are mostly useful for the applications. 
*/ 287 | #if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ 288 | || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) 289 | 290 | #ifdef __LITTLE_ENDIAN__ 291 | #define SLJIT_LITTLE_ENDIAN 1 292 | #else 293 | #define SLJIT_BIG_ENDIAN 1 294 | #endif 295 | 296 | #elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ 297 | || (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64) 298 | 299 | #ifdef __MIPSEL__ 300 | #define SLJIT_LITTLE_ENDIAN 1 301 | #else 302 | #define SLJIT_BIG_ENDIAN 1 303 | #endif 304 | 305 | #elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) 306 | 307 | #define SLJIT_BIG_ENDIAN 1 308 | 309 | #else 310 | #define SLJIT_LITTLE_ENDIAN 1 311 | #endif 312 | 313 | #endif /* !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN) */ 314 | 315 | /* Sanity check. */ 316 | #if (defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN) && (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) 317 | #error "Exactly one endianness must be selected" 318 | #endif 319 | 320 | #if !(defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN) && !(defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) 321 | #error "Exactly one endianness must be selected" 322 | #endif 323 | 324 | #ifndef SLJIT_UNALIGNED 325 | 326 | #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ 327 | || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ 328 | || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ 329 | || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ 330 | || (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) \ 331 | || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ 332 | || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) 333 | #define SLJIT_UNALIGNED 1 334 | #endif 335 | 336 | #endif /* !SLJIT_UNALIGNED */ 337 | 338 | /*****************************************************************************************/ 339 | /* Calling convention of functions generated by SLJIT or called from the generated code. */ 340 | /*****************************************************************************************/ 341 | 342 | #ifndef SLJIT_CALL 343 | 344 | #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) 345 | 346 | #if defined(__GNUC__) && !defined(__APPLE__) 347 | 348 | #define SLJIT_CALL __attribute__ ((fastcall)) 349 | #define SLJIT_X86_32_FASTCALL 1 350 | 351 | #elif defined(_MSC_VER) 352 | 353 | #define SLJIT_CALL __fastcall 354 | #define SLJIT_X86_32_FASTCALL 1 355 | 356 | #elif defined(__BORLANDC__) 357 | 358 | #define SLJIT_CALL __msfastcall 359 | #define SLJIT_X86_32_FASTCALL 1 360 | 361 | #else /* Unknown compiler. */ 362 | 363 | /* The cdecl attribute is the default. */ 364 | #define SLJIT_CALL 365 | 366 | #endif 367 | 368 | #else /* Non x86-32 architectures. */ 369 | 370 | #define SLJIT_CALL 371 | 372 | #endif /* SLJIT_CONFIG_X86_32 */ 373 | 374 | #endif /* !SLJIT_CALL */ 375 | 376 | #ifndef SLJIT_INDIRECT_CALL 377 | #if ((defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) && (defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN)) \ 378 | || ((defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) && defined _AIX) 379 | /* It seems certain ppc compilers use an indirect addressing for functions 380 | which makes things complicated. */ 381 | #define SLJIT_INDIRECT_CALL 1 382 | #endif 383 | #endif /* SLJIT_INDIRECT_CALL */ 384 | 385 | /* The offset which needs to be substracted from the return address to 386 | determine the next executed instruction after return. 
*/ 387 | #ifndef SLJIT_RETURN_ADDRESS_OFFSET 388 | #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) 389 | #define SLJIT_RETURN_ADDRESS_OFFSET 8 390 | #else 391 | #define SLJIT_RETURN_ADDRESS_OFFSET 0 392 | #endif 393 | #endif /* SLJIT_RETURN_ADDRESS_OFFSET */ 394 | 395 | /***************************************************/ 396 | /* Functions of the built-in executable allocator. */ 397 | /***************************************************/ 398 | 399 | #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) 400 | void* sljit_malloc_exec(unsigned long size); 401 | void sljit_free_exec(void* ptr); 402 | void sljit_free_unused_memory_exec(void); 403 | #define SLJIT_MALLOC_EXEC(size) sljit_malloc_exec(size) 404 | #define SLJIT_FREE_EXEC(ptr) sljit_free_exec(ptr) 405 | #endif 406 | 407 | /**********************************************/ 408 | /* Registers and locals offset determination. */ 409 | /**********************************************/ 410 | 411 | #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) 412 | 413 | #define SLJIT_NUM_REGS 10 414 | #define SLJIT_NUM_SAVED_REGS 7 415 | #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) 416 | #define SLJIT_LOCALS_OFFSET_BASE ((2 + 4) * sizeof(long)) 417 | #else 418 | /* Maximum 3 arguments are passed on the stack, +1 for double alignment. */ 419 | #define SLJIT_LOCALS_OFFSET_BASE ((3 + 1 + 4) * sizeof(long)) 420 | #endif /* SLJIT_X86_32_FASTCALL */ 421 | 422 | #elif (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) 423 | 424 | #ifndef _WIN64 425 | #define SLJIT_NUM_REGS 12 426 | #define SLJIT_NUM_SAVED_REGS 6 427 | #define SLJIT_LOCALS_OFFSET_BASE (sizeof(long)) 428 | #else 429 | #define SLJIT_NUM_REGS 12 430 | #define SLJIT_NUM_SAVED_REGS 8 431 | #define SLJIT_LOCALS_OFFSET_BASE ((4 + 2) * sizeof(long)) 432 | #endif /* _WIN64 */ 433 | 434 | #elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) 435 | 436 | #define SLJIT_NUM_REGS 11 437 | #define SLJIT_NUM_SAVED_REGS 8 438 | #define SLJIT_LOCALS_OFFSET_BASE 0 439 | 440 | #elif (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) 441 | 442 | #define SLJIT_NUM_REGS 11 443 | #define SLJIT_NUM_SAVED_REGS 7 444 | #define SLJIT_LOCALS_OFFSET_BASE 0 445 | 446 | #elif (defined SLJIT_CONFIG_ARM_64 && SLJIT_CONFIG_ARM_64) 447 | 448 | #define SLJIT_NUM_REGS 25 449 | #define SLJIT_NUM_SAVED_REGS 10 450 | #define SLJIT_LOCALS_OFFSET_BASE (2 * sizeof(long)) 451 | 452 | #elif (defined SLJIT_CONFIG_PPC && SLJIT_CONFIG_PPC) 453 | 454 | #define SLJIT_NUM_REGS 22 455 | #define SLJIT_NUM_SAVED_REGS 17 456 | #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) || (defined _AIX) 457 | #define SLJIT_LOCALS_OFFSET_BASE ((6 + 8) * sizeof(long)) 458 | #elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) 459 | /* Add +1 for double alignment. 
*/ 460 | #define SLJIT_LOCALS_OFFSET_BASE ((3 + 1) * sizeof(long)) 461 | #else 462 | #define SLJIT_LOCALS_OFFSET_BASE (3 * sizeof(long)) 463 | #endif /* SLJIT_CONFIG_PPC_64 || _AIX */ 464 | 465 | #elif (defined SLJIT_CONFIG_MIPS && SLJIT_CONFIG_MIPS) 466 | 467 | #define SLJIT_NUM_REGS 17 468 | #define SLJIT_NUM_SAVED_REGS 8 469 | #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) 470 | #define SLJIT_LOCALS_OFFSET_BASE (4 * sizeof(long)) 471 | #else 472 | #define SLJIT_LOCALS_OFFSET_BASE 0 473 | #endif 474 | 475 | #elif (defined SLJIT_CONFIG_SPARC && SLJIT_CONFIG_SPARC) 476 | 477 | #define SLJIT_NUM_REGS 18 478 | #define SLJIT_NUM_SAVED_REGS 14 479 | #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) 480 | /* Add +1 for double alignment. */ 481 | #define SLJIT_LOCALS_OFFSET_BASE ((23 + 1) * sizeof(long)) 482 | #endif 483 | 484 | #elif (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) 485 | 486 | #define SLJIT_NUM_REGS 0 487 | #define SLJIT_NUM_SAVED_REGS 0 488 | #define SLJIT_LOCALS_OFFSET_BASE 0 489 | 490 | #endif 491 | 492 | #define SLJIT_LOCALS_OFFSET (SLJIT_LOCALS_OFFSET_BASE) 493 | 494 | #define SLJIT_NUM_SCRATCH_REGS \ 495 | (SLJIT_NUM_REGS - SLJIT_NUM_SAVED_REGS) 496 | 497 | #define SLJIT_NUM_FLOAT_REGS 6 498 | #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && (defined _WIN64) 499 | #define SLJIT_NUM_SAVED_FLOAT_REGS 1 500 | #else 501 | #define SLJIT_NUM_SAVED_FLOAT_REGS 0 502 | #endif 503 | 504 | #define SLJIT_NUM_SCRATCH_FLOAT_REGS \ 505 | (SLJIT_NUM_FLOAT_REGS - SLJIT_NUM_SAVED_FLOAT_REGS) 506 | 507 | /*************************************/ 508 | /* Debug and verbose related macros. */ 509 | /*************************************/ 510 | 511 | #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) 512 | #include 513 | #endif 514 | 515 | #if (defined SLJIT_DEBUG && SLJIT_DEBUG) 516 | 517 | #if !defined(SLJIT_ASSERT) || !defined(SLJIT_ASSERT_STOP) 518 | 519 | /* SLJIT_HALT_PROCESS must halt the process. */ 520 | #ifndef SLJIT_HALT_PROCESS 521 | #include 522 | 523 | #define SLJIT_HALT_PROCESS() \ 524 | abort(); 525 | #endif /* !SLJIT_HALT_PROCESS */ 526 | 527 | #include 528 | 529 | #endif /* !SLJIT_ASSERT || !SLJIT_ASSERT_STOP */ 530 | 531 | /* Feel free to redefine these two macros. */ 532 | #ifndef SLJIT_ASSERT 533 | 534 | #define SLJIT_ASSERT(x) \ 535 | do { \ 536 | if (!(x)) { \ 537 | printf("Assertion failed at " __FILE__ ":%d\n", __LINE__); \ 538 | SLJIT_HALT_PROCESS(); \ 539 | } \ 540 | } while (0) 541 | 542 | #endif /* !SLJIT_ASSERT */ 543 | 544 | #ifndef SLJIT_ASSERT_STOP 545 | 546 | #define SLJIT_ASSERT_STOP() \ 547 | do { \ 548 | printf("Should never been reached " __FILE__ ":%d\n", __LINE__); \ 549 | SLJIT_HALT_PROCESS(); \ 550 | } while (0) 551 | 552 | #endif /* !SLJIT_ASSERT_STOP */ 553 | 554 | #else /* (defined SLJIT_DEBUG && SLJIT_DEBUG) */ 555 | 556 | /* Forcing empty, but valid statements. */ 557 | #undef SLJIT_ASSERT 558 | #undef SLJIT_ASSERT_STOP 559 | 560 | #define SLJIT_ASSERT(x) \ 561 | do { } while (0) 562 | #define SLJIT_ASSERT_STOP() \ 563 | do { } while (0) 564 | 565 | #endif /* (defined SLJIT_DEBUG && SLJIT_DEBUG) */ 566 | 567 | #ifndef SLJIT_COMPILE_ASSERT 568 | 569 | /* Should be improved eventually. 
*/ 570 | #define SLJIT_COMPILE_ASSERT(x, description) \ 571 | SLJIT_ASSERT(x) 572 | 573 | #endif /* !SLJIT_COMPILE_ASSERT */ 574 | 575 | #endif 576 | -------------------------------------------------------------------------------- /sljit_src/sljitExecAllocator.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | /* 28 | This file contains a simple executable memory allocator 29 | 30 | It is assumed, that executable code blocks are usually medium (or sometimes 31 | large) memory blocks, and the allocator is not too frequently called (less 32 | optimized than other allocators). Thus, using it as a generic allocator is 33 | not suggested. 34 | 35 | How does it work: 36 | Memory is allocated in continuous memory areas called chunks by alloc_chunk() 37 | Chunk format: 38 | [ block ][ block ] ... [ block ][ block terminator ] 39 | 40 | All blocks and the block terminator is started with block_header. The block 41 | header contains the size of the previous and the next block. These sizes 42 | can also contain special values. 43 | Block size: 44 | 0 - The block is a free_block, with a different size member. 45 | 1 - The block is a block terminator. 46 | n - The block is used at the moment, and the value contains its size. 47 | Previous block size: 48 | 0 - This is the first block of the memory chunk. 49 | n - The size of the previous block. 50 | 51 | Using these size values we can go forward or backward on the block chain. 52 | The unused blocks are stored in a chain list pointed by free_blocks. This 53 | list is useful if we need to find a suitable memory area when the allocator 54 | is called. 55 | 56 | When a block is freed, the new free block is connected to its adjacent free 57 | blocks if possible. 
58 | 59 | [ free block ][ used block ][ free block ] 60 | and "used block" is freed, the three blocks are connected together: 61 | [ one big free block ] 62 | */ 63 | 64 | /* --------------------------------------------------------------------- */ 65 | /* System (OS) functions */ 66 | /* --------------------------------------------------------------------- */ 67 | 68 | /* 64 KByte. */ 69 | #define CHUNK_SIZE 0x10000 70 | 71 | /* 72 | alloc_chunk / free_chunk : 73 | * allocate executable system memory chunks 74 | * the size is always divisible by CHUNK_SIZE 75 | allocator_grab_lock / allocator_release_lock : 76 | * make the allocator thread safe 77 | * can be empty if the OS (or the application) does not support threading 78 | * only the allocator requires this lock, sljit is fully thread safe 79 | as it only uses local variables 80 | */ 81 | 82 | #ifdef _WIN32 83 | 84 | static __inline void* alloc_chunk(unsigned long size) 85 | { 86 | return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE); 87 | } 88 | 89 | static __inline void free_chunk(void* chunk, unsigned long size) 90 | { 91 | (void)size; 92 | VirtualFree(chunk, 0, MEM_RELEASE); 93 | } 94 | 95 | #else 96 | 97 | static __inline void* alloc_chunk(unsigned long size) 98 | { 99 | void* retval; 100 | 101 | #ifdef MAP_ANON 102 | retval = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0); 103 | #else 104 | if (dev_zero < 0) { 105 | if (open_dev_zero()) 106 | return NULL; 107 | } 108 | retval = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, dev_zero, 0); 109 | #endif 110 | 111 | return (retval != MAP_FAILED) ? retval : NULL; 112 | } 113 | 114 | static __inline void free_chunk(void* chunk, unsigned long size) 115 | { 116 | munmap(chunk, size); 117 | } 118 | 119 | #endif 120 | 121 | /* --------------------------------------------------------------------- */ 122 | /* Common functions */ 123 | /* --------------------------------------------------------------------- */ 124 | 125 | #define CHUNK_MASK (~(CHUNK_SIZE - 1)) 126 | 127 | struct block_header { 128 | unsigned long size; 129 | unsigned long prev_size; 130 | }; 131 | 132 | struct free_block { 133 | struct block_header header; 134 | struct free_block *next; 135 | struct free_block *prev; 136 | unsigned long size; 137 | }; 138 | 139 | #define AS_BLOCK_HEADER(base, offset) \ 140 | ((struct block_header*)(((u_char*)base) + offset)) 141 | #define AS_FREE_BLOCK(base, offset) \ 142 | ((struct free_block*)(((u_char*)base) + offset)) 143 | #define MEM_START(base) ((void*)(((u_char*)base) + sizeof(struct block_header))) 144 | #define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7) & ~7) 145 | 146 | static struct free_block* free_blocks; 147 | static unsigned long allocated_size; 148 | static unsigned long total_size; 149 | 150 | static __inline void sljit_insert_free_block(struct free_block *free_block, unsigned long size) 151 | { 152 | free_block->header.size = 0; 153 | free_block->size = size; 154 | 155 | free_block->next = free_blocks; 156 | free_block->prev = NULL; 157 | if (free_blocks) 158 | free_blocks->prev = free_block; 159 | free_blocks = free_block; 160 | } 161 | 162 | static __inline void sljit_remove_free_block(struct free_block *free_block) 163 | { 164 | if (free_block->next) 165 | free_block->next->prev = free_block->prev; 166 | 167 | if (free_block->prev) 168 | free_block->prev->next = free_block->next; 169 | else { 170 | SLJIT_ASSERT(free_blocks == free_block); 171 | free_blocks = 
free_block->next; 172 | } 173 | } 174 | 175 | void* sljit_malloc_exec(unsigned long size) 176 | { 177 | struct block_header *header; 178 | struct block_header *next_header; 179 | struct free_block *free_block; 180 | unsigned long chunk_size; 181 | 182 | allocator_grab_lock(); 183 | if (size < sizeof(struct free_block)) 184 | size = sizeof(struct free_block); 185 | size = ALIGN_SIZE(size); 186 | 187 | free_block = free_blocks; 188 | while (free_block) { 189 | if (free_block->size >= size) { 190 | chunk_size = free_block->size; 191 | if (chunk_size > size + 64) { 192 | /* We just cut a block from the end of the free block. */ 193 | chunk_size -= size; 194 | free_block->size = chunk_size; 195 | header = AS_BLOCK_HEADER(free_block, chunk_size); 196 | header->prev_size = chunk_size; 197 | AS_BLOCK_HEADER(header, size)->prev_size = size; 198 | } 199 | else { 200 | sljit_remove_free_block(free_block); 201 | header = (struct block_header*)free_block; 202 | size = chunk_size; 203 | } 204 | allocated_size += size; 205 | header->size = size; 206 | allocator_release_lock(); 207 | return MEM_START(header); 208 | } 209 | free_block = free_block->next; 210 | } 211 | 212 | chunk_size = (size + sizeof(struct block_header) + CHUNK_SIZE - 1) & CHUNK_MASK; 213 | header = (struct block_header*)alloc_chunk(chunk_size); 214 | if (!header) { 215 | allocator_release_lock(); 216 | return NULL; 217 | } 218 | 219 | chunk_size -= sizeof(struct block_header); 220 | total_size += chunk_size; 221 | 222 | header->prev_size = 0; 223 | if (chunk_size > size + 64) { 224 | /* Cut the allocated space into a free and a used block. */ 225 | allocated_size += size; 226 | header->size = size; 227 | chunk_size -= size; 228 | 229 | free_block = AS_FREE_BLOCK(header, size); 230 | free_block->header.prev_size = size; 231 | sljit_insert_free_block(free_block, chunk_size); 232 | next_header = AS_BLOCK_HEADER(free_block, chunk_size); 233 | } 234 | else { 235 | /* All space belongs to this allocation. */ 236 | allocated_size += chunk_size; 237 | header->size = chunk_size; 238 | next_header = AS_BLOCK_HEADER(header, chunk_size); 239 | } 240 | next_header->size = 1; 241 | next_header->prev_size = chunk_size; 242 | allocator_release_lock(); 243 | return MEM_START(header); 244 | } 245 | 246 | void sljit_free_exec(void* ptr) 247 | { 248 | struct block_header *header; 249 | struct free_block* free_block; 250 | 251 | allocator_grab_lock(); 252 | header = AS_BLOCK_HEADER(ptr, -(long)sizeof(struct block_header)); 253 | allocated_size -= header->size; 254 | 255 | /* Connecting free blocks together if possible. */ 256 | 257 | /* If header->prev_size == 0, free_block will equal to header. 258 | In this case, free_block->header.size will be > 0. */ 259 | free_block = AS_FREE_BLOCK(header, -(long)header->prev_size); 260 | if (!free_block->header.size) { 261 | free_block->size += header->size; 262 | header = AS_BLOCK_HEADER(free_block, free_block->size); 263 | header->prev_size = free_block->size; 264 | } 265 | else { 266 | free_block = (struct free_block*)header; 267 | sljit_insert_free_block(free_block, header->size); 268 | } 269 | 270 | header = AS_BLOCK_HEADER(free_block, free_block->size); 271 | if (!header->size) { 272 | free_block->size += ((struct free_block*)header)->size; 273 | sljit_remove_free_block((struct free_block*)header); 274 | header = AS_BLOCK_HEADER(free_block, free_block->size); 275 | header->prev_size = free_block->size; 276 | } 277 | 278 | /* The whole chunk is free. 
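      It is only returned to the operating system when doing so still leaves a
      comfortable amount of free space. A worked illustration of the check below,
      with numbers chosen purely for the example: with total_size = 256 KB,
      allocated_size = 64 KB and a 128 KB free chunk, 256 KB - 128 KB = 128 KB is
      greater than 64 KB * 3 / 2 = 96 KB, so the chunk is unmapped; with
      allocated_size = 96 KB the same chunk is kept, because 128 KB does not
      exceed 144 KB.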
*/ 279 | if (!free_block->header.prev_size && header->size == 1) { 280 | /* If this block is freed, we still have (allocated_size / 2) free space. */ 281 | if (total_size - free_block->size > (allocated_size * 3 / 2)) { 282 | total_size -= free_block->size; 283 | sljit_remove_free_block(free_block); 284 | free_chunk(free_block, free_block->size + sizeof(struct block_header)); 285 | } 286 | } 287 | 288 | allocator_release_lock(); 289 | } 290 | 291 | void sljit_free_unused_memory_exec(void) 292 | { 293 | struct free_block* free_block; 294 | struct free_block* next_free_block; 295 | 296 | allocator_grab_lock(); 297 | 298 | free_block = free_blocks; 299 | while (free_block) { 300 | next_free_block = free_block->next; 301 | if (!free_block->header.prev_size && 302 | AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) { 303 | total_size -= free_block->size; 304 | sljit_remove_free_block(free_block); 305 | free_chunk(free_block, free_block->size + sizeof(struct block_header)); 306 | } 307 | free_block = next_free_block; 308 | } 309 | 310 | SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks)); 311 | allocator_release_lock(); 312 | } 313 | -------------------------------------------------------------------------------- /sljit_src/sljitNativeMIPS_32.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | /* mips 32-bit arch dependent functions. */ 28 | 29 | static int load_immediate(struct sljit_compiler *compiler, int dst_ar, long imm) 30 | { 31 | if (!(imm & ~0xffff)) 32 | return push_inst(compiler, ORI | SA(0) | TA(dst_ar) | IMM(imm), dst_ar); 33 | 34 | if (imm < 0 && imm >= SIMM_MIN) 35 | return push_inst(compiler, ADDIU | SA(0) | TA(dst_ar) | IMM(imm), dst_ar); 36 | 37 | FAIL_IF(push_inst(compiler, LUI | TA(dst_ar) | IMM(imm >> 16), dst_ar)); 38 | return (imm & 0xffff) ? 
push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar) : SLJIT_SUCCESS; 39 | } 40 | 41 | #define EMIT_LOGICAL(op_imm, op_norm) \ 42 | if (flags & SRC2_IMM) { \ 43 | if (op & SLJIT_SET_E) \ 44 | FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \ 45 | if (CHECK_FLAGS(SLJIT_SET_E)) \ 46 | FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \ 47 | } \ 48 | else { \ 49 | if (op & SLJIT_SET_E) \ 50 | FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ 51 | if (CHECK_FLAGS(SLJIT_SET_E)) \ 52 | FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \ 53 | } 54 | 55 | #define EMIT_SHIFT(op_imm, op_v) \ 56 | if (flags & SRC2_IMM) { \ 57 | if (op & SLJIT_SET_E) \ 58 | FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \ 59 | if (CHECK_FLAGS(SLJIT_SET_E)) \ 60 | FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \ 61 | } \ 62 | else { \ 63 | if (op & SLJIT_SET_E) \ 64 | FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ 65 | if (CHECK_FLAGS(SLJIT_SET_E)) \ 66 | FAIL_IF(push_inst(compiler, op_v | S(src2) | T(src1) | D(dst), DR(dst))); \ 67 | } 68 | 69 | static __inline int emit_single_op(struct sljit_compiler *compiler, int op, int flags, 70 | int dst, int src1, long src2) 71 | { 72 | switch (GET_OPCODE(op)) { 73 | case SLJIT_MOV: 74 | case SLJIT_MOV_UI: 75 | case SLJIT_MOV_SI: 76 | case SLJIT_MOV_P: 77 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 78 | if (dst != src2) 79 | return push_inst(compiler, ADDU | S(src2) | TA(0) | D(dst), DR(dst)); 80 | return SLJIT_SUCCESS; 81 | 82 | case SLJIT_MOV_UB: 83 | case SLJIT_MOV_SB: 84 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 85 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 86 | if (op == SLJIT_MOV_SB) { 87 | #if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) 88 | return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst)); 89 | #else 90 | FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(24), DR(dst))); 91 | return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(24), DR(dst)); 92 | #endif 93 | } 94 | return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst)); 95 | } 96 | else if (dst != src2) 97 | SLJIT_ASSERT_STOP(); 98 | return SLJIT_SUCCESS; 99 | 100 | case SLJIT_MOV_UH: 101 | case SLJIT_MOV_SH: 102 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 103 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 104 | if (op == SLJIT_MOV_SH) { 105 | #if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) 106 | return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst)); 107 | #else 108 | FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(16), DR(dst))); 109 | return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(16), DR(dst)); 110 | #endif 111 | } 112 | return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst)); 113 | } 114 | else if (dst != src2) 115 | SLJIT_ASSERT_STOP(); 116 | return SLJIT_SUCCESS; 117 | 118 | case SLJIT_NOT: 119 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 120 | if (op & SLJIT_SET_E) 121 | FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); 122 | if (CHECK_FLAGS(SLJIT_SET_E)) 123 | FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst))); 124 | return SLJIT_SUCCESS; 125 | 126 | case SLJIT_CLZ: 127 | SLJIT_ASSERT(src1 == TMP_REG1 
&& !(flags & SRC2_IMM)); 128 | #if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) 129 | if (op & SLJIT_SET_E) 130 | FAIL_IF(push_inst(compiler, CLZ | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); 131 | if (CHECK_FLAGS(SLJIT_SET_E)) 132 | FAIL_IF(push_inst(compiler, CLZ | S(src2) | T(dst) | D(dst), DR(dst))); 133 | #else 134 | if (flags & UNUSED_DEST) { 135 | FAIL_IF(push_inst(compiler, SRL | T(src2) | DA(EQUAL_FLAG) | SH_IMM(31), EQUAL_FLAG)); 136 | return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG); 137 | } 138 | /* Nearly all instructions are unmovable in the following sequence. */ 139 | FAIL_IF(push_inst(compiler, ADDU | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); 140 | /* Check zero. */ 141 | FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS)); 142 | FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM(32), UNMOVABLE_INS)); 143 | FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(dst) | IMM(-1), DR(dst))); 144 | /* Loop for searching the highest bit. */ 145 | FAIL_IF(push_inst(compiler, ADDIU | S(dst) | T(dst) | IMM(1), DR(dst))); 146 | FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS)); 147 | FAIL_IF(push_inst(compiler, SLL | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS)); 148 | if (op & SLJIT_SET_E) 149 | return push_inst(compiler, ADDU | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG); 150 | #endif 151 | return SLJIT_SUCCESS; 152 | 153 | case SLJIT_ADD: 154 | if (flags & SRC2_IMM) { 155 | if (op & SLJIT_SET_O) { 156 | if (src2 >= 0) 157 | FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 158 | else 159 | FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 160 | } 161 | if (op & SLJIT_SET_E) 162 | FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); 163 | if (op & (SLJIT_SET_C | SLJIT_SET_O)) { 164 | if (src2 >= 0) 165 | FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); 166 | else { 167 | FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); 168 | FAIL_IF(push_inst(compiler, OR | S(src1) | TA(ULESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG)); 169 | } 170 | } 171 | /* dst may be the same as src1 or src2. */ 172 | if (CHECK_FLAGS(SLJIT_SET_E)) 173 | FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst))); 174 | } 175 | else { 176 | if (op & SLJIT_SET_O) 177 | FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 178 | if (op & SLJIT_SET_E) 179 | FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); 180 | if (op & (SLJIT_SET_C | SLJIT_SET_O)) 181 | FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(ULESS_FLAG), ULESS_FLAG)); 182 | /* dst may be the same as src1 or src2. */ 183 | if (CHECK_FLAGS(SLJIT_SET_E)) 184 | FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst))); 185 | } 186 | 187 | /* a + b >= a | b (otherwise, the carry should be set to 1). 
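	   The inequality holds because a + b = (a | b) + (a & b), so the truncated
	   32-bit sum can only drop below a | b when the addition wrapped. Worked
	   example (added illustration): a = 0xFFFFFFFF, b = 1 gives a | b = 0xFFFFFFFF
	   and a wrapped sum of 0, so the SLTU below stores 1 (carry set); a = 4, b = 3
	   gives a sum of 7, equal to a | b, and the SLTU stores 0.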
*/ 188 | if (op & (SLJIT_SET_C | SLJIT_SET_O)) 189 | FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(ULESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG)); 190 | if (!(op & SLJIT_SET_O)) 191 | return SLJIT_SUCCESS; 192 | FAIL_IF(push_inst(compiler, SLL | TA(ULESS_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); 193 | FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 194 | FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 195 | return push_inst(compiler, SLL | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG) | SH_IMM(31), OVERFLOW_FLAG); 196 | 197 | case SLJIT_ADDC: 198 | if (flags & SRC2_IMM) { 199 | if (op & SLJIT_SET_C) { 200 | if (src2 >= 0) 201 | FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(OVERFLOW_FLAG) | IMM(src2), OVERFLOW_FLAG)); 202 | else { 203 | FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(OVERFLOW_FLAG) | IMM(src2), OVERFLOW_FLAG)); 204 | FAIL_IF(push_inst(compiler, OR | S(src1) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 205 | } 206 | } 207 | FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst))); 208 | } else { 209 | if (op & SLJIT_SET_C) 210 | FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 211 | /* dst may be the same as src1 or src2. */ 212 | FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst))); 213 | } 214 | if (op & SLJIT_SET_C) 215 | FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 216 | 217 | FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(ULESS_FLAG) | D(dst), DR(dst))); 218 | if (!(op & SLJIT_SET_C)) 219 | return SLJIT_SUCCESS; 220 | 221 | /* Set ULESS_FLAG (dst == 0) && (ULESS_FLAG == 1). */ 222 | FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(ULESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG)); 223 | /* Set carry flag. */ 224 | return push_inst(compiler, OR | SA(ULESS_FLAG) | TA(OVERFLOW_FLAG) | DA(ULESS_FLAG), ULESS_FLAG); 225 | 226 | case SLJIT_SUB: 227 | if ((flags & SRC2_IMM) && ((op & (SLJIT_SET_U | SLJIT_SET_S)) || src2 == SIMM_MIN)) { 228 | FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); 229 | src2 = TMP_REG2; 230 | flags &= ~SRC2_IMM; 231 | } 232 | 233 | if (flags & SRC2_IMM) { 234 | if (op & SLJIT_SET_O) { 235 | if (src2 >= 0) 236 | FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 237 | else 238 | FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 239 | } 240 | if (op & SLJIT_SET_E) 241 | FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); 242 | if (op & (SLJIT_SET_C | SLJIT_SET_O)) 243 | FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); 244 | /* dst may be the same as src1 or src2. 
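		   Borrow example for the SLTIU above (added illustration): with src1 = 3 and
		   src2 = 5, SLTIU stores 1 in ULESS_FLAG because 3 < 5 as unsigned values,
		   and the ADDIU below computes 3 + (-5) = 0xFFFFFFFE, the wrapped difference.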
*/ 245 | if (CHECK_FLAGS(SLJIT_SET_E)) 246 | FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst))); 247 | } 248 | else { 249 | if (op & SLJIT_SET_O) 250 | FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 251 | if (op & SLJIT_SET_E) 252 | FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); 253 | if (op & (SLJIT_SET_U | SLJIT_SET_C | SLJIT_SET_O)) 254 | FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(ULESS_FLAG), ULESS_FLAG)); 255 | if (op & SLJIT_SET_U) 256 | FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(UGREATER_FLAG), UGREATER_FLAG)); 257 | if (op & SLJIT_SET_S) { 258 | FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(LESS_FLAG), LESS_FLAG)); 259 | FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(GREATER_FLAG), GREATER_FLAG)); 260 | } 261 | /* dst may be the same as src1 or src2. */ 262 | if (CHECK_FLAGS(SLJIT_SET_E | SLJIT_SET_U | SLJIT_SET_S | SLJIT_SET_C)) 263 | FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst))); 264 | } 265 | 266 | if (!(op & SLJIT_SET_O)) 267 | return SLJIT_SUCCESS; 268 | FAIL_IF(push_inst(compiler, SLL | TA(ULESS_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); 269 | FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 270 | FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 271 | return push_inst(compiler, SRL | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG) | SH_IMM(31), OVERFLOW_FLAG); 272 | 273 | case SLJIT_SUBC: 274 | if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { 275 | FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); 276 | src2 = TMP_REG2; 277 | flags &= ~SRC2_IMM; 278 | } 279 | 280 | if (flags & SRC2_IMM) { 281 | if (op & SLJIT_SET_C) 282 | FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OVERFLOW_FLAG) | IMM(src2), OVERFLOW_FLAG)); 283 | /* dst may be the same as src1 or src2. */ 284 | FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst))); 285 | } 286 | else { 287 | if (op & SLJIT_SET_C) 288 | FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 289 | /* dst may be the same as src1 or src2. */ 290 | FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst))); 291 | } 292 | 293 | if (op & SLJIT_SET_C) 294 | FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(ULESS_FLAG) | DA(LESS_FLAG), LESS_FLAG)); 295 | 296 | FAIL_IF(push_inst(compiler, SUBU | S(dst) | TA(ULESS_FLAG) | D(dst), DR(dst))); 297 | return (op & SLJIT_SET_C) ? 
push_inst(compiler, OR | SA(OVERFLOW_FLAG) | TA(LESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG) : SLJIT_SUCCESS; 298 | 299 | case SLJIT_MUL: 300 | SLJIT_ASSERT(!(flags & SRC2_IMM)); 301 | if (!(op & SLJIT_SET_O)) { 302 | #if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) 303 | return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)); 304 | #else 305 | FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS)); 306 | return push_inst(compiler, MFLO | D(dst), DR(dst)); 307 | #endif 308 | } 309 | FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS)); 310 | FAIL_IF(push_inst(compiler, MFHI | DA(ULESS_FLAG), ULESS_FLAG)); 311 | FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst))); 312 | FAIL_IF(push_inst(compiler, SRA | T(dst) | DA(UGREATER_FLAG) | SH_IMM(31), UGREATER_FLAG)); 313 | return push_inst(compiler, SUBU | SA(ULESS_FLAG) | TA(UGREATER_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG); 314 | 315 | case SLJIT_AND: 316 | EMIT_LOGICAL(ANDI, AND); 317 | return SLJIT_SUCCESS; 318 | 319 | case SLJIT_OR: 320 | EMIT_LOGICAL(ORI, OR); 321 | return SLJIT_SUCCESS; 322 | 323 | case SLJIT_XOR: 324 | EMIT_LOGICAL(XORI, XOR); 325 | return SLJIT_SUCCESS; 326 | 327 | case SLJIT_SHL: 328 | EMIT_SHIFT(SLL, SLLV); 329 | return SLJIT_SUCCESS; 330 | 331 | case SLJIT_LSHR: 332 | EMIT_SHIFT(SRL, SRLV); 333 | return SLJIT_SUCCESS; 334 | 335 | case SLJIT_ASHR: 336 | EMIT_SHIFT(SRA, SRAV); 337 | return SLJIT_SUCCESS; 338 | } 339 | 340 | SLJIT_ASSERT_STOP(); 341 | return SLJIT_SUCCESS; 342 | } 343 | 344 | static __inline int emit_const(struct sljit_compiler *compiler, int dst, long init_value) 345 | { 346 | FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 16), DR(dst))); 347 | return push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value), DR(dst)); 348 | } 349 | 350 | void sljit_set_jump_addr(unsigned long addr, unsigned long new_addr) 351 | { 352 | sljit_ins *inst = (sljit_ins*)addr; 353 | 354 | inst[0] = (inst[0] & 0xffff0000) | ((new_addr >> 16) & 0xffff); 355 | inst[1] = (inst[1] & 0xffff0000) | (new_addr & 0xffff); 356 | SLJIT_CACHE_FLUSH(inst, inst + 2); 357 | } 358 | 359 | void sljit_set_const(unsigned long addr, long new_constant) 360 | { 361 | sljit_ins *inst = (sljit_ins*)addr; 362 | 363 | inst[0] = (inst[0] & 0xffff0000) | ((new_constant >> 16) & 0xffff); 364 | inst[1] = (inst[1] & 0xffff0000) | (new_constant & 0xffff); 365 | SLJIT_CACHE_FLUSH(inst, inst + 2); 366 | } 367 | -------------------------------------------------------------------------------- /sljit_src/sljitNativeMIPS_64.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 
15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | /* mips 64-bit arch dependent functions. */ 28 | 29 | static int load_immediate(struct sljit_compiler *compiler, int dst_ar, long imm) 30 | { 31 | int shift = 32; 32 | int shift2; 33 | int inv = 0; 34 | sljit_ins ins; 35 | unsigned long uimm; 36 | 37 | if (!(imm & ~0xffff)) 38 | return push_inst(compiler, ORI | SA(0) | TA(dst_ar) | IMM(imm), dst_ar); 39 | 40 | if (imm < 0 && imm >= SIMM_MIN) 41 | return push_inst(compiler, ADDIU | SA(0) | TA(dst_ar) | IMM(imm), dst_ar); 42 | 43 | if (imm <= 0x7fffffffl && imm >= -0x80000000l) { 44 | FAIL_IF(push_inst(compiler, LUI | TA(dst_ar) | IMM(imm >> 16), dst_ar)); 45 | return (imm & 0xffff) ? push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar) : SLJIT_SUCCESS; 46 | } 47 | 48 | /* Zero extended number. */ 49 | uimm = imm; 50 | if (imm < 0) { 51 | uimm = ~imm; 52 | inv = 1; 53 | } 54 | 55 | while (!(uimm & 0xff00000000000000l)) { 56 | shift -= 8; 57 | uimm <<= 8; 58 | } 59 | 60 | if (!(uimm & 0xf000000000000000l)) { 61 | shift -= 4; 62 | uimm <<= 4; 63 | } 64 | 65 | if (!(uimm & 0xc000000000000000l)) { 66 | shift -= 2; 67 | uimm <<= 2; 68 | } 69 | 70 | if ((long)uimm < 0) { 71 | uimm >>= 1; 72 | shift += 1; 73 | } 74 | SLJIT_ASSERT(((uimm & 0xc000000000000000l) == 0x4000000000000000l) && (shift > 0) && (shift <= 32)); 75 | 76 | if (inv) 77 | uimm = ~uimm; 78 | 79 | FAIL_IF(push_inst(compiler, LUI | TA(dst_ar) | IMM(uimm >> 48), dst_ar)); 80 | if (uimm & 0x0000ffff00000000l) 81 | FAIL_IF(push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(uimm >> 32), dst_ar)); 82 | 83 | imm &= (1l << shift) - 1; 84 | if (!(imm & ~0xffff)) { 85 | ins = (shift == 32) ? DSLL32 : DSLL; 86 | if (shift < 32) 87 | ins |= SH_IMM(shift); 88 | FAIL_IF(push_inst(compiler, ins | TA(dst_ar) | DA(dst_ar), dst_ar)); 89 | return !(imm & 0xffff) ? SLJIT_SUCCESS : push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar); 90 | } 91 | 92 | /* Double shifts needs to be performed. 
*/ 93 | uimm <<= 32; 94 | shift2 = shift - 16; 95 | 96 | while (!(uimm & 0xf000000000000000l)) { 97 | shift2 -= 4; 98 | uimm <<= 4; 99 | } 100 | 101 | if (!(uimm & 0xc000000000000000l)) { 102 | shift2 -= 2; 103 | uimm <<= 2; 104 | } 105 | 106 | if (!(uimm & 0x8000000000000000l)) { 107 | shift2--; 108 | uimm <<= 1; 109 | } 110 | 111 | SLJIT_ASSERT((uimm & 0x8000000000000000l) && (shift2 > 0) && (shift2 <= 16)); 112 | 113 | FAIL_IF(push_inst(compiler, DSLL | TA(dst_ar) | DA(dst_ar) | SH_IMM(shift - shift2), dst_ar)); 114 | FAIL_IF(push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(uimm >> 48), dst_ar)); 115 | FAIL_IF(push_inst(compiler, DSLL | TA(dst_ar) | DA(dst_ar) | SH_IMM(shift2), dst_ar)); 116 | 117 | imm &= (1l << shift2) - 1; 118 | return !(imm & 0xffff) ? SLJIT_SUCCESS : push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar); 119 | } 120 | 121 | #define SELECT_OP(a, b) \ 122 | (!(op & SLJIT_INT_OP) ? a : b) 123 | 124 | #define EMIT_LOGICAL(op_imm, op_norm) \ 125 | if (flags & SRC2_IMM) { \ 126 | if (op & SLJIT_SET_E) \ 127 | FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \ 128 | if (CHECK_FLAGS(SLJIT_SET_E)) \ 129 | FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \ 130 | } \ 131 | else { \ 132 | if (op & SLJIT_SET_E) \ 133 | FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ 134 | if (CHECK_FLAGS(SLJIT_SET_E)) \ 135 | FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \ 136 | } 137 | 138 | #define EMIT_SHIFT(op_dimm, op_dimm32, op_imm, op_dv, op_v) \ 139 | if (flags & SRC2_IMM) { \ 140 | if (src2 >= 32) { \ 141 | SLJIT_ASSERT(!(op & SLJIT_INT_OP)); \ 142 | ins = op_dimm32; \ 143 | src2 -= 32; \ 144 | } \ 145 | else \ 146 | ins = (op & SLJIT_INT_OP) ? op_imm : op_dimm; \ 147 | if (op & SLJIT_SET_E) \ 148 | FAIL_IF(push_inst(compiler, ins | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \ 149 | if (CHECK_FLAGS(SLJIT_SET_E)) \ 150 | FAIL_IF(push_inst(compiler, ins | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \ 151 | } \ 152 | else { \ 153 | ins = (op & SLJIT_INT_OP) ? 
op_v : op_dv; \ 154 | if (op & SLJIT_SET_E) \ 155 | FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ 156 | if (CHECK_FLAGS(SLJIT_SET_E)) \ 157 | FAIL_IF(push_inst(compiler, ins | S(src2) | T(src1) | D(dst), DR(dst))); \ 158 | } 159 | 160 | static __inline int emit_single_op(struct sljit_compiler *compiler, int op, int flags, 161 | int dst, int src1, long src2) 162 | { 163 | sljit_ins ins; 164 | 165 | switch (GET_OPCODE(op)) { 166 | case SLJIT_MOV: 167 | case SLJIT_MOV_P: 168 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 169 | if (dst != src2) 170 | return push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(dst), DR(dst)); 171 | return SLJIT_SUCCESS; 172 | 173 | case SLJIT_MOV_UB: 174 | case SLJIT_MOV_SB: 175 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 176 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 177 | if (op == SLJIT_MOV_SB) { 178 | FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(24), DR(dst))); 179 | return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(24), DR(dst)); 180 | } 181 | return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst)); 182 | } 183 | else if (dst != src2) 184 | SLJIT_ASSERT_STOP(); 185 | return SLJIT_SUCCESS; 186 | 187 | case SLJIT_MOV_UH: 188 | case SLJIT_MOV_SH: 189 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 190 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 191 | if (op == SLJIT_MOV_SH) { 192 | FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(16), DR(dst))); 193 | return push_inst(compiler, DSRA32 | T(dst) | D(dst) | SH_IMM(16), DR(dst)); 194 | } 195 | return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst)); 196 | } 197 | else if (dst != src2) 198 | SLJIT_ASSERT_STOP(); 199 | return SLJIT_SUCCESS; 200 | 201 | case SLJIT_MOV_UI: 202 | SLJIT_ASSERT(!(op & SLJIT_INT_OP)); 203 | FAIL_IF(push_inst(compiler, DSLL32 | T(src2) | D(dst) | SH_IMM(0), DR(dst))); 204 | return push_inst(compiler, DSRL32 | T(dst) | D(dst) | SH_IMM(0), DR(dst)); 205 | 206 | case SLJIT_MOV_SI: 207 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 208 | return push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(0), DR(dst)); 209 | 210 | case SLJIT_NOT: 211 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 212 | if (op & SLJIT_SET_E) 213 | FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); 214 | if (CHECK_FLAGS(SLJIT_SET_E)) 215 | FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst))); 216 | return SLJIT_SUCCESS; 217 | 218 | case SLJIT_CLZ: 219 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 220 | #if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) 221 | if (op & SLJIT_SET_E) 222 | FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); 223 | if (CHECK_FLAGS(SLJIT_SET_E)) 224 | FAIL_IF(push_inst(compiler, SELECT_OP(DCLZ, CLZ) | S(src2) | T(dst) | D(dst), DR(dst))); 225 | #else 226 | if (flags & UNUSED_DEST) { 227 | FAIL_IF(push_inst(compiler, SELECT_OP(DSRL32, SRL) | T(src2) | DA(EQUAL_FLAG) | SH_IMM(31), EQUAL_FLAG)); 228 | return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG); 229 | } 230 | /* Nearly all instructions are unmovable in the following sequence. */ 231 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); 232 | /* Check zero. 
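	   The branch-and-shift sequence emitted below behaves like the following C
	   sketch (an added illustration; the constant becomes 32 instead of 64 when
	   SLJIT_INT_OP is set, and TMP_REG1 plays the role of x):

	       static long clz_sketch(unsigned long x)
	       {
	           long n = -1;
	           long keep_going;

	           if (x == 0)
	               return 64;                    // BEQ below plus its delay-slot ORI
	           do {
	               n++;                          // ADDIU dst, dst, 1
	               keep_going = ((long)x >= 0);  // BGEZ tests x before the shift
	               x <<= 1;                      // the delay-slot SLL runs on every pass
	           } while (keep_going);
	           return n;
	       }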
*/ 233 | FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS)); 234 | FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM((op & SLJIT_INT_OP) ? 32 : 64), UNMOVABLE_INS)); 235 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | T(dst) | IMM(-1), DR(dst))); 236 | /* Loop for searching the highest bit. */ 237 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(dst) | T(dst) | IMM(1), DR(dst))); 238 | FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS)); 239 | FAIL_IF(push_inst(compiler, SELECT_OP(DSLL, SLL) | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS)); 240 | if (op & SLJIT_SET_E) 241 | return push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG); 242 | #endif 243 | return SLJIT_SUCCESS; 244 | 245 | case SLJIT_ADD: 246 | if (flags & SRC2_IMM) { 247 | if (op & SLJIT_SET_O) { 248 | if (src2 >= 0) 249 | FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 250 | else 251 | FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 252 | } 253 | if (op & SLJIT_SET_E) 254 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); 255 | if (op & (SLJIT_SET_C | SLJIT_SET_O)) { 256 | if (src2 >= 0) 257 | FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); 258 | else { 259 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); 260 | FAIL_IF(push_inst(compiler, OR | S(src1) | TA(ULESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG)); 261 | } 262 | } 263 | /* dst may be the same as src1 or src2. */ 264 | if (CHECK_FLAGS(SLJIT_SET_E)) 265 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst))); 266 | } 267 | else { 268 | if (op & SLJIT_SET_O) 269 | FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 270 | if (op & SLJIT_SET_E) 271 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); 272 | if (op & (SLJIT_SET_C | SLJIT_SET_O)) 273 | FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(ULESS_FLAG), ULESS_FLAG)); 274 | /* dst may be the same as src1 or src2. */ 275 | if (CHECK_FLAGS(SLJIT_SET_E)) 276 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst))); 277 | } 278 | 279 | /* a + b >= a | b (otherwise, the carry should be set to 1). 
*/ 280 | if (op & (SLJIT_SET_C | SLJIT_SET_O)) 281 | FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(ULESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG)); 282 | if (!(op & SLJIT_SET_O)) 283 | return SLJIT_SUCCESS; 284 | FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | TA(ULESS_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); 285 | FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 286 | FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 287 | return push_inst(compiler, SELECT_OP(DSRL32, SLL) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG) | SH_IMM(31), OVERFLOW_FLAG); 288 | 289 | case SLJIT_ADDC: 290 | if (flags & SRC2_IMM) { 291 | if (op & SLJIT_SET_C) { 292 | if (src2 >= 0) 293 | FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(OVERFLOW_FLAG) | IMM(src2), OVERFLOW_FLAG)); 294 | else { 295 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | SA(0) | TA(OVERFLOW_FLAG) | IMM(src2), OVERFLOW_FLAG)); 296 | FAIL_IF(push_inst(compiler, OR | S(src1) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 297 | } 298 | } 299 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(src2), DR(dst))); 300 | } else { 301 | if (op & SLJIT_SET_C) 302 | FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 303 | /* dst may be the same as src1 or src2. */ 304 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(src1) | T(src2) | D(dst), DR(dst))); 305 | } 306 | if (op & SLJIT_SET_C) 307 | FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 308 | 309 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDU, ADDU) | S(dst) | TA(ULESS_FLAG) | D(dst), DR(dst))); 310 | if (!(op & SLJIT_SET_C)) 311 | return SLJIT_SUCCESS; 312 | 313 | /* Set ULESS_FLAG (dst == 0) && (ULESS_FLAG == 1). */ 314 | FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(ULESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG)); 315 | /* Set carry flag. */ 316 | return push_inst(compiler, OR | SA(ULESS_FLAG) | TA(OVERFLOW_FLAG) | DA(ULESS_FLAG), ULESS_FLAG); 317 | 318 | case SLJIT_SUB: 319 | if ((flags & SRC2_IMM) && ((op & (SLJIT_SET_U | SLJIT_SET_S)) || src2 == SIMM_MIN)) { 320 | FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); 321 | src2 = TMP_REG2; 322 | flags &= ~SRC2_IMM; 323 | } 324 | 325 | if (flags & SRC2_IMM) { 326 | if (op & SLJIT_SET_O) { 327 | if (src2 >= 0) 328 | FAIL_IF(push_inst(compiler, OR | S(src1) | T(src1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 329 | else 330 | FAIL_IF(push_inst(compiler, NOR | S(src1) | T(src1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 331 | } 332 | if (op & SLJIT_SET_E) 333 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); 334 | if (op & (SLJIT_SET_C | SLJIT_SET_O)) 335 | FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); 336 | /* dst may be the same as src1 or src2. 
*/ 337 | if (CHECK_FLAGS(SLJIT_SET_E)) 338 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst))); 339 | } 340 | else { 341 | if (op & SLJIT_SET_O) 342 | FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 343 | if (op & SLJIT_SET_E) 344 | FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); 345 | if (op & (SLJIT_SET_U | SLJIT_SET_C | SLJIT_SET_O)) 346 | FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(ULESS_FLAG), ULESS_FLAG)); 347 | if (op & SLJIT_SET_U) 348 | FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(UGREATER_FLAG), UGREATER_FLAG)); 349 | if (op & SLJIT_SET_S) { 350 | FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(LESS_FLAG), LESS_FLAG)); 351 | FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(GREATER_FLAG), GREATER_FLAG)); 352 | } 353 | /* dst may be the same as src1 or src2. */ 354 | if (CHECK_FLAGS(SLJIT_SET_E | SLJIT_SET_U | SLJIT_SET_S | SLJIT_SET_C)) 355 | FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst))); 356 | } 357 | 358 | if (!(op & SLJIT_SET_O)) 359 | return SLJIT_SUCCESS; 360 | FAIL_IF(push_inst(compiler, SELECT_OP(DSLL32, SLL) | TA(ULESS_FLAG) | D(TMP_REG1) | SH_IMM(31), DR(TMP_REG1))); 361 | FAIL_IF(push_inst(compiler, XOR | S(TMP_REG1) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 362 | FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 363 | return push_inst(compiler, SELECT_OP(DSRL32, SRL) | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG) | SH_IMM(31), OVERFLOW_FLAG); 364 | 365 | case SLJIT_SUBC: 366 | if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { 367 | FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); 368 | src2 = TMP_REG2; 369 | flags &= ~SRC2_IMM; 370 | } 371 | 372 | if (flags & SRC2_IMM) { 373 | if (op & SLJIT_SET_C) 374 | FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(OVERFLOW_FLAG) | IMM(src2), OVERFLOW_FLAG)); 375 | /* dst may be the same as src1 or src2. */ 376 | FAIL_IF(push_inst(compiler, SELECT_OP(DADDIU, ADDIU) | S(src1) | T(dst) | IMM(-src2), DR(dst))); 377 | } 378 | else { 379 | if (op & SLJIT_SET_C) 380 | FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); 381 | /* dst may be the same as src1 or src2. */ 382 | FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(src1) | T(src2) | D(dst), DR(dst))); 383 | } 384 | 385 | if (op & SLJIT_SET_C) 386 | FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(ULESS_FLAG) | DA(LESS_FLAG), LESS_FLAG)); 387 | 388 | FAIL_IF(push_inst(compiler, SELECT_OP(DSUBU, SUBU) | S(dst) | TA(ULESS_FLAG) | D(dst), DR(dst))); 389 | return (op & SLJIT_SET_C) ? 
push_inst(compiler, OR | SA(OVERFLOW_FLAG) | TA(LESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG) : SLJIT_SUCCESS; 390 | 391 | case SLJIT_MUL: 392 | SLJIT_ASSERT(!(flags & SRC2_IMM)); 393 | if (!(op & SLJIT_SET_O)) { 394 | #if (defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1) 395 | if (op & SLJIT_INT_OP) 396 | return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)); 397 | FAIL_IF(push_inst(compiler, DMULT | S(src1) | T(src2), MOVABLE_INS)); 398 | return push_inst(compiler, MFLO | D(dst), DR(dst)); 399 | #else 400 | FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS)); 401 | return push_inst(compiler, MFLO | D(dst), DR(dst)); 402 | #endif 403 | } 404 | FAIL_IF(push_inst(compiler, SELECT_OP(DMULT, MULT) | S(src1) | T(src2), MOVABLE_INS)); 405 | FAIL_IF(push_inst(compiler, MFHI | DA(ULESS_FLAG), ULESS_FLAG)); 406 | FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst))); 407 | FAIL_IF(push_inst(compiler, SELECT_OP(DSRA32, SRA) | T(dst) | DA(UGREATER_FLAG) | SH_IMM(31), UGREATER_FLAG)); 408 | return push_inst(compiler, SELECT_OP(DSUBU, SUBU) | SA(ULESS_FLAG) | TA(UGREATER_FLAG) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG); 409 | 410 | case SLJIT_AND: 411 | EMIT_LOGICAL(ANDI, AND); 412 | return SLJIT_SUCCESS; 413 | 414 | case SLJIT_OR: 415 | EMIT_LOGICAL(ORI, OR); 416 | return SLJIT_SUCCESS; 417 | 418 | case SLJIT_XOR: 419 | EMIT_LOGICAL(XORI, XOR); 420 | return SLJIT_SUCCESS; 421 | 422 | case SLJIT_SHL: 423 | EMIT_SHIFT(DSLL, DSLL32, SLL, DSLLV, SLLV); 424 | return SLJIT_SUCCESS; 425 | 426 | case SLJIT_LSHR: 427 | EMIT_SHIFT(DSRL, DSRL32, SRL, DSRLV, SRLV); 428 | return SLJIT_SUCCESS; 429 | 430 | case SLJIT_ASHR: 431 | EMIT_SHIFT(DSRA, DSRA32, SRA, DSRAV, SRAV); 432 | return SLJIT_SUCCESS; 433 | } 434 | 435 | SLJIT_ASSERT_STOP(); 436 | return SLJIT_SUCCESS; 437 | } 438 | 439 | static __inline int emit_const(struct sljit_compiler *compiler, int dst, long init_value) 440 | { 441 | FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 48), DR(dst))); 442 | FAIL_IF(push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value >> 32), DR(dst))); 443 | FAIL_IF(push_inst(compiler, DSLL | T(dst) | D(dst) | SH_IMM(16), DR(dst))); 444 | FAIL_IF(push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value >> 16), DR(dst))); 445 | FAIL_IF(push_inst(compiler, DSLL | T(dst) | D(dst) | SH_IMM(16), DR(dst))); 446 | return push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value), DR(dst)); 447 | } 448 | 449 | void sljit_set_jump_addr(unsigned long addr, unsigned long new_addr) 450 | { 451 | sljit_ins *inst = (sljit_ins*)addr; 452 | 453 | inst[0] = (inst[0] & 0xffff0000) | ((new_addr >> 48) & 0xffff); 454 | inst[1] = (inst[1] & 0xffff0000) | ((new_addr >> 32) & 0xffff); 455 | inst[3] = (inst[3] & 0xffff0000) | ((new_addr >> 16) & 0xffff); 456 | inst[5] = (inst[5] & 0xffff0000) | (new_addr & 0xffff); 457 | SLJIT_CACHE_FLUSH(inst, inst + 6); 458 | } 459 | 460 | void sljit_set_const(unsigned long addr, long new_constant) 461 | { 462 | sljit_ins *inst = (sljit_ins*)addr; 463 | 464 | inst[0] = (inst[0] & 0xffff0000) | ((new_constant >> 48) & 0xffff); 465 | inst[1] = (inst[1] & 0xffff0000) | ((new_constant >> 32) & 0xffff); 466 | inst[3] = (inst[3] & 0xffff0000) | ((new_constant >> 16) & 0xffff); 467 | inst[5] = (inst[5] & 0xffff0000) | (new_constant & 0xffff); 468 | SLJIT_CACHE_FLUSH(inst, inst + 6); 469 | } 470 | -------------------------------------------------------------------------------- /sljit_src/sljitNativePPC_32.c: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | /* ppc 32-bit arch dependent functions. */ 28 | 29 | static int load_immediate(struct sljit_compiler *compiler, int reg, long imm) 30 | { 31 | if (imm <= SIMM_MAX && imm >= SIMM_MIN) 32 | return push_inst(compiler, ADDI | D(reg) | A(0) | IMM(imm)); 33 | 34 | if (!(imm & ~0xffff)) 35 | return push_inst(compiler, ORI | S(TMP_ZERO) | A(reg) | IMM(imm)); 36 | 37 | FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(imm >> 16))); 38 | return (imm & 0xffff) ? 
push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS; 39 | } 40 | 41 | #define INS_CLEAR_LEFT(dst, src, from) \ 42 | (RLWINM | S(src) | A(dst) | ((from) << 6) | (31 << 1)) 43 | 44 | static __inline int emit_single_op(struct sljit_compiler *compiler, int op, int flags, 45 | int dst, int src1, int src2) 46 | { 47 | switch (op) { 48 | case SLJIT_MOV: 49 | case SLJIT_MOV_UI: 50 | case SLJIT_MOV_SI: 51 | case SLJIT_MOV_P: 52 | SLJIT_ASSERT(src1 == TMP_REG1); 53 | if (dst != src2) 54 | return push_inst(compiler, OR | S(src2) | A(dst) | B(src2)); 55 | return SLJIT_SUCCESS; 56 | 57 | case SLJIT_MOV_UB: 58 | case SLJIT_MOV_SB: 59 | SLJIT_ASSERT(src1 == TMP_REG1); 60 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 61 | if (op == SLJIT_MOV_SB) 62 | return push_inst(compiler, EXTSB | S(src2) | A(dst)); 63 | return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 24)); 64 | } 65 | else if ((flags & REG_DEST) && op == SLJIT_MOV_SB) 66 | return push_inst(compiler, EXTSB | S(src2) | A(dst)); 67 | else { 68 | SLJIT_ASSERT(dst == src2); 69 | } 70 | return SLJIT_SUCCESS; 71 | 72 | case SLJIT_MOV_UH: 73 | case SLJIT_MOV_SH: 74 | SLJIT_ASSERT(src1 == TMP_REG1); 75 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 76 | if (op == SLJIT_MOV_SH) 77 | return push_inst(compiler, EXTSH | S(src2) | A(dst)); 78 | return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 16)); 79 | } 80 | else { 81 | SLJIT_ASSERT(dst == src2); 82 | } 83 | return SLJIT_SUCCESS; 84 | 85 | case SLJIT_NOT: 86 | SLJIT_ASSERT(src1 == TMP_REG1); 87 | return push_inst(compiler, NOR | RC(flags) | S(src2) | A(dst) | B(src2)); 88 | 89 | case SLJIT_NEG: 90 | SLJIT_ASSERT(src1 == TMP_REG1); 91 | return push_inst(compiler, NEG | OERC(flags) | D(dst) | A(src2)); 92 | 93 | case SLJIT_CLZ: 94 | SLJIT_ASSERT(src1 == TMP_REG1); 95 | return push_inst(compiler, CNTLZW | RC(flags) | S(src2) | A(dst)); 96 | 97 | case SLJIT_ADD: 98 | if (flags & ALT_FORM1) { 99 | /* Flags does not set: BIN_IMM_EXTS unnecessary. */ 100 | SLJIT_ASSERT(src2 == TMP_REG2); 101 | return push_inst(compiler, ADDI | D(dst) | A(src1) | compiler->imm); 102 | } 103 | if (flags & ALT_FORM2) { 104 | /* Flags does not set: BIN_IMM_EXTS unnecessary. */ 105 | SLJIT_ASSERT(src2 == TMP_REG2); 106 | return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); 107 | } 108 | if (flags & ALT_FORM3) { 109 | SLJIT_ASSERT(src2 == TMP_REG2); 110 | return push_inst(compiler, ADDIC | D(dst) | A(src1) | compiler->imm); 111 | } 112 | if (flags & ALT_FORM4) { 113 | /* Flags does not set: BIN_IMM_EXTS unnecessary. */ 114 | FAIL_IF(push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff))); 115 | return push_inst(compiler, ADDIS | D(dst) | A(dst) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1))); 116 | } 117 | if (!(flags & ALT_SET_FLAGS)) 118 | return push_inst(compiler, ADD | D(dst) | A(src1) | B(src2)); 119 | return push_inst(compiler, ADDC | OERC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)); 120 | 121 | case SLJIT_ADDC: 122 | if (flags & ALT_FORM1) { 123 | FAIL_IF(push_inst(compiler, MFXER | D(0))); 124 | FAIL_IF(push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2))); 125 | return push_inst(compiler, MTXER | S(0)); 126 | } 127 | return push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2)); 128 | 129 | case SLJIT_SUB: 130 | if (flags & ALT_FORM1) { 131 | /* Flags does not set: BIN_IMM_EXTS unnecessary. 
*/ 132 | SLJIT_ASSERT(src2 == TMP_REG2); 133 | return push_inst(compiler, SUBFIC | D(dst) | A(src1) | compiler->imm); 134 | } 135 | if (flags & (ALT_FORM2 | ALT_FORM3)) { 136 | SLJIT_ASSERT(src2 == TMP_REG2); 137 | if (flags & ALT_FORM2) 138 | FAIL_IF(push_inst(compiler, CMPI | CRD(0) | A(src1) | compiler->imm)); 139 | if (flags & ALT_FORM3) 140 | return push_inst(compiler, CMPLI | CRD(4) | A(src1) | compiler->imm); 141 | return SLJIT_SUCCESS; 142 | } 143 | if (flags & (ALT_FORM4 | ALT_FORM5)) { 144 | if (flags & ALT_FORM4) 145 | FAIL_IF(push_inst(compiler, CMPL | CRD(4) | A(src1) | B(src2))); 146 | if (flags & ALT_FORM5) 147 | FAIL_IF(push_inst(compiler, CMP | CRD(0) | A(src1) | B(src2))); 148 | return SLJIT_SUCCESS; 149 | } 150 | if (!(flags & ALT_SET_FLAGS)) 151 | return push_inst(compiler, SUBF | D(dst) | A(src2) | B(src1)); 152 | if (flags & ALT_FORM6) 153 | FAIL_IF(push_inst(compiler, CMPL | CRD(4) | A(src1) | B(src2))); 154 | return push_inst(compiler, SUBFC | OERC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)); 155 | 156 | case SLJIT_SUBC: 157 | if (flags & ALT_FORM1) { 158 | FAIL_IF(push_inst(compiler, MFXER | D(0))); 159 | FAIL_IF(push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1))); 160 | return push_inst(compiler, MTXER | S(0)); 161 | } 162 | return push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1)); 163 | 164 | case SLJIT_MUL: 165 | if (flags & ALT_FORM1) { 166 | SLJIT_ASSERT(src2 == TMP_REG2); 167 | return push_inst(compiler, MULLI | D(dst) | A(src1) | compiler->imm); 168 | } 169 | return push_inst(compiler, MULLW | OERC(flags) | D(dst) | A(src2) | B(src1)); 170 | 171 | case SLJIT_AND: 172 | if (flags & ALT_FORM1) { 173 | SLJIT_ASSERT(src2 == TMP_REG2); 174 | return push_inst(compiler, ANDI | S(src1) | A(dst) | compiler->imm); 175 | } 176 | if (flags & ALT_FORM2) { 177 | SLJIT_ASSERT(src2 == TMP_REG2); 178 | return push_inst(compiler, ANDIS | S(src1) | A(dst) | compiler->imm); 179 | } 180 | return push_inst(compiler, AND | RC(flags) | S(src1) | A(dst) | B(src2)); 181 | 182 | case SLJIT_OR: 183 | if (flags & ALT_FORM1) { 184 | SLJIT_ASSERT(src2 == TMP_REG2); 185 | return push_inst(compiler, ORI | S(src1) | A(dst) | compiler->imm); 186 | } 187 | if (flags & ALT_FORM2) { 188 | SLJIT_ASSERT(src2 == TMP_REG2); 189 | return push_inst(compiler, ORIS | S(src1) | A(dst) | compiler->imm); 190 | } 191 | if (flags & ALT_FORM3) { 192 | SLJIT_ASSERT(src2 == TMP_REG2); 193 | FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(compiler->imm))); 194 | return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); 195 | } 196 | return push_inst(compiler, OR | RC(flags) | S(src1) | A(dst) | B(src2)); 197 | 198 | case SLJIT_XOR: 199 | if (flags & ALT_FORM1) { 200 | SLJIT_ASSERT(src2 == TMP_REG2); 201 | return push_inst(compiler, XORI | S(src1) | A(dst) | compiler->imm); 202 | } 203 | if (flags & ALT_FORM2) { 204 | SLJIT_ASSERT(src2 == TMP_REG2); 205 | return push_inst(compiler, XORIS | S(src1) | A(dst) | compiler->imm); 206 | } 207 | if (flags & ALT_FORM3) { 208 | SLJIT_ASSERT(src2 == TMP_REG2); 209 | FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(compiler->imm))); 210 | return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); 211 | } 212 | return push_inst(compiler, XOR | RC(flags) | S(src1) | A(dst) | B(src2)); 213 | 214 | case SLJIT_SHL: 215 | if (flags & ALT_FORM1) { 216 | SLJIT_ASSERT(src2 == TMP_REG2); 217 | compiler->imm &= 0x1f; 218 | return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | 
((31 - compiler->imm) << 1)); 219 | } 220 | return push_inst(compiler, SLW | RC(flags) | S(src1) | A(dst) | B(src2)); 221 | 222 | case SLJIT_LSHR: 223 | if (flags & ALT_FORM1) { 224 | SLJIT_ASSERT(src2 == TMP_REG2); 225 | compiler->imm &= 0x1f; 226 | return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1)); 227 | } 228 | return push_inst(compiler, SRW | RC(flags) | S(src1) | A(dst) | B(src2)); 229 | 230 | case SLJIT_ASHR: 231 | if (flags & ALT_FORM3) 232 | FAIL_IF(push_inst(compiler, MFXER | D(0))); 233 | if (flags & ALT_FORM1) { 234 | SLJIT_ASSERT(src2 == TMP_REG2); 235 | compiler->imm &= 0x1f; 236 | FAIL_IF(push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11))); 237 | } 238 | else 239 | FAIL_IF(push_inst(compiler, SRAW | RC(flags) | S(src1) | A(dst) | B(src2))); 240 | return (flags & ALT_FORM3) ? push_inst(compiler, MTXER | S(0)) : SLJIT_SUCCESS; 241 | } 242 | 243 | SLJIT_ASSERT_STOP(); 244 | return SLJIT_SUCCESS; 245 | } 246 | 247 | static __inline int emit_const(struct sljit_compiler *compiler, int reg, long init_value) 248 | { 249 | FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(init_value >> 16))); 250 | return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value)); 251 | } 252 | 253 | void sljit_set_jump_addr(unsigned long addr, unsigned long new_addr) 254 | { 255 | sljit_ins *inst = (sljit_ins*)addr; 256 | 257 | inst[0] = (inst[0] & 0xffff0000) | ((new_addr >> 16) & 0xffff); 258 | inst[1] = (inst[1] & 0xffff0000) | (new_addr & 0xffff); 259 | SLJIT_CACHE_FLUSH(inst, inst + 2); 260 | } 261 | 262 | void sljit_set_const(unsigned long addr, long new_constant) 263 | { 264 | sljit_ins *inst = (sljit_ins*)addr; 265 | 266 | inst[0] = (inst[0] & 0xffff0000) | ((new_constant >> 16) & 0xffff); 267 | inst[1] = (inst[1] & 0xffff0000) | (new_constant & 0xffff); 268 | SLJIT_CACHE_FLUSH(inst, inst + 2); 269 | } 270 | -------------------------------------------------------------------------------- /sljit_src/sljitNativePPC_64.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | /* ppc 64-bit arch dependent functions. */ 28 | 29 | #if defined(__GNUC__) || (defined(__IBM_GCC_ASM) && __IBM_GCC_ASM) 30 | #define ASM_SLJIT_CLZ(src, dst) \ 31 | __asm__ volatile ( "cntlzd %0, %1" : "=r"(dst) : "r"(src) ) 32 | #elif defined(__xlc__) 33 | #error "Please enable GCC syntax for inline assembly statements" 34 | #else 35 | #error "Must implement count leading zeroes" 36 | #endif 37 | 38 | #define RLDI(dst, src, sh, mb, type) \ 39 | (HI(30) | S(src) | A(dst) | ((type) << 2) | (((sh) & 0x1f) << 11) | (((sh) & 0x20) >> 4) | (((mb) & 0x1f) << 6) | ((mb) & 0x20)) 40 | 41 | #define PUSH_RLDICR(reg, shift) \ 42 | push_inst(compiler, RLDI(reg, reg, 63 - shift, shift, 1)) 43 | 44 | static int load_immediate(struct sljit_compiler *compiler, int reg, long imm) 45 | { 46 | unsigned long tmp; 47 | unsigned long shift; 48 | unsigned long tmp2; 49 | unsigned long shift2; 50 | 51 | if (imm <= SIMM_MAX && imm >= SIMM_MIN) 52 | return push_inst(compiler, ADDI | D(reg) | A(0) | IMM(imm)); 53 | 54 | if (!(imm & ~0xffff)) 55 | return push_inst(compiler, ORI | S(TMP_ZERO) | A(reg) | IMM(imm)); 56 | 57 | if (imm <= 0x7fffffffl && imm >= -0x80000000l) { 58 | FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(imm >> 16))); 59 | return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS; 60 | } 61 | 62 | /* Count leading zeroes. */ 63 | tmp = (imm >= 0) ? imm : ~imm; 64 | ASM_SLJIT_CLZ(tmp, shift); 65 | SLJIT_ASSERT(shift > 0); 66 | shift--; 67 | tmp = (imm << shift); 68 | 69 | if ((tmp & ~0xffff000000000000ul) == 0) { 70 | FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48))); 71 | shift += 15; 72 | return PUSH_RLDICR(reg, shift); 73 | } 74 | 75 | if ((tmp & ~0xffffffff00000000ul) == 0) { 76 | FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(tmp >> 48))); 77 | FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(tmp >> 32))); 78 | shift += 31; 79 | return PUSH_RLDICR(reg, shift); 80 | } 81 | 82 | /* Cut out the 16 bit from immediate. */ 83 | shift += 15; 84 | tmp2 = imm & ((1ul << (63 - shift)) - 1); 85 | 86 | if (tmp2 <= 0xffff) { 87 | FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48))); 88 | FAIL_IF(PUSH_RLDICR(reg, shift)); 89 | return push_inst(compiler, ORI | S(reg) | A(reg) | tmp2); 90 | } 91 | 92 | if (tmp2 <= 0xffffffff) { 93 | FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48))); 94 | FAIL_IF(PUSH_RLDICR(reg, shift)); 95 | FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | (tmp2 >> 16))); 96 | return (imm & 0xffff) ? 
push_inst(compiler, ORI | S(reg) | A(reg) | IMM(tmp2)) : SLJIT_SUCCESS; 97 | } 98 | 99 | ASM_SLJIT_CLZ(tmp2, shift2); 100 | tmp2 <<= shift2; 101 | 102 | if ((tmp2 & ~0xffff000000000000ul) == 0) { 103 | FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48))); 104 | shift2 += 15; 105 | shift += (63 - shift2); 106 | FAIL_IF(PUSH_RLDICR(reg, shift)); 107 | FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | (tmp2 >> 48))); 108 | return PUSH_RLDICR(reg, shift2); 109 | } 110 | 111 | /* The general version. */ 112 | FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(imm >> 48))); 113 | FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm >> 32))); 114 | FAIL_IF(PUSH_RLDICR(reg, 31)); 115 | FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | IMM(imm >> 16))); 116 | return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)); 117 | } 118 | 119 | /* Simplified mnemonics: clrldi. */ 120 | #define INS_CLEAR_LEFT(dst, src, from) \ 121 | (RLDICL | S(src) | A(dst) | ((from) << 6) | (1 << 5)) 122 | 123 | /* Sign extension for integer operations. */ 124 | #define UN_EXTS() \ 125 | if ((flags & (ALT_SIGN_EXT | REG2_SOURCE)) == (ALT_SIGN_EXT | REG2_SOURCE)) { \ 126 | FAIL_IF(push_inst(compiler, EXTSW | S(src2) | A(TMP_REG2))); \ 127 | src2 = TMP_REG2; \ 128 | } 129 | 130 | #define BIN_EXTS() \ 131 | if (flags & ALT_SIGN_EXT) { \ 132 | if (flags & REG1_SOURCE) { \ 133 | FAIL_IF(push_inst(compiler, EXTSW | S(src1) | A(TMP_REG1))); \ 134 | src1 = TMP_REG1; \ 135 | } \ 136 | if (flags & REG2_SOURCE) { \ 137 | FAIL_IF(push_inst(compiler, EXTSW | S(src2) | A(TMP_REG2))); \ 138 | src2 = TMP_REG2; \ 139 | } \ 140 | } 141 | 142 | #define BIN_IMM_EXTS() \ 143 | if ((flags & (ALT_SIGN_EXT | REG1_SOURCE)) == (ALT_SIGN_EXT | REG1_SOURCE)) { \ 144 | FAIL_IF(push_inst(compiler, EXTSW | S(src1) | A(TMP_REG1))); \ 145 | src1 = TMP_REG1; \ 146 | } 147 | 148 | static __inline int emit_single_op(struct sljit_compiler *compiler, int op, int flags, 149 | int dst, int src1, int src2) 150 | { 151 | switch (op) { 152 | case SLJIT_MOV: 153 | case SLJIT_MOV_P: 154 | SLJIT_ASSERT(src1 == TMP_REG1); 155 | if (dst != src2) 156 | return push_inst(compiler, OR | S(src2) | A(dst) | B(src2)); 157 | return SLJIT_SUCCESS; 158 | 159 | case SLJIT_MOV_UI: 160 | case SLJIT_MOV_SI: 161 | SLJIT_ASSERT(src1 == TMP_REG1); 162 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 163 | if (op == SLJIT_MOV_SI) 164 | return push_inst(compiler, EXTSW | S(src2) | A(dst)); 165 | return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 0)); 166 | } 167 | else { 168 | SLJIT_ASSERT(dst == src2); 169 | } 170 | return SLJIT_SUCCESS; 171 | 172 | case SLJIT_MOV_UB: 173 | case SLJIT_MOV_SB: 174 | SLJIT_ASSERT(src1 == TMP_REG1); 175 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 176 | if (op == SLJIT_MOV_SB) 177 | return push_inst(compiler, EXTSB | S(src2) | A(dst)); 178 | return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 24)); 179 | } 180 | else if ((flags & REG_DEST) && op == SLJIT_MOV_SB) 181 | return push_inst(compiler, EXTSB | S(src2) | A(dst)); 182 | else { 183 | SLJIT_ASSERT(dst == src2); 184 | } 185 | return SLJIT_SUCCESS; 186 | 187 | case SLJIT_MOV_UH: 188 | case SLJIT_MOV_SH: 189 | SLJIT_ASSERT(src1 == TMP_REG1); 190 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 191 | if (op == SLJIT_MOV_SH) 192 | return push_inst(compiler, EXTSH | S(src2) | A(dst)); 193 | return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 16)); 194 | } 195 | else { 196 | SLJIT_ASSERT(dst == 
src2); 197 | } 198 | return SLJIT_SUCCESS; 199 | 200 | case SLJIT_NOT: 201 | SLJIT_ASSERT(src1 == TMP_REG1); 202 | UN_EXTS(); 203 | return push_inst(compiler, NOR | RC(flags) | S(src2) | A(dst) | B(src2)); 204 | 205 | case SLJIT_NEG: 206 | SLJIT_ASSERT(src1 == TMP_REG1); 207 | UN_EXTS(); 208 | return push_inst(compiler, NEG | OERC(flags) | D(dst) | A(src2)); 209 | 210 | case SLJIT_CLZ: 211 | SLJIT_ASSERT(src1 == TMP_REG1); 212 | if (flags & ALT_FORM1) 213 | return push_inst(compiler, CNTLZW | RC(flags) | S(src2) | A(dst)); 214 | return push_inst(compiler, CNTLZD | RC(flags) | S(src2) | A(dst)); 215 | 216 | case SLJIT_ADD: 217 | if (flags & ALT_FORM1) { 218 | /* Flags does not set: BIN_IMM_EXTS unnecessary. */ 219 | SLJIT_ASSERT(src2 == TMP_REG2); 220 | return push_inst(compiler, ADDI | D(dst) | A(src1) | compiler->imm); 221 | } 222 | if (flags & ALT_FORM2) { 223 | /* Flags does not set: BIN_IMM_EXTS unnecessary. */ 224 | SLJIT_ASSERT(src2 == TMP_REG2); 225 | return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); 226 | } 227 | if (flags & ALT_FORM3) { 228 | SLJIT_ASSERT(src2 == TMP_REG2); 229 | BIN_IMM_EXTS(); 230 | return push_inst(compiler, ADDIC | D(dst) | A(src1) | compiler->imm); 231 | } 232 | if (flags & ALT_FORM4) { 233 | /* Flags does not set: BIN_IMM_EXTS unnecessary. */ 234 | FAIL_IF(push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff))); 235 | return push_inst(compiler, ADDIS | D(dst) | A(dst) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1))); 236 | } 237 | if (!(flags & ALT_SET_FLAGS)) 238 | return push_inst(compiler, ADD | D(dst) | A(src1) | B(src2)); 239 | BIN_EXTS(); 240 | return push_inst(compiler, ADDC | OERC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)); 241 | 242 | case SLJIT_ADDC: 243 | if (flags & ALT_FORM1) { 244 | FAIL_IF(push_inst(compiler, MFXER | D(0))); 245 | FAIL_IF(push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2))); 246 | return push_inst(compiler, MTXER | S(0)); 247 | } 248 | BIN_EXTS(); 249 | return push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2)); 250 | 251 | case SLJIT_SUB: 252 | if (flags & ALT_FORM1) { 253 | /* Flags does not set: BIN_IMM_EXTS unnecessary. */ 254 | SLJIT_ASSERT(src2 == TMP_REG2); 255 | return push_inst(compiler, SUBFIC | D(dst) | A(src1) | compiler->imm); 256 | } 257 | if (flags & (ALT_FORM2 | ALT_FORM3)) { 258 | SLJIT_ASSERT(src2 == TMP_REG2); 259 | if (flags & ALT_FORM2) 260 | FAIL_IF(push_inst(compiler, CMPI | CRD(0 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | compiler->imm)); 261 | if (flags & ALT_FORM3) 262 | return push_inst(compiler, CMPLI | CRD(4 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | compiler->imm); 263 | return SLJIT_SUCCESS; 264 | } 265 | if (flags & (ALT_FORM4 | ALT_FORM5)) { 266 | if (flags & ALT_FORM4) 267 | FAIL_IF(push_inst(compiler, CMPL | CRD(4 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | B(src2))); 268 | if (flags & ALT_FORM5) 269 | return push_inst(compiler, CMP | CRD(0 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | B(src2)); 270 | return SLJIT_SUCCESS; 271 | } 272 | if (!(flags & ALT_SET_FLAGS)) 273 | return push_inst(compiler, SUBF | D(dst) | A(src2) | B(src1)); 274 | BIN_EXTS(); 275 | if (flags & ALT_FORM6) 276 | FAIL_IF(push_inst(compiler, CMPL | CRD(4 | ((flags & ALT_SIGN_EXT) ? 
0 : 1)) | A(src1) | B(src2))); 277 | return push_inst(compiler, SUBFC | OERC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)); 278 | 279 | case SLJIT_SUBC: 280 | if (flags & ALT_FORM1) { 281 | FAIL_IF(push_inst(compiler, MFXER | D(0))); 282 | FAIL_IF(push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1))); 283 | return push_inst(compiler, MTXER | S(0)); 284 | } 285 | BIN_EXTS(); 286 | return push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1)); 287 | 288 | case SLJIT_MUL: 289 | if (flags & ALT_FORM1) { 290 | SLJIT_ASSERT(src2 == TMP_REG2); 291 | return push_inst(compiler, MULLI | D(dst) | A(src1) | compiler->imm); 292 | } 293 | BIN_EXTS(); 294 | if (flags & ALT_FORM2) 295 | return push_inst(compiler, MULLW | OERC(flags) | D(dst) | A(src2) | B(src1)); 296 | return push_inst(compiler, MULLD | OERC(flags) | D(dst) | A(src2) | B(src1)); 297 | 298 | case SLJIT_AND: 299 | if (flags & ALT_FORM1) { 300 | SLJIT_ASSERT(src2 == TMP_REG2); 301 | return push_inst(compiler, ANDI | S(src1) | A(dst) | compiler->imm); 302 | } 303 | if (flags & ALT_FORM2) { 304 | SLJIT_ASSERT(src2 == TMP_REG2); 305 | return push_inst(compiler, ANDIS | S(src1) | A(dst) | compiler->imm); 306 | } 307 | return push_inst(compiler, AND | RC(flags) | S(src1) | A(dst) | B(src2)); 308 | 309 | case SLJIT_OR: 310 | if (flags & ALT_FORM1) { 311 | SLJIT_ASSERT(src2 == TMP_REG2); 312 | return push_inst(compiler, ORI | S(src1) | A(dst) | compiler->imm); 313 | } 314 | if (flags & ALT_FORM2) { 315 | SLJIT_ASSERT(src2 == TMP_REG2); 316 | return push_inst(compiler, ORIS | S(src1) | A(dst) | compiler->imm); 317 | } 318 | if (flags & ALT_FORM3) { 319 | SLJIT_ASSERT(src2 == TMP_REG2); 320 | FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(compiler->imm))); 321 | return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); 322 | } 323 | return push_inst(compiler, OR | RC(flags) | S(src1) | A(dst) | B(src2)); 324 | 325 | case SLJIT_XOR: 326 | if (flags & ALT_FORM1) { 327 | SLJIT_ASSERT(src2 == TMP_REG2); 328 | return push_inst(compiler, XORI | S(src1) | A(dst) | compiler->imm); 329 | } 330 | if (flags & ALT_FORM2) { 331 | SLJIT_ASSERT(src2 == TMP_REG2); 332 | return push_inst(compiler, XORIS | S(src1) | A(dst) | compiler->imm); 333 | } 334 | if (flags & ALT_FORM3) { 335 | SLJIT_ASSERT(src2 == TMP_REG2); 336 | FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(compiler->imm))); 337 | return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); 338 | } 339 | return push_inst(compiler, XOR | RC(flags) | S(src1) | A(dst) | B(src2)); 340 | 341 | case SLJIT_SHL: 342 | if (flags & ALT_FORM1) { 343 | SLJIT_ASSERT(src2 == TMP_REG2); 344 | if (flags & ALT_FORM2) { 345 | compiler->imm &= 0x1f; 346 | return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | ((31 - compiler->imm) << 1)); 347 | } 348 | else { 349 | compiler->imm &= 0x3f; 350 | return push_inst(compiler, RLDI(dst, src1, compiler->imm, 63 - compiler->imm, 1) | RC(flags)); 351 | } 352 | } 353 | return push_inst(compiler, ((flags & ALT_FORM2) ? 
SLW : SLD) | RC(flags) | S(src1) | A(dst) | B(src2)); 354 | 355 | case SLJIT_LSHR: 356 | if (flags & ALT_FORM1) { 357 | SLJIT_ASSERT(src2 == TMP_REG2); 358 | if (flags & ALT_FORM2) { 359 | compiler->imm &= 0x1f; 360 | return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1)); 361 | } 362 | else { 363 | compiler->imm &= 0x3f; 364 | return push_inst(compiler, RLDI(dst, src1, 64 - compiler->imm, compiler->imm, 0) | RC(flags)); 365 | } 366 | } 367 | return push_inst(compiler, ((flags & ALT_FORM2) ? SRW : SRD) | RC(flags) | S(src1) | A(dst) | B(src2)); 368 | 369 | case SLJIT_ASHR: 370 | if (flags & ALT_FORM3) 371 | FAIL_IF(push_inst(compiler, MFXER | D(0))); 372 | if (flags & ALT_FORM1) { 373 | SLJIT_ASSERT(src2 == TMP_REG2); 374 | if (flags & ALT_FORM2) { 375 | compiler->imm &= 0x1f; 376 | FAIL_IF(push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11))); 377 | } 378 | else { 379 | compiler->imm &= 0x3f; 380 | FAIL_IF(push_inst(compiler, SRADI | RC(flags) | S(src1) | A(dst) | ((compiler->imm & 0x1f) << 11) | ((compiler->imm & 0x20) >> 4))); 381 | } 382 | } 383 | else 384 | FAIL_IF(push_inst(compiler, ((flags & ALT_FORM2) ? SRAW : SRAD) | RC(flags) | S(src1) | A(dst) | B(src2))); 385 | return (flags & ALT_FORM3) ? push_inst(compiler, MTXER | S(0)) : SLJIT_SUCCESS; 386 | } 387 | 388 | SLJIT_ASSERT_STOP(); 389 | return SLJIT_SUCCESS; 390 | } 391 | 392 | static __inline int emit_const(struct sljit_compiler *compiler, int reg, long init_value) 393 | { 394 | FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(init_value >> 48))); 395 | FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value >> 32))); 396 | FAIL_IF(PUSH_RLDICR(reg, 31)); 397 | FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | IMM(init_value >> 16))); 398 | return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value)); 399 | } 400 | 401 | void sljit_set_jump_addr(unsigned long addr, unsigned long new_addr) 402 | { 403 | sljit_ins *inst = (sljit_ins*)addr; 404 | 405 | inst[0] = (inst[0] & 0xffff0000) | ((new_addr >> 48) & 0xffff); 406 | inst[1] = (inst[1] & 0xffff0000) | ((new_addr >> 32) & 0xffff); 407 | inst[3] = (inst[3] & 0xffff0000) | ((new_addr >> 16) & 0xffff); 408 | inst[4] = (inst[4] & 0xffff0000) | (new_addr & 0xffff); 409 | SLJIT_CACHE_FLUSH(inst, inst + 5); 410 | } 411 | 412 | void sljit_set_const(unsigned long addr, long new_constant) 413 | { 414 | sljit_ins *inst = (sljit_ins*)addr; 415 | 416 | inst[0] = (inst[0] & 0xffff0000) | ((new_constant >> 48) & 0xffff); 417 | inst[1] = (inst[1] & 0xffff0000) | ((new_constant >> 32) & 0xffff); 418 | inst[3] = (inst[3] & 0xffff0000) | ((new_constant >> 16) & 0xffff); 419 | inst[4] = (inst[4] & 0xffff0000) | (new_constant & 0xffff); 420 | SLJIT_CACHE_FLUSH(inst, inst + 5); 421 | } 422 | -------------------------------------------------------------------------------- /sljit_src/sljitNativeSPARC_32.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. 
Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | static int load_immediate(struct sljit_compiler *compiler, int dst, long imm) 28 | { 29 | if (imm <= SIMM_MAX && imm >= SIMM_MIN) 30 | return push_inst(compiler, OR | D(dst) | S1(0) | IMM(imm), DR(dst)); 31 | 32 | FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((imm >> 10) & 0x3fffff), DR(dst))); 33 | return (imm & 0x3ff) ? push_inst(compiler, OR | D(dst) | S1(dst) | IMM_ARG | (imm & 0x3ff), DR(dst)) : SLJIT_SUCCESS; 34 | } 35 | 36 | #define ARG2(flags, src2) ((flags & SRC2_IMM) ? IMM(src2) : S2(src2)) 37 | 38 | static __inline int emit_single_op(struct sljit_compiler *compiler, int op, int flags, 39 | int dst, int src1, long src2) 40 | { 41 | SLJIT_COMPILE_ASSERT(ICC_IS_SET == SET_FLAGS, icc_is_set_and_set_flags_must_be_the_same); 42 | 43 | switch (op) { 44 | case SLJIT_MOV: 45 | case SLJIT_MOV_UI: 46 | case SLJIT_MOV_SI: 47 | case SLJIT_MOV_P: 48 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 49 | if (dst != src2) 50 | return push_inst(compiler, OR | D(dst) | S1(0) | S2(src2), DR(dst)); 51 | return SLJIT_SUCCESS; 52 | 53 | case SLJIT_MOV_UB: 54 | case SLJIT_MOV_SB: 55 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 56 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 57 | if (op == SLJIT_MOV_UB) 58 | return push_inst(compiler, AND | D(dst) | S1(src2) | IMM(0xff), DR(dst)); 59 | FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(24), DR(dst))); 60 | return push_inst(compiler, SRA | D(dst) | S1(dst) | IMM(24), DR(dst)); 61 | } 62 | else if (dst != src2) 63 | SLJIT_ASSERT_STOP(); 64 | return SLJIT_SUCCESS; 65 | 66 | case SLJIT_MOV_UH: 67 | case SLJIT_MOV_SH: 68 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 69 | if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { 70 | FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(16), DR(dst))); 71 | return push_inst(compiler, (op == SLJIT_MOV_SH ? SRA : SRL) | D(dst) | S1(dst) | IMM(16), DR(dst)); 72 | } 73 | else if (dst != src2) 74 | SLJIT_ASSERT_STOP(); 75 | return SLJIT_SUCCESS; 76 | 77 | case SLJIT_NOT: 78 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 79 | return push_inst(compiler, XNOR | (flags & SET_FLAGS) | D(dst) | S1(0) | S2(src2), DR(dst) | (flags & SET_FLAGS)); 80 | 81 | case SLJIT_CLZ: 82 | SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); 83 | /* sparc 32 does not support SLJIT_KEEP_FLAGS. Not sure I can fix this. 
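   A note on the sequence below: SPARC-32 has no native count-leading-zeros
   instruction, so CLZ is emulated here. The source is first tested against zero so
   that a zero input produces 32; otherwise dst starts at -1 and a copy of the source
   is repeatedly shifted left while dst is incremented in the branch delay slot, the
   loop ending once the sign bit of the tested value is set, which leaves the
   leading-zero count in dst.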
*/ 84 | FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(src2) | S2(0), SET_FLAGS)); 85 | FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2(src2), DR(TMP_REG1))); 86 | FAIL_IF(push_inst(compiler, BICC | DA(0x1) | (7 & DISP_MASK), UNMOVABLE_INS)); 87 | FAIL_IF(push_inst(compiler, OR | (flags & SET_FLAGS) | D(dst) | S1(0) | IMM(32), UNMOVABLE_INS | (flags & SET_FLAGS))); 88 | FAIL_IF(push_inst(compiler, OR | D(dst) | S1(0) | IMM(-1), DR(dst))); 89 | 90 | /* Loop. */ 91 | FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(0), SET_FLAGS)); 92 | FAIL_IF(push_inst(compiler, SLL | D(TMP_REG1) | S1(TMP_REG1) | IMM(1), DR(TMP_REG1))); 93 | FAIL_IF(push_inst(compiler, BICC | DA(0xe) | (-2 & DISP_MASK), UNMOVABLE_INS)); 94 | return push_inst(compiler, ADD | (flags & SET_FLAGS) | D(dst) | S1(dst) | IMM(1), UNMOVABLE_INS | (flags & SET_FLAGS)); 95 | 96 | case SLJIT_ADD: 97 | return push_inst(compiler, ADD | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); 98 | 99 | case SLJIT_ADDC: 100 | return push_inst(compiler, ADDC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); 101 | 102 | case SLJIT_SUB: 103 | return push_inst(compiler, SUB | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); 104 | 105 | case SLJIT_SUBC: 106 | return push_inst(compiler, SUBC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); 107 | 108 | case SLJIT_MUL: 109 | FAIL_IF(push_inst(compiler, SMUL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); 110 | if (!(flags & SET_FLAGS)) 111 | return SLJIT_SUCCESS; 112 | FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(dst) | IMM(31), DR(TMP_REG1))); 113 | FAIL_IF(push_inst(compiler, RDY | D(TMP_LINK), DR(TMP_LINK))); 114 | return push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(TMP_LINK), MOVABLE_INS | SET_FLAGS); 115 | 116 | case SLJIT_AND: 117 | return push_inst(compiler, AND | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); 118 | 119 | case SLJIT_OR: 120 | return push_inst(compiler, OR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); 121 | 122 | case SLJIT_XOR: 123 | return push_inst(compiler, XOR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); 124 | 125 | case SLJIT_SHL: 126 | FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); 127 | return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); 128 | 129 | case SLJIT_LSHR: 130 | FAIL_IF(push_inst(compiler, SRL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); 131 | return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); 132 | 133 | case SLJIT_ASHR: 134 | FAIL_IF(push_inst(compiler, SRA | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); 135 | return !(flags & SET_FLAGS) ? 
SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); 136 | } 137 | 138 | SLJIT_ASSERT_STOP(); 139 | return SLJIT_SUCCESS; 140 | } 141 | 142 | static __inline int emit_const(struct sljit_compiler *compiler, int dst, long init_value) 143 | { 144 | FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((init_value >> 10) & 0x3fffff), DR(dst))); 145 | return push_inst(compiler, OR | D(dst) | S1(dst) | IMM_ARG | (init_value & 0x3ff), DR(dst)); 146 | } 147 | 148 | void sljit_set_jump_addr(unsigned long addr, unsigned long new_addr) 149 | { 150 | sljit_ins *inst = (sljit_ins*)addr; 151 | 152 | inst[0] = (inst[0] & 0xffc00000) | ((new_addr >> 10) & 0x3fffff); 153 | inst[1] = (inst[1] & 0xfffffc00) | (new_addr & 0x3ff); 154 | SLJIT_CACHE_FLUSH(inst, inst + 2); 155 | } 156 | 157 | void sljit_set_const(unsigned long addr, long new_constant) 158 | { 159 | sljit_ins *inst = (sljit_ins*)addr; 160 | 161 | inst[0] = (inst[0] & 0xffc00000) | ((new_constant >> 10) & 0x3fffff); 162 | inst[1] = (inst[1] & 0xfffffc00) | (new_constant & 0x3ff); 163 | SLJIT_CACHE_FLUSH(inst, inst + 2); 164 | } 165 | -------------------------------------------------------------------------------- /sljit_src/sljitNativeX86_32.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | /* x86 32-bit arch dependent functions. 
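   This file supplies the pieces that sljitNativeX86_common.c expects from the 32-bit
   target: immediate loading, far jump generation, the prologue/epilogue emitters
   (sljit_emit_enter / sljit_emit_return), the instruction encoder
   (emit_x86_instruction), call argument shuffling, and the fast enter/return helpers.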
*/ 28 | 29 | static int emit_do_imm(struct sljit_compiler *compiler, u_char opcode, long imm) 30 | { 31 | u_char *inst; 32 | 33 | inst = ensure_buf(compiler, 1 + 1 + sizeof(long)); 34 | FAIL_IF(!inst); 35 | INC_SIZE(1 + sizeof(long)); 36 | *inst++ = opcode; 37 | *(long*)inst = imm; 38 | return SLJIT_SUCCESS; 39 | } 40 | 41 | static u_char* generate_far_jump_code(struct sljit_jump *jump, u_char *code_ptr, int type) 42 | { 43 | if (type == SLJIT_JUMP) { 44 | *code_ptr++ = JMP_i32; 45 | jump->addr++; 46 | } 47 | else if (type >= SLJIT_FAST_CALL) { 48 | *code_ptr++ = CALL_i32; 49 | jump->addr++; 50 | } 51 | else { 52 | *code_ptr++ = GROUP_0F; 53 | *code_ptr++ = get_jump_code(type); 54 | jump->addr += 2; 55 | } 56 | 57 | if (jump->flags & JUMP_LABEL) 58 | jump->flags |= PATCH_MW; 59 | else 60 | *(long*)code_ptr = jump->u.target - (jump->addr + 4); 61 | code_ptr += 4; 62 | 63 | return code_ptr; 64 | } 65 | 66 | int sljit_emit_enter(struct sljit_compiler *compiler, 67 | int options, int args, int scratches, int saveds, 68 | int fscratches, int fsaveds, int local_size) 69 | { 70 | int size; 71 | u_char *inst; 72 | 73 | CHECK_ERROR(); 74 | CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size)); 75 | set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size); 76 | 77 | compiler->args = args; 78 | compiler->flags_saved = 0; 79 | 80 | size = 1 + (scratches > 7 ? (scratches - 7) : 0) + (saveds <= 3 ? saveds : 3); 81 | #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) 82 | size += (args > 0 ? (args * 2) : 0) + (args > 2 ? 2 : 0); 83 | #else 84 | size += (args > 0 ? (2 + args * 3) : 0); 85 | #endif 86 | inst = ensure_buf(compiler, 1 + size); 87 | FAIL_IF(!inst); 88 | 89 | INC_SIZE(size); 90 | PUSH_REG(reg_map[TMP_REG1]); 91 | #if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) 92 | if (args > 0) { 93 | *inst++ = MOV_r_rm; 94 | *inst++ = MOD_REG | (reg_map[TMP_REG1] << 3) | 0x4 /* esp */; 95 | } 96 | #endif 97 | if (saveds > 2 || scratches > 7) 98 | PUSH_REG(reg_map[SLJIT_S2]); 99 | if (saveds > 1 || scratches > 8) 100 | PUSH_REG(reg_map[SLJIT_S1]); 101 | if (saveds > 0 || scratches > 9) 102 | PUSH_REG(reg_map[SLJIT_S0]); 103 | 104 | #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) 105 | if (args > 0) { 106 | *inst++ = MOV_r_rm; 107 | *inst++ = MOD_REG | (reg_map[SLJIT_S0] << 3) | reg_map[SLJIT_R2]; 108 | } 109 | if (args > 1) { 110 | *inst++ = MOV_r_rm; 111 | *inst++ = MOD_REG | (reg_map[SLJIT_S1] << 3) | reg_map[SLJIT_R1]; 112 | } 113 | if (args > 2) { 114 | *inst++ = MOV_r_rm; 115 | *inst++ = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | 0x4 /* esp */; 116 | *inst++ = 0x24; 117 | *inst++ = sizeof(long) * (3 + 2); /* saveds >= 3 as well. */ 118 | } 119 | #else 120 | if (args > 0) { 121 | *inst++ = MOV_r_rm; 122 | *inst++ = MOD_DISP8 | (reg_map[SLJIT_S0] << 3) | reg_map[TMP_REG1]; 123 | *inst++ = sizeof(long) * 2; 124 | } 125 | if (args > 1) { 126 | *inst++ = MOV_r_rm; 127 | *inst++ = MOD_DISP8 | (reg_map[SLJIT_S1] << 3) | reg_map[TMP_REG1]; 128 | *inst++ = sizeof(long) * 3; 129 | } 130 | if (args > 2) { 131 | *inst++ = MOV_r_rm; 132 | *inst++ = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | reg_map[TMP_REG1]; 133 | *inst++ = sizeof(long) * 4; 134 | } 135 | #endif 136 | 137 | SLJIT_COMPILE_ASSERT(SLJIT_LOCALS_OFFSET >= (2 + 4) * sizeof(unsigned long), require_at_least_two_words); 138 | #if defined(__APPLE__) 139 | /* Ignore pushed regs and SLJIT_LOCALS_OFFSET when computing the aligned local size. 
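   Mac OS X requires ESP to be 16-byte aligned at call sites, so the bytes already on
   the stack (SLJIT_LOCALS_OFFSET plus the return address and the registers pushed
   above, which is what the leading 2 + ... term appears to count) are added in, the
   total is rounded up to a multiple of 16, and the register area is subtracted again,
   leaving the amount that the final SUB must still take from ESP.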
*/ 140 | saveds = (2 + (scratches > 7 ? (scratches - 7) : 0) + (saveds <= 3 ? saveds : 3)) * sizeof(unsigned long); 141 | local_size = ((SLJIT_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds; 142 | #else 143 | if (options & SLJIT_DOUBLE_ALIGNMENT) { 144 | local_size = SLJIT_LOCALS_OFFSET + ((local_size + 7) & ~7); 145 | 146 | inst = ensure_buf(compiler, 1 + 17); 147 | FAIL_IF(!inst); 148 | 149 | INC_SIZE(17); 150 | inst[0] = MOV_r_rm; 151 | inst[1] = MOD_REG | (reg_map[TMP_REG1] << 3) | reg_map[SLJIT_SP]; 152 | inst[2] = GROUP_F7; 153 | inst[3] = MOD_REG | (0 << 3) | reg_map[SLJIT_SP]; 154 | *(long*)(inst + 4) = 0x4; 155 | inst[8] = JNE_i8; 156 | inst[9] = 6; 157 | inst[10] = GROUP_BINARY_81; 158 | inst[11] = MOD_REG | (5 << 3) | reg_map[SLJIT_SP]; 159 | *(long*)(inst + 12) = 0x4; 160 | inst[16] = PUSH_r + reg_map[TMP_REG1]; 161 | } 162 | else 163 | local_size = SLJIT_LOCALS_OFFSET + ((local_size + 3) & ~3); 164 | #endif 165 | 166 | compiler->local_size = local_size; 167 | #ifdef _WIN32 168 | if (local_size > 1024) { 169 | #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) 170 | FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size)); 171 | #else 172 | local_size -= SLJIT_LOCALS_OFFSET; 173 | FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size)); 174 | FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32, 175 | SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, SLJIT_LOCALS_OFFSET)); 176 | #endif 177 | FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack))); 178 | } 179 | #endif 180 | 181 | SLJIT_ASSERT(local_size > 0); 182 | return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32, 183 | SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size); 184 | } 185 | 186 | int sljit_set_context(struct sljit_compiler *compiler, 187 | int options, int args, int scratches, int saveds, 188 | int fscratches, int fsaveds, int local_size) 189 | { 190 | CHECK_ERROR(); 191 | CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size)); 192 | set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size); 193 | 194 | compiler->args = args; 195 | 196 | #if defined(__APPLE__) 197 | saveds = (2 + (scratches > 7 ? (scratches - 7) : 0) + (saveds <= 3 ? 
saveds : 3)) * sizeof(unsigned long); 198 | compiler->local_size = ((SLJIT_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds; 199 | #else 200 | if (options & SLJIT_DOUBLE_ALIGNMENT) 201 | compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + 7) & ~7); 202 | else 203 | compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + 3) & ~3); 204 | #endif 205 | return SLJIT_SUCCESS; 206 | } 207 | 208 | int sljit_emit_return(struct sljit_compiler *compiler, int op, int src, long srcw) 209 | { 210 | int size; 211 | u_char *inst; 212 | 213 | CHECK_ERROR(); 214 | CHECK(check_sljit_emit_return(compiler, op, src, srcw)); 215 | SLJIT_ASSERT(compiler->args >= 0); 216 | 217 | compiler->flags_saved = 0; 218 | FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); 219 | 220 | SLJIT_ASSERT(compiler->local_size > 0); 221 | FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32, 222 | SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size)); 223 | 224 | #if !defined(__APPLE__) 225 | if (compiler->options & SLJIT_DOUBLE_ALIGNMENT) { 226 | inst = ensure_buf(compiler, 1 + 3); 227 | FAIL_IF(!inst); 228 | 229 | INC_SIZE(3); 230 | inst[0] = MOV_r_rm; 231 | inst[1] = (reg_map[SLJIT_SP] << 3) | 0x4 /* SIB */; 232 | inst[2] = (4 << 3) | reg_map[SLJIT_SP]; 233 | } 234 | #endif 235 | 236 | size = 2 + (compiler->scratches > 7 ? (compiler->scratches - 7) : 0) + 237 | (compiler->saveds <= 3 ? compiler->saveds : 3); 238 | #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) 239 | if (compiler->args > 2) 240 | size += 2; 241 | #else 242 | if (compiler->args > 0) 243 | size += 2; 244 | #endif 245 | inst = ensure_buf(compiler, 1 + size); 246 | FAIL_IF(!inst); 247 | 248 | INC_SIZE(size); 249 | 250 | if (compiler->saveds > 0 || compiler->scratches > 9) 251 | POP_REG(reg_map[SLJIT_S0]); 252 | if (compiler->saveds > 1 || compiler->scratches > 8) 253 | POP_REG(reg_map[SLJIT_S1]); 254 | if (compiler->saveds > 2 || compiler->scratches > 7) 255 | POP_REG(reg_map[SLJIT_S2]); 256 | POP_REG(reg_map[TMP_REG1]); 257 | #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) 258 | if (compiler->args > 2) 259 | RET_I16(sizeof(long)); 260 | else 261 | RET(); 262 | #else 263 | RET(); 264 | #endif 265 | 266 | return SLJIT_SUCCESS; 267 | } 268 | 269 | /* --------------------------------------------------------------------- */ 270 | /* Operators */ 271 | /* --------------------------------------------------------------------- */ 272 | 273 | /* Size contains the flags as well. */ 274 | static u_char* emit_x86_instruction(struct sljit_compiler *compiler, int size, 275 | /* The reg or immediate operand. */ 276 | int a, long imma, 277 | /* The general operand (not immediate). */ 278 | int b, long immb) 279 | { 280 | u_char *inst; 281 | u_char *buf_ptr; 282 | int flags = size & ~0xf; 283 | int inst_size; 284 | 285 | /* Both cannot be switched on. */ 286 | SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS)); 287 | /* Size flags not allowed for typed instructions. */ 288 | SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0); 289 | /* Both size flags cannot be switched on. */ 290 | SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG)); 291 | /* SSE2 and immediate is not possible. 
*/ 292 | SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2)); 293 | SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3) 294 | && (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66) 295 | && (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66)); 296 | 297 | size &= 0xf; 298 | inst_size = size; 299 | 300 | if (flags & (EX86_PREF_F2 | EX86_PREF_F3)) 301 | inst_size++; 302 | if (flags & EX86_PREF_66) 303 | inst_size++; 304 | 305 | /* Calculate size of b. */ 306 | inst_size += 1; /* mod r/m byte. */ 307 | if (b & SLJIT_MEM) { 308 | if ((b & REG_MASK) == SLJIT_UNUSED) 309 | inst_size += sizeof(long); 310 | else if (immb != 0 && !(b & OFFS_REG_MASK)) { 311 | /* Immediate operand. */ 312 | if (immb <= 127 && immb >= -128) 313 | inst_size += sizeof(s_char); 314 | else 315 | inst_size += sizeof(long); 316 | } 317 | 318 | if ((b & REG_MASK) == SLJIT_SP && !(b & OFFS_REG_MASK)) 319 | b |= TO_OFFS_REG(SLJIT_SP); 320 | 321 | if ((b & OFFS_REG_MASK) != SLJIT_UNUSED) 322 | inst_size += 1; /* SIB byte. */ 323 | } 324 | 325 | /* Calculate size of a. */ 326 | if (a & SLJIT_IMM) { 327 | if (flags & EX86_BIN_INS) { 328 | if (imma <= 127 && imma >= -128) { 329 | inst_size += 1; 330 | flags |= EX86_BYTE_ARG; 331 | } else 332 | inst_size += 4; 333 | } 334 | else if (flags & EX86_SHIFT_INS) { 335 | imma &= 0x1f; 336 | if (imma != 1) { 337 | inst_size ++; 338 | flags |= EX86_BYTE_ARG; 339 | } 340 | } else if (flags & EX86_BYTE_ARG) 341 | inst_size++; 342 | else if (flags & EX86_HALF_ARG) 343 | inst_size += sizeof(short); 344 | else 345 | inst_size += sizeof(long); 346 | } 347 | else 348 | SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG); 349 | 350 | inst = ensure_buf(compiler, 1 + inst_size); 351 | if (!inst) 352 | return NULL; 353 | 354 | /* Encoding the byte. */ 355 | INC_SIZE(inst_size); 356 | if (flags & EX86_PREF_F2) 357 | *inst++ = 0xf2; 358 | if (flags & EX86_PREF_F3) 359 | *inst++ = 0xf3; 360 | if (flags & EX86_PREF_66) 361 | *inst++ = 0x66; 362 | 363 | buf_ptr = inst + size; 364 | 365 | /* Encode mod/rm byte. */ 366 | if (!(flags & EX86_SHIFT_INS)) { 367 | if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM)) 368 | *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81; 369 | 370 | if ((a & SLJIT_IMM) || (a == 0)) 371 | *buf_ptr = 0; 372 | else if (!(flags & EX86_SSE2_OP1)) 373 | *buf_ptr = reg_map[a] << 3; 374 | else 375 | *buf_ptr = a << 3; 376 | } 377 | else { 378 | if (a & SLJIT_IMM) { 379 | if (imma == 1) 380 | *inst = GROUP_SHIFT_1; 381 | else 382 | *inst = GROUP_SHIFT_N; 383 | } else 384 | *inst = GROUP_SHIFT_CL; 385 | *buf_ptr = 0; 386 | } 387 | 388 | if (!(b & SLJIT_MEM)) 389 | *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_map[b] : b); 390 | else if ((b & REG_MASK) != SLJIT_UNUSED) { 391 | if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) { 392 | if (immb != 0) { 393 | if (immb <= 127 && immb >= -128) 394 | *buf_ptr |= 0x40; 395 | else 396 | *buf_ptr |= 0x80; 397 | } 398 | 399 | if ((b & OFFS_REG_MASK) == SLJIT_UNUSED) 400 | *buf_ptr++ |= reg_map[b & REG_MASK]; 401 | else { 402 | *buf_ptr++ |= 0x04; 403 | *buf_ptr++ = reg_map[b & REG_MASK] | (reg_map[OFFS_REG(b)] << 3); 404 | } 405 | 406 | if (immb != 0) { 407 | if (immb <= 127 && immb >= -128) 408 | *buf_ptr++ = immb; /* 8 bit displacement. */ 409 | else { 410 | *(long*)buf_ptr = immb; /* 32 bit displacement. 
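   (The 0x40 / 0x80 values OR-ed into the mod/rm byte earlier select the 8-bit and
   32-bit displacement forms that are written out here.)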
*/ 411 | buf_ptr += sizeof(long); 412 | } 413 | } 414 | } 415 | else { 416 | *buf_ptr++ |= 0x04; 417 | *buf_ptr++ = reg_map[b & REG_MASK] | (reg_map[OFFS_REG(b)] << 3) | (immb << 6); 418 | } 419 | } 420 | else { 421 | *buf_ptr++ |= 0x05; 422 | *(long*)buf_ptr = immb; /* 32 bit displacement. */ 423 | buf_ptr += sizeof(long); 424 | } 425 | 426 | if (a & SLJIT_IMM) { 427 | if (flags & EX86_BYTE_ARG) 428 | *buf_ptr = imma; 429 | else if (flags & EX86_HALF_ARG) 430 | *(short*)buf_ptr = imma; 431 | else if (!(flags & EX86_SHIFT_INS)) 432 | *(long*)buf_ptr = imma; 433 | } 434 | 435 | return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1); 436 | } 437 | 438 | /* --------------------------------------------------------------------- */ 439 | /* Call / return instructions */ 440 | /* --------------------------------------------------------------------- */ 441 | 442 | static __inline int call_with_args(struct sljit_compiler *compiler, int type) 443 | { 444 | u_char *inst; 445 | 446 | #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) 447 | inst = ensure_buf(compiler, type >= SLJIT_CALL3 ? 1 + 2 + 1 : 1 + 2); 448 | FAIL_IF(!inst); 449 | INC_SIZE(type >= SLJIT_CALL3 ? 2 + 1 : 2); 450 | 451 | if (type >= SLJIT_CALL3) 452 | PUSH_REG(reg_map[SLJIT_R2]); 453 | *inst++ = MOV_r_rm; 454 | *inst++ = MOD_REG | (reg_map[SLJIT_R2] << 3) | reg_map[SLJIT_R0]; 455 | #else 456 | inst = ensure_buf(compiler, 1 + 4 * (type - SLJIT_CALL0)); 457 | FAIL_IF(!inst); 458 | INC_SIZE(4 * (type - SLJIT_CALL0)); 459 | 460 | *inst++ = MOV_rm_r; 461 | *inst++ = MOD_DISP8 | (reg_map[SLJIT_R0] << 3) | 0x4 /* SIB */; 462 | *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP]; 463 | *inst++ = 0; 464 | if (type >= SLJIT_CALL2) { 465 | *inst++ = MOV_rm_r; 466 | *inst++ = MOD_DISP8 | (reg_map[SLJIT_R1] << 3) | 0x4 /* SIB */; 467 | *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP]; 468 | *inst++ = sizeof(long); 469 | } 470 | if (type >= SLJIT_CALL3) { 471 | *inst++ = MOV_rm_r; 472 | *inst++ = MOD_DISP8 | (reg_map[SLJIT_R2] << 3) | 0x4 /* SIB */; 473 | *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP]; 474 | *inst++ = 2 * sizeof(long); 475 | } 476 | #endif 477 | return SLJIT_SUCCESS; 478 | } 479 | 480 | int sljit_emit_fast_enter(struct sljit_compiler *compiler, int dst, long dstw) 481 | { 482 | u_char *inst; 483 | 484 | CHECK_ERROR(); 485 | CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); 486 | ADJUST_LOCAL_OFFSET(dst, dstw); 487 | 488 | CHECK_EXTRA_REGS(dst, dstw, (void)0); 489 | 490 | /* For UNUSED dst. Uncommon, but possible. */ 491 | if (dst == SLJIT_UNUSED) 492 | dst = TMP_REG1; 493 | 494 | if (FAST_IS_REG(dst)) { 495 | /* Unused dest is possible here. */ 496 | inst = ensure_buf(compiler, 1 + 1); 497 | FAIL_IF(!inst); 498 | 499 | INC_SIZE(1); 500 | POP_REG(reg_map[dst]); 501 | return SLJIT_SUCCESS; 502 | } 503 | 504 | /* Memory. 
*/ 505 | inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw); 506 | FAIL_IF(!inst); 507 | *inst++ = POP_rm; 508 | return SLJIT_SUCCESS; 509 | } 510 | 511 | int sljit_emit_fast_return(struct sljit_compiler *compiler, int src, long srcw) 512 | { 513 | u_char *inst; 514 | 515 | CHECK_ERROR(); 516 | CHECK(check_sljit_emit_fast_return(compiler, src, srcw)); 517 | ADJUST_LOCAL_OFFSET(src, srcw); 518 | 519 | CHECK_EXTRA_REGS(src, srcw, (void)0); 520 | 521 | if (FAST_IS_REG(src)) { 522 | inst = ensure_buf(compiler, 1 + 1 + 1); 523 | FAIL_IF(!inst); 524 | 525 | INC_SIZE(1 + 1); 526 | PUSH_REG(reg_map[src]); 527 | } 528 | else if (src & SLJIT_MEM) { 529 | inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw); 530 | FAIL_IF(!inst); 531 | *inst++ = GROUP_FF; 532 | *inst |= PUSH_rm; 533 | 534 | inst = ensure_buf(compiler, 1 + 1); 535 | FAIL_IF(!inst); 536 | INC_SIZE(1); 537 | } 538 | else { 539 | /* SLJIT_IMM. */ 540 | inst = ensure_buf(compiler, 1 + 5 + 1); 541 | FAIL_IF(!inst); 542 | 543 | INC_SIZE(5 + 1); 544 | *inst++ = PUSH_i32; 545 | *(long*)inst = srcw; 546 | inst += sizeof(long); 547 | } 548 | 549 | RET(); 550 | return SLJIT_SUCCESS; 551 | } 552 | -------------------------------------------------------------------------------- /sljit_src/sljitNativeX86_64.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | /* x86 64-bit arch dependent functions. */ 28 | 29 | static int emit_load_imm64(struct sljit_compiler *compiler, int reg, long imm) 30 | { 31 | u_char *inst; 32 | 33 | inst = ensure_buf(compiler, 1 + 2 + sizeof(long)); 34 | FAIL_IF(!inst); 35 | INC_SIZE(2 + sizeof(long)); 36 | *inst++ = REX_W | ((reg_map[reg] <= 7) ? 0 : REX_B); 37 | *inst++ = MOV_r_i32 + (reg_map[reg] & 0x7); 38 | *(long*)inst = imm; 39 | return SLJIT_SUCCESS; 40 | } 41 | 42 | static u_char* generate_far_jump_code(struct sljit_jump *jump, u_char *code_ptr, int type) 43 | { 44 | if (type < SLJIT_JUMP) { 45 | /* Invert type. 
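   A conditional jump cannot carry a 64-bit target directly, so the condition is
   inverted and emitted as a short Jcc (subtracting 0x10 turns the 0F 8x long-form
   code into its one-byte 7x short form) whose rel8 displacement of 10 + 3 bytes
   skips the movabs/indirect-jump pair emitted below.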
*/ 46 | *code_ptr++ = get_jump_code(type ^ 0x1) - 0x10; 47 | *code_ptr++ = 10 + 3; 48 | } 49 | 50 | SLJIT_COMPILE_ASSERT(reg_map[TMP_REG3] == 9, tmp3_is_9_first); 51 | *code_ptr++ = REX_W | REX_B; 52 | *code_ptr++ = MOV_r_i32 + 1; 53 | jump->addr = (unsigned long)code_ptr; 54 | 55 | if (jump->flags & JUMP_LABEL) 56 | jump->flags |= PATCH_MD; 57 | else 58 | *(long*)code_ptr = jump->u.target; 59 | 60 | code_ptr += sizeof(long); 61 | *code_ptr++ = REX_B; 62 | *code_ptr++ = GROUP_FF; 63 | *code_ptr++ = (type >= SLJIT_FAST_CALL) ? (MOD_REG | CALL_rm | 1) : (MOD_REG | JMP_rm | 1); 64 | 65 | return code_ptr; 66 | } 67 | 68 | static u_char* generate_fixed_jump(u_char *code_ptr, long addr, int type) 69 | { 70 | long delta = addr - ((long)code_ptr + 1 + sizeof(int)); 71 | 72 | if (delta <= HALFWORD_MAX && delta >= HALFWORD_MIN) { 73 | *code_ptr++ = (type == 2) ? CALL_i32 : JMP_i32; 74 | *(long*)code_ptr = delta; 75 | } 76 | else { 77 | SLJIT_COMPILE_ASSERT(reg_map[TMP_REG3] == 9, tmp3_is_9_second); 78 | *code_ptr++ = REX_W | REX_B; 79 | *code_ptr++ = MOV_r_i32 + 1; 80 | *(long*)code_ptr = addr; 81 | code_ptr += sizeof(long); 82 | *code_ptr++ = REX_B; 83 | *code_ptr++ = GROUP_FF; 84 | *code_ptr++ = (type == 2) ? (MOD_REG | CALL_rm | 1) : (MOD_REG | JMP_rm | 1); 85 | } 86 | 87 | return code_ptr; 88 | } 89 | 90 | int sljit_emit_enter(struct sljit_compiler *compiler, 91 | int options, int args, int scratches, int saveds, 92 | int fscratches, int fsaveds, int local_size) 93 | { 94 | int i, tmp, size, saved_reg_size; 95 | u_char *inst; 96 | 97 | CHECK_ERROR(); 98 | CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size)); 99 | set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size); 100 | 101 | compiler->flags_saved = 0; 102 | 103 | /* Including the return address saved by the call instruction. */ 104 | saved_reg_size = GET_SAVED_REGS_SIZE(scratches, saveds, 1); 105 | 106 | tmp = saveds < SLJIT_NUM_SAVED_REGS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG; 107 | for (i = SLJIT_S0; i >= tmp; i--) { 108 | size = reg_map[i] >= 8 ? 2 : 1; 109 | inst = ensure_buf(compiler, 1 + size); 110 | FAIL_IF(!inst); 111 | INC_SIZE(size); 112 | if (reg_map[i] >= 8) 113 | *inst++ = REX_B; 114 | PUSH_REG(reg_lmap[i]); 115 | } 116 | 117 | for (i = scratches; i >= SLJIT_FIRST_SAVED_REG; i--) { 118 | size = reg_map[i] >= 8 ? 
2 : 1; 119 | inst = ensure_buf(compiler, 1 + size); 120 | FAIL_IF(!inst); 121 | INC_SIZE(size); 122 | if (reg_map[i] >= 8) 123 | *inst++ = REX_B; 124 | PUSH_REG(reg_lmap[i]); 125 | } 126 | 127 | if (args > 0) { 128 | size = args * 3; 129 | inst = ensure_buf(compiler, 1 + size); 130 | FAIL_IF(!inst); 131 | 132 | INC_SIZE(size); 133 | 134 | #ifndef _WIN64 135 | if (args > 0) { 136 | *inst++ = REX_W; 137 | *inst++ = MOV_r_rm; 138 | *inst++ = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x7 /* rdi */; 139 | } 140 | if (args > 1) { 141 | *inst++ = REX_W | REX_R; 142 | *inst++ = MOV_r_rm; 143 | *inst++ = MOD_REG | (reg_lmap[SLJIT_S1] << 3) | 0x6 /* rsi */; 144 | } 145 | if (args > 2) { 146 | *inst++ = REX_W | REX_R; 147 | *inst++ = MOV_r_rm; 148 | *inst++ = MOD_REG | (reg_lmap[SLJIT_S2] << 3) | 0x2 /* rdx */; 149 | } 150 | #else 151 | if (args > 0) { 152 | *inst++ = REX_W; 153 | *inst++ = MOV_r_rm; 154 | *inst++ = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x1 /* rcx */; 155 | } 156 | if (args > 1) { 157 | *inst++ = REX_W; 158 | *inst++ = MOV_r_rm; 159 | *inst++ = MOD_REG | (reg_map[SLJIT_S1] << 3) | 0x2 /* rdx */; 160 | } 161 | if (args > 2) { 162 | *inst++ = REX_W | REX_B; 163 | *inst++ = MOV_r_rm; 164 | *inst++ = MOD_REG | (reg_map[SLJIT_S2] << 3) | 0x0 /* r8 */; 165 | } 166 | #endif 167 | } 168 | 169 | local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_reg_size + 15) & ~15) - saved_reg_size; 170 | compiler->local_size = local_size; 171 | 172 | #ifdef _WIN64 173 | if (local_size > 1024) { 174 | /* Allocate stack for the callback, which grows the stack. */ 175 | inst = ensure_buf(compiler, 1 + 4 + (3 + sizeof(int))); 176 | FAIL_IF(!inst); 177 | INC_SIZE(4 + (3 + sizeof(int))); 178 | *inst++ = REX_W; 179 | *inst++ = GROUP_BINARY_83; 180 | *inst++ = MOD_REG | SUB | 4; 181 | /* Allocated size for regs must be divisible by 8. */ 182 | SLJIT_ASSERT(!(saved_reg_size & 0x7)); 183 | /* Aligned to 16 byte. 
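   The SUB emitted here reserves 4 * sizeof(long) bytes of home (shadow) space, which
   the Win64 convention requires before the call below, plus one extra word when the
   pushed register area is an odd number of words so that RSP stays 16-byte aligned
   at the call.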
*/ 184 | if (saved_reg_size & 0x8) { 185 | *inst++ = 5 * sizeof(long); 186 | local_size -= 5 * sizeof(long); 187 | } else { 188 | *inst++ = 4 * sizeof(long); 189 | local_size -= 4 * sizeof(long); 190 | } 191 | /* Second instruction */ 192 | SLJIT_COMPILE_ASSERT(reg_map[SLJIT_R0] < 8, temporary_reg1_is_loreg); 193 | *inst++ = REX_W; 194 | *inst++ = MOV_rm_i32; 195 | *inst++ = MOD_REG | reg_lmap[SLJIT_R0]; 196 | *(int*)inst = local_size; 197 | #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ 198 | || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) 199 | compiler->skip_checks = 1; 200 | #endif 201 | FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack))); 202 | } 203 | #endif 204 | 205 | SLJIT_ASSERT(local_size > 0); 206 | if (local_size <= 127) { 207 | inst = ensure_buf(compiler, 1 + 4); 208 | FAIL_IF(!inst); 209 | INC_SIZE(4); 210 | *inst++ = REX_W; 211 | *inst++ = GROUP_BINARY_83; 212 | *inst++ = MOD_REG | SUB | 4; 213 | *inst++ = local_size; 214 | } 215 | else { 216 | inst = ensure_buf(compiler, 1 + 7); 217 | FAIL_IF(!inst); 218 | INC_SIZE(7); 219 | *inst++ = REX_W; 220 | *inst++ = GROUP_BINARY_81; 221 | *inst++ = MOD_REG | SUB | 4; 222 | *(int*)inst = local_size; 223 | inst += sizeof(int); 224 | } 225 | 226 | #ifdef _WIN64 227 | /* Save xmm6 reg: movaps [rsp + 0x20], xmm6 */ 228 | if (fscratches >= 6 || fsaveds >= 1) { 229 | inst = ensure_buf(compiler, 1 + 5); 230 | FAIL_IF(!inst); 231 | INC_SIZE(5); 232 | *inst++ = GROUP_0F; 233 | *(int*)inst = 0x20247429; 234 | } 235 | #endif 236 | 237 | return SLJIT_SUCCESS; 238 | } 239 | 240 | int sljit_set_context(struct sljit_compiler *compiler, 241 | int options, int args, int scratches, int saveds, 242 | int fscratches, int fsaveds, int local_size) 243 | { 244 | int saved_reg_size; 245 | 246 | CHECK_ERROR(); 247 | CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size)); 248 | set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size); 249 | 250 | /* Including the return address saved by the call instruction. 
*/ 251 | saved_reg_size = GET_SAVED_REGS_SIZE(scratches, saveds, 1); 252 | compiler->local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_reg_size + 15) & ~15) - saved_reg_size; 253 | return SLJIT_SUCCESS; 254 | } 255 | 256 | int sljit_emit_return(struct sljit_compiler *compiler, int op, int src, long srcw) 257 | { 258 | int i, tmp, size; 259 | u_char *inst; 260 | 261 | CHECK_ERROR(); 262 | CHECK(check_sljit_emit_return(compiler, op, src, srcw)); 263 | 264 | compiler->flags_saved = 0; 265 | FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); 266 | 267 | #ifdef _WIN64 268 | /* Restore xmm6 reg: movaps xmm6, [rsp + 0x20] */ 269 | if (compiler->fscratches >= 6 || compiler->fsaveds >= 1) { 270 | inst = ensure_buf(compiler, 1 + 5); 271 | FAIL_IF(!inst); 272 | INC_SIZE(5); 273 | *inst++ = GROUP_0F; 274 | *(int*)inst = 0x20247428; 275 | } 276 | #endif 277 | 278 | SLJIT_ASSERT(compiler->local_size > 0); 279 | if (compiler->local_size <= 127) { 280 | inst = ensure_buf(compiler, 1 + 4); 281 | FAIL_IF(!inst); 282 | INC_SIZE(4); 283 | *inst++ = REX_W; 284 | *inst++ = GROUP_BINARY_83; 285 | *inst++ = MOD_REG | ADD | 4; 286 | *inst = compiler->local_size; 287 | } 288 | else { 289 | inst = ensure_buf(compiler, 1 + 7); 290 | FAIL_IF(!inst); 291 | INC_SIZE(7); 292 | *inst++ = REX_W; 293 | *inst++ = GROUP_BINARY_81; 294 | *inst++ = MOD_REG | ADD | 4; 295 | *(int*)inst = compiler->local_size; 296 | } 297 | 298 | tmp = compiler->scratches; 299 | for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) { 300 | size = reg_map[i] >= 8 ? 2 : 1; 301 | inst = ensure_buf(compiler, 1 + size); 302 | FAIL_IF(!inst); 303 | INC_SIZE(size); 304 | if (reg_map[i] >= 8) 305 | *inst++ = REX_B; 306 | POP_REG(reg_lmap[i]); 307 | } 308 | 309 | tmp = compiler->saveds < SLJIT_NUM_SAVED_REGS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG; 310 | for (i = tmp; i <= SLJIT_S0; i++) { 311 | size = reg_map[i] >= 8 ? 2 : 1; 312 | inst = ensure_buf(compiler, 1 + size); 313 | FAIL_IF(!inst); 314 | INC_SIZE(size); 315 | if (reg_map[i] >= 8) 316 | *inst++ = REX_B; 317 | POP_REG(reg_lmap[i]); 318 | } 319 | 320 | inst = ensure_buf(compiler, 1 + 1); 321 | FAIL_IF(!inst); 322 | INC_SIZE(1); 323 | RET(); 324 | return SLJIT_SUCCESS; 325 | } 326 | 327 | /* --------------------------------------------------------------------- */ 328 | /* Operators */ 329 | /* --------------------------------------------------------------------- */ 330 | 331 | static int emit_do_imm32(struct sljit_compiler *compiler, u_char rex, u_char opcode, long imm) 332 | { 333 | u_char *inst; 334 | int length = 1 + (rex ? 1 : 0) + sizeof(int); 335 | 336 | inst = ensure_buf(compiler, 1 + length); 337 | FAIL_IF(!inst); 338 | INC_SIZE(length); 339 | if (rex) 340 | *inst++ = rex; 341 | *inst++ = opcode; 342 | *(int*)inst = imm; 343 | return SLJIT_SUCCESS; 344 | } 345 | 346 | static u_char* emit_x86_instruction(struct sljit_compiler *compiler, int size, 347 | /* The reg or immediate operand. */ 348 | int a, long imma, 349 | /* The general operand (not immediate). */ 350 | int b, long immb) 351 | { 352 | u_char *inst; 353 | u_char *buf_ptr; 354 | u_char rex = 0; 355 | int flags = size & ~0xf; 356 | int inst_size; 357 | 358 | /* The immediate operand must be 32 bit. */ 359 | SLJIT_ASSERT(!(a & SLJIT_IMM) || compiler->mode32 || IS_HALFWORD(imma)); 360 | /* Both cannot be switched on. */ 361 | SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS)); 362 | /* Size flags not allowed for typed instructions. 
*/ 363 | SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0); 364 | /* Both size flags cannot be switched on. */ 365 | SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG)); 366 | /* SSE2 and immediate is not possible. */ 367 | SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2)); 368 | SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3) 369 | && (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66) 370 | && (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66)); 371 | 372 | size &= 0xf; 373 | inst_size = size; 374 | 375 | if (!compiler->mode32 && !(flags & EX86_NO_REXW)) 376 | rex |= REX_W; 377 | else if (flags & EX86_REX) 378 | rex |= REX; 379 | 380 | if (flags & (EX86_PREF_F2 | EX86_PREF_F3)) 381 | inst_size++; 382 | if (flags & EX86_PREF_66) 383 | inst_size++; 384 | 385 | /* Calculate size of b. */ 386 | inst_size += 1; /* mod r/m byte. */ 387 | if (b & SLJIT_MEM) { 388 | if (!(b & OFFS_REG_MASK)) { 389 | if (NOT_HALFWORD(immb)) { 390 | if (emit_load_imm64(compiler, TMP_REG3, immb)) 391 | return NULL; 392 | immb = 0; 393 | if (b & REG_MASK) 394 | b |= TO_OFFS_REG(TMP_REG3); 395 | else 396 | b |= TMP_REG3; 397 | } 398 | else if (reg_lmap[b & REG_MASK] == 4) 399 | b |= TO_OFFS_REG(SLJIT_SP); 400 | } 401 | 402 | if ((b & REG_MASK) == SLJIT_UNUSED) 403 | inst_size += 1 + sizeof(int); /* SIB byte required to avoid RIP based addressing. */ 404 | else { 405 | if (reg_map[b & REG_MASK] >= 8) 406 | rex |= REX_B; 407 | 408 | if (immb != 0 && (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP))) { 409 | /* Immediate operand. */ 410 | if (immb <= 127 && immb >= -128) 411 | inst_size += sizeof(s_char); 412 | else 413 | inst_size += sizeof(int); 414 | } 415 | else if (reg_lmap[b & REG_MASK] == 5) 416 | inst_size += sizeof(s_char); 417 | 418 | if ((b & OFFS_REG_MASK) != SLJIT_UNUSED) { 419 | inst_size += 1; /* SIB byte. */ 420 | if (reg_map[OFFS_REG(b)] >= 8) 421 | rex |= REX_X; 422 | } 423 | } 424 | } 425 | else if (!(flags & EX86_SSE2_OP2) && reg_map[b] >= 8) 426 | rex |= REX_B; 427 | 428 | if (a & SLJIT_IMM) { 429 | if (flags & EX86_BIN_INS) { 430 | if (imma <= 127 && imma >= -128) { 431 | inst_size += 1; 432 | flags |= EX86_BYTE_ARG; 433 | } else 434 | inst_size += 4; 435 | } 436 | else if (flags & EX86_SHIFT_INS) { 437 | imma &= compiler->mode32 ? 0x1f : 0x3f; 438 | if (imma != 1) { 439 | inst_size ++; 440 | flags |= EX86_BYTE_ARG; 441 | } 442 | } else if (flags & EX86_BYTE_ARG) 443 | inst_size++; 444 | else if (flags & EX86_HALF_ARG) 445 | inst_size += sizeof(short); 446 | else 447 | inst_size += sizeof(int); 448 | } 449 | else { 450 | SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG); 451 | /* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */ 452 | if (!(flags & EX86_SSE2_OP1) && reg_map[a] >= 8) 453 | rex |= REX_R; 454 | } 455 | 456 | if (rex) 457 | inst_size++; 458 | 459 | inst = ensure_buf(compiler, 1 + inst_size); 460 | if (!inst) 461 | return NULL; 462 | 463 | /* Encoding the byte. */ 464 | INC_SIZE(inst_size); 465 | if (flags & EX86_PREF_F2) 466 | *inst++ = 0xf2; 467 | if (flags & EX86_PREF_F3) 468 | *inst++ = 0xf3; 469 | if (flags & EX86_PREF_66) 470 | *inst++ = 0x66; 471 | if (rex) 472 | *inst++ = rex; 473 | buf_ptr = inst + size; 474 | 475 | /* Encode mod/rm byte. 
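   The mod/rm byte is mod (bits 7-6), reg (bits 5-3) and rm (bits 2-0): the a operand,
   or an opcode extension, is placed in reg via the << 3 shifts below; the b operand
   then fills mod and rm, with 0x40 / 0x80 selecting the disp8 / disp32 forms and
   rm = 0x04 introducing a SIB byte.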
*/ 476 | if (!(flags & EX86_SHIFT_INS)) { 477 | if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM)) 478 | *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81; 479 | 480 | if ((a & SLJIT_IMM) || (a == 0)) 481 | *buf_ptr = 0; 482 | else if (!(flags & EX86_SSE2_OP1)) 483 | *buf_ptr = reg_lmap[a] << 3; 484 | else 485 | *buf_ptr = a << 3; 486 | } 487 | else { 488 | if (a & SLJIT_IMM) { 489 | if (imma == 1) 490 | *inst = GROUP_SHIFT_1; 491 | else 492 | *inst = GROUP_SHIFT_N; 493 | } else 494 | *inst = GROUP_SHIFT_CL; 495 | *buf_ptr = 0; 496 | } 497 | 498 | if (!(b & SLJIT_MEM)) 499 | *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_lmap[b] : b); 500 | else if ((b & REG_MASK) != SLJIT_UNUSED) { 501 | if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) { 502 | if (immb != 0 || reg_lmap[b & REG_MASK] == 5) { 503 | if (immb <= 127 && immb >= -128) 504 | *buf_ptr |= 0x40; 505 | else 506 | *buf_ptr |= 0x80; 507 | } 508 | 509 | if ((b & OFFS_REG_MASK) == SLJIT_UNUSED) 510 | *buf_ptr++ |= reg_lmap[b & REG_MASK]; 511 | else { 512 | *buf_ptr++ |= 0x04; 513 | *buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3); 514 | } 515 | 516 | if (immb != 0 || reg_lmap[b & REG_MASK] == 5) { 517 | if (immb <= 127 && immb >= -128) 518 | *buf_ptr++ = immb; /* 8 bit displacement. */ 519 | else { 520 | *(int*)buf_ptr = immb; /* 32 bit displacement. */ 521 | buf_ptr += sizeof(int); 522 | } 523 | } 524 | } 525 | else { 526 | if (reg_lmap[b & REG_MASK] == 5) 527 | *buf_ptr |= 0x40; 528 | *buf_ptr++ |= 0x04; 529 | *buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6); 530 | if (reg_lmap[b & REG_MASK] == 5) 531 | *buf_ptr++ = 0; 532 | } 533 | } 534 | else { 535 | *buf_ptr++ |= 0x04; 536 | *buf_ptr++ = 0x25; 537 | *(int*)buf_ptr = immb; /* 32 bit displacement. */ 538 | buf_ptr += sizeof(int); 539 | } 540 | 541 | if (a & SLJIT_IMM) { 542 | if (flags & EX86_BYTE_ARG) 543 | *buf_ptr = imma; 544 | else if (flags & EX86_HALF_ARG) 545 | *(short*)buf_ptr = imma; 546 | else if (!(flags & EX86_SHIFT_INS)) 547 | *(int*)buf_ptr = imma; 548 | } 549 | 550 | return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1); 551 | } 552 | 553 | /* --------------------------------------------------------------------- */ 554 | /* Call / return instructions */ 555 | /* --------------------------------------------------------------------- */ 556 | 557 | static __inline int call_with_args(struct sljit_compiler *compiler, int type) 558 | { 559 | u_char *inst; 560 | 561 | #ifndef _WIN64 562 | SLJIT_COMPILE_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R0] < 8 && reg_map[SLJIT_R2] < 8, args_regs); 563 | 564 | inst = ensure_buf(compiler, 1 + ((type < SLJIT_CALL3) ? 3 : 6)); 565 | FAIL_IF(!inst); 566 | INC_SIZE((type < SLJIT_CALL3) ? 3 : 6); 567 | if (type >= SLJIT_CALL3) { 568 | *inst++ = REX_W; 569 | *inst++ = MOV_r_rm; 570 | *inst++ = MOD_REG | (0x2 /* rdx */ << 3) | reg_lmap[SLJIT_R2]; 571 | } 572 | *inst++ = REX_W; 573 | *inst++ = MOV_r_rm; 574 | *inst++ = MOD_REG | (0x7 /* rdi */ << 3) | reg_lmap[SLJIT_R0]; 575 | #else 576 | SLJIT_COMPILE_ASSERT(reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R0] < 8 && reg_map[SLJIT_R2] < 8, args_regs); 577 | 578 | inst = ensure_buf(compiler, 1 + ((type < SLJIT_CALL3) ? 3 : 6)); 579 | FAIL_IF(!inst); 580 | INC_SIZE((type < SLJIT_CALL3) ? 
3 : 6); 581 | if (type >= SLJIT_CALL3) { 582 | *inst++ = REX_W | REX_R; 583 | *inst++ = MOV_r_rm; 584 | *inst++ = MOD_REG | (0x0 /* r8 */ << 3) | reg_lmap[SLJIT_R2]; 585 | } 586 | *inst++ = REX_W; 587 | *inst++ = MOV_r_rm; 588 | *inst++ = MOD_REG | (0x1 /* rcx */ << 3) | reg_lmap[SLJIT_R0]; 589 | #endif 590 | return SLJIT_SUCCESS; 591 | } 592 | 593 | int sljit_emit_fast_enter(struct sljit_compiler *compiler, int dst, long dstw) 594 | { 595 | u_char *inst; 596 | 597 | CHECK_ERROR(); 598 | CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw)); 599 | ADJUST_LOCAL_OFFSET(dst, dstw); 600 | 601 | /* For UNUSED dst. Uncommon, but possible. */ 602 | if (dst == SLJIT_UNUSED) 603 | dst = TMP_REG1; 604 | 605 | if (FAST_IS_REG(dst)) { 606 | if (reg_map[dst] < 8) { 607 | inst = ensure_buf(compiler, 1 + 1); 608 | FAIL_IF(!inst); 609 | INC_SIZE(1); 610 | POP_REG(reg_lmap[dst]); 611 | return SLJIT_SUCCESS; 612 | } 613 | 614 | inst = ensure_buf(compiler, 1 + 2); 615 | FAIL_IF(!inst); 616 | INC_SIZE(2); 617 | *inst++ = REX_B; 618 | POP_REG(reg_lmap[dst]); 619 | return SLJIT_SUCCESS; 620 | } 621 | 622 | /* REX_W is not necessary (src is not immediate). */ 623 | compiler->mode32 = 1; 624 | inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw); 625 | FAIL_IF(!inst); 626 | *inst++ = POP_rm; 627 | return SLJIT_SUCCESS; 628 | } 629 | 630 | int sljit_emit_fast_return(struct sljit_compiler *compiler, int src, long srcw) 631 | { 632 | u_char *inst; 633 | 634 | CHECK_ERROR(); 635 | CHECK(check_sljit_emit_fast_return(compiler, src, srcw)); 636 | ADJUST_LOCAL_OFFSET(src, srcw); 637 | 638 | if ((src & SLJIT_IMM) && NOT_HALFWORD(srcw)) { 639 | FAIL_IF(emit_load_imm64(compiler, TMP_REG1, srcw)); 640 | src = TMP_REG1; 641 | } 642 | 643 | if (FAST_IS_REG(src)) { 644 | if (reg_map[src] < 8) { 645 | inst = ensure_buf(compiler, 1 + 1 + 1); 646 | FAIL_IF(!inst); 647 | 648 | INC_SIZE(1 + 1); 649 | PUSH_REG(reg_lmap[src]); 650 | } 651 | else { 652 | inst = ensure_buf(compiler, 1 + 2 + 1); 653 | FAIL_IF(!inst); 654 | 655 | INC_SIZE(2 + 1); 656 | *inst++ = REX_B; 657 | PUSH_REG(reg_lmap[src]); 658 | } 659 | } 660 | else if (src & SLJIT_MEM) { 661 | /* REX_W is not necessary (src is not immediate). */ 662 | compiler->mode32 = 1; 663 | inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw); 664 | FAIL_IF(!inst); 665 | *inst++ = GROUP_FF; 666 | *inst |= PUSH_rm; 667 | 668 | inst = ensure_buf(compiler, 1 + 1); 669 | FAIL_IF(!inst); 670 | INC_SIZE(1); 671 | } 672 | else { 673 | SLJIT_ASSERT(IS_HALFWORD(srcw)); 674 | /* SLJIT_IMM. */ 675 | inst = ensure_buf(compiler, 1 + 5 + 1); 676 | FAIL_IF(!inst); 677 | 678 | INC_SIZE(5 + 1); 679 | *inst++ = PUSH_i32; 680 | *(int*)inst = srcw; 681 | inst += sizeof(int); 682 | } 683 | 684 | RET(); 685 | return SLJIT_SUCCESS; 686 | } 687 | 688 | 689 | /* --------------------------------------------------------------------- */ 690 | /* Extend input */ 691 | /* --------------------------------------------------------------------- */ 692 | 693 | static int emit_mov_int(struct sljit_compiler *compiler, int sign, 694 | int dst, long dstw, 695 | int src, long srcw) 696 | { 697 | u_char* inst; 698 | int dst_r; 699 | 700 | compiler->mode32 = 0; 701 | 702 | if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM)) 703 | return SLJIT_SUCCESS; /* Empty instruction. 
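   Nothing needs to be emitted in this case: with an unused destination and a register or immediate source there is no observable effect to preserve. A memory source is not skipped, presumably because the load itself (and any fault it may raise) still has to take place.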
*/ 704 | 705 | if (src & SLJIT_IMM) { 706 | if (FAST_IS_REG(dst)) { 707 | if (sign || ((unsigned long)srcw <= 0x7fffffff)) { 708 | inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (long)(int)srcw, dst, dstw); 709 | FAIL_IF(!inst); 710 | *inst = MOV_rm_i32; 711 | return SLJIT_SUCCESS; 712 | } 713 | return emit_load_imm64(compiler, dst, srcw); 714 | } 715 | compiler->mode32 = 1; 716 | inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (long)(int)srcw, dst, dstw); 717 | FAIL_IF(!inst); 718 | *inst = MOV_rm_i32; 719 | compiler->mode32 = 0; 720 | return SLJIT_SUCCESS; 721 | } 722 | 723 | dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1; 724 | 725 | if ((dst & SLJIT_MEM) && FAST_IS_REG(src)) 726 | dst_r = src; 727 | else { 728 | if (sign) { 729 | inst = emit_x86_instruction(compiler, 1, dst_r, 0, src, srcw); 730 | FAIL_IF(!inst); 731 | *inst++ = MOVSXD_r_rm; 732 | } else { 733 | compiler->mode32 = 1; 734 | FAIL_IF(emit_mov(compiler, dst_r, 0, src, srcw)); 735 | compiler->mode32 = 0; 736 | } 737 | } 738 | 739 | if (dst & SLJIT_MEM) { 740 | compiler->mode32 = 1; 741 | inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw); 742 | FAIL_IF(!inst); 743 | *inst = MOV_rm_r; 744 | compiler->mode32 = 0; 745 | } 746 | 747 | return SLJIT_SUCCESS; 748 | } 749 | -------------------------------------------------------------------------------- /sljit_src/sljitUtils.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | /* ------------------------------------------------------------------------ */ 28 | /* Locks */ 29 | /* ------------------------------------------------------------------------ */ 30 | 31 | #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) || (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK) 32 | 33 | #if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED) 34 | 35 | #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) 36 | 37 | static __inline void allocator_grab_lock(void) 38 | { 39 | /* Always successful. 
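   In a single-threaded build there is nothing to synchronize, so this lock (and the matching release and global-lock stubs below) is an empty inline function; the Win32 mutex and pthread implementations later in this file are only compiled when SLJIT_SINGLE_THREADED is not set.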
*/ 40 | } 41 | 42 | static __inline void allocator_release_lock(void) 43 | { 44 | /* Always successful. */ 45 | } 46 | 47 | #endif /* SLJIT_EXECUTABLE_ALLOCATOR */ 48 | 49 | #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK) 50 | 51 | void SLJIT_CALL sljit_grab_lock(void) 52 | { 53 | /* Always successful. */ 54 | } 55 | 56 | void SLJIT_CALL sljit_release_lock(void) 57 | { 58 | /* Always successful. */ 59 | } 60 | 61 | #endif /* SLJIT_UTIL_GLOBAL_LOCK */ 62 | 63 | #elif defined(_WIN32) /* SLJIT_SINGLE_THREADED */ 64 | 65 | #include "windows.h" 66 | 67 | #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) 68 | 69 | static HANDLE allocator_mutex = 0; 70 | 71 | static __inline void allocator_grab_lock(void) 72 | { 73 | /* No idea what to do if an error occures. Static mutexes should never fail... */ 74 | if (!allocator_mutex) 75 | allocator_mutex = CreateMutex(NULL, TRUE, NULL); 76 | else 77 | WaitForSingleObject(allocator_mutex, INFINITE); 78 | } 79 | 80 | static __inline void allocator_release_lock(void) 81 | { 82 | ReleaseMutex(allocator_mutex); 83 | } 84 | 85 | #endif /* SLJIT_EXECUTABLE_ALLOCATOR */ 86 | 87 | #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK) 88 | 89 | static HANDLE global_mutex = 0; 90 | 91 | void SLJIT_CALL sljit_grab_lock(void) 92 | { 93 | /* No idea what to do if an error occures. Static mutexes should never fail... */ 94 | if (!global_mutex) 95 | global_mutex = CreateMutex(NULL, TRUE, NULL); 96 | else 97 | WaitForSingleObject(global_mutex, INFINITE); 98 | } 99 | 100 | void SLJIT_CALL sljit_release_lock(void) 101 | { 102 | ReleaseMutex(global_mutex); 103 | } 104 | 105 | #endif /* SLJIT_UTIL_GLOBAL_LOCK */ 106 | 107 | #else /* _WIN32 */ 108 | 109 | #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) 110 | 111 | #include 112 | 113 | static pthread_mutex_t allocator_mutex = PTHREAD_MUTEX_INITIALIZER; 114 | 115 | static __inline void allocator_grab_lock(void) 116 | { 117 | pthread_mutex_lock(&allocator_mutex); 118 | } 119 | 120 | static __inline void allocator_release_lock(void) 121 | { 122 | pthread_mutex_unlock(&allocator_mutex); 123 | } 124 | 125 | #endif /* SLJIT_EXECUTABLE_ALLOCATOR */ 126 | 127 | #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK) 128 | 129 | #include 130 | 131 | static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER; 132 | 133 | void SLJIT_CALL sljit_grab_lock(void) 134 | { 135 | pthread_mutex_lock(&global_mutex); 136 | } 137 | 138 | void SLJIT_CALL sljit_release_lock(void) 139 | { 140 | pthread_mutex_unlock(&global_mutex); 141 | } 142 | 143 | #endif /* SLJIT_UTIL_GLOBAL_LOCK */ 144 | 145 | #endif /* _WIN32 */ 146 | 147 | /* ------------------------------------------------------------------------ */ 148 | /* Stack */ 149 | /* ------------------------------------------------------------------------ */ 150 | 151 | #if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) || (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) 152 | 153 | #ifdef _WIN32 154 | #include "windows.h" 155 | #else 156 | /* Provides mmap function. */ 157 | #include 158 | /* For detecting the page size. */ 159 | #include 160 | 161 | #ifndef MAP_ANON 162 | 163 | #include 164 | 165 | /* Some old systems does not have MAP_ANON. 
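   On such systems the code below falls back to opening /dev/zero once (guarded by a mutex in multi-threaded builds) and passing the descriptor to mmap() with MAP_PRIVATE, which yields zero-filled copy-on-write pages equivalent to an anonymous mapping.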
*/ 166 | static int dev_zero = -1; 167 | 168 | #if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED) 169 | 170 | static __inline int open_dev_zero(void) 171 | { 172 | dev_zero = open("/dev/zero", O_RDWR); 173 | return dev_zero < 0; 174 | } 175 | 176 | #else /* SLJIT_SINGLE_THREADED */ 177 | 178 | #include 179 | 180 | static pthread_mutex_t dev_zero_mutex = PTHREAD_MUTEX_INITIALIZER; 181 | 182 | static __inline int open_dev_zero(void) 183 | { 184 | pthread_mutex_lock(&dev_zero_mutex); 185 | dev_zero = open("/dev/zero", O_RDWR); 186 | pthread_mutex_unlock(&dev_zero_mutex); 187 | return dev_zero < 0; 188 | } 189 | 190 | #endif /* SLJIT_SINGLE_THREADED */ 191 | 192 | #endif 193 | 194 | #endif 195 | 196 | #endif /* SLJIT_UTIL_STACK || SLJIT_EXECUTABLE_ALLOCATOR */ 197 | 198 | #if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) 199 | 200 | /* Planning to make it even more clever in the future. */ 201 | static long uintptr_tage_align = 0; 202 | 203 | struct sljit_stack* SLJIT_CALL sljit_allocate_stack(unsigned long limit, unsigned long max_limit) 204 | { 205 | struct sljit_stack *stack; 206 | union { 207 | void *ptr; 208 | unsigned long uw; 209 | } base; 210 | #ifdef _WIN32 211 | SYSTEM_INFO si; 212 | #endif 213 | 214 | if (limit > max_limit || limit < 1) 215 | return NULL; 216 | 217 | #ifdef _WIN32 218 | if (!uintptr_tage_align) { 219 | GetSystemInfo(&si); 220 | uintptr_tage_align = si.dwPageSize - 1; 221 | } 222 | #else 223 | if (!uintptr_tage_align) { 224 | uintptr_tage_align = sysconf(_SC_PAGESIZE); 225 | /* Should never happen. */ 226 | if (uintptr_tage_align < 0) 227 | uintptr_tage_align = 4096; 228 | uintptr_tage_align--; 229 | } 230 | #endif 231 | 232 | /* Align limit and max_limit. */ 233 | max_limit = (max_limit + uintptr_tage_align) & ~uintptr_tage_align; 234 | 235 | stack = malloc(sizeof(struct sljit_stack)); 236 | if (!stack) 237 | return NULL; 238 | 239 | #ifdef _WIN32 240 | base.ptr = VirtualAlloc(NULL, max_limit, MEM_RESERVE, PAGE_READWRITE); 241 | if (!base.ptr) { 242 | free(stack); 243 | return NULL; 244 | } 245 | stack->base = base.uw; 246 | stack->limit = stack->base; 247 | stack->max_limit = stack->base + max_limit; 248 | if (sljit_stack_resize(stack, stack->base + limit)) { 249 | sljit_free_stack(stack); 250 | return NULL; 251 | } 252 | #else 253 | #ifdef MAP_ANON 254 | base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 255 | #else 256 | if (dev_zero < 0) { 257 | if (open_dev_zero()) { 258 | free(stack); 259 | return NULL; 260 | } 261 | } 262 | base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0); 263 | #endif 264 | if (base.ptr == MAP_FAILED) { 265 | free(stack); 266 | return NULL; 267 | } 268 | stack->base = base.uw; 269 | stack->limit = stack->base + limit; 270 | stack->max_limit = stack->base + max_limit; 271 | #endif 272 | stack->top = stack->base; 273 | return stack; 274 | } 275 | 276 | #undef PAGE_ALIGN 277 | 278 | void SLJIT_CALL sljit_free_stack(struct sljit_stack* stack) 279 | { 280 | #ifdef _WIN32 281 | VirtualFree((void*)stack->base, 0, MEM_RELEASE); 282 | #else 283 | munmap((void*)stack->base, stack->max_limit - stack->base); 284 | #endif 285 | free(stack); 286 | } 287 | 288 | long SLJIT_CALL sljit_stack_resize(struct sljit_stack* stack, unsigned long new_limit) 289 | { 290 | unsigned long aligned_old_limit; 291 | unsigned long aligned_new_limit; 292 | 293 | if ((new_limit > stack->max_limit) || (new_limit < stack->base)) 294 | return -1; 295 | #ifdef _WIN32 296 | aligned_new_limit = 
(new_limit + uintptr_tage_align) & ~uintptr_tage_align; 297 | aligned_old_limit = (stack->limit + uintptr_tage_align) & ~uintptr_tage_align; 298 | if (aligned_new_limit != aligned_old_limit) { 299 | if (aligned_new_limit > aligned_old_limit) { 300 | if (!VirtualAlloc((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_COMMIT, PAGE_READWRITE)) 301 | return -1; 302 | } 303 | else { 304 | if (!VirtualFree((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_DECOMMIT)) 305 | return -1; 306 | } 307 | } 308 | stack->limit = new_limit; 309 | return 0; 310 | #else 311 | if (new_limit >= stack->limit) { 312 | stack->limit = new_limit; 313 | return 0; 314 | } 315 | aligned_new_limit = (new_limit + uintptr_tage_align) & ~uintptr_tage_align; 316 | aligned_old_limit = (stack->limit + uintptr_tage_align) & ~uintptr_tage_align; 317 | /* If madvise is available, we release the unnecessary space. */ 318 | #if defined(MADV_DONTNEED) 319 | if (aligned_new_limit < aligned_old_limit) 320 | madvise((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MADV_DONTNEED); 321 | #elif defined(POSIX_MADV_DONTNEED) 322 | if (aligned_new_limit < aligned_old_limit) 323 | posix_madvise((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, POSIX_MADV_DONTNEED); 324 | #endif 325 | stack->limit = new_limit; 326 | return 0; 327 | #endif 328 | } 329 | 330 | #endif /* SLJIT_UTIL_STACK */ 331 | 332 | #endif 333 | -------------------------------------------------------------------------------- /test_src/sljitMain.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Stack-less Just-In-Time compiler 3 | * 4 | * Copyright 2009-2010 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without modification, are 7 | * permitted provided that the following conditions are met: 8 | * 9 | * 1. Redistributions of source code must retain the above copyright notice, this list of 10 | * conditions and the following disclaimer. 11 | * 12 | * 2. Redistributions in binary form must reproduce the above copyright notice, this list 13 | * of conditions and the following disclaimer in the documentation and/or other materials 14 | * provided with the distribution. 15 | * 16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY 17 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT 19 | * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 22 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | */ 26 | 27 | #include 28 | #include 29 | #include 30 | #include 31 | 32 | #include "sljitLir.h" 33 | 34 | void sljit_test(int argc, char* argv[]); 35 | 36 | union executable_code { 37 | void* code; 38 | long (SLJIT_CALL *func)(long* a); 39 | }; 40 | typedef union executable_code executable_code; 41 | 42 | void devel(void) 43 | { 44 | executable_code code; 45 | struct sljit_compiler *compiler; 46 | long buf[4] = {5, 12, 0, 0}; 47 | 48 | if ((compiler = sljit_create_compiler()) == NULL) 49 | errx(-1, "out of memory"); 50 | 51 | #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) 52 | sljit_compiler_verbose(compiler, stdout); 53 | #endif 54 | sljit_emit_enter(compiler, 0, 1, 4, 5, 4, 0, 2 * sizeof(long)); 55 | 56 | sljit_emit_return(compiler, SLJIT_MOV, SLJIT_RETURN_REG, 0); 57 | 58 | code.code = sljit_generate_code(compiler); 59 | sljit_free_compiler(compiler); 60 | 61 | printf("Code at: %p\n", (void*)SLJIT_FUNC_OFFSET(code.code)); 62 | 63 | printf("Function returned with %ld\n", (long)code.func((long*)buf)); 64 | printf("buf[0] = %ld\n", (long)buf[0]); 65 | printf("buf[1] = %ld\n", (long)buf[1]); 66 | printf("buf[2] = %ld\n", (long)buf[2]); 67 | printf("buf[3] = %ld\n", (long)buf[3]); 68 | sljit_free_code(code.code); 69 | } 70 | 71 | int main(int argc, char* argv[]) 72 | { 73 | /* devel(); */ 74 | sljit_test(argc, argv); 75 | 76 | return 0; 77 | } 78 | --------------------------------------------------------------------------------
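Note on the devel() scaffold above: it only emits an enter and a return, so the generated function leaves buf untouched and returns whatever happens to be in the return register. A minimal sketch of what its body might look like in place of the bare sljit_emit_return() call is given below. It is illustrative only and is not part of the repository; it assumes the public sljitLir.h emitters sljit_emit_op1()/sljit_emit_op2(), the SLJIT_R0/SLJIT_S0 register names and SLJIT_MEM1() operand form used elsewhere in this tree, and the sljit convention that the single pointer argument declared in sljit_emit_enter() arrives in SLJIT_S0.

    /* Illustrative sketch, not part of the repository:
       compute buf[2] = buf[0] + buf[1] and return the sum. */
    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_S0), 0);
    sljit_emit_op2(compiler, SLJIT_ADD, SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_S0), sizeof(long));
    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_MEM1(SLJIT_S0), 2 * sizeof(long), SLJIT_R0, 0);
    sljit_emit_return(compiler, SLJIT_MOV, SLJIT_R0, 0);

With buf initialized to {5, 12, 0, 0} as in devel(), the generated function would be expected to return 17 and set buf[2] to 17.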