├── LICENSE
├── README.md
├── imemslab.c
├── imemslab.h
├── testbench.c
└── testmain.c
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 2, June 1991
3 |
4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
6 | Everyone is permitted to copy and distribute verbatim copies
7 | of this license document, but changing it is not allowed.
8 |
9 | Preamble
10 |
11 | The licenses for most software are designed to take away your
12 | freedom to share and change it. By contrast, the GNU General Public
13 | License is intended to guarantee your freedom to share and change free
14 | software--to make sure the software is free for all its users. This
15 | General Public License applies to most of the Free Software
16 | Foundation's software and to any other program whose authors commit to
17 | using it. (Some other Free Software Foundation software is covered by
18 | the GNU Lesser General Public License instead.) You can apply it to
19 | your programs, too.
20 |
21 | When we speak of free software, we are referring to freedom, not
22 | price. Our General Public Licenses are designed to make sure that you
23 | have the freedom to distribute copies of free software (and charge for
24 | this service if you wish), that you receive source code or can get it
25 | if you want it, that you can change the software or use pieces of it
26 | in new free programs; and that you know you can do these things.
27 |
28 | To protect your rights, we need to make restrictions that forbid
29 | anyone to deny you these rights or to ask you to surrender the rights.
30 | These restrictions translate to certain responsibilities for you if you
31 | distribute copies of the software, or if you modify it.
32 |
33 | For example, if you distribute copies of such a program, whether
34 | gratis or for a fee, you must give the recipients all the rights that
35 | you have. You must make sure that they, too, receive or can get the
36 | source code. And you must show them these terms so they know their
37 | rights.
38 |
39 | We protect your rights with two steps: (1) copyright the software, and
40 | (2) offer you this license which gives you legal permission to copy,
41 | distribute and/or modify the software.
42 |
43 | Also, for each author's protection and ours, we want to make certain
44 | that everyone understands that there is no warranty for this free
45 | software. If the software is modified by someone else and passed on, we
46 | want its recipients to know that what they have is not the original, so
47 | that any problems introduced by others will not reflect on the original
48 | authors' reputations.
49 |
50 | Finally, any free program is threatened constantly by software
51 | patents. We wish to avoid the danger that redistributors of a free
52 | program will individually obtain patent licenses, in effect making the
53 | program proprietary. To prevent this, we have made it clear that any
54 | patent must be licensed for everyone's free use or not licensed at all.
55 |
56 | The precise terms and conditions for copying, distribution and
57 | modification follow.
58 |
59 | GNU GENERAL PUBLIC LICENSE
60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
61 |
62 | 0. This License applies to any program or other work which contains
63 | a notice placed by the copyright holder saying it may be distributed
64 | under the terms of this General Public License. The "Program", below,
65 | refers to any such program or work, and a "work based on the Program"
66 | means either the Program or any derivative work under copyright law:
67 | that is to say, a work containing the Program or a portion of it,
68 | either verbatim or with modifications and/or translated into another
69 | language. (Hereinafter, translation is included without limitation in
70 | the term "modification".) Each licensee is addressed as "you".
71 |
72 | Activities other than copying, distribution and modification are not
73 | covered by this License; they are outside its scope. The act of
74 | running the Program is not restricted, and the output from the Program
75 | is covered only if its contents constitute a work based on the
76 | Program (independent of having been made by running the Program).
77 | Whether that is true depends on what the Program does.
78 |
79 | 1. You may copy and distribute verbatim copies of the Program's
80 | source code as you receive it, in any medium, provided that you
81 | conspicuously and appropriately publish on each copy an appropriate
82 | copyright notice and disclaimer of warranty; keep intact all the
83 | notices that refer to this License and to the absence of any warranty;
84 | and give any other recipients of the Program a copy of this License
85 | along with the Program.
86 |
87 | You may charge a fee for the physical act of transferring a copy, and
88 | you may at your option offer warranty protection in exchange for a fee.
89 |
90 | 2. You may modify your copy or copies of the Program or any portion
91 | of it, thus forming a work based on the Program, and copy and
92 | distribute such modifications or work under the terms of Section 1
93 | above, provided that you also meet all of these conditions:
94 |
95 | a) You must cause the modified files to carry prominent notices
96 | stating that you changed the files and the date of any change.
97 |
98 | b) You must cause any work that you distribute or publish, that in
99 | whole or in part contains or is derived from the Program or any
100 | part thereof, to be licensed as a whole at no charge to all third
101 | parties under the terms of this License.
102 |
103 | c) If the modified program normally reads commands interactively
104 | when run, you must cause it, when started running for such
105 | interactive use in the most ordinary way, to print or display an
106 | announcement including an appropriate copyright notice and a
107 | notice that there is no warranty (or else, saying that you provide
108 | a warranty) and that users may redistribute the program under
109 | these conditions, and telling the user how to view a copy of this
110 | License. (Exception: if the Program itself is interactive but
111 | does not normally print such an announcement, your work based on
112 | the Program is not required to print an announcement.)
113 |
114 | These requirements apply to the modified work as a whole. If
115 | identifiable sections of that work are not derived from the Program,
116 | and can be reasonably considered independent and separate works in
117 | themselves, then this License, and its terms, do not apply to those
118 | sections when you distribute them as separate works. But when you
119 | distribute the same sections as part of a whole which is a work based
120 | on the Program, the distribution of the whole must be on the terms of
121 | this License, whose permissions for other licensees extend to the
122 | entire whole, and thus to each and every part regardless of who wrote it.
123 |
124 | Thus, it is not the intent of this section to claim rights or contest
125 | your rights to work written entirely by you; rather, the intent is to
126 | exercise the right to control the distribution of derivative or
127 | collective works based on the Program.
128 |
129 | In addition, mere aggregation of another work not based on the Program
130 | with the Program (or with a work based on the Program) on a volume of
131 | a storage or distribution medium does not bring the other work under
132 | the scope of this License.
133 |
134 | 3. You may copy and distribute the Program (or a work based on it,
135 | under Section 2) in object code or executable form under the terms of
136 | Sections 1 and 2 above provided that you also do one of the following:
137 |
138 | a) Accompany it with the complete corresponding machine-readable
139 | source code, which must be distributed under the terms of Sections
140 | 1 and 2 above on a medium customarily used for software interchange; or,
141 |
142 | b) Accompany it with a written offer, valid for at least three
143 | years, to give any third party, for a charge no more than your
144 | cost of physically performing source distribution, a complete
145 | machine-readable copy of the corresponding source code, to be
146 | distributed under the terms of Sections 1 and 2 above on a medium
147 | customarily used for software interchange; or,
148 |
149 | c) Accompany it with the information you received as to the offer
150 | to distribute corresponding source code. (This alternative is
151 | allowed only for noncommercial distribution and only if you
152 | received the program in object code or executable form with such
153 | an offer, in accord with Subsection b above.)
154 |
155 | The source code for a work means the preferred form of the work for
156 | making modifications to it. For an executable work, complete source
157 | code means all the source code for all modules it contains, plus any
158 | associated interface definition files, plus the scripts used to
159 | control compilation and installation of the executable. However, as a
160 | special exception, the source code distributed need not include
161 | anything that is normally distributed (in either source or binary
162 | form) with the major components (compiler, kernel, and so on) of the
163 | operating system on which the executable runs, unless that component
164 | itself accompanies the executable.
165 |
166 | If distribution of executable or object code is made by offering
167 | access to copy from a designated place, then offering equivalent
168 | access to copy the source code from the same place counts as
169 | distribution of the source code, even though third parties are not
170 | compelled to copy the source along with the object code.
171 |
172 | 4. You may not copy, modify, sublicense, or distribute the Program
173 | except as expressly provided under this License. Any attempt
174 | otherwise to copy, modify, sublicense or distribute the Program is
175 | void, and will automatically terminate your rights under this License.
176 | However, parties who have received copies, or rights, from you under
177 | this License will not have their licenses terminated so long as such
178 | parties remain in full compliance.
179 |
180 | 5. You are not required to accept this License, since you have not
181 | signed it. However, nothing else grants you permission to modify or
182 | distribute the Program or its derivative works. These actions are
183 | prohibited by law if you do not accept this License. Therefore, by
184 | modifying or distributing the Program (or any work based on the
185 | Program), you indicate your acceptance of this License to do so, and
186 | all its terms and conditions for copying, distributing or modifying
187 | the Program or works based on it.
188 |
189 | 6. Each time you redistribute the Program (or any work based on the
190 | Program), the recipient automatically receives a license from the
191 | original licensor to copy, distribute or modify the Program subject to
192 | these terms and conditions. You may not impose any further
193 | restrictions on the recipients' exercise of the rights granted herein.
194 | You are not responsible for enforcing compliance by third parties to
195 | this License.
196 |
197 | 7. If, as a consequence of a court judgment or allegation of patent
198 | infringement or for any other reason (not limited to patent issues),
199 | conditions are imposed on you (whether by court order, agreement or
200 | otherwise) that contradict the conditions of this License, they do not
201 | excuse you from the conditions of this License. If you cannot
202 | distribute so as to satisfy simultaneously your obligations under this
203 | License and any other pertinent obligations, then as a consequence you
204 | may not distribute the Program at all. For example, if a patent
205 | license would not permit royalty-free redistribution of the Program by
206 | all those who receive copies directly or indirectly through you, then
207 | the only way you could satisfy both it and this License would be to
208 | refrain entirely from distribution of the Program.
209 |
210 | If any portion of this section is held invalid or unenforceable under
211 | any particular circumstance, the balance of the section is intended to
212 | apply and the section as a whole is intended to apply in other
213 | circumstances.
214 |
215 | It is not the purpose of this section to induce you to infringe any
216 | patents or other property right claims or to contest validity of any
217 | such claims; this section has the sole purpose of protecting the
218 | integrity of the free software distribution system, which is
219 | implemented by public license practices. Many people have made
220 | generous contributions to the wide range of software distributed
221 | through that system in reliance on consistent application of that
222 | system; it is up to the author/donor to decide if he or she is willing
223 | to distribute software through any other system and a licensee cannot
224 | impose that choice.
225 |
226 | This section is intended to make thoroughly clear what is believed to
227 | be a consequence of the rest of this License.
228 |
229 | 8. If the distribution and/or use of the Program is restricted in
230 | certain countries either by patents or by copyrighted interfaces, the
231 | original copyright holder who places the Program under this License
232 | may add an explicit geographical distribution limitation excluding
233 | those countries, so that distribution is permitted only in or among
234 | countries not thus excluded. In such case, this License incorporates
235 | the limitation as if written in the body of this License.
236 |
237 | 9. The Free Software Foundation may publish revised and/or new versions
238 | of the General Public License from time to time. Such new versions will
239 | be similar in spirit to the present version, but may differ in detail to
240 | address new problems or concerns.
241 |
242 | Each version is given a distinguishing version number. If the Program
243 | specifies a version number of this License which applies to it and "any
244 | later version", you have the option of following the terms and conditions
245 | either of that version or of any later version published by the Free
246 | Software Foundation. If the Program does not specify a version number of
247 | this License, you may choose any version ever published by the Free Software
248 | Foundation.
249 |
250 | 10. If you wish to incorporate parts of the Program into other free
251 | programs whose distribution conditions are different, write to the author
252 | to ask for permission. For software which is copyrighted by the Free
253 | Software Foundation, write to the Free Software Foundation; we sometimes
254 | make exceptions for this. Our decision will be guided by the two goals
255 | of preserving the free status of all derivatives of our free software and
256 | of promoting the sharing and reuse of software generally.
257 |
258 | NO WARRANTY
259 |
260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
268 | REPAIR OR CORRECTION.
269 |
270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
278 | POSSIBILITY OF SUCH DAMAGES.
279 |
280 | END OF TERMS AND CONDITIONS
281 |
282 | How to Apply These Terms to Your New Programs
283 |
284 | If you develop a new program, and you want it to be of the greatest
285 | possible use to the public, the best way to achieve this is to make it
286 | free software which everyone can redistribute and change under these terms.
287 |
288 | To do so, attach the following notices to the program. It is safest
289 | to attach them to the start of each source file to most effectively
290 | convey the exclusion of warranty; and each file should have at least
291 | the "copyright" line and a pointer to where the full notice is found.
292 |
293 | {description}
294 | Copyright (C) {year} {fullname}
295 |
296 | This program is free software; you can redistribute it and/or modify
297 | it under the terms of the GNU General Public License as published by
298 | the Free Software Foundation; either version 2 of the License, or
299 | (at your option) any later version.
300 |
301 | This program is distributed in the hope that it will be useful,
302 | but WITHOUT ANY WARRANTY; without even the implied warranty of
303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
304 | GNU General Public License for more details.
305 |
306 | You should have received a copy of the GNU General Public License along
307 | with this program; if not, write to the Free Software Foundation, Inc.,
308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
309 |
310 | Also add information on how to contact you by electronic and paper mail.
311 |
312 | If the program is interactive, make it output a short notice like this
313 | when it starts in an interactive mode:
314 |
315 | Gnomovision version 69, Copyright (C) year name of author
316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
317 | This is free software, and you are welcome to redistribute it
318 | under certain conditions; type `show c' for details.
319 |
320 | The hypothetical commands `show w' and `show c' should show the appropriate
321 | parts of the General Public License. Of course, the commands you use may
322 | be called something other than `show w' and `show c'; they could even be
323 | mouse-clicks or menu items--whatever suits your program.
324 |
325 | You should also get your employer (if you work as a programmer) or your
326 | school, if any, to sign a "copyright disclaimer" for the program, if
327 | necessary. Here is a sample; alter the names:
328 |
329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program
330 | `Gnomovision' (which makes passes at compilers) written by James Hacker.
331 |
332 | {signature of Ty Coon}, 1 April 1989
333 | Ty Coon, President of Vice
334 |
335 | This General Public License does not permit incorporating your program into
336 | proprietary programs. If your program is a subroutine library, you may
337 | consider it more useful to permit linking proprietary applications with the
338 | library. If this is what you want to do, use the GNU Lesser General
339 | Public License instead of this License.
340 |
341 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | memslab
2 | =======
3 |
4 | Slab Memory Allocator in Application Layer
5 | This is an enhanced SLAB algorithm implementation in application layer,
6 | which provides O(1) memory allocating and efficient memory recycling.
7 |
Since SunOS introduced the slab allocation theory, many OSs have implemented
9 | slab in their kernel. But it requires kernel-layer interfaces such as page
10 | supply etc. So this library improves slab's algorithm and brings the
11 | interfaces of slab into application layer:
12 |
13 | - application layer slab allocator implementation
14 | - O(1) allocating / free: almost speed up 500% - 1200% vs malloc
15 | - re-implementation of page supplier: with new "SLAB-Tree" algorithm
- memory recycling: automatically gives memory back to the OS to avoid waste
17 | - 30% - 50% memory wasting
18 | - platform independence
19 |
20 |
21 | Example
22 | =======
23 | ```cpp
24 | #include "imembase.h"
#include <stdio.h>
#include <assert.h>
26 |
27 | int main(void)
28 | {
29 | char *ptr;
30 |
31 | /* init kmem interface */
32 | ikmem_init(0, 0, 0);
33 |
34 | ptr = (char*)ikmem_malloc(8);
35 | assert(ptr);
36 |
37 | printf("sizeof(ptr)=%d\n", ikmem_ptr_size(ptr));
38 |
39 | ptr = ikmem_realloc(ptr, 40);
40 | assert(ptr);
41 |
42 | printf("sizeof(ptr)=%d\n", ikmem_ptr_size(ptr));
43 |
44 | ikmem_free(ptr);
45 |
46 | /* clean environment */
47 | ikmem_destroy();
48 |
49 | return 0;
50 | }
51 | ```
52 |
53 |
--------------------------------------------------------------------------------
/imemslab.c:
--------------------------------------------------------------------------------
1 | /**********************************************************************
2 | *
 * imemslab.c - basic interface of memory operation
4 | * skywind3000 (at) gmail.com, 2006-2016
5 | *
6 | * - application layer slab allocator implementation
7 | * - unit interval time cost: almost speed up 500% - 1200% vs malloc
8 | * - optional page supplier: with the "GFP-Tree" algorithm
9 | * - memory recycle: automatic give memory back to os to avoid wasting
10 | * - platform independence
11 | *
12 | * for the basic information about slab algorithm, please see:
13 | * The Slab Allocator: An Object Caching Kernel
14 | * Memory Allocator (Jeff Bonwick, Sun Microsystems, 1994)
15 | * with the URL below:
16 | * http://citeseer.ist.psu.edu/bonwick94slab.html
17 | *
18 | **********************************************************************/
19 |
#include "imemslab.h"

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
26 |
27 |
28 | #if (defined(__BORLANDC__) || defined(__WATCOMC__))
29 | #if defined(_WIN32) || defined(WIN32)
30 | #pragma warn -8002
31 | #pragma warn -8004
32 | #pragma warn -8008
33 | #pragma warn -8012
34 | #pragma warn -8027
35 | #pragma warn -8057
36 | #pragma warn -8066
37 | #endif
38 | #endif
39 |
40 |
41 | /*====================================================================*/
42 | /* IALLOCATOR */
43 | /*====================================================================*/
/* Optional global hooks: when set, they override the C library
 * malloc/free for allocations made without an explicit allocator. */
void *(*__ihook_malloc)(size_t size) = NULL;
void (*__ihook_free)(void *) = NULL;
46 |
47 |
48 | void* internal_malloc(struct IALLOCATOR *allocator, size_t size)
49 | {
50 | if (allocator != NULL) {
51 | return allocator->alloc(allocator, size);
52 | }
53 | if (__ihook_malloc != NULL) {
54 | return __ihook_malloc(size);
55 | }
56 | return malloc(size);
57 | }
58 |
59 | void internal_free(struct IALLOCATOR *allocator, void *ptr)
60 | {
61 | if (allocator != NULL) {
62 | allocator->free(allocator, ptr);
63 | return;
64 | }
65 | if (__ihook_free != NULL) {
66 | __ihook_free(ptr);
67 | return;
68 | }
69 | free(ptr);
70 | }
71 |
72 |
73 |
74 | /*====================================================================*/
75 | /* IVECTOR */
76 | /*====================================================================*/
77 | void iv_init(struct IVECTOR *v, struct IALLOCATOR *allocator)
78 | {
79 | if (v == 0) return;
80 | v->data = 0;
81 | v->size = 0;
82 | v->block = 0;
83 | v->allocator = allocator;
84 | }
85 |
86 | void iv_destroy(struct IVECTOR *v)
87 | {
88 | if (v == NULL) return;
89 | if (v->data) {
90 | internal_free(v->allocator, v->data);
91 | }
92 | v->data = NULL;
93 | v->size = 0;
94 | v->block = 0;
95 | }
96 |
/* Resize the vector to `newsize` bytes. Capacity (v->block) grows and
 * shrinks in powers of two (minimum sizeof(char*)); existing content up
 * to min(old size, new size) is preserved. Returns 0 on success, -1 on
 * bad argument or allocation failure (vector left unchanged on OOM). */
int iv_resize(struct IVECTOR *v, size_t newsize)
{
	unsigned char*lptr;
	size_t block, min;
	size_t nblock;

	if (v == NULL) return -1;
	/* fast path: the current block already accommodates newsize */
	if (newsize >= v->size && newsize <= v->block) {
		v->size = newsize;
		return 0;
	}

	/* shrinking to zero releases the buffer entirely */
	if (newsize == 0) {
		if (v->block > 0) {
			internal_free(v->allocator, v->data);
			v->block = 0;
			v->size = 0;
			v->data = NULL;
		}
		return 0;
	}

	/* round the capacity up to the next power of two */
	for (nblock = sizeof(char*); nblock < newsize; ) nblock <<= 1;
	block = nblock;

	/* same capacity as before: only the logical size changes */
	if (block == v->block) {
		v->size = newsize;
		return 0;
	}

	if (v->block == 0 || v->data == NULL) {
		/* first allocation */
		v->data = (unsigned char*)internal_malloc(v->allocator, block);
		if (v->data == NULL) return -1;
		v->size = newsize;
		v->block = block;
	} else {
		/* reallocate and copy the surviving prefix */
		lptr = (unsigned char*)internal_malloc(v->allocator, block);
		if (lptr == NULL) return -1;

		min = (v->size <= newsize)? v->size : newsize;
		memcpy(lptr, v->data, (size_t)min);
		internal_free(v->allocator, v->data);

		v->data = lptr;
		v->size = newsize;
		v->block = block;
	}

	return 0;
}
147 |
148 | int iv_push(struct IVECTOR *v, const void *data, size_t size)
149 | {
150 | size_t current = v->size;
151 | if (iv_resize(v, current + size) != 0)
152 | return -1;
153 | if (data != NULL)
154 | memcpy(v->data + current, data, size);
155 | return 0;
156 | }
157 |
158 | size_t iv_pop(struct IVECTOR *v, void *data, size_t size)
159 | {
160 | size_t current = v->size;
161 | if (size >= current) size = current;
162 | if (data != NULL)
163 | memcpy(data, v->data + current - size, size);
164 | iv_resize(v, current - size);
165 | return size;
166 | }
167 |
168 | int iv_insert(struct IVECTOR *v, size_t pos, const void *data, size_t size)
169 | {
170 | size_t current = v->size;
171 | if (iv_resize(v, current + size) != 0)
172 | return -1;
173 | memmove(v->data + pos + size, v->data + pos, size);
174 | if (data != NULL)
175 | memcpy(v->data + pos, data, size);
176 | return 0;
177 | }
178 |
179 | int iv_erase(struct IVECTOR *v, size_t pos, size_t size)
180 | {
181 | size_t current = v->size;
182 | if (pos >= current) return 0;
183 | if (pos + size >= current) size = current - pos;
184 | if (size == 0) return 0;
185 | memmove(v->data + pos, v->data + pos + size, current - pos - size);
186 | if (iv_resize(v, current - size) != 0)
187 | return -1;
188 | return 0;
189 | }
190 |
191 |
192 | /*====================================================================*/
193 | /* IMEMNODE */
194 | /*====================================================================*/
/* Initialize a node pool that hands out fixed-size slots of `nodesize`
 * bytes; `ac` is the allocator used for all internal memory (NULL means
 * the default malloc/free hooks). No memory is allocated until the
 * first imnode_new() triggers growth. */
void imnode_init(struct IMEMNODE *mn, ilong nodesize, struct IALLOCATOR *ac)
{
	struct IMEMNODE *mnode = mn;
	size_t newsize, shift;

	assert(mnode != NULL);
	mnode->allocator = ac;

	/* per-node index arrays: links, state flags, data pointers, chunks */
	iv_init(&mnode->vprev, ac);
	iv_init(&mnode->vnext, ac);
	iv_init(&mnode->vnode, ac);
	iv_init(&mnode->vdata, ac);
	iv_init(&mnode->vmem, ac);
	iv_init(&mnode->vmode, ac);

	/* smallest shift with (1 << shift) >= nodesize */
	for (shift = 1; (((size_t)1) << shift) < (size_t)nodesize; ) shift++;

	/* round the slot size up to the alignment unit IMROUNDSIZE */
	newsize = (nodesize < (ilong)IMROUNDSIZE)? (ilong)IMROUNDSIZE : nodesize;
	newsize = IMROUNDUP(newsize);

	mnode->node_size = newsize;
	mnode->node_shift = (ilong)shift;
	mnode->node_free = 0;
	mnode->node_used = 0;
	mnode->node_max = 0;
	mnode->mem_max = 0;
	mnode->mem_count = 0;
	mnode->list_open = -1;      /* head of the free-node list */
	mnode->list_close = -1;     /* head of the in-use node list */
	mnode->total_mem = 0;
	mnode->grow_limit = 0;      /* 0 = no cap on one growth step */
	mnode->extra = NULL;
}
228 |
/* Release every raw memory chunk and all index arrays, returning the
 * pool to its pristine (uninitialized-content, zero-capacity) state. */
void imnode_destroy(struct IMEMNODE *mnode)
{
	ilong i;

	assert(mnode != NULL);
	if (mnode->mem_count > 0) {
		/* free each raw chunk registered in the chunk table */
		for (i = 0; i < mnode->mem_count && mnode->mmem; i++) {
			if (mnode->mmem[i]) {
				internal_free(mnode->allocator, mnode->mmem[i]);
			}
			mnode->mmem[i] = NULL;
		}
		mnode->mem_count = 0;
		mnode->mem_max = 0;
		iv_destroy(&mnode->vmem);
		mnode->mmem = NULL;
	}

	/* release the per-node index arrays */
	iv_destroy(&mnode->vprev);
	iv_destroy(&mnode->vnext);
	iv_destroy(&mnode->vnode);
	iv_destroy(&mnode->vdata);
	iv_destroy(&mnode->vmode);

	/* clear the cached raw pointers into the (now freed) vectors */
	mnode->mprev = NULL;
	mnode->mnext = NULL;
	mnode->mnode = NULL;
	mnode->mdata = NULL;
	mnode->mmode = NULL;

	mnode->node_free = 0;
	mnode->node_used = 0;
	mnode->node_max = 0;
	mnode->list_open = -1;
	mnode->list_close= -1;
	mnode->total_mem = 0;
}
266 |
/* Resize every per-node index array to hold `size` entries and refresh
 * the cached raw pointers into their buffers. Returns 0 on success or a
 * distinct negative code identifying which array failed to resize. */
static int imnode_node_resize(struct IMEMNODE *mnode, ilong size)
{
	size_t size1, size2;

	size1 = (size_t)(size * (ilong)sizeof(ilong));
	size2 = (size_t)(size * (ilong)sizeof(void*));

	if (iv_resize(&mnode->vprev, size1)) return -1;
	if (iv_resize(&mnode->vnext, size1)) return -2;
	if (iv_resize(&mnode->vnode, size1)) return -3;
	if (iv_resize(&mnode->vdata, size2)) return -5;
	if (iv_resize(&mnode->vmode, size1)) return -6;

	/* the vectors may have reallocated: re-derive the typed views */
	mnode->mprev = (ilong*)((void*)mnode->vprev.data);
	mnode->mnext = (ilong*)((void*)mnode->vnext.data);
	mnode->mnode = (ilong*)((void*)mnode->vnode.data);
	mnode->mdata =(void**)((void*)mnode->vdata.data);
	mnode->mmode = (ilong*)((void*)mnode->vmode.data);
	mnode->node_max = size;

	return 0;
}
289 |
/* Allocate one raw chunk big enough for `node_count` slots plus
 * alignment slack, register it in the chunk table (grown geometrically
 * when full), and return the aligned usable base through `mem`.
 * Returns 0 on success, -1/-2 on allocation failure. */
static int imnode_mem_add(struct IMEMNODE*mnode, ilong node_count, void**mem)
{
	size_t newsize;
	char *mptr;

	/* grow the chunk-pointer table if it is full */
	if (mnode->mem_count >= mnode->mem_max) {
		newsize = (mnode->mem_max <= 0)? IMROUNDSIZE : mnode->mem_max * 2;
		if (iv_resize(&mnode->vmem, newsize * sizeof(void*)))
			return -1;
		mnode->mem_max = newsize;
		mnode->mmem = (char**)((void*)mnode->vmem.data);
	}
	/* extra IMROUNDSIZE bytes allow aligning the base address */
	newsize = node_count * mnode->node_size + IMROUNDSIZE;
	mptr = (char*)internal_malloc(mnode->allocator, newsize);
	if (mptr == NULL) return -2;

	/* record the ORIGINAL pointer (needed later for free) */
	mnode->mmem[mnode->mem_count++] = mptr;
	mnode->total_mem += newsize;
	mptr = (char*)IMROUNDUP(((size_t)mptr));	/* align the usable base */

	if (mem) *mem = mptr;

	return 0;
}
314 |
315 |
/* Grow the pool by roughly doubling its capacity (bounded by
 * grow_limit when set): extend the index arrays, allocate one new raw
 * chunk, and thread each new slot onto the front of the free list.
 * Returns 0 on success or a negative code identifying the failed step. */
static long imnode_grow(struct IMEMNODE *mnode)
{
	ilong size_start = mnode->node_max;
	ilong size_endup;
	ilong retval, count, i, j;
	void *mptr;
	char *p;

	/* grow by the current capacity, or a minimum seed when empty */
	count = (mnode->node_max <= 0)? IMROUNDSIZE : mnode->node_max;
	if (mnode->grow_limit > 0) {
		if (count > mnode->grow_limit) count = mnode->grow_limit;
	}
	size_endup = size_start + count;

	retval = imnode_node_resize(mnode, size_endup);
	if (retval) return -10 + (long)retval;

	retval = imnode_mem_add(mnode, count, &mptr);

	if (retval) {
		/* roll back the index-array growth if the chunk failed */
		imnode_node_resize(mnode, size_start);
		mnode->node_max = size_start;
		return -20 + (long)retval;
	}

	p = (char*)mptr;
	/* carve the chunk into slots, pushing each onto the free list */
	for (i = mnode->node_max - 1, j = 0; j < count; i--, j++) {
		IMNODE_NODE(mnode, i) = 0;
		IMNODE_MODE(mnode, i) = 0;
		IMNODE_DATA(mnode, i) = p;
		IMNODE_PREV(mnode, i) = -1;
		IMNODE_NEXT(mnode, i) = mnode->list_open;
		if (mnode->list_open >= 0) IMNODE_PREV(mnode, mnode->list_open) = i;
		mnode->list_open = i;
		mnode->node_free++;
		p += mnode->node_size;
	}

	return 0;
}
356 |
357 |
/* Allocate one node: pop the head of the free ("open") list, push it
 * onto the in-use ("close") list, and return its index (>= 0).
 * Returns -2 when the pool cannot grow, -3 when no node is available. */
ilong imnode_new(struct IMEMNODE *mnode)
{
	ilong node, next;

	assert(mnode);
	if (mnode->list_open < 0) {
		if (imnode_grow(mnode)) return -2;
	}

	if (mnode->list_open < 0 || mnode->node_free <= 0) return -3;

	/* unlink the head of the free list */
	node = mnode->list_open;
	next = IMNODE_NEXT(mnode, node);
	if (next >= 0) IMNODE_PREV(mnode, next) = -1;
	mnode->list_open = next;

	/* link it at the head of the in-use list */
	IMNODE_PREV(mnode, node) = -1;
	IMNODE_NEXT(mnode, node) = mnode->list_close;

	if (mnode->list_close >= 0) IMNODE_PREV(mnode, mnode->list_close) = node;
	mnode->list_close = node;
	IMNODE_MODE(mnode, node) = 1;   /* mark as in use */

	mnode->node_free--;
	mnode->node_used++;

	return node;
}
386 |
/* release a node index back to the pool: unlink it from the close
 * (used) list and push it onto the open (free) list */
void imnode_del(struct IMEMNODE *mnode, ilong index)
{
	ilong prev, next;

	assert(mnode);
	assert((index >= 0) && (index < mnode->node_max));
	assert(IMNODE_MODE(mnode, index));	/* must currently be in-use */

	next = IMNODE_NEXT(mnode, index);
	prev = IMNODE_PREV(mnode, index);

	/* unlink from the used list; repair the head if needed */
	if (next >= 0) IMNODE_PREV(mnode, next) = prev;
	if (prev >= 0) IMNODE_NEXT(mnode, prev) = next;
	else mnode->list_close = next;

	/* push onto the head of the free list */
	IMNODE_PREV(mnode, index) = -1;
	IMNODE_NEXT(mnode, index) = mnode->list_open;

	if (mnode->list_open >= 0) IMNODE_PREV(mnode, mnode->list_open) = index;
	mnode->list_open = index;

	IMNODE_MODE(mnode, index) = 0;
	mnode->node_free++;
	mnode->node_used--;
}
412 |
413 | ilong imnode_head(const struct IMEMNODE *mnode)
414 | {
415 | return (mnode)? mnode->list_close : -1;
416 | }
417 |
418 | ilong imnode_next(const struct IMEMNODE *mnode, ilong index)
419 | {
420 | return (mnode)? IMNODE_NEXT(mnode, index) : -1;
421 | }
422 |
423 | ilong imnode_prev(const struct IMEMNODE *mnode, ilong index)
424 | {
425 | return (mnode)? IMNODE_PREV(mnode, index) : -1;
426 | }
427 |
428 | void *imnode_data(struct IMEMNODE *mnode, ilong index)
429 | {
430 | return (char*)IMNODE_DATA(mnode, index);
431 | }
432 |
433 | const void* imnode_data_const(const struct IMEMNODE *mnode, ilong index)
434 | {
435 | return (const char*)IMNODE_DATA(mnode, index);
436 | }
437 |
438 |
439 |
440 | /*====================================================================*/
441 | /* IMEMSLAB */
442 | /*====================================================================*/
443 | #define IMEM_NEXT_PTR(p) (((void**)(p))[0])
444 |
/* init slab structure with given memory block and coloroff:
 * carves [membase+coloroff, membase+memsize) into obj_size cells,
 * chains them into the bufctl free list (the first word of a free
 * cell stores the next-free pointer) and returns the cell count */
static ilong imslab_init(imemslab_t *slab, void *membase,
	size_t memsize, ilong obj_size, size_t coloroff)
{
	char *start = ((char*)membase) + coloroff;
	char *endup = ((char*)membase) + memsize - obj_size;	/* last valid cell start */
	ilong retval = 0;
	char *tail;

	assert(slab && membase);
	assert((size_t)obj_size >= sizeof(void*));	/* a free cell must hold a pointer */

	ilist_init(&slab->queue);
	slab->membase = membase;
	slab->memsize = memsize;
	slab->coloroff = coloroff;
	slab->inuse = 0;
	slab->extra = NULL;
	slab->bufctl = NULL;

	/* build the singly-linked free list in address order */
	for (tail = NULL; start <= endup; start += obj_size) {
		IMEM_NEXT_PTR(start) = NULL;
		if (tail == NULL) slab->bufctl = start;
		else IMEM_NEXT_PTR(tail) = start;
		tail = start;
		retval++;
	}

	return retval;
}
475 |
476 |
477 | /* alloc data from slab */
478 | static void *imslab_alloc(imemslab_t *slab)
479 | {
480 | void *ptr;
481 |
482 | if (slab->bufctl == 0) return 0;
483 | ptr = slab->bufctl;
484 | slab->bufctl = IMEM_NEXT_PTR(slab->bufctl);
485 | slab->inuse++;
486 |
487 | return ptr;
488 | }
489 |
490 |
491 | /* free data into slab */
492 | static void imslab_free(imemslab_t *slab, void *ptr)
493 | {
494 | char *start = ((char*)slab->membase) + slab->coloroff;
495 | char *endup = ((char*)slab->membase) + slab->memsize;
496 | char *p = (char*)ptr;
497 |
498 | assert(slab->inuse > 0);
499 | assert(p >= start && p < endup);
500 |
501 | if (p >= start && p < endup) {
502 | IMEM_NEXT_PTR(p) = slab->bufctl;
503 | slab->bufctl = p;
504 | }
505 |
506 | slab->inuse--;
507 | }
508 |
509 |
510 |
511 | /*====================================================================*/
512 | /* IMUTEX - mutex interfaces */
513 | /*====================================================================*/
514 | int imutex_disable = 0;
515 |
/* initialize a mutex; compiles to a no-op when the platform
 * provides no IMUTEX_INIT implementation */
void imutex_init(imutex_t *mutex)
{
#ifdef IMUTEX_INIT
	IMUTEX_INIT(mutex);
#endif
}
522 |
/* destroy a mutex; no-op without a platform IMUTEX_DESTROY */
void imutex_destroy(imutex_t *mutex)
{
#ifdef IMUTEX_DESTROY
	IMUTEX_DESTROY(mutex);
#endif
}
529 |
/* acquire a mutex; skipped entirely when the global imutex_disable
 * flag is set (single-threaded mode) */
void imutex_lock(imutex_t *mutex)
{
#ifdef IMUTEX_LOCK
	if (imutex_disable == 0)
		IMUTEX_LOCK(mutex);
#endif
}
537 |
/* release a mutex; skipped when imutex_disable is set */
void imutex_unlock(imutex_t *mutex)
{
#ifdef IMUTEX_UNLOCK
	if (imutex_disable == 0)
		IMUTEX_UNLOCK(mutex);
#endif
}
545 |
546 |
547 | /*====================================================================*/
548 | /* IMEMGFP (mem_get_free_pages) - a page-supplyer class */
549 | /*====================================================================*/
550 | static struct IMEMNODE imem_page_cache;
551 | static imemgfp_t imem_gfp_default;
552 | static imutex_t imem_gfp_lock;
553 | static size_t imem_page_size;
554 | static size_t imem_page_shift;
555 | static int imem_gfp_malloc = 0;
556 | static int imem_gfp_inited = 0;
557 |
558 |
/* allocate one page: a custom gfp is delegated to directly; the
 * default supplier either malloc()s (imem_gfp_malloc mode) or takes
 * a node from imem_page_cache, storing the node index in the first
 * ilong of the page and returning the payload right after it */
static void* imem_gfp_alloc(imemgfp_t *gfp)
{
	ilong index;
	char *lptr;

	if (gfp != NULL && gfp != &imem_gfp_default)
		return gfp->alloc_page(gfp);

	if (imem_gfp_malloc) {
		lptr = (char*)internal_malloc(0, imem_page_size);
		if (lptr == NULL) {
			return NULL;
		}
	} else {
		assert(imem_gfp_inited);

		imutex_lock(&imem_gfp_lock);
		index = imnode_new(&imem_page_cache);
		assert(index >= 0);

		if (index < 0) {
			imutex_unlock(&imem_gfp_lock);
			return NULL;
		}

		lptr = (char*)IMNODE_DATA(&imem_page_cache, index);
		imutex_unlock(&imem_gfp_lock);

		/* remember which cache node this page came from */
		*(ilong*)lptr = index;
		lptr += IMROUNDUP(sizeof(ilong));
	}

	/* statistics only; updated outside the lock, so approximate
	 * under concurrency */
	imem_gfp_default.pages_new++;
	imem_gfp_default.pages_inuse++;

	return lptr;
}
596 |
/* free one page: a custom gfp is delegated to directly; the default
 * supplier either free()s (imem_gfp_malloc mode) or returns the
 * node to imem_page_cache using the index stored just before the
 * payload. Invalid pointers are rejected (asserts in debug). */
static void imem_gfp_free(imemgfp_t *gfp, void *ptr)
{
	ilong index;
	char *lptr;
	int invalidptr;

	if (gfp != NULL && gfp != &imem_gfp_default) {
		gfp->free_page(gfp, ptr);
		return;
	}

	if (imem_gfp_malloc) {
		internal_free(0, ptr);

	} else {
		/* recover the node index hidden before the payload */
		lptr = (char*)ptr - IMROUNDUP(sizeof(ilong));
		index = *(ilong*)lptr;

		invalidptr = (index < 0 || index >= imem_page_cache.node_max);
		assert( !invalidptr );

		if (invalidptr) return;

		imutex_lock(&imem_gfp_lock);

		/* the node must map back to this page and still be in use */
		if ((char*)IMNODE_DATA(&imem_page_cache, index) != lptr ||
			IMNODE_MODE(&imem_page_cache, index) == 0)
			invalidptr = 1;

		assert( !invalidptr );
		if (invalidptr) {
			imutex_unlock(&imem_gfp_lock);
			return;
		}

		imnode_del(&imem_page_cache, index);
		imutex_unlock(&imem_gfp_lock);
	}

	/* statistics only; updated outside the lock */
	imem_gfp_default.pages_del++;
	imem_gfp_default.pages_inuse--;
}
639 |
/* one-time setup of the default page supplier: pages are
 * (1 << page_shift) bytes plus header slack, and come either from
 * malloc (use_malloc != 0) or from the imem_page_cache node pool */
static void imem_gfp_init(int page_shift, int use_malloc)
{
	size_t require_size;

	if (imem_gfp_inited != 0)
		return;

	if (page_shift <= 0)
		page_shift = IDEFAULT_PAGE_SHIFT;

	imem_page_shift = (size_t)page_shift;
	/* extra room beyond the power of two for alignment/bookkeeping */
	imem_page_size = (((size_t)1) << page_shift) +
		IMROUNDUP(sizeof(void*)) * 16;

	/* each cache node also holds the hidden node-index header */
	require_size = imem_page_size + IMROUNDUP(sizeof(void*));
	imnode_init(&imem_page_cache, require_size, 0);

	imem_page_cache.grow_limit = 8;

	imutex_init(&imem_gfp_lock);

	imem_gfp_default.page_size = imem_page_size;
	imem_gfp_default.alloc_page = imem_gfp_alloc;
	imem_gfp_default.free_page = imem_gfp_free;
	imem_gfp_default.refcnt = 0;
	imem_gfp_default.extra = NULL;
	imem_gfp_default.pages_new = 0;
	imem_gfp_default.pages_del = 0;
	imem_gfp_default.pages_inuse = 0;

	imem_gfp_malloc = use_malloc;

	imem_gfp_inited = 1;
}
674 |
675 | static void imem_gfp_destroy(void)
676 | {
677 | if (imem_gfp_inited == 0)
678 | return;
679 |
680 | imutex_lock(&imem_gfp_lock);
681 | imnode_destroy(&imem_page_cache);
682 | imutex_unlock(&imem_gfp_lock);
683 |
684 | imutex_destroy(&imem_gfp_lock);
685 | imem_gfp_inited = 0;
686 | }
687 |
688 |
689 |
690 | /*====================================================================*/
691 | /* SLAB SET */
692 | /*====================================================================*/
693 | static struct IMEMNODE imslab_cache;
694 | static imutex_t imslab_lock;
695 | static int imslab_inited = 0;
696 |
697 | static void imslab_set_init(void)
698 | {
699 | size_t size;
700 | if (imslab_inited != 0)
701 | return;
702 | imutex_init(&imslab_lock);
703 | size = sizeof(imemslab_t) + IMROUNDUP(sizeof(void*));
704 | imnode_init(&imslab_cache, size, 0);
705 | imslab_inited = 1;
706 | }
707 |
708 | static void imslab_set_destroy(void)
709 | {
710 | if (imslab_inited == 0)
711 | return;
712 |
713 | imutex_lock(&imslab_lock);
714 | imnode_destroy(&imslab_cache);
715 | imutex_unlock(&imslab_lock);
716 |
717 | imutex_destroy(&imslab_lock);
718 | imslab_inited = 0;
719 | }
720 |
/* allocate an off-slab descriptor from imslab_cache; the node index
 * is stored in the ilong just before the returned descriptor so
 * imslab_set_delete can find it again */
static imemslab_t *imslab_set_new(void)
{
	ilong index;
	char *lptr;

	assert(imslab_inited != 0);

	imutex_lock(&imslab_lock);
	index = imnode_new(&imslab_cache);

	assert(index >= 0);
	if (index < 0) {
		imutex_unlock(&imslab_lock);
		return NULL;
	}

	lptr = (char*)IMNODE_DATA(&imslab_cache, index);
	imutex_unlock(&imslab_lock);

	/* hidden header: node index for the matching delete */
	*(ilong*)lptr = index;
	lptr += IMROUNDUP(sizeof(ilong));

	return (imemslab_t*)lptr;
}
745 |
746 | static void imslab_set_delete(imemslab_t *slab)
747 | {
748 | char *lptr = (char*)slab;
749 | ilong index;
750 | int invalidptr;
751 |
752 | lptr -= IMROUNDUP(sizeof(ilong));
753 | index = *(ilong*)lptr;
754 |
755 | invalidptr = (index < 0 || index >= imem_page_cache.node_max);
756 | assert( !invalidptr );
757 |
758 | if (invalidptr) return;
759 |
760 | imutex_lock(&imslab_lock);
761 |
762 | if ((char*)IMNODE_DATA(&imslab_cache, index) != lptr ||
763 | IMNODE_MODE(&imslab_cache, index) == 0)
764 | invalidptr = 1;
765 |
766 | assert( !invalidptr );
767 | if (invalidptr) {
768 | imutex_unlock(&imslab_lock);
769 | return;
770 | }
771 |
772 | imnode_del(&imslab_cache, index);
773 | imutex_unlock(&imslab_lock);
774 | }
775 |
776 |
777 | /*====================================================================*/
778 | /* IMEMCACHE */
779 | /*====================================================================*/
780 | #define IMCACHE_FLAG_OFFSLAB 1
781 | #define IMCACHE_FLAG_NODRAIN 2
782 | #define IMCACHE_FLAG_NOLOCK 4
783 | #define IMCACHE_FLAG_SYSTEM 8
784 | #define IMCACHE_FLAG_ONQUEUE 16
785 |
786 | #define IMCACHE_OFFSLAB(mlist) ((mlist)->flags & IMCACHE_FLAG_OFFSLAB)
787 | #define IMCACHE_NODRAIN(mlist) ((mlist)->flags & IMCACHE_FLAG_NODRAIN)
788 | #define IMCACHE_NOLOCK(mlist) ((mlist)->flags & IMCACHE_FLAG_NOLOCK)
789 | #define IMCACHE_SYSTEM(mlist) ((mlist)->flags & IMCACHE_FLAG_SYSTEM)
790 | #define IMCACHE_ONQUEUE(mlist) ((mlist)->flags & IMCACHE_FLAG_ONQUEUE)
791 |
792 |
/* derive per-cache layout from unit_size / page_size: whether the
 * slab descriptor lives off the page, objects per page (num), the
 * per-cpu array capacity, and the color range used to stagger
 * object offsets between slabs */
static void imemcache_calculate(imemcache_t *cache)
{
	size_t obj_num;
	size_t unit_size;
	size_t size;
	size_t color;
	int mustonslab = 0;

	unit_size = cache->unit_size;

	/* if the leftover of a fully-packed page can hold a descriptor,
	 * keeping it on the page costs nothing */
	if (unit_size > IMROUNDUP(sizeof(imemslab_t))) {
		obj_num = cache->page_size / unit_size;
		size = cache->page_size - obj_num * unit_size;
		if (size >= IMROUNDUP(sizeof(imemslab_t) + IMROUNDSIZE))
			mustonslab = 1;
	}

	/* large objects (>= 1/8 page) get an off-slab descriptor */
	if (unit_size >= (cache->page_size >> 3) && mustonslab == 0)
		cache->flags |= IMCACHE_FLAG_OFFSLAB;

	/* per-cpu array capacity scales inversely with object size */
	if (unit_size >= 1024) cache->limit = 32;
	else if (unit_size >= 256) cache->limit = 48;
	else if (unit_size >= 128) cache->limit = 64;
	else if (unit_size >= 64) cache->limit = 96;
	else cache->limit = 128;

	cache->batchcount = (cache->limit + 1) >> 1;

	/* objects per page, and leftover bytes usable for coloring */
	if (IMCACHE_OFFSLAB(cache)) {
		obj_num = cache->page_size / unit_size;
		color = cache->page_size - obj_num * unit_size;
	} else {
		obj_num = (cache->page_size - IMROUNDUP(sizeof(imemslab_t)) -
			IMROUNDSIZE) / unit_size;
		color = (cache->page_size - obj_num * unit_size) -
			IMROUNDUP(sizeof(imemslab_t)) - IMROUNDSIZE;
	}
	color = color > unit_size ? unit_size : color;
	cache->num = obj_num;
	cache->color_limit = color;
}
834 |
/* initialize a cache object: bind its page supplier, compute the
 * slab layout and set up the empty slab lists and per-cpu arrays.
 * obj_size is the caller-visible size; unit_size adds the hidden
 * slab back-pointer header and rounds up. */
static void imemcache_init_list(imemcache_t *cache, imemgfp_t *gfp,
	size_t obj_size)
{
	size_t limit;
	int i;

	assert(imslab_inited && imem_gfp_inited && cache);
	assert(obj_size >= sizeof(void*));

	cache->gfp = gfp;	/* NULL means "use the default supplier" */

	if (gfp == NULL) gfp = &imem_gfp_default;
	ilist_init(&cache->slabs_free);
	ilist_init(&cache->slabs_partial);
	ilist_init(&cache->slabs_full);

	cache->page_size = gfp->page_size;
	cache->obj_size = obj_size;
	/* one hidden pointer per object links it back to its slab */
	cache->unit_size = IMROUNDUP(cache->obj_size + sizeof(void*));
	cache->flags = 0;
	cache->num = 0;
	cache->count_free = 0;
	cache->count_partial = 0;
	cache->count_full = 0;
	cache->free_objects = 0;
	cache->free_limit = 0;
	cache->color_limit = 0;
	cache->color_next = 0;
	cache->extra = NULL;

	imutex_init(&cache->list_lock);

	imemcache_calculate(cache);

	/* per-cpu array capacity, clamped to the static entry array */
	limit = cache->batchcount + cache->num;
	cache->free_limit = limit;
	limit = (limit >= IMCACHE_ARRAYLIMIT)? IMCACHE_ARRAYLIMIT : limit;

	for (i = 0; i < (int)IMCACHE_LRU_COUNT; i++) {
		cache->array[i].avial = 0;
		cache->array[i].batchcount = (int)(limit >> 1);
		cache->array[i].limit = (int)limit;
		imutex_init(&cache->array[i].lock);
	}

	ilist_init(&cache->queue);

	cache->pages_new = 0;
	cache->pages_del = 0;
	cache->pages_hiwater = 0;
	cache->pages_inuse = 0;
}
887 |
/* create one new slab: grab a page from the cache's supplier, place
 * the descriptor (off-slab from imslab_cache, or on the page itself
 * after the color offset) and carve the rest into objects.
 * '*num' receives the object count; returns NULL on failure. */
static imemslab_t *imemcache_slab_create(imemcache_t *cache, ilong *num)
{
	imemslab_t *slab;
	size_t coloroff;
	size_t obj_size;
	ilong count;
	char *page;

	obj_size = cache->unit_size;
	page = (char*)imem_gfp_alloc(cache->gfp);

	if (page == NULL) {
		return NULL;
	}

	coloroff = cache->color_next;
	if (IMCACHE_OFFSLAB(cache)) {
		slab = imslab_set_new();
		if (slab == NULL) {
			imem_gfp_free(cache->gfp, page);
			return NULL;
		}
	} else {
		/* align the on-page descriptor inside the page */
		coloroff = IMROUNDUP((size_t)(page + coloroff));
		coloroff -= (size_t)page;
		slab = (imemslab_t*)(page + coloroff);
		coloroff += sizeof(imemslab_t);
	}

	assert(IMROUNDUP((size_t)slab) == (size_t)slab);

	count = imslab_init(slab, page, cache->page_size, obj_size, coloroff);

	/* advance the color so the next slab staggers its objects */
	cache->color_next += IMROUNDSIZE;

	if (cache->color_next >= cache->color_limit)
		cache->color_next = 0;

	slab->extra = cache;	/* back-pointer used by the free paths */
	cache->pages_new++;
	cache->pages_inuse++;

	if (num) *num = count;

	return slab;
}
934 |
935 | static void imemcache_slab_delete(imemcache_t *cache, imemslab_t *slab)
936 | {
937 | imem_gfp_free(cache->gfp, slab->membase);
938 | cache->pages_del++;
939 | cache->pages_inuse--;
940 | if (IMCACHE_OFFSLAB(cache)) {
941 | imslab_set_delete(slab);
942 | }
943 | }
944 |
/* free up to 'tofree' slabs (negative = unlimited) from one of the
 * cache's lists: id 0 = slabs_free, 1 = slabs_full, 2 =
 * slabs_partial. Returns the number released, -1 for a bad id. */
static ilong imemcache_drain_list(imemcache_t *cache, int id, ilong tofree)
{
	imemslab_t *slab;
	ilist_head *head, *p;
	ilong free_count = 0;

	if (id == 0) head = &cache->slabs_free;
	else if (id == 1) head = &cache->slabs_full;
	else if (id == 2) head = &cache->slabs_partial;
	else return -1;

	while (!ilist_is_empty(head)) {
		if (tofree >= 0 && free_count >= tofree) break;
		if (IMCACHE_NOLOCK(cache) == 0)
			imutex_lock(&cache->list_lock);
		/* detach from the tail under the lock; re-check emptiness */
		p = head->prev;
		if (p == head) {
			if (IMCACHE_NOLOCK(cache) == 0)
				imutex_unlock(&cache->list_lock);
			break;
		}
		slab = ilist_entry(p, imemslab_t, queue);
		ilist_del(p);
		if (IMCACHE_NOLOCK(cache) == 0)
			imutex_unlock(&cache->list_lock);
		if (id == 0) {
			/* a free slab's objects are counted in free_objects;
			 * exhaust it so the counter can be adjusted exactly */
			while (!IMEMSLAB_ISFULL(slab)) imslab_alloc(slab);
			cache->free_objects -= slab->inuse;
		}
		imemcache_slab_delete(cache, slab);
		free_count++;
	}
	if (id == 0) cache->count_free -= free_count;
	else if (id == 1) cache->count_full -= free_count;
	else cache->count_partial -= free_count;
	return free_count;
}
982 |
/* tear a cache's slab lists down completely: destroy the per-cpu
 * array locks, release every slab and zero the counters */
static void imemcache_destroy_list(imemcache_t *cache)
{
	int i;

	for (i = 0; i < (int)IMCACHE_LRU_COUNT; i++)
		imutex_destroy(&cache->array[i].lock);

	/* drain all three lists without limit */
	imemcache_drain_list(cache, 0, -1);
	imemcache_drain_list(cache, 1, -1);
	imemcache_drain_list(cache, 2, -1);

	imutex_lock(&cache->list_lock);
	cache->count_free = 0;
	cache->count_full = 0;
	cache->count_partial = 0;
	cache->free_objects = 0;
	imutex_unlock(&cache->list_lock);

	imutex_destroy(&cache->list_lock);
}
1003 |
1004 | #define IMCACHE_CHECK_MAGIC 0x05
1005 |
/* allocate one object straight from the slab lists: use a partial
 * slab when available, else a free slab, else create a new one.
 * The owning slab pointer is stored in the hidden header word just
 * before the returned payload. Returns NULL when out of memory. */
static void* imemcache_list_alloc(imemcache_t *cache)
{
	imemslab_t *slab;
	ilist_head *p;
	ilong slab_obj_num;
	char *lptr = NULL;

	if (IMCACHE_NOLOCK(cache) == 0)
		imutex_lock(&cache->list_lock);

	p = cache->slabs_partial.next;
	if (p == &cache->slabs_partial) {
		p = cache->slabs_free.next;
		if (p == &cache->slabs_free) {
			/* both lists empty: grow the cache by one new slab */
			slab = imemcache_slab_create(cache, &slab_obj_num);
			if (slab == NULL) {
				if (IMCACHE_NOLOCK(cache) == 0)
					imutex_unlock(&cache->list_lock);
				return NULL;
			}
			p = &slab->queue;
			cache->free_objects += slab_obj_num;
			slab = ilist_entry(p, imemslab_t, queue);
		} else {
			/* recycle a completely-free slab */
			ilist_del(p);
			ilist_init(p);
			cache->count_free--;
			slab = ilist_entry(p, imemslab_t, queue);
		}
		ilist_add(p, &cache->slabs_partial);
		cache->count_partial++;
	}
	slab = ilist_entry(p, imemslab_t, queue);
	assert(IMEMSLAB_ISFULL(slab) == 0);
	lptr = (char*)imslab_alloc(slab);
	assert(lptr);
	if (cache->free_objects) cache->free_objects--;
	/* slab exhausted: move it to the full list */
	if (IMEMSLAB_ISFULL(slab)) {
		ilist_del(p);
		ilist_init(p);
		ilist_add(p, &cache->slabs_full);
		cache->count_partial--;
		cache->count_full++;
	}
	if (IMCACHE_NOLOCK(cache) == 0)
		imutex_unlock(&cache->list_lock);
	/* hidden header: remember which slab owns this object */
	*(imemslab_t**)lptr = slab;
	lptr += sizeof(imemslab_t*);
	return lptr;
}
1056 |
/* return an object to its slab: the owning slab is recovered from
 * the hidden header word before the payload. Moves the slab between
 * the full/partial/free lists as needed and drains surplus free
 * slabs. 'cache' may be NULL (then taken from the slab itself). */
static void imemcache_list_free(imemcache_t *cache, void *ptr)
{
	imemslab_t *slab;
	ilist_head *p;
	char *lptr = (char*)ptr;
	char *membase;
	int invalidptr;
	ilong tofree;

	lptr -= sizeof(imemslab_t*);
	slab = *(imemslab_t**)lptr;
	assert(slab);

	/* the object must lie inside the slab's page */
	membase = (char*)slab->membase;
	invalidptr = !(lptr >= membase && lptr < membase + slab->memsize);

	assert( !invalidptr );
	if (invalidptr) return;
	if (cache != NULL) {
		/* if a cache was named, it must be the slab's owner */
		invalidptr = ((void*)cache != slab->extra);
		assert( !invalidptr );
		if (invalidptr) return;
	}

	cache = (imemcache_t*)slab->extra;
	p = &slab->queue;

	if (IMCACHE_NOLOCK(cache) == 0)
		imutex_lock(&cache->list_lock);

	/* full -> partial before the object goes back */
	if (IMEMSLAB_ISFULL(slab)) {
		assert(cache->count_full);
		ilist_del(p);
		ilist_init(p);
		ilist_add_tail(p, &cache->slabs_partial);
		cache->count_full--;
		cache->count_partial++;
	}
	imslab_free(slab, lptr);
	cache->free_objects++;

	/* partial -> free when the slab is now empty */
	if (IMEMSLAB_ISEMPTY(slab)) {
		ilist_del(p);
		ilist_init(p);
		ilist_add(p, &cache->slabs_free);
		cache->count_partial--;
		cache->count_free++;
	}

	if (IMCACHE_NOLOCK(cache) == 0)
		imutex_unlock(&cache->list_lock);

	/* release half of the completely-free slabs when over limit */
	if (IMCACHE_NODRAIN(cache) == 0) {
		if (cache->free_objects >= cache->free_limit) {
			tofree = cache->count_free >> 1;
			if (tofree > 0)
				tofree = imemcache_drain_list(cache, 0, tofree);
		}
	}
}
1117 |
1118 |
1119 | /*====================================================================*/
1120 | /* IMEMECACHE INTERFACE */
1121 | /*====================================================================*/
1122 |
1123 | /* callback to fetch processor id */
1124 | int (*__ihook_processor_id)(void) = NULL;
1125 |
/* refill one per-cpu array up to its batchcount from the slab
 * lists; returns the number of objects added. The caller holds the
 * array's own lock.
 * NOTE(review): this takes list_lock and then calls
 * imemcache_list_alloc, which would lock it again unless the cache
 * has IMCACHE_FLAG_NOLOCK (imemcache_create always sets it) -
 * confirm no non-NOLOCK cache can reach this path. */
static int imemcache_fill_batch(imemcache_t *cache, int array_index)
{
	imemlru_t *array = &cache->array[array_index];
	int count = 0;
	void *ptr;

	imutex_lock(&cache->list_lock);

	for (count = 0; array->avial < array->batchcount; count++) {
		ptr = imemcache_list_alloc(cache);
		if (ptr == NULL) break;
		array->entry[array->avial++] = ptr;
	}

	imutex_unlock(&cache->list_lock);

	/* track the high-water mark of pages in use */
	if (cache->pages_inuse > cache->pages_hiwater)
		cache->pages_hiwater = cache->pages_inuse;

	return count;
}
1147 |
1148 | int imemcache_batch_new(imemcache_t *cache, void **ptr, int count)
1149 | {
1150 | int n = 0;
1151 |
1152 | imutex_lock(&cache->list_lock);
1153 |
1154 | for (n = 0; n < count; n++) {
1155 | void *p = imemcache_list_alloc(cache);
1156 | if (ptr == NULL) break;
1157 | ptr[n] = p;
1158 | }
1159 |
1160 | imutex_unlock(&cache->list_lock);
1161 |
1162 | return n;
1163 | }
1164 |
/* free 'count' objects from ptr[] back to the cache, then drain
 * half of the fully-free slabs if the free-object limit has been
 * exceeded; returns count */
int imemcache_batch_del(imemcache_t *cache, void **ptr, int count)
{
	int n = 0;

	imutex_lock(&cache->list_lock);

	for (n = 0; n < count; n++) {
		imemcache_list_free(cache, ptr[n]);
	}

	/* reclaim surplus fully-free slabs */
	if (cache->free_objects >= cache->free_limit) {
		if (cache->count_free > 1) {
			imemcache_drain_list(cache, 0, cache->count_free >> 1);
		}
	}

	imutex_unlock(&cache->list_lock);

	return count;
}
1185 |
/* allocate one object via the per-cpu LRU array: pop a cached
 * entry, refilling a batch from the slab lists when the array is
 * empty. The hidden header word is tagged with IMCACHE_CHECK_MAGIC
 * so frees can detect corruption / double free. */
static void *imemcache_alloc(imemcache_t *cache)
{
	imemlru_t *array;
	int array_index = 0;
	void *ptr = NULL;
	void **head;

	/* pick the per-cpu array for the current processor, if hooked */
	if (__ihook_processor_id)
		array_index = __ihook_processor_id();

	array_index &= (IMCACHE_LRU_COUNT - 1);

	array = &cache->array[array_index];

	imutex_lock(&array->lock);
	if (array->avial == 0)
		imemcache_fill_batch(cache, array_index);
	if (array->avial != 0) {
		ptr = array->entry[--array->avial];
	}
	imutex_unlock(&array->lock);

	if (ptr == 0) {
		return NULL;
	}

	assert(ptr);
	/* tag the slab back-pointer with the check magic (relies on
	 * slab pointers being IMROUNDSIZE-aligned - see imemcache_free) */
	head = (void**)((char*)ptr - sizeof(void*));
	head[0] = (void*)((size_t)head[0] | IMCACHE_CHECK_MAGIC);

	return ptr;
}
1218 |
/* free one object: verify and clear the header magic, then push the
 * object into the per-cpu array, flushing a batch back to the slab
 * lists when the array is full. 'cache' may be NULL. Returns the
 * owning cache, or NULL if the pointer fails validation. */
static void *imemcache_free(imemcache_t *cache, void *ptr)
{
	imemslab_t *slab;
	imemlru_t *array;
	size_t linear;
	char *lptr = (char*)ptr;
	void **head;
	int array_index = 0;
	int invalidptr, count;

	if (__ihook_processor_id)
		array_index = __ihook_processor_id();

	array_index &= (IMCACHE_LRU_COUNT - 1);

	/* check and strip the allocation magic from the header word
	 * (assumes the slab pointer is IMROUNDSIZE-aligned, so the low
	 * bits are free to carry the magic) */
	head = (void**)(lptr - sizeof(void*));
	linear = (size_t)head[0];
	invalidptr = ((linear & IMCACHE_CHECK_MAGIC) != IMCACHE_CHECK_MAGIC);
	head[0] = (void*)(linear & ~(IMROUNDSIZE - 1));

	assert( !invalidptr );
	if (invalidptr) return NULL;

	lptr -= sizeof(imemslab_t*);
	slab = *(imemslab_t**)lptr;

	if (cache) {
		/* if a cache was named, it must be the slab's owner */
		assert(cache == (imemcache_t*)slab->extra);
		if (cache != slab->extra) {
			return NULL;
		}
	}

	cache = (imemcache_t*)slab->extra;
	array = &cache->array[array_index];

	imutex_lock(&array->lock);

	if (array->avial >= array->limit) {

		/* array full: flush surplus entries back to the slab lists */
		imutex_lock(&cache->list_lock);

		for (count = 0; array->avial > array->batchcount; count++)
			imemcache_list_free(cache, array->entry[--array->avial]);

		imemcache_list_free(cache, ptr);

		imutex_unlock(&cache->list_lock);

		/* reclaim surplus fully-free slabs */
		if (cache->free_objects >= cache->free_limit) {
			if (cache->count_free > 1) {
				imutex_lock(&cache->list_lock);
				imemcache_drain_list(cache, 0, cache->count_free >> 1);
				imutex_unlock(&cache->list_lock);
			}
		}

	} else {

		array->entry[array->avial++] = ptr;
	}

	imutex_unlock(&array->lock);

	return cache;
}
1285 |
1286 |
1287 | static void imemcache_shrink(imemcache_t *cache)
1288 | {
1289 | imemlru_t *array;
1290 | int array_index = 0;
1291 |
1292 | array_index = 0;
1293 | array_index &= (IMCACHE_LRU_COUNT - 1);
1294 |
1295 | array = &cache->array[array_index];
1296 |
1297 | imutex_lock(&array->lock);
1298 | imutex_lock(&cache->list_lock);
1299 |
1300 | for (; array->avial > 0; )
1301 | imemcache_list_free(cache, array->entry[--array->avial]);
1302 |
1303 | imemcache_drain_list(cache, 0, -1);
1304 |
1305 | imutex_unlock(&cache->list_lock);
1306 | imutex_unlock(&array->lock);
1307 | }
1308 |
1309 |
1310 | static void *imemcache_gfp_alloc(struct IMEMGFP *gfp)
1311 | {
1312 | imemcache_t *cache = (imemcache_t*)gfp->extra;
1313 | char *lptr;
1314 | lptr = (char*)imemcache_alloc(cache);
1315 | assert(lptr);
1316 | return lptr;
1317 | }
1318 |
1319 | static void imemcache_gfp_free(struct IMEMGFP *gfp, void *ptr)
1320 | {
1321 | imemcache_t *cache = (imemcache_t*)gfp->extra;
1322 | char *lptr = (char*)ptr;;
1323 | imemcache_free(cache, lptr);
1324 | }
1325 |
/* allocate and initialize a cache for objects of obj_size; 'gfp'
 * supplies pages (NULL = default supplier). Also prepares the
 * cache's own page_supply gfp so this cache can itself act as a
 * page source for other caches (its page_size is this cache's
 * obj_size). */
static imemcache_t *imemcache_create(const char *name,
	size_t obj_size, struct IMEMGFP *gfp)
{
	imemcache_t *cache;
	size_t page_size;

	assert(imslab_inited && imem_gfp_inited);
	assert(obj_size >= sizeof(void*));

	page_size = (!gfp)? imem_gfp_default.page_size : gfp->page_size;
	assert(obj_size > 0 && obj_size <= page_size - IMROUNDSIZE);

	cache = (imemcache_t*)internal_malloc(0, sizeof(imemcache_t));
	assert(cache);

	imemcache_init_list(cache, gfp, obj_size);

	name = name? name : "NONAME";
	strncpy(cache->name, name, IMCACHE_NAMESIZE);

	if (cache->gfp) cache->gfp->refcnt++;
	/* created caches serialize via the per-cpu array locks only */
	cache->flags |= IMCACHE_FLAG_NOLOCK;
	cache->index = 0;

	ilist_init(&cache->queue);

	/* set up this cache as a page supplier for other caches */
	gfp = &cache->page_supply;
	gfp->page_size = cache->obj_size;
	gfp->refcnt = 0;
	gfp->alloc_page = imemcache_gfp_alloc;
	gfp->free_page = imemcache_gfp_free;
	gfp->extra = cache;
	gfp->pages_inuse = 0;
	gfp->pages_new = 0;
	gfp->pages_del = 0;

	return cache;
}
1364 |
/* destroy a cache created by imemcache_create: flush every per-cpu
 * array back to the slab lists, release all slabs, and free the
 * cache object itself */
static void imemcache_release(imemcache_t *cache)
{
	imemlru_t *array;
	void *entry[IMCACHE_ARRAYLIMIT];
	ilong n, i, j;

	for (i = 0; i < IMCACHE_LRU_COUNT; i++) {
		array = &cache->array[i];

		/* snapshot the array under its own lock, free outside it */
		imutex_lock(&array->lock);
		for (j = 0, n = 0; j < array->avial; j++, n++)
			entry[j] = array->entry[j];
		array->avial = 0;
		imutex_unlock(&array->lock);

		imutex_lock(&cache->list_lock);

		for (j = 0; j < n; j++)
			imemcache_list_free(NULL, entry[j]);

		imutex_unlock(&cache->list_lock);
	}

	imemcache_destroy_list(cache);

	if (cache->gfp)
		cache->gfp->refcnt--;

	internal_free(0, cache);
}
1395 |
1396 |
1397 |
1398 | /*====================================================================*/
1399 | /* IKMEM INTERFACE */
1400 | /*====================================================================*/
1401 | static imemcache_t **ikmem_array = NULL;
1402 | static imemcache_t **ikmem_lookup = NULL;
1403 |
1404 | static imutex_t ikmem_lock;
1405 | static int ikmem_count = 0;
1406 | static volatile int ikmem_inited = 0;
1407 |
1408 | static imemcache_t *ikmem_size_lookup1[257];
1409 | static imemcache_t *ikmem_size_lookup2[257];
1410 |
1411 | static size_t ikmem_inuse = 0;
1412 | static ilist_head ikmem_head;
1413 | static ilist_head ikmem_large_ptr;
1414 |
1415 | static size_t ikmem_water_mark = 0;
1416 |
1417 | static size_t ikmem_range_high = 0;
1418 | static size_t ikmem_range_low = 0;
1419 |
1420 | #ifndef IKMEM_DISABLE
1421 | #define IKMEM_DEFAULT_HOOK NULL
1422 | #else
1423 | #define IKMEM_DEFAULT_HOOK (&ikmem_std_hook);
1424 | #endif
1425 |
1426 | extern const ikmemhook_t ikmem_std_hook;
1427 | static const ikmemhook_t *ikmem_hook = IKMEM_DEFAULT_HOOK;
1428 |
1429 | int ikmem_boot_flags = 0;
1430 |
1431 | // invoked when IKMEM_ENABLE_BOOT is defined
1432 | void ikmem_boot_hook(int stage);
1433 |
1434 |
1435 | static int ikmem_append(size_t size, struct IMEMGFP *gfp)
1436 | {
1437 | imemcache_t *cache, **p1, **p2;
1438 | static int sizelimit = 0;
1439 | char name[64];
1440 | char nums[32];
1441 | int index, k;
1442 | size_t num;
1443 |
1444 | strncpy(name, "kmem_", 20);
1445 |
1446 | for (num = size, index = 0; ; ) {
1447 | nums[index++] = (char)((num % 10) + '0');
1448 | num /= 10;
1449 | if (num == 0) break;
1450 | }
1451 |
1452 | for (k = 0; k < index; k++)
1453 | name[5 + k] = nums[index - 1 - k];
1454 |
1455 | name[5 + index] = 0;
1456 |
1457 | cache = imemcache_create(name, size, gfp);
1458 |
1459 | #ifdef IKMEM_MINWASTE
1460 | cache->array[0].limit = 2;
1461 | cache->array[0].batchcount = 1;
1462 | cache->free_limit = 1;
1463 | #endif
1464 |
1465 | assert(cache);
1466 |
1467 | cache->flags |= IMCACHE_FLAG_SYSTEM;
1468 |
1469 | if (ikmem_count == 0) sizelimit = 0;
1470 | if (ikmem_count >= sizelimit || ikmem_array == 0) {
1471 | sizelimit = (sizelimit <= 0)? 8 : sizelimit * 2;
1472 | sizelimit = (sizelimit < ikmem_count)? ikmem_count + 2 : sizelimit;
1473 |
1474 | p1 = (imemcache_t**)internal_malloc(0,
1475 | sizeof(imemcache_t) * sizelimit * 2);
1476 | p2 = p1 + sizelimit;
1477 |
1478 | assert(p1);
1479 |
1480 | if (ikmem_array != NULL) {
1481 | memcpy(p1, ikmem_array, sizeof(imemcache_t) * ikmem_count);
1482 | memcpy(p2, ikmem_lookup, sizeof(imemcache_t) * ikmem_count);
1483 | internal_free(0, ikmem_array);
1484 | }
1485 | ikmem_array = p1;
1486 | ikmem_lookup = p2;
1487 | }
1488 |
1489 | cache->index = ikmem_count;
1490 |
1491 | ikmem_array[ikmem_count] = cache;
1492 | ikmem_lookup[ikmem_count++] = cache;
1493 |
1494 | for (index = ikmem_count - 1; index > 1; index = k) {
1495 | k = index - 1;
1496 | if (ikmem_lookup[index]->obj_size < ikmem_lookup[k]->obj_size)
1497 | break;
1498 | cache = ikmem_lookup[index];
1499 | ikmem_lookup[index] = ikmem_lookup[k];
1500 | ikmem_lookup[k] = cache;
1501 | }
1502 |
1503 | return ikmem_count - 1;
1504 | }
1505 |
1506 |
/* bytes wasted when a page of page_size is carved into objects of
 * obj_size (simulated on a throwaway cache layout); off-slab
 * caches also pay for the external descriptor */
static size_t ikmem_page_waste(size_t obj_size,
	size_t page_size)
{
	imemcache_t cache;
	size_t size, k;
	cache.obj_size = (size_t)obj_size;
	cache.page_size = (size_t)page_size;
	cache.unit_size = IMROUNDUP(obj_size + sizeof(void*));
	cache.flags = 0;
	imemcache_calculate(&cache);
	/* bytes actually consumed by objects */
	size = IMROUNDUP(cache.obj_size + sizeof(void*)) * cache.num;
	size = page_size - size;
	k = IMROUNDUP(sizeof(imemslab_t));
	if (IMCACHE_OFFSLAB(&cache)) size += k;
	return size;
}
1523 |
/* total waste of allocating obj_size objects through 'gfp',
 * accumulated recursively down the chain of cache-backed page
 * suppliers; NULL gfp means the default page supplier */
static size_t ikmem_gfp_waste(size_t obj_size, imemgfp_t *gfp)
{
	imemcache_t *cache;
	size_t waste;
	if (gfp == NULL) {
		return ikmem_page_waste(obj_size, imem_gfp_default.page_size);
	}
	cache = (imemcache_t*)gfp->extra;
	waste = ikmem_page_waste(obj_size, cache->obj_size) * cache->num;
	return waste + ikmem_gfp_waste(cache->obj_size, cache->gfp);
}
1535 |
1536 | static imemgfp_t *ikmem_choose_gfp(size_t obj_size, ilong *w)
1537 | {
1538 | size_t hiwater = IMROUNDUP(obj_size + sizeof(void*)) * 64;
1539 | size_t lowater = IMROUNDUP(obj_size + sizeof(void*)) * 8;
1540 | imemcache_t *cache;
1541 | size_t min, waste;
1542 | int index, i = -1;
1543 |
1544 | min = imem_gfp_default.page_size;
1545 | for (index = 0; index < ikmem_count; index++) {
1546 | cache = ikmem_array[index];
1547 | if (cache->obj_size < lowater || cache->obj_size > hiwater)
1548 | continue;
1549 | waste = ikmem_gfp_waste(obj_size, &ikmem_array[index]->page_supply);
1550 | if (waste < min) min = waste, i = index;
1551 | }
1552 | if (i < 0 || i >= ikmem_count) {
1553 | if (w) w[0] = (ilong)ikmem_gfp_waste(obj_size, NULL);
1554 | return NULL;
1555 | }
1556 | if (w) w[0] = (ilong)min;
1557 | return &ikmem_array[i]->page_supply;
1558 | }
1559 |
1560 | static void ikmem_insert(size_t objsize, int approxy)
1561 | {
1562 | imemgfp_t *gfp;
1563 | size_t optimize;
1564 | ilong index, waste;
1565 |
1566 | for (index = 0; index < ikmem_count; index++) {
1567 | optimize = ikmem_array[index]->obj_size;
1568 | if (optimize < objsize) continue;
1569 | if (optimize == objsize) break;
1570 | if (optimize - objsize <= (objsize >> 4) && approxy) break;
1571 | }
1572 |
1573 | if (index < ikmem_count)
1574 | return;
1575 |
1576 | gfp = ikmem_choose_gfp(objsize, &waste);
1577 | ikmem_append(objsize, gfp);
1578 | }
1579 |
1580 | imemcache_t *ikmem_choose_size(size_t size)
1581 | {
1582 | int index;
1583 | if (size >= imem_page_size) return NULL;
1584 | if (ikmem_count > 0) if (size > ikmem_lookup[0]->obj_size) return NULL;
1585 | for (index = ikmem_count - 1; index >= 0; index--) {
1586 | if (ikmem_lookup[index]->obj_size >= size)
1587 | return ikmem_lookup[index];
1588 | }
1589 | return NULL;
1590 | }
1591 |
1592 | static void ikmem_setup_caches(size_t *sizelist)
1593 | {
1594 | static int Z[] = { 24, 40, 48, 56, 80, 96, 112, 160, 192, 224, 330, -1 };
1595 | size_t fib1 = 8, fib2 = 16, f;
1596 | size_t *sizevec, *p;
1597 | size_t k = 0;
1598 | ilong limit, shift, count, i, j;
1599 | imemcache_t *cache;
1600 |
1601 | limit = 64;
1602 | sizevec = (size_t*)internal_malloc(0, sizeof(ilong) * limit);
1603 | assert(sizevec);
1604 |
1605 | #define ikmem_sizevec_append(size) do { \
1606 | if (count >= limit) { \
1607 | limit = limit * 2; \
1608 | p = (size_t*)internal_malloc(0, sizeof(ilong) * limit); \
1609 | assert(p); \
1610 | memcpy(p, sizevec, sizeof(ilong) * count); \
1611 | free(sizevec); \
1612 | sizevec = p; \
1613 | } \
1614 | sizevec[count++] = (size); \
1615 | } while (0)
1616 |
1617 | shift = imem_page_shift;
1618 | for (count = 0; shift >= 3; shift--) {
1619 | ikmem_sizevec_append(((size_t)1) << shift);
1620 | }
1621 |
1622 | #ifndef IKMEM_DISABLE_JESIZE
1623 | for (i = 0; Z[i] >= 0; i++) {
1624 | k = (size_t)Z[i];
1625 | ikmem_sizevec_append(k);
1626 | }
1627 | fib1 = 168;
1628 | fib2 = 272;
1629 | #endif
1630 |
1631 | for (; fib2 < (imem_gfp_default.page_size >> 2); ) {
1632 | f = fib1 + fib2;
1633 | fib1 = fib2;
1634 | fib2 = f;
1635 | #ifndef IKMEM_DISABLE_FIB
1636 | ikmem_sizevec_append(f);
1637 | #endif
1638 | }
1639 |
1640 | for (i = 0; i < count - 1; i++) {
1641 | for (j = i + 1; j < count; j++) {
1642 | if (sizevec[i] < sizevec[j])
1643 | k = sizevec[i],
1644 | sizevec[i] = sizevec[j],
1645 | sizevec[j] = k;
1646 | }
1647 | }
1648 |
1649 | for (i = 0; i < count; i++) ikmem_insert(sizevec[i], 1);
1650 | internal_free(0, sizevec);
1651 |
1652 | if (sizelist) {
1653 | for (; sizelist[0]; sizelist++)
1654 | ikmem_insert(*sizelist, 0);
1655 | }
1656 |
1657 | for (f = 4; f <= 1024; f += 4)
1658 | ikmem_size_lookup1[f >> 2] = ikmem_choose_size(f);
1659 |
1660 | for (f = 1024; f <= (256 << 10); f += 1024)
1661 | ikmem_size_lookup2[f >> 10] = ikmem_choose_size(f);
1662 |
1663 | for (i = 0; i < ikmem_count; i++) {
1664 | cache = ikmem_lookup[i];
1665 | cache->extra = (ilong*)internal_malloc(0, sizeof(ilong) * 8);
1666 | cache->index = i;
1667 | assert(cache->extra);
1668 | memset(cache->extra, 0, sizeof(ilong) * 8);
1669 | }
1670 |
1671 | ikmem_size_lookup1[0] = NULL;
1672 | ikmem_size_lookup2[0] = NULL;
1673 | }
1674 |
1675 | #define IKMEM_MUTEX_MAX 8
1676 |
1677 | #define IKMEM_MUTEX_INIT (-1)
1678 | #define IKMEM_MUTEX_ONCE (-2)
1679 | #define IKMEM_MUTEX_BOOT0 (-3)
1680 | #define IKMEM_MUTEX_BOOT1 (-4)
1681 |
/* Return one of IKMEM_MUTEX_MAX internal mutexes, lazily initializing
 * the whole pool exactly once in a thread-safe way.  'id' may be
 * negative (see the IKMEM_MUTEX_* constants); the slot is selected by
 * (id + 4) masked into the pool size. */
IMUTEX_TYPE* ikmem_mutex_once(int id)
{
	static IMUTEX_TYPE mutexs[IKMEM_MUTEX_MAX];
#if defined(IMUTEX_DISABLE)
	return &mutexs[0];
#elif defined(WIN32) || defined(_WIN32) || defined(_WIN64) || defined(WIN64)
	static volatile int mutex_inited = 0;
	if (mutex_inited == 0) {
		/* hand-rolled once: a 64-byte-aligned LONG flag claimed with
		   InterlockedExchange elects a single initializing thread */
		static DWORD align_dwords[20] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
		};
		unsigned char *align_ptr = (unsigned char*)align_dwords;
		LONG *once;
		LONG last = 0;
		while (((size_t)align_ptr) & 63) align_ptr++;
		once = (LONG*)align_ptr;
		last = InterlockedExchange(once, 1);
		if (last == 0) {
			/* winner: initialize every mutex then publish the flag */
			if (mutex_inited == 0) {
				int i = 0;
				for (i = 0; i < IKMEM_MUTEX_MAX; i++) {
					IMUTEX_INIT(&mutexs[i]);
				}
				mutex_inited = 1;
			}
		}
		/* losers wait until the winner finishes initialization */
		while (mutex_inited == 0) Sleep(1);
	}
	return &mutexs[(id + 4) & (IKMEM_MUTEX_MAX - 1)];
#elif defined(__unix) || defined(__unix__) || defined(__MACH__)
	/* double-checked locking guarded by a statically initialized
	   pthread mutex */
	static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
	static volatile int mutex_inited = 0;
	if (mutex_inited == 0) {
		pthread_mutex_lock(&mutex);
		if (mutex_inited == 0) {
			int i;
			for (i = 0; i < IKMEM_MUTEX_MAX; i++) {
				IMUTEX_INIT(&mutexs[i]);
			}
			mutex_inited = 1;
		}
		pthread_mutex_unlock(&mutex);
	}
	return &mutexs[(id + 4) & (IKMEM_MUTEX_MAX - 1)];
#else
	/* unknown single-threaded platform: one shared mutex slot */
	return &mutexs[0];
#endif
}
1730 |
/* Return a small pseudo "cpu id" in [0, 66] derived from the current
 * thread identity; 0 when threading is disabled or the platform is
 * unknown.  Installed as __ihook_processor_id by ikmem_init. */
int ikmem_current_cpu(void)
{
#if defined(IMUTEX_DISABLE)
	return 0;
#elif defined(WIN32) || defined(_WIN32) || defined(_WIN64) || defined(WIN64)
	return (int)(GetCurrentThreadId() % 67);
#elif defined(__unix) || defined(__unix__) || defined(__MACH__)
	/* NOTE(review): pthread_t is not guaranteed to be integral by
	   POSIX; this cast assumes it is (true on common platforms) */
	size_t self = (size_t)pthread_self();
	return (int)(self % 67);
#else
	return 0;
#endif
}
1744 |
/* Global one-time initialization of the kmem subsystem (idempotent;
 * double-checked under an internal mutex).
 *
 * page_shift - forwarded to imem_gfp_init (presumably log2 of the
 *              page size; 0 = default - confirm in imem_gfp_init)
 * pg_malloc  - forwarded to imem_gfp_init (presumably selects malloc
 *              as the page source - confirm there)
 * sz         - optional zero-terminated list of extra cache sizes
 */
void ikmem_init(int page_shift, int pg_malloc, size_t *sz)
{
	IMUTEX_TYPE *mutex;
	size_t psize;
	ilong limit;

	/* fast path: already initialized */
	if (ikmem_inited != 0)
		return;

	mutex = ikmem_mutex_once(IKMEM_MUTEX_INIT);

	IMUTEX_LOCK(mutex);

	/* re-check under the lock (double-checked locking) */
	if (ikmem_inited == 0) {
		imem_gfp_init(page_shift, pg_malloc);
		imslab_set_init();

		ikmem_lookup = NULL;
		ikmem_array = NULL;
		ikmem_count = 0;
		ikmem_inuse = 0;

		/* first system cache: the largest object that still fits in
		   one page together with its per-object pointer word */
		psize = imem_gfp_default.page_size - IMROUNDUP(sizeof(void*));
		ikmem_append(psize, 0);

		/* NOTE(review): 'limit' is computed but never used below -
		   looks like dead code left over from an earlier version */
		limit = imem_page_shift - 4;
		limit = limit < 10? 10 : limit;

		ikmem_setup_caches(sz);

		imutex_init(&ikmem_lock);
		ilist_init(&ikmem_head);
		ilist_init(&ikmem_large_ptr);

		/* address-range filter used by ikmem_core_ptrsize; starts
		   inverted (high=0, low=max) and widens on each allocation */
		ikmem_range_high = (size_t)0;
		ikmem_range_low = ~((size_t)0);

#if defined(IMUTEX_DISABLE)
		__ihook_processor_id = NULL;
#else
		__ihook_processor_id = ikmem_current_cpu;
#endif

		ikmem_inited = 1;
	}

	IMUTEX_UNLOCK(mutex);

#ifdef IKMEM_ENABLE_BOOT
	/* run boot-hook stage 0 exactly once after initialization */
	if ((ikmem_boot_flags & 1) == 0) {
		mutex = ikmem_mutex_once(IKMEM_MUTEX_BOOT0);
		IMUTEX_LOCK(mutex);
		if ((ikmem_boot_flags & 1) == 0) {
			ikmem_boot_hook(0);
			ikmem_boot_flags |= 1;
		}
		IMUTEX_UNLOCK(mutex);
	}
#endif
}
1805 |
/* Tear the kmem subsystem down: run the shutdown boot hook, release
 * user-created caches, then the system caches, reclaim any still-live
 * tracked large blocks, and finally destroy the slab and page layers.
 * Must not run concurrently with allocation/free. */
void ikmem_destroy(void)
{
	imemcache_t *cache;
	ilist_head *p, *next;
	ilong index;

	if (ikmem_inited == 0) {
		return;
	}

#ifdef IKMEM_ENABLE_BOOT
	/* shutdown notification (hook stage 1), at most once */
	if (ikmem_boot_flags) {
		IMUTEX_TYPE *mutex = ikmem_mutex_once(IKMEM_MUTEX_BOOT0);
		IMUTEX_LOCK(mutex);
		if (ikmem_boot_flags) {
			ikmem_boot_hook(1);
			ikmem_boot_flags = 0;
		}
		IMUTEX_UNLOCK(mutex);
	}
#endif

	/* user-created caches (the ikmem_create queue) */
	imutex_lock(&ikmem_lock);
	for (p = ikmem_head.next; p != &ikmem_head; ) {
		cache = ILIST_ENTRY(p, imemcache_t, queue);
		p = p->next;		/* advance before the node is unlinked */
		ilist_del(&cache->queue);
		imemcache_release(cache);
	}

	imutex_unlock(&ikmem_lock);

	/* system caches, highest index first (reverse creation order) */
	for (index = ikmem_count - 1; index >= 0; index--) {
		cache = ikmem_array[index];
		if (cache->extra) {
			internal_free(0, cache->extra);
			cache->extra = NULL;
		}
		imemcache_release(cache);
	}

	/* ikmem_array and ikmem_lookup share one allocation (one free) */
	internal_free(0, ikmem_array);
	ikmem_lookup = NULL;
	ikmem_array = NULL;
	ikmem_count = 0;

	imutex_lock(&ikmem_lock);

	/* reclaim large allocations that were never freed by the user */
	for (p = ikmem_large_ptr.next; p != &ikmem_large_ptr; ) {
		next = p->next;
		ilist_del(p);
		internal_free(0, p);	/* p is the block's header address */
		p = next;
	}

	imutex_unlock(&ikmem_lock);
	imutex_destroy(&ikmem_lock);

	imslab_set_destroy();
	imem_gfp_destroy();

	ikmem_inited = 0;
}
1869 |
1870 |
1871 | /*====================================================================*/
1872 | /* IKMEM CORE */
1873 | /*====================================================================*/
1874 | #define IKMEM_LARGE_HEAD \
1875 | IMROUNDUP(sizeof(ilist_head) + sizeof(void*) + sizeof(ilong))
1876 |
1877 | #define IKMEM_STAT(cache, id) (((ilong*)((cache)->extra))[id])
1878 |
1879 |
1880 | void ikmem_once_init(void)
1881 | {
1882 | IMUTEX_TYPE *mutex = ikmem_mutex_once(IKMEM_MUTEX_ONCE);
1883 | IMUTEX_LOCK(mutex);
1884 | if (ikmem_inited == 0) {
1885 | ikmem_init(0, 0, NULL);
1886 | }
1887 | IMUTEX_UNLOCK(mutex);
1888 | }
1889 |
1890 | void* ikmem_core_malloc(size_t size);
1891 | void* ikmem_core_realloc(void *ptr, size_t size);
1892 | void ikmem_core_free(void *ptr);
1893 | void ikmem_core_shrink(void);
1894 | size_t ikmem_core_ptrsize(const void *ptr);
1895 |
1896 | size_t ikmem_core_csize(int index)
1897 | {
1898 | if (index < 0) return ikmem_count;
1899 | if (index >= ikmem_count) return 0;
1900 | return ikmem_lookup[index]->obj_size;
1901 | }
1902 |
/* Core allocation: small requests are served from a size-class cache
 * (direct lookup tables first, then a search over the sorted caches);
 * anything with no cache or above the watermark is allocated from the
 * internal allocator with a list-linked header so it can be tracked. */
void* ikmem_core_malloc(size_t size)
{
	imemcache_t *cache = NULL;
	size_t round;
	ilist_head *p;
	char *lptr;

	if (ikmem_inited == 0) ikmem_once_init();

	assert(size > 0 && size <= (((size_t)1) << 30));
	/* round up to 4 bytes and try the direct lookup tables */
	round = (size + 3) & ~((size_t)3);

	if (round <= 1024) {
		cache = ikmem_size_lookup1[round >> 2];
	} else {
		round = (size + 1023) & ~((size_t)1023);
		if (round < (256 << 10))
			cache = ikmem_size_lookup2[round >> 10];
	}

	/* fall back to scanning the sorted cache list */
	if (cache == NULL)
		cache = ikmem_choose_size(size);

	/* requests above the watermark bypass the caches entirely */
	if (ikmem_water_mark > 0 && size > ikmem_water_mark)
		cache = NULL;

	if (cache == NULL) {
		/* large block layout: [list node ... | size | NULL | user...]
		   - the NULL where a slab tag would live marks it as a large
		   allocation for ikmem_core_free / ikmem_core_ptrsize */
		lptr = (char*)internal_malloc(0, IKMEM_LARGE_HEAD + size);
		if (lptr == NULL) return NULL;

		p = (ilist_head*)lptr;
		lptr += IKMEM_LARGE_HEAD;
		*(void**)(lptr - sizeof(void*)) = NULL;
		*(ilong*)(lptr - sizeof(void*) - sizeof(ilong)) = size;

		/* track it so ikmem_destroy can reclaim leaked blocks */
		imutex_lock(&ikmem_lock);
		ilist_add(p, &ikmem_large_ptr);
		imutex_unlock(&ikmem_lock);

	} else {
		lptr = (char*)imemcache_alloc(cache);
		if (lptr == NULL) {
			/* under memory pressure: return free pages and retry */
			ikmem_core_shrink();
			lptr = (char*)imemcache_alloc(cache);
			if (lptr == NULL) {
				return NULL;
			}
		}
		if (cache->extra) {
			IKMEM_STAT(cache, 0) += 1;	/* objects in use */
			IKMEM_STAT(cache, 1) += 1;	/* total allocations */
		}
		ikmem_inuse += cache->obj_size;
	}

	/* widen the address-range filter used by ikmem_core_ptrsize */
	if (ikmem_range_high < (size_t)lptr)
		ikmem_range_high = (size_t)lptr;
	if (ikmem_range_low > (size_t)lptr)
		ikmem_range_low = (size_t)lptr;

	return lptr;
}
1965 |
/* Core free: a NULL word just before the pointer marks a large block
 * (unlink from the tracking list, free the whole header); otherwise
 * that word is a tagged slab pointer and the object is returned to
 * its owning cache. */
void ikmem_core_free(void *ptr)
{
	imemcache_t *cache = NULL;
	ilist_head *p;
	char *lptr = (char*)ptr;

	if (ikmem_inited == 0) ikmem_once_init();
	if (ptr == NULL) return;

	if (*(void**)(lptr - sizeof(void*)) == NULL) {
		/* large block: rewind to the header written by
		   ikmem_core_malloc and unlink it */
		lptr -= IKMEM_LARGE_HEAD;
		p = (ilist_head*)lptr;
		imutex_lock(&ikmem_lock);
		ilist_del(p);
		imutex_unlock(&ikmem_lock);
		internal_free(0, lptr);
	} else {
		/* slab object: imemcache_free derives the owning cache from
		   the per-object tag (hence the NULL cache argument) */
		cache = (imemcache_t*)imemcache_free(NULL, ptr);
		if (cache == NULL) return;
		if (cache->extra) {
			IKMEM_STAT(cache, 0) -= 1;	/* objects in use */
			IKMEM_STAT(cache, 2) += 1;	/* total frees */
		}
		ikmem_inuse -= cache->obj_size;
	}
}
1992 |
/* Return the usable size of a pointer obtained from ikmem_core_malloc,
 * or 0 when it cannot be recognized as ours: outside the recorded
 * address range, failed magic check, or outside its slab's memory. */
size_t ikmem_core_ptrsize(const void *ptr)
{
	size_t size, linear;
	imemcache_t *cache;
	imemslab_t *slab;
	const char *lptr = (const char*)ptr;
	int invalidptr;

	if (ikmem_inited == 0) ikmem_once_init();

	/* cheap filter: ikmem never handed out an address outside the
	   [ikmem_range_low, ikmem_range_high] window */
	if ((size_t)lptr < ikmem_range_low ||
		(size_t)lptr > ikmem_range_high)
		return 0;

	if (*(const void**)(lptr - sizeof(void*)) == NULL) {
		/* large block: the exact size is stored in the header */
		size = *(const size_t*)(lptr - sizeof(void*) - sizeof(ilong));
	} else {

		/* slab object: the word before the pointer holds the slab
		   address with IMCACHE_CHECK_MAGIC in its low bits */
		linear = (size_t)(*(const void**)(lptr - sizeof(void*)));
		invalidptr = ((linear & IMCACHE_CHECK_MAGIC) != IMCACHE_CHECK_MAGIC);

		if ( invalidptr ) return 0;

		/* strip the tag bits to recover the slab descriptor */
		slab = (imemslab_t*)(linear & ~(IMROUNDSIZE - 1));
		lptr -= sizeof(void*);

		/* the unit (tag word included) must lie inside the slab */
		if (lptr >= (const char*)slab->membase + slab->memsize ||
			lptr < (const char*)slab->membase)
			return 0;

		cache = (imemcache_t*)slab->extra;
		size = cache->obj_size;
	}

	return size;
}
2029 |
/* Core realloc implemented as malloc + copy + free.  The old block is
 * kept as-is when it already fits and is oversized by less than about
 * a third.  NOTE(review): unlike C realloc, the original block is
 * freed even when the new allocation fails (NULL is returned and the
 * caller's pointer is invalid) - ikmem_std_realloc deliberately
 * shares the same convention. */
void* ikmem_core_realloc(void *ptr, size_t size)
{
	size_t oldsize;
	void *newptr;

	if (ikmem_inited == 0) ikmem_once_init();

	if (ptr == NULL) {
		if (size == 0) return NULL;
		return ikmem_core_malloc(size);
	}

	oldsize = ikmem_core_ptrsize(ptr);

	if (size == 0) {
		ikmem_core_free(ptr);
		return NULL;
	}

	/* a pointer we did not allocate yields oldsize == 0 */
	assert(oldsize > 0);

	if (oldsize >= size) {
		/* reuse the block when the wasted space stays under ~33% */
		if (oldsize * 3 < size * 4)
			return ptr;
	}

	newptr = ikmem_core_malloc(size);

	if (newptr == NULL) {
		ikmem_core_free(ptr);
		return NULL;
	}

	memcpy(newptr, ptr, oldsize < size? oldsize : size);
	ikmem_core_free(ptr);

	return newptr;
}
2068 |
2069 | void ikmem_core_shrink(void)
2070 | {
2071 | imemcache_t *cache;
2072 | int index;
2073 |
2074 | if (ikmem_inited == 0) ikmem_once_init();
2075 |
2076 | for (index = ikmem_count - 1; index >= 0; index--) {
2077 | cache = ikmem_lookup[index];
2078 | imemcache_shrink(cache);
2079 | }
2080 | }
2081 |
2082 | imemcache_t *ikmem_core_get(int id)
2083 | {
2084 | if (id < 0 || id >= ikmem_count) return NULL;
2085 | return ikmem_lookup[id];
2086 | }
2087 |
/* Run boot-hook stage 2 exactly once (double-checked under an
 * internal mutex); a no-op unless IKMEM_ENABLE_BOOT is defined. */
void ikmem_boot_once(void)
{
#ifdef IKMEM_ENABLE_BOOT
	if ((ikmem_boot_flags & 2) == 0) {
		IMUTEX_TYPE *mutex = ikmem_mutex_once(IKMEM_MUTEX_BOOT1);
		IMUTEX_LOCK(mutex);
		if ((ikmem_boot_flags & 2) == 0) {
			ikmem_boot_hook(2);
			ikmem_boot_flags |= 2;
		}
		IMUTEX_UNLOCK(mutex);
	}
#endif
}
2102 |
2103 | void* ikmem_malloc(size_t size)
2104 | {
2105 | #ifdef IKMEM_ENABLE_BOOT
2106 | if ((ikmem_boot_flags & 2) == 0) ikmem_boot_once();
2107 | #endif
2108 | if (ikmem_hook) {
2109 | return ikmem_hook->kmem_malloc_fn(size);
2110 | }
2111 | return ikmem_core_malloc(size);
2112 | }
2113 |
2114 | void* ikmem_realloc(void *ptr, size_t size)
2115 | {
2116 | #ifdef IKMEM_ENABLE_BOOT
2117 | if ((ikmem_boot_flags & 2) == 0) ikmem_boot_once();
2118 | #endif
2119 | if (ikmem_hook) {
2120 | return ikmem_hook->kmem_realloc_fn(ptr, size);
2121 | }
2122 | return ikmem_core_realloc(ptr, size);
2123 | }
2124 |
2125 | void ikmem_free(void *ptr)
2126 | {
2127 | if (ptr == NULL) return;
2128 | #ifdef IKMEM_ENABLE_BOOT
2129 | if ((ikmem_boot_flags & 2) == 0) ikmem_boot_once();
2130 | #endif
2131 | if (ikmem_hook) {
2132 | ikmem_hook->kmem_free_fn(ptr);
2133 | return;
2134 | }
2135 | ikmem_core_free(ptr);
2136 | }
2137 |
2138 | void ikmem_shrink(void)
2139 | {
2140 | if (ikmem_hook) {
2141 | if (ikmem_hook->kmem_shrink_fn) {
2142 | ikmem_hook->kmem_shrink_fn();
2143 | }
2144 | return;
2145 | }
2146 | ikmem_core_shrink();
2147 | }
2148 |
2149 | size_t ikmem_ptr_size(const void *ptr)
2150 | {
2151 | if (ikmem_hook) {
2152 | if (ikmem_hook->kmem_ptr_size_fn)
2153 | return ikmem_hook->kmem_ptr_size_fn(ptr);
2154 | return 0;
2155 | }
2156 | return ikmem_core_ptrsize(ptr);
2157 | }
2158 |
2159 | static imemcache_t* ikmem_search(const char *name, int needlock)
2160 | {
2161 | imemcache_t *cache, *result;
2162 | ilist_head *head;
2163 | ilong index;
2164 |
2165 | for (index = 0; index < ikmem_count; index++) {
2166 | cache = ikmem_array[index];
2167 | if (strcmp(cache->name, name) == 0) return cache;
2168 | }
2169 | result = NULL;
2170 | if (needlock) imutex_lock(&ikmem_lock);
2171 | for (head = ikmem_head.next; head != &ikmem_head; head = head->next) {
2172 | cache = ilist_entry(head, imemcache_t, queue);
2173 | if (strcmp(cache->name, name) == 0) {
2174 | result = cache;
2175 | break;
2176 | }
2177 | }
2178 | if (needlock) imutex_unlock(&ikmem_lock);
2179 | return result;
2180 | }
2181 |
2182 |
/* Set the large-allocation watermark in bytes: requests larger than
 * it bypass the slab caches (see ikmem_core_malloc).  Zero disables
 * the threshold. */
void ikmem_option(size_t watermark)
{
	ikmem_water_mark = watermark;
}
2187 |
2188 |
/* Look a cache up by name; takes the global lock while scanning the
 * user-created cache queue.  NULL when not found. */
imemcache_t *ikmem_get(const char *name)
{
	return ikmem_search(name, 1);
}
2193 |
2194 | ilong ikmem_page_info(ilong *pg_inuse, ilong *pg_new, ilong *pg_del)
2195 | {
2196 | if (pg_inuse) pg_inuse[0] = imem_gfp_default.pages_inuse;
2197 | if (pg_new) pg_new[0] = imem_gfp_default.pages_new;
2198 | if (pg_del) pg_del[0] = imem_gfp_default.pages_del;
2199 | return imem_page_size;
2200 | }
2201 |
2202 | ilong ikmem_cache_info(int id, int *inuse, int *cnew, int *cdel, int *cfree)
2203 | {
2204 | imemcache_t *cache;
2205 | ilong nfree, i;
2206 | if (id < 0 || id >= ikmem_count) return -1;
2207 | cache = ikmem_array[id];
2208 | nfree = cache->free_objects;
2209 | for (i = 0; i < IMCACHE_LRU_COUNT; i++)
2210 | nfree += cache->array[i].avial;
2211 | if (cache->extra) {
2212 | if (inuse) inuse[0] = (int)IKMEM_STAT(cache, 0);
2213 | if (cnew) cnew[0] = (int)IKMEM_STAT(cache, 1);
2214 | if (cdel) cdel[0] = (int)IKMEM_STAT(cache, 2);
2215 | }
2216 | if (cfree) cfree[0] = (int)nfree;
2217 | return cache->obj_size;
2218 | }
2219 |
2220 | ilong ikmem_waste_info(ilong *kmem_inuse, ilong *total_mem)
2221 | {
2222 | size_t totalmem;
2223 | totalmem = imem_page_size * imem_gfp_default.pages_inuse;
2224 | if (kmem_inuse) kmem_inuse[0] = ikmem_inuse;
2225 | if (total_mem) total_mem[0] = totalmem;
2226 | return ikmem_inuse;
2227 | }
2228 |
2229 |
2230 |
2231 | #ifndef IKMEM_CACHE_TYPE
2232 | #define IKMEM_CACHE_TYPE
2233 | typedef void* ikmem_cache_t;
2234 | #endif
2235 |
2236 | imemcache_t *ikmem_create(const char *name, size_t size)
2237 | {
2238 | imemcache_t *cache;
2239 | imemgfp_t *gfp;
2240 |
2241 | if (ikmem_inited == 0) ikmem_init(0, 0, 0);
2242 | if (size >= imem_page_size) return NULL;
2243 |
2244 | gfp = ikmem_choose_gfp(size, NULL);
2245 | imutex_lock(&ikmem_lock);
2246 | cache = ikmem_search(name, 0);
2247 | if (cache != NULL) {
2248 | imutex_unlock(&ikmem_lock);
2249 | return NULL;
2250 | }
2251 | cache = imemcache_create(name, size, gfp);
2252 | if (cache == NULL) {
2253 | imutex_unlock(&ikmem_lock);
2254 | return NULL;
2255 | }
2256 | cache->flags |= IMCACHE_FLAG_ONQUEUE;
2257 | cache->user = (ilong)gfp;
2258 | ilist_add_tail(&ikmem_head, &cache->queue);
2259 | imutex_unlock(&ikmem_lock);
2260 |
2261 | return cache;
2262 | }
2263 |
2264 | void ikmem_delete(imemcache_t *cache)
2265 | {
2266 | assert(IMCACHE_SYSTEM(cache) == 0);
2267 | assert(IMCACHE_ONQUEUE(cache));
2268 | if (IMCACHE_SYSTEM(cache)) return;
2269 | if (IMCACHE_ONQUEUE(cache) == 0) return;
2270 | imutex_lock(&ikmem_lock);
2271 | ilist_del(&cache->queue);
2272 | imutex_unlock(&ikmem_lock);
2273 | imemcache_release(cache);
2274 | }
2275 |
2276 | void *ikmem_cache_alloc(imemcache_t *cache)
2277 | {
2278 | char *ptr;
2279 | assert(cache);
2280 | ptr = (char*)imemcache_alloc(cache);
2281 | return ptr;
2282 | }
2283 |
/* Return an object obtained from ikmem_cache_alloc() to its cache. */
void ikmem_cache_free(imemcache_t *cache, void *ptr)
{
	imemcache_free(cache, ptr);
}
2288 |
/* Give the cache's unused pages back to its page supplier. */
void ikmem_cache_shrink(imemcache_t *cache)
{
	imemcache_shrink(cache);
}
2293 |
2294 |
2295 | /*====================================================================*/
2296 | /* IKMEM HOOKING */
2297 | /*====================================================================*/
/* Hook-mode malloc: prepend a one-word header that stores the rounded
 * request size with bit 0 set as a validity tag. */
static void* ikmem_std_malloc(size_t size)
{
	size_t rounded = (size + 3) & ~((size_t)3);
	char *base = (char*)internal_malloc(0, rounded + sizeof(void*));
	if (base == NULL)
		return NULL;
	*((size_t*)base) = (rounded | 1);
	return base + sizeof(void*);
}
2308 |
/* Hook-mode free: validate the size tag written by ikmem_std_malloc()
 * (bit 0 must be set) and release the block.
 *
 * Fix: the tag-clearing store previously wrote through 'ptr' (the
 * first user word) instead of 'lptr' (the header), so the tag stayed
 * valid and a double free sailed past the check below.  Clearing the
 * header makes the next free of the same pointer trip the assert.
 */
static void ikmem_std_free(void *ptr)
{
	char *lptr = ((char*)ptr) - sizeof(void*);
	size_t size = *((size_t*)lptr);
	int invalidptr;
	invalidptr = (size & 1) != 1;
	if (invalidptr) {
		assert(!invalidptr);
		return;
	}
	/* invalidate the header tag to catch double frees */
	*((size_t*)lptr) = 0;
	internal_free(0, lptr);
}
2322 |
/* Hook-mode ptr-size: read the rounded size back from the header;
 * returns 0 (and asserts) when the validity tag is missing. */
static size_t ikmem_std_ptr_size(const void *ptr)
{
	const size_t *header = (const size_t*)(((const char*)ptr) - sizeof(void*));
	size_t tagged = *header;
	if ((tagged & 1) == 0) {
		assert((tagged & 1) == 1);
		return 0;
	}
	return tagged & (~((size_t)3));
}
2333 |
/* Hook-mode realloc built on the std malloc/free pair.  Mirrors
 * ikmem_core_realloc: the block is kept when it already fits and is
 * oversized by less than about a third; like the core variant, the
 * old block is freed even when the new allocation fails. */
static void* ikmem_std_realloc(void *ptr, size_t size)
{
	size_t oldsize;
	void *fresh;

	if (ptr == NULL)
		return ikmem_std_malloc(size);

	oldsize = ikmem_std_ptr_size(ptr);

	if (size == 0) {
		ikmem_std_free(ptr);
		return NULL;
	}

	if (oldsize >= size && oldsize * 3 < size * 4)
		return ptr;

	fresh = ikmem_std_malloc(size);
	if (fresh == NULL) {
		ikmem_std_free(ptr);
		return NULL;
	}

	memcpy(fresh, ptr, (oldsize < size)? oldsize : size);
	ikmem_std_free(ptr);

	return fresh;
}
2363 |
/* Built-in hook table: header-tagged allocation straight on top of
 * the internal allocator; no shrink callback. */
const ikmemhook_t ikmem_std_hook =
{
	ikmem_std_malloc,
	ikmem_std_free,
	ikmem_std_realloc,
	ikmem_std_ptr_size,
	NULL,
};
2372 |
2373 | int ikmem_hook_install(const ikmemhook_t *hook)
2374 | {
2375 | if (ikmem_inited) return -1;
2376 | if (hook == NULL) {
2377 | ikmem_hook = NULL;
2378 | return 0;
2379 | }
2380 | if (hook == (const ikmemhook_t*)(~((size_t)0))) {
2381 | ikmem_hook = &ikmem_std_hook;
2382 | return 0;
2383 | }
2384 | if (hook->kmem_malloc_fn == ikmem_malloc)
2385 | return -1;
2386 | if (hook->kmem_free_fn == ikmem_free)
2387 | return -1;
2388 | if (hook->kmem_realloc_fn == ikmem_realloc)
2389 | return -1;
2390 | if (hook->kmem_ptr_size_fn == ikmem_ptr_size)
2391 | return -1;
2392 | if (hook->kmem_shrink_fn == ikmem_shrink)
2393 | return -1;
2394 | ikmem_hook = hook;
2395 | return 0;
2396 | }
2397 |
/* Return a built-in hook table by id: 0 yields NULL (no hook); any
 * other id yields the std hook. */
const ikmemhook_t *ikmem_hook_get(int id)
{
	if (id == 0) return NULL;
	return &ikmem_std_hook;
}
2403 |
2404 |
2405 | /*====================================================================*/
2406 | /* IVECTOR / IMEMNODE MANAGEMENT */
2407 | /*====================================================================*/
2408 | static void* ikmem_allocator_malloc(struct IALLOCATOR *, size_t);
2409 | static void ikmem_allocator_free(struct IALLOCATOR *, void *);
2410 |
/* IALLOCATOR adapter that routes generic allocations through ikmem */
struct IALLOCATOR ikmem_allocator =
{
	ikmem_allocator_malloc,
	ikmem_allocator_free,
	0, 0
};
2417 |
2418 |
/* IALLOCATOR alloc callback: the allocator handle is unused; the
 * request is delegated to ikmem_malloc. */
static void* ikmem_allocator_malloc(struct IALLOCATOR *a, size_t len)
{
	(void)a;	/* unused */
	return ikmem_malloc(len);
}
2424 |
/* IALLOCATOR free callback: the allocator handle is unused; the
 * pointer is handed back to ikmem_free. */
static void ikmem_allocator_free(struct IALLOCATOR *a, void *ptr)
{
	assert(ptr);
	(void)a;	/* unused */
	ikmem_free(ptr);
}
2431 |
2432 |
2433 | ivector_t *iv_create(void)
2434 | {
2435 | ivector_t *vec;
2436 | vec = (ivector_t*)ikmem_malloc(sizeof(ivector_t));
2437 | if (vec == NULL) return NULL;
2438 | iv_init(vec, &ikmem_allocator);
2439 | return vec;
2440 | }
2441 |
2442 | void iv_delete(ivector_t *vec)
2443 | {
2444 | assert(vec);
2445 | iv_destroy(vec);
2446 | ikmem_free(vec);
2447 | }
2448 |
2449 | imemnode_t *imnode_create(ilong nodesize, int grow_limit)
2450 | {
2451 | imemnode_t *mnode;
2452 | mnode = (imemnode_t*)ikmem_malloc(sizeof(imemnode_t));
2453 | if (mnode == NULL) return NULL;
2454 | imnode_init(mnode, nodesize, &ikmem_allocator);
2455 | mnode->grow_limit = grow_limit;
2456 | return mnode;
2457 | }
2458 |
2459 | void imnode_delete(imemnode_t *mnode)
2460 | {
2461 | assert(mnode);
2462 | imnode_destroy(mnode);
2463 | ikmem_free(mnode);
2464 | }
2465 |
2466 |
2467 |
2468 |
--------------------------------------------------------------------------------
/imemslab.h:
--------------------------------------------------------------------------------
1 | /**********************************************************************
2 | *
3 | * imemslab.h - slab tree allocation algorithm
4 | * skywind3000 (at) gmail.com, 2006-2016
5 | *
6 | * - application layer slab allocator implementation
7 | * - unit interval time cost: almost speed up 500% - 1200% vs malloc
8 | * - optional page supplier: with the "GFP-Tree" algorithm
9 | * - memory recycle: automatic give memory back to os to avoid wasting
10 | * - platform independence
11 | *
12 | * for the basic information about slab algorithm, please see:
13 | * The Slab Allocator: An Object Caching Kernel
14 | * Memory Allocator (Jeff Bonwick, Sun Microsystems, 1994)
15 | * with the URL below:
16 | * http://citeseer.ist.psu.edu/bonwick94slab.html
17 | *
18 | **********************************************************************/
19 |
20 | #ifndef _IMEMSLAB_H_
21 | #define _IMEMSLAB_H_
22 |
23 | #ifdef HAVE_CONFIG_H
24 | #include "config.h"
25 | #endif
26 |
27 | #include <stddef.h>
28 | #include <stdlib.h>
29 | #include <string.h>
30 |
31 |
32 | /*====================================================================*/
33 | /* IULONG/ILONG (ensure sizeof(iulong) == sizeof(void*)) */
34 | /*====================================================================*/
35 | #ifndef __IULONG_DEFINED
36 | #define __IULONG_DEFINED
37 | typedef ptrdiff_t ilong;
38 | typedef size_t iulong;
39 | #endif
40 |
41 | #ifdef __cplusplus
42 | extern "C" {
43 | #endif
44 |
45 |
46 | /*====================================================================*/
47 | /* IALLOCATOR */
48 | /*====================================================================*/
/* Pluggable allocator interface: alloc/free callbacks plus user data.
   NOTE(review): imemslab.c calls internal_malloc/internal_free with a
   NULL allocator - presumably that falls back to plain malloc/free;
   confirm in their definitions. */
struct IALLOCATOR
{
	void *(*alloc)(struct IALLOCATOR *, size_t);  /* allocation callback */
	void (*free)(struct IALLOCATOR *, void *);    /* release callback */
	void *udata;                                  /* user pointer */
	ilong reserved;                               /* reserved field */
};
56 |
57 | void* internal_malloc(struct IALLOCATOR *allocator, size_t size);
58 | void internal_free(struct IALLOCATOR *allocator, void *ptr);
59 |
60 |
61 | /*====================================================================*/
62 | /* IVECTOR */
63 | /*====================================================================*/
/* Growable byte buffer backed by an IALLOCATOR. */
struct IVECTOR
{
	unsigned char *data;            /* buffer base */
	size_t size;                    /* bytes in use */
	size_t block;                   /* allocation granularity/capacity - TODO confirm */
	struct IALLOCATOR *allocator;   /* backing allocator */
};
71 |
72 | void iv_init(struct IVECTOR *v, struct IALLOCATOR *allocator);
73 | void iv_destroy(struct IVECTOR *v);
74 | int iv_resize(struct IVECTOR *v, size_t newsize);
75 | int iv_reserve(struct IVECTOR *v, size_t newsize);
76 |
77 | size_t iv_pop(struct IVECTOR *v, void *data, size_t size);
78 | int iv_push(struct IVECTOR *v, const void *data, size_t size);
79 | int iv_insert(struct IVECTOR *v, size_t pos, const void *data, size_t size);
80 | int iv_erase(struct IVECTOR *v, size_t pos, size_t size);
81 |
82 | #define iv_size(v) ((v)->size)
83 | #define iv_data(v) ((v)->data)
84 |
85 | #define iv_index(v, type, index) (((type*)iv_data(v))[index])
86 |
87 | #define IMROUNDSHIFT 3
88 | #define IMROUNDSIZE (((size_t)1) << IMROUNDSHIFT)
89 | #define IMROUNDUP(s) (((s) + IMROUNDSIZE - 1) & ~(IMROUNDSIZE - 1))
90 |
91 |
92 | /*====================================================================*/
93 | /* IMEMNODE */
94 | /*====================================================================*/
95 | struct IMEMNODE
96 | {
97 | struct IALLOCATOR *allocator; /* memory allocator */
98 |
99 | struct IVECTOR vprev; /* prev node link vector */
100 | struct IVECTOR vnext; /* next node link vector */
101 | struct IVECTOR vnode; /* node information data */
102 | struct IVECTOR vdata; /* node data buffer vector */
103 | struct IVECTOR vmode; /* mode of allocation */
104 | ilong *mprev; /* prev node array */
105 | ilong *mnext; /* next node array */
106 | ilong *mnode; /* node info array */
107 | void **mdata; /* node data array */
108 | ilong *mmode; /* node mode array */
109 | ilong *extra; /* extra user data */
110 | ilong node_free; /* number of free nodes */
111 | ilong node_used; /* number of allocated */
112 | ilong node_max; /* number of all nodes */
113 | ilong grow_limit; /* limit of growing */
114 |
115 | ilong node_size; /* node data fixed size */
116 | ilong node_shift; /* node data size shift */
117 |
118 | struct IVECTOR vmem; /* mem-pages in the pool */
119 | char **mmem; /* mem-pages array */
120 | ilong mem_max; /* max num of memory pages */
121 | ilong mem_count; /* number of mem-pages */
122 |
123 | ilong list_open; /* the entry of open-list */
124 | ilong list_close; /* the entry of close-list */
125 | ilong total_mem; /* total memory size */
126 | };
127 |
128 |
129 | void imnode_init(struct IMEMNODE *mn, ilong nodesize, struct IALLOCATOR *ac);
130 | void imnode_destroy(struct IMEMNODE *mnode);
131 | ilong imnode_new(struct IMEMNODE *mnode);
132 | void imnode_del(struct IMEMNODE *mnode, ilong index);
133 | ilong imnode_head(const struct IMEMNODE *mnode);
134 | ilong imnode_next(const struct IMEMNODE *mnode, ilong index);
135 | ilong imnode_prev(const struct IMEMNODE *mnode, ilong index);
136 | void*imnode_data(struct IMEMNODE *mnode, ilong index);
137 | const void* imnode_data_const(const struct IMEMNODE *mnode, ilong index);
138 |
139 | #define IMNODE_NODE(mnodeptr, i) ((mnodeptr)->mnode[i])
140 | #define IMNODE_PREV(mnodeptr, i) ((mnodeptr)->mprev[i])
141 | #define IMNODE_NEXT(mnodeptr, i) ((mnodeptr)->mnext[i])
142 | #define IMNODE_DATA(mnodeptr, i) ((mnodeptr)->mdata[i])
143 | #define IMNODE_MODE(mnodeptr, i) ((mnodeptr)->mmode[i])
144 |
145 |
146 | /*====================================================================*/
147 | /* QUEUE DEFINITION */
148 | /*====================================================================*/
#ifndef __ILIST_DEF__
#define __ILIST_DEF__

/* intrusive circular doubly-linked list, same design as the Linux
 * kernel's list_head: the ILISTHEAD node is embedded inside the user's
 * struct, and ILIST_ENTRY recovers the enclosing object from a node
 * pointer.
 * NOTE(review): __ILIST_DEF__ / __ilist_splice use identifiers reserved
 * for the implementation (leading double underscore). */
struct ILISTHEAD {
	struct ILISTHEAD *next, *prev;
};

typedef struct ILISTHEAD ilist_head;


/*--------------------------------------------------------------------*/
/* queue init                                                         */
/*--------------------------------------------------------------------*/
/* an empty list points to itself in both directions */
#define ILIST_HEAD_INIT(name) { &(name), &(name) }
#define ILIST_HEAD(name) \
	struct ILISTHEAD name = ILIST_HEAD_INIT(name)

/* runtime initializer for an empty list */
#define ILIST_INIT(ptr) ( \
	(ptr)->next = (ptr), (ptr)->prev = (ptr))

/* NOTE(review): classic null-pointer offsetof idiom; the conforming
 * spelling would be offsetof from <stddef.h> */
#define IOFFSETOF(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)

/* recover the enclosing `type` object from a pointer to its `member` */
#define ICONTAINEROF(ptr, type, member) ( \
	(type*)( ((char*)((type*)ptr)) - IOFFSETOF(type, member)) )

#define ILIST_ENTRY(ptr, type, member) ICONTAINEROF(ptr, type, member)


/*--------------------------------------------------------------------*/
/* queue operation                                                    */
/*--------------------------------------------------------------------*/
/* insert node immediately after head (LIFO position) */
#define ILIST_ADD(node, head) ( \
	(node)->prev = (head), (node)->next = (head)->next, \
	(head)->next->prev = (node), (head)->next = (node))

/* insert node immediately before head (FIFO position) */
#define ILIST_ADD_TAIL(node, head) ( \
	(node)->prev = (head)->prev, (node)->next = (head), \
	(head)->prev->next = (node), (head)->prev = (node))

/* relink p and n directly, dropping whatever was between them */
#define ILIST_DEL_BETWEEN(p, n) ((n)->prev = (p), (p)->next = (n))

/* unlink entry; its own pointers are nulled so a stale iteration from
 * a deleted entry faults instead of corrupting the list */
#define ILIST_DEL(entry) (\
	(entry)->next->prev = (entry)->prev, \
	(entry)->prev->next = (entry)->next, \
	(entry)->next = 0, (entry)->prev = 0)

/* unlink entry and re-initialize it as an empty list */
#define ILIST_DEL_INIT(entry) do { \
	ILIST_DEL(entry); ILIST_INIT(entry); } while (0)

/* a list is empty when its head points back to itself */
#define ILIST_IS_EMPTY(entry) ((entry) == (entry)->next)

/* lower-case aliases for the operations above */
#define ilist_init		ILIST_INIT
#define ilist_entry		ILIST_ENTRY
#define ilist_add		ILIST_ADD
#define ilist_add_tail	ILIST_ADD_TAIL
#define ilist_del		ILIST_DEL
#define ilist_del_init	ILIST_DEL_INIT
#define ilist_is_empty	ILIST_IS_EMPTY

/* iterate over enclosing objects; `iterator` is a TYPE* whose MEMBER
 * embeds the list node.  NOT safe against removing the current entry. */
#define ILIST_FOREACH(iterator, head, TYPE, MEMBER) \
	for ((iterator) = ilist_entry((head)->next, TYPE, MEMBER); \
		&((iterator)->MEMBER) != (head); \
		(iterator) = ilist_entry((iterator)->MEMBER.next, TYPE, MEMBER))

#define ilist_foreach(iterator, head, TYPE, MEMBER) \
	ILIST_FOREACH(iterator, head, TYPE, MEMBER)

/* iterate over the raw list nodes themselves */
#define ilist_foreach_entry(pos, head) \
	for( (pos) = (head)->next; (pos) != (head) ; (pos) = (pos)->next )


/* splice the whole of `list` to the front of `head`
 * (precondition: `list` is non-empty) */
#define __ilist_splice(list, head) do {	\
		ilist_head *first = (list)->next, *last = (list)->prev; \
		ilist_head *at = (head)->next; \
		(first)->prev = (head), (head)->next = (first);		\
		(last)->next = (at), (at)->prev = (last); }	while (0)

#define ilist_splice(list, head) do { \
	if (!ilist_is_empty(list)) __ilist_splice(list, head); } while (0)

/* splice then reset `list` to empty */
#define ilist_splice_init(list, head) do {	\
	ilist_splice(list, head);	ilist_init(list); } while (0)


/* silence MSVC pointer-size-conversion (4311/4312) and CRT-deprecation
 * (4996) warnings */
#ifdef _MSC_VER
#pragma warning(disable:4311)
#pragma warning(disable:4312)
#pragma warning(disable:4996)
#endif

#endif
240 |
241 |
242 | /*====================================================================*/
243 | /* IMEMSLAB */
244 | /*====================================================================*/
/* a single slab: one contiguous memory block carved into equal-sized
 * objects.  NOTE(review): member roles are inferred from names and the
 * two macros below — confirm against imemslab.c */
struct IMEMSLAB
{
	struct ILISTHEAD queue;   /* links this slab into a cache's slab list */
	size_t coloroff;          /* cache-coloring offset of the first object */
	void*membase;             /* base address of the slab memory */
	ilong memsize;            /* slab size in bytes */
	ilong inuse;              /* objects currently allocated from this slab */
	void*bufctl;              /* free-object chain; NULL when exhausted */
	void*extra;               /* user data */
};

typedef struct IMEMSLAB imemslab_t;

/* full: no free objects left; empty: nothing allocated */
#define IMEMSLAB_ISFULL(s) ((s)->bufctl == 0)
#define IMEMSLAB_ISEMPTY(s) ((s)->inuse == 0)
260 |
261 | #ifdef __cplusplus
262 | }
263 | #endif
264 |
265 | /*====================================================================*/
266 | /* IMUTEX - mutex interfaces */
267 | /*====================================================================*/
#ifndef IMUTEX_TYPE

#ifndef IMUTEX_DISABLE
#if (defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64))
#if ((!defined(_M_PPC)) && (!defined(_M_PPC_BE)) && (!defined(_XBOX)))
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0500
#endif
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
/* FIX: header name was missing ("#include" with no file) */
#include <windows.h>
#else
#ifndef _XBOX
#define _XBOX
#endif
/* FIX: header name was missing — Xbox uses <xtl.h> instead of windows.h */
#include <xtl.h>
#endif

/* Win32 mutex: CRITICAL_SECTION (recursive, intra-process only) */
#define IMUTEX_TYPE         CRITICAL_SECTION
#define IMUTEX_INIT(m)      InitializeCriticalSection((CRITICAL_SECTION*)(m))
#define IMUTEX_DESTROY(m)   DeleteCriticalSection((CRITICAL_SECTION*)(m))
#define IMUTEX_LOCK(m)      EnterCriticalSection((CRITICAL_SECTION*)(m))
#define IMUTEX_UNLOCK(m)    LeaveCriticalSection((CRITICAL_SECTION*)(m))

#elif defined(__unix) || defined(__unix__) || defined(__MACH__)
/* FIX: header names were missing on the two includes below */
#include <unistd.h>
#include <pthread.h>

/* POSIX mutex with default (non-recursive) attributes */
#define IMUTEX_TYPE         pthread_mutex_t
#define IMUTEX_INIT(m)      pthread_mutex_init((pthread_mutex_t*)(m), 0)
#define IMUTEX_DESTROY(m)   pthread_mutex_destroy((pthread_mutex_t*)(m))
#define IMUTEX_LOCK(m)      pthread_mutex_lock((pthread_mutex_t*)(m))
#define IMUTEX_UNLOCK(m)    pthread_mutex_unlock((pthread_mutex_t*)(m))
#endif
#endif

/* fallback when no platform mutex is available (or IMUTEX_DISABLE is
 * defined): all operations are self-assignment no-ops on an int */
#ifndef IMUTEX_TYPE
#define IMUTEX_TYPE         int
#define IMUTEX_INIT(m)      { (*(m)) = (*(m)); }
#define IMUTEX_DESTROY(m)   { (*(m)) = (*(m)); }
#define IMUTEX_LOCK(m)      { (*(m)) = (*(m)); }
#define IMUTEX_UNLOCK(m)    { (*(m)) = (*(m)); }
#endif

#endif
313 |
314 |
315 | #ifdef __cplusplus
316 | extern "C" {
317 | #endif
318 |
/* concrete mutex type selected by the preprocessor logic above, plus
 * C wrappers.  imutex_disable is a global runtime switch
 * (NOTE(review): presumably turns the wrappers into no-ops — confirm
 * in imemslab.c) */
typedef IMUTEX_TYPE imutex_t;
extern int imutex_disable;

void imutex_init(imutex_t *mutex);      /* create */
void imutex_destroy(imutex_t *mutex);   /* dispose */
void imutex_lock(imutex_t *mutex);      /* acquire */
void imutex_unlock(imutex_t *mutex);    /* release */
326 |
327 |
328 | /*====================================================================*/
329 | /* IMEMGFP (mem_get_free_pages) - a page-supplyer class */
330 | /*====================================================================*/
/* page supplier interface: a cache obtains whole pages of page_size
 * bytes through alloc_page/free_page instead of calling malloc
 * directly */
struct IMEMGFP
{
	size_t page_size;     /* size of each page this supplier delivers */
	ilong refcnt;         /* reference count */
	void*(*alloc_page)(struct IMEMGFP *gfp);            /* obtain one page */
	void (*free_page)(struct IMEMGFP *gfp, void *ptr);  /* release one page */
	void *extra;          /* supplier private data */
	size_t pages_inuse;   /* statistics: pages currently out */
	size_t pages_new;     /* statistics: total pages handed out */
	size_t pages_del;     /* statistics: total pages returned */
};

/* default page size is (1 << 16) = 64KB */
#define IDEFAULT_PAGE_SHIFT 16

typedef struct IMEMGFP imemgfp_t;
346 |
347 |
348 | /*====================================================================*/
349 | /* IMEMLRU */
350 | /*====================================================================*/
/* capacity of the pointer stack inside struct IMEMLRU */
#ifndef IMCACHE_ARRAYLIMIT
#define IMCACHE_ARRAYLIMIT 128
#endif

#ifndef IMCACHE_NODECOUNT_SHIFT
#define IMCACHE_NODECOUNT_SHIFT 0
#endif

#define IMCACHE_NODECOUNT (1 << (IMCACHE_NODECOUNT_SHIFT))

/* maximum length of a cache name (see IMEMCACHE.name) */
#ifndef IMCACHE_NAMESIZE
#define IMCACHE_NAMESIZE 32
#endif

/* number of LRU front-ends per cache: 1 << IMCACHE_LRU_SHIFT */
#ifndef IMCACHE_LRU_SHIFT
#define IMCACHE_LRU_SHIFT 3
#endif

#define IMCACHE_LRU_COUNT (1 << IMCACHE_LRU_SHIFT)

/* per-cache object front-end: a small mutex-protected stack of object
 * pointers consulted before the slab lists.
 * NOTE(review): exact refill/flush semantics live in imemslab.c —
 * confirm there. */
struct IMEMLRU
{
	int avial;        /* number of valid entries (sic: "avail"; name kept
	                   * because imemslab.c references it) */
	int limit;        /* capacity threshold */
	int batchcount;   /* objects moved per batch */
	imutex_t lock;    /* protects this structure */
	void *entry[IMCACHE_ARRAYLIMIT];   /* cached object pointers */
};

typedef struct IMEMLRU imemlru_t;
381 |
382 |
383 | /*====================================================================*/
384 | /* IMEMCACHE */
385 | /*====================================================================*/
/* a slab cache managing objects of one fixed size.
 * NOTE(review): member roles below are inferred from names and the
 * visible IMEMSLAB / IMEMLRU / IMEMGFP types — confirm in imemslab.c */
struct IMEMCACHE
{
	size_t obj_size;        /* requested object size */
	size_t unit_size;       /* per-object footprint inside a slab */
	size_t page_size;       /* size of the pages backing each slab */
	size_t count_partial;   /* slabs in slabs_partial */
	size_t count_full;      /* slabs in slabs_full */
	size_t count_free;      /* slabs in slabs_free */
	size_t free_objects;    /* free objects across the cache */
	size_t free_limit;      /* threshold before empty slabs are reclaimed */
	size_t color_next;      /* next cache-color offset to assign */
	size_t color_limit;     /* color offsets wrap at this bound */

	ilist_head queue;       /* membership in the global cache list */
	imutex_t list_lock;     /* protects the three slab lists below */

	ilist_head slabs_partial;   /* slabs with some objects free */
	ilist_head slabs_full;      /* slabs with no objects free */
	ilist_head slabs_free;      /* slabs with every object free */

	imemlru_t array[IMCACHE_LRU_COUNT];   /* LRU front-ends */
	imemgfp_t *gfp;         /* active page supplier */
	imemgfp_t page_supply;  /* built-in default page supplier */

	size_t batchcount;      /* transfer batch between LRU and slabs */
	size_t limit;           /* LRU capacity */
	size_t num;             /* objects per slab */
	ilong flags;
	ilong user;             /* user-defined tag */
	int index;              /* slot in the global cache table */
	void*extra;             /* user data */

	char name[IMCACHE_NAMESIZE + 1];   /* cache name, NUL terminated */
	size_t pages_hiwater;   /* statistics: peak pages held */
	size_t pages_inuse;     /* statistics: pages currently held */
	size_t pages_new;       /* statistics: pages obtained */
	size_t pages_del;       /* statistics: pages returned */
};

typedef struct IMEMCACHE imemcache_t;
426 |
427 |
428 | /*====================================================================*/
429 | /* IKMEMHOOK */
430 | /*====================================================================*/
/* hook table allowing an application to intercept the ikmem entry
 * points (installed via ikmem_hook_install below) */
struct IKMEMHOOK
{
	void* (*kmem_malloc_fn)(size_t size);               /* replaces ikmem_malloc */
	void (*kmem_free_fn)(void *ptr);                    /* replaces ikmem_free */
	void* (*kmem_realloc_fn)(void *ptr, size_t size);   /* replaces ikmem_realloc */
	size_t (*kmem_ptr_size_fn)(const void *ptr);        /* replaces ikmem_ptr_size */
	void (*kmem_shrink_fn)(void);                       /* replaces ikmem_shrink */
};

typedef struct IKMEMHOOK ikmemhook_t;
441 |
442 |
443 | /*====================================================================*/
444 | /* IKMEM INTERFACE */
445 | /*====================================================================*/
/* global allocator setup / teardown.
 * NOTE(review): parameter semantics inferred from call sites — the
 * test programs pass ikmem_init(0, 0, 0); confirm exact meaning of
 * page_shift / pg_malloc / sz in imemslab.c */
void ikmem_init(int page_shift, int pg_malloc, size_t *sz);
void ikmem_destroy(void);

/* malloc-compatible entry points backed by the slab caches */
void* ikmem_malloc(size_t size);
void* ikmem_realloc(void *ptr, size_t size);
void ikmem_free(void *ptr);
void ikmem_shrink(void);   /* release cached free pages */

/* named caches for one fixed object size */
imemcache_t *ikmem_create(const char *name, size_t size);
void ikmem_delete(imemcache_t *cache);
void *ikmem_cache_alloc(imemcache_t *cache);
void ikmem_cache_free(imemcache_t *cache, void *ptr);

size_t ikmem_ptr_size(const void *ptr);    /* usable size of an ikmem block */
void ikmem_option(size_t watermark);       /* tune the page watermark */
imemcache_t *ikmem_get(const char *name);  /* look a cache up by name */
imemcache_t *ikmem_vector(int id);         /* built-in cache by index */

/* statistics accessors (page counters, per-cache counters, waste) */
ilong ikmem_page_info(ilong *pg_inuse, ilong *pg_new, ilong *pg_del);
ilong ikmem_cache_info(int id, int *inuse, int *cnew, int *cdel, int *cfree);
ilong ikmem_waste_info(ilong *kmem_inuse, ilong *total_mem);

/* runtime replacement of the allocator via an IKMEMHOOK table */
int ikmem_hook_install(const ikmemhook_t *hook);
const ikmemhook_t *ikmem_hook_get(int id);
470 |
471 |
472 | /*====================================================================*/
473 | /* IVECTOR / IMEMNODE MANAGEMENT */
474 | /*====================================================================*/
/* IALLOCATOR instance (NOTE(review): presumably routes alloc/free
 * through ikmem_* — confirm in imemslab.c) */
extern struct IALLOCATOR ikmem_allocator;

typedef struct IVECTOR ivector_t;
typedef struct IMEMNODE imemnode_t;

/* heap-allocating constructors / destructors for the two containers */
ivector_t *iv_create(void);
void iv_delete(ivector_t *vec);

imemnode_t *imnode_create(ilong nodesize, int grow_limit);
void imnode_delete(imemnode_t *);
485 |
486 |
487 | #ifdef __cplusplus
488 | }
489 | #endif
490 |
491 | #endif
492 |
493 |
494 |
--------------------------------------------------------------------------------
/testbench.c:
--------------------------------------------------------------------------------
1 | /**********************************************************************
2 | *
3 | * SLAB BENCHMARK CASE
4 | *
5 | *
6 | * HOW TO BUILD:
7 | *
8 | * - unix: gcc testbench.c -o testbench
9 | * - windows: cl testbench.c
10 | *
11 | *
12 | * TESTING RESULT: Intel Cetrino Duo 1.8GHz
13 | *
14 | * - benchmark with malloc / free:
15 | * total time is 16634ms maxmem is 104857599bytes
16 | * - benchmark with ikmem_alloc / ikmem_free:
17 | * total time is 1715ms maxmem is 104857599bytes
18 | * pages in=76576 out=74435 waste=34.72%
19 | *
20 | *
21 | * NOTE:
22 | *
23 | * You can decrease "CASE_TIMES" under an old computer, "4000000"
24 | * times will cost almost 20 seconds in my Cetrino Duo.
25 | *
26 | *
27 | **********************************************************************/
28 | //#define IKMEM_ENABLE_BOOT
29 | //#define IMUTEX_DISABLE
30 | //#define IKMEM_USEFIB
31 | //#define IMLOG_ENABLE
32 | //#define IMEM_DEBUG
33 | #include "imemslab.c"
34 |
35 | #define CASE_TIMES 4000000
36 | #define CASE_LIMIT (65535) // 0-64KB each block
37 | #define CASE_HIWATER (50 << 20) // 100MB memory
38 | #define CASE_PROB (55) // probability of (alloc) is 60%
39 |
/* FIX: the header names on all six includes below were lost during
 * extraction ("#include" with no file); reconstructed from usage:
 * timeGetTime needs <windows.h>/<mmsystem.h>, gettimeofday needs
 * <sys/time.h>, and the code below uses printf/scanf, malloc/realloc/
 * free/rand, and assert. */
#if (defined(_WIN32) || defined(WIN32))
#include <windows.h>
#include <mmsystem.h>
#pragma comment(lib, "winmm.lib")
#elif defined(__unix)
#include <unistd.h>
#include <sys/time.h>
#else
#error it can only be compiled under windows or unix
#endif

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
53 |
54 | /* gettime */
55 | iulong gettime()
56 | {
57 | #if (defined(_WIN32) || defined(WIN32))
58 | return timeGetTime();
59 | #else
60 | static struct timezone tz={ 0,0 };
61 | struct timeval time;
62 | gettimeofday(&time,&tz);
63 | return (time.tv_sec * 1000 + time.tv_usec / 1000);
64 | #endif
65 | }
66 |
67 | /* random */
68 | #define RANDOM(n) (xrand() % (iulong)(n))
69 | iulong xseed = 0x1234567;
70 | iulong xrand(void)
71 | {
72 | return (((xseed = xseed * 214013L + 2531011L) >> 16) & 0x7fffffff);
73 | }
74 |
75 | /* two alloc methods dependent on the variable of kmem_turnon */
76 | int kmem_turnon = 0;
77 |
78 | void *xmalloc(iulong size)
79 | {
80 | iulong pos, end;
81 | ilong *ptr;
82 | if (kmem_turnon) ptr = (ilong*)ikmem_malloc(size);
83 | else ptr = (ilong*)malloc(size);
84 | // pos = GetCurrentThreadId() % 100001;
85 | end = size / sizeof(ilong);
86 | for (pos = 0; pos < end ; pos += 1024)
87 | ptr[pos] = (ilong)pos;
88 | return ptr;
89 | }
90 |
91 | void xfree(void *ptr)
92 | {
93 | if (kmem_turnon) ikmem_free(ptr);
94 | else free(ptr);
95 | }
96 |
97 |
98 | /* benchmark alloc / free */
99 | ilong memory_case(ilong limit, ilong hiwater, ilong times, int rate, ilong seed)
100 | {
101 | struct case_t { ilong size, m1, m2; char *ptr; };
102 | struct case_t *record, *p;
103 | iulong startup = 0, water = 0, maxmem = 0, sizev = 0;
104 | ilong pagesize, page_in, page_out, page_inuse;
105 | double waste;
106 | char *ptr;
107 | int count = 0, maxium = 0;
108 | int mode, size, pos;
109 | int retval = 0;
110 |
111 | record = (struct case_t*)malloc(100);
112 | xseed = seed;
113 | startup = gettime();
114 |
115 | for (; times >= 0; times--) {
116 | mode = 0;
117 | if (RANDOM(100) < rate) {
118 | size = RANDOM(limit);
119 | if (size < sizeof(ilong) * 2) size = sizeof(ilong) * 2;
120 | if (water + size >= hiwater) mode = 1;
121 | } else {
122 | mode = 1;
123 | }
124 |
125 | /* TO ALLOC new memory block */
126 | if (mode == 0) {
127 | if (count + 4 >= maxium) {
128 | maxium = maxium? maxium * 2 : 8;
129 | maxium = maxium >= count + 4? maxium : count + 4;
130 | sizev = maxium * sizeof(struct case_t);
131 | record = (struct case_t*)realloc(record,
132 | maxium * sizeof(struct case_t));
133 | assert(record);
134 | }
135 | ptr = xmalloc(size);
136 | assert(ptr);
137 | /* record pointer */
138 | p = &record[count++];
139 | p->ptr = ptr;
140 | p->size = size;
141 | p->m1 = rand() & 0x7ffffff;
142 | p->m2 = rand() & 0x7ffffff;
143 | water += size;
144 | /* writing magic data */
145 | *(ilong*)ptr = p->m1;
146 | *(ilong*)(ptr + p->size - sizeof(ilong)) = p->m2;
147 | }
148 | /* TO FREE old memory block */
149 | else if (count > 0) {
150 | pos = RANDOM(count);
151 | record[count] = record[pos];
152 | p = &record[count];
153 | record[pos] = record[--count];
154 | ptr = p->ptr;
155 | /* checking magic data */
156 | if (*(ilong*)ptr != p->m1) {
157 | printf("[BAD] bad magic1: %lxh size=%d times=%d\n", ptr, p->size, times);
158 | return -1;
159 | }
160 | if (*(ilong*)(ptr + p->size - sizeof(ilong)) != p->m2) {
161 | printf("[BAD] bad magic2: %lxh size=%d times=%d\n", ptr, p->size, times);
162 | return -1;
163 | }
164 | xfree(ptr);
165 | water -= p->size;
166 | }
167 | if (water > maxmem) maxmem = water;
168 | }
169 |
170 | /* state page-supplier */
171 | page_in = imem_gfp_default.pages_new;
172 | page_out = imem_gfp_default.pages_del;
173 | page_inuse = imem_gfp_default.pages_inuse;
174 | pagesize = imem_page_size;
175 |
176 | /* free last memory blocks */
177 | for (pos = 0; pos < count; pos++) {
178 | p = &record[pos];
179 | ptr = p->ptr;
180 | if (*(ilong*)ptr != p->m1) {
181 | printf("[BAD] bad magic: %lxh\n", ptr);
182 | return -1;
183 | }
184 | if (*(ilong*)(ptr + p->size - sizeof(ilong)) != p->m2) {
185 | printf("[BAD] bad magic: %lxh\n", ptr);
186 | return -1;
187 | }
188 | xfree(ptr);
189 | }
190 | /* caculate time */
191 | startup = gettime() - startup;
192 | free(record);
193 |
194 | if (kmem_turnon && water > 0) {
195 | waste = (pagesize * page_inuse - water) * 100.0 / water;
196 | } else {
197 | waste = 0;
198 | }
199 |
200 | printf("timing: total time is %lums maxmem is %lubytes\n", startup, maxmem);
201 | if (kmem_turnon) {
202 | printf( "status: pages (in=%lu out=%lu inuse=%lu) waste=%.2f%%\n",
203 | page_in, page_out, page_inuse, waste);
204 | }
205 | printf("\n");
206 | return (ilong)startup;
207 | }
208 |
/* placeholder processor-id hook: always reports CPU 0.
 * (a real implementation could return GetCurrentThreadId() etc.)
 * FIX: removed the unused `static int inited` left over from an
 * abandoned experiment. */
int cpuid(void)
{
	return 0;
}
215 |
/* boot-stage hook: report which initialization stage was reached */
void ikmem_boot_hook2(int stage)
{
	fprintf(stdout, "boot %d\n", stage);
}
220 |
221 | int main(void)
222 | {
223 | char key;
224 |
225 | /* init kmem interface */
226 | ikmem_init(0, 0, 0);
227 |
228 | /* testing with malloc / free */
229 | printf("Benchmark with malloc / free:\n");
230 | kmem_turnon = 0;
231 | memory_case(CASE_LIMIT, CASE_HIWATER, CASE_TIMES, CASE_PROB, 256);
232 |
233 | /* mutex implementation in cygwin is to slow, so disable mutex */
234 | #ifdef __CYGWIN__
235 | imutex_disable = 1;
236 | #endif
237 | //imutex_neglect(1);
238 | //imutex_disable = 1;
239 |
240 | //__ihook_processor_id = cpuid;
241 |
242 | /* testing with ikmem_alloc / ikmem_free */
243 | printf("Benchmark with ikmem_alloc / ikmem_free:\n");
244 | kmem_turnon = 1;
245 | memory_case(CASE_LIMIT, CASE_HIWATER, CASE_TIMES, CASE_PROB, 256);
246 |
247 | printf("kmem_count=%d current=%d\n", ikmem_count, (0 % 71) & 7);
248 |
249 | int i = 0;
250 | for (i = 0; i < ikmem_count; i++) {
251 | // printf("[%d] %d\n", i, ikmem_array[ikmem_count - 1 - i]->obj_size);
252 | }
253 |
254 | printf("anykey to continue....\n");
255 | scanf("%c", &key);
256 |
257 |
258 | /* clean environment */
259 | ikmem_destroy();
260 |
261 | return 0;
262 | }
263 |
264 |
265 |
--------------------------------------------------------------------------------
/testmain.c:
--------------------------------------------------------------------------------
1 | /**********************************************************************
2 | *
3 | * SLAB USAGE
4 | *
5 | *
6 | * HOW TO BUILD:
7 | *
8 | * - unix: gcc testmain.c -o testmain
9 | * - windows: cl testmain.c
10 | *
11 | **********************************************************************/
12 |
13 |
14 | #include "imemslab.c"
15 | #include
16 |
/* smoke test: allocate, query size, grow, free through the ikmem API */
int main(void)
{
	char *ptr;

	/* init kmem interface */
	ikmem_init(0, 0, 0);

	ptr = (char*)ikmem_malloc(8);
	assert(ptr);

	/* FIX: ikmem_ptr_size returns size_t; printing it with %d was a
	 * format-string mismatch (UB) — cast to unsigned long for a
	 * portable specifier */
	printf("sizeof(ptr)=%lu\n", (unsigned long)ikmem_ptr_size(ptr));

	ptr = ikmem_realloc(ptr, 40);
	assert(ptr);

	printf("sizeof(ptr)=%lu\n", (unsigned long)ikmem_ptr_size(ptr));

	ikmem_free(ptr);

	/* clean environment */
	ikmem_destroy();

	return 0;
}
41 |
42 |
43 |
--------------------------------------------------------------------------------