├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── src
├── Makefile
├── ft_cache_allocator.cpp
├── ft_cache_allocator.h
├── ft_central_cache_mgr.cpp
├── ft_central_cache_mgr.h
├── ft_free_list.h
├── ft_list.h
├── ft_lock.h
├── ft_malloc_alias.h
├── ft_malloc_intf.cpp
├── ft_malloc_intf.h
├── ft_malloc_log.cpp
├── ft_malloc_log.h
├── ft_malloc_slab.h
├── ft_malloc_util.h
├── ft_mem_alloc_intf.cpp
├── ft_mem_alloc_intf.h
├── ft_mmap_page_allocator.cpp
├── ft_mmap_page_allocator.h
├── ft_page_mgr.cpp
├── ft_page_mgr.h
├── ft_rb_tree.cpp
├── ft_rb_tree.h
├── ft_sbrk_page_allocator.cpp
├── ft_sbrk_page_allocator.h
├── ft_sizemap.cpp
├── ft_sizemap.h
├── ft_sys_alloc_intf.h
├── ft_thread_cache.cpp
├── ft_thread_cache.h
├── ftmalloc.cpp
└── ftmalloc.h
└── test
├── Makefile
└── main.cpp
/.gitignore:
--------------------------------------------------------------------------------
1 | *.o
2 | main
3 | *.so
4 | *.d
5 | *.d.
6 | test
7 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 |  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | {one line to give the program's name and a brief idea of what it does.}
635 | Copyright (C) {year} {name of author}
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see .
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | {project} Copyright (C) {year} {fullname}
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | .
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | .
675 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | ########### MakeFile.env ##########
2 | # Top level pattern, include by Makefile of child directory
3 | # in which variable like TOPDIR, TARGET or LIB may be needed
4 |
5 | MAKE=make
6 |
7 | dirs:= src test
8 | SUBDIRS := $(dirs)
9 |
10 | all:$(TARGET) subdirs
11 |
12 | subdirs:$(SUBDIRS)
13 | for dir in $(SUBDIRS);\
14 | do $(MAKE) -C $$dir all||exit 1;\
15 | done
16 |
17 | -include $(DEPENDS)
18 |
19 | clean:
20 | for dir in $(SUBDIRS);\
21 | do $(MAKE) -C $$dir clean||exit 1;\
22 | done
23 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ftmalloc
2 | fast malloc. similar to google-tcmalloc.
3 |
4 | 因为项目需要管理大块的共享内存,之前的方案采用的线性管理方式,内存分配、释放,以及内存碎片的处理都有非常大的局限性,不仅处理速度慢,而且性能不稳定。所以提出优化需求。
5 | 因为tcmalloc是进程内存管理,没法直接移植到管理共享内存上,所以需要重新开发。本工程就是针对项目做得工作,后面通过抽取内存管理和分配部分的逻辑,修改成针对进程内存管理,形成本仓库,并参考tcmalloc,提供malloc/free/new/delete等接口,提供和tcmalloc类似的内存库功能。
6 |
7 | ### 设计思路
8 | #### freelist和切片规则和tcmalloc一致,这部分没有过多设计,哈希链表管理线程内存切片
9 | #### 使用红黑树实现页和切片的映射关系
10 | #### 使用红黑树实现页管理关系
11 | #### 模仿linux内核的slab实现从系统内存分配内存
12 |
13 |
14 | ### 使用
15 | #### 在ftmalloc目录下执行make,会将src目录下的代码编译,生成libftmalloc.so.
16 | #### test目录下会编译main.cpp生成test程序
17 | #### LD_PRELOAD=../src/libftmalloc.so ./test 指定使用libftmalloc.so作为程序的内存库,申请和释放内存都会通过libftmalloc.so完成。
18 |
--------------------------------------------------------------------------------
/src/Makefile:
--------------------------------------------------------------------------------
1 | cxx=g++
2 | SERVER = libftmalloc.so
3 | #SOURCES = $(wildcard *.cpp)
4 |
5 | SOURCES = $(wildcard *.cpp)
6 |
7 | DEP_SOURCES = $(notdir $(SOURCES))
8 | DEP_OBJECTS = $(patsubst %.cpp, %.o, $(DEP_SOURCES))
9 |
10 | INC_PATH = $(shell pwd)
11 | LIBRARYS= -pthread
12 | LIB_PATH = /home/boost_1_58_0/sdk/lib
13 | MACROS = -DC_STRING_FUNC -DLOG_PRINTF -DFT_LOG_DEBUG
14 | SHARED = -shared -fPIC
15 | CPPFLAGS += -g -O0 -Wno-deprecated -Wnonnull
16 |
17 | INC_PATH +=
18 |
19 | INC_DIR = $(patsubst %, -I%, $(INC_PATH))
20 | LIB_DIR = $(patsubst %, -L%, $(LIB_PATH))
21 |
22 |
23 | all : $(SERVER)
24 |
25 | $(SERVER) : $(DEP_OBJECTS)
26 | $(cxx) $(MACROS) $(CPPFLAGS) $(SHARED) -o $@ $^ $(INC_DIR) $(LIB_DIR) $(LIBRARYS)
27 |
28 | .PHONY : clean
29 | clean:
30 | -rm -f $(SERVER)
31 | -rm -f *.o
32 | -rm -f *.d
33 |
34 | ifneq "$(MAKECMDGOALS)" "clean"
35 | include $(DEP_SOURCES:.cpp=.d)
36 | endif
37 |
38 | %.d :
39 | @t1=$*; t2=$${t1##*/}; \
40 | rely_file=""; for file in $(SOURCES); \
41 | do \
42 | find=$${file%%$$t2.cpp}; \
43 | if [ $${#find} != $${#file} ]; then \
44 | rely_file=$$file; \
45 | fi; \
46 | done; \
47 | set -e; rm -f $@; \
48 | $(cxx) -MM $(CPPFLAGS) $(INC_DIR) $$rely_file > $@.; \
49 | cat $@. > $@; \
50 | echo " $(cxx) $(CPPFLAGS) $(MACROS) $(SHARED) -c -o $$t2.o $(INC_DIR) $$rely_file" >> $@; \
51 | sed 's/\.o/\.d/g' < $@. >> $@; \
52 | rm -f $@. ;
53 |
54 |
--------------------------------------------------------------------------------
/src/ft_cache_allocator.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #include "ft_cache_allocator.h"
9 | #include "ft_malloc_log.h"
10 | #include "ft_central_cache_mgr.h"
11 |
12 | #include
13 | #include
14 |
15 | namespace ftmalloc
16 | {
17 | #define MEM_INFO_TYPE size_t
18 | #define MEM_INFO_BYTES (sizeof(MEM_INFO_TYPE))//align.
19 | #define MEM_INFO_BITS (MEM_INFO_BYTES * 8)
20 | #define RESERVE_BITS (8)
21 | #define REAL_INFO_BITS (MEM_INFO_BITS - RESERVE_BITS)
22 | #define PAGE_FLAG_BIT (MEM_INFO_BITS - 1)
23 |
24 | #define GET_MEM_INFO(mem) (*((MEM_INFO_TYPE *)(((size_t)(mem) - MEM_INFO_BYTES))))
25 | #define SET_MEM_INFO(mem, clz, pc) \
26 | do { \
27 | size_t info = clz; \
28 | if (pc > 0) { \
29 | info = pc | ((size_t)1 << PAGE_FLAG_BIT); \
30 | } \
31 | (*((MEM_INFO_TYPE *)(mem))) = (info); \
32 | } while (0)
33 |
34 |
35 | #define SET_CLAZZ(mem, clz) SET_MEM_INFO(mem, clz, 0)
36 | #define SET_PAGECOUNT(mem, pgc) SET_MEM_INFO(mem, 0, pgc)
37 | #define GET_PAGE_ALLOC_BIT(info) ((info) & ((MEM_INFO_TYPE)1 << PAGE_FLAG_BIT))
38 | #define IS_PAGE_ALLOC(mem) (GET_PAGE_ALLOC_BIT(GET_MEM_INFO(mem)) != 0)
39 | #define GET_PAGE_COUNT(mem) (GET_MEM_INFO(mem) & ((((MEM_INFO_TYPE)1) << PAGE_FLAG_BIT) - 1))
40 | #define GET_CLAZZ(mem) (GET_MEM_INFO(mem))
41 |
42 | #define RETURN_MEM_ADDR(mem) ((void *)(((size_t)(mem)) + MEM_INFO_BYTES))
43 | #define GET_MEM_ADDR(mem) ((void *)(((size_t)(mem)) - MEM_INFO_BYTES))
44 |
45 | #define GET_NEED_LENGTH(size) ((size) + (MEM_INFO_BYTES))
46 | #define GET_ALLOC_LENGTH(mem) \
47 | (IS_PAGE_ALLOC(mem) ? \
48 | (((MEM_INFO_TYPE)GET_PAGE_COUNT(mem)) << FT_PAGE_BIT) : \
49 | CSizeMap::GetInstance().class_to_size(GET_CLAZZ(mem)))
50 |
51 | //#ifndef FT_FREE_USE_CALLBACK
52 |
53 | CCacheAllocator::CCacheAllocator()
54 | : m_cFreeList()
55 | , m_llAllocSize(0)
56 | , m_llUsedSize(0)
57 | , m_llAllocPages(0)
58 | {
59 | for (size_t i = 0; i < kNumClasses; i++) {
60 | m_cFreeList[i].Init();
61 | }
62 | }
63 |
64 | CCacheAllocator::~CCacheAllocator()
65 | {
66 | for (size_t i = 0; i < kNumClasses; i++) {
67 | if (m_cFreeList[i].length() > 0) {
68 | FT_LOG(FT_DEBUG, "freelist[%zd].size = %zd", i, m_cFreeList[i].length());
69 | ReleaseToCentral(i, m_cFreeList[i].length());
70 | }
71 | }
72 | }
73 |
74 | void * CCacheAllocator::Malloc(size_t bytes)
75 | {
76 | FT_LOG(FT_DEBUG, "want size:%zd", bytes);
77 | void * addr = NULL;
78 | if (bytes == 0) {
79 | return addr;
80 | }
81 |
82 | FT_LOG(FT_DEBUG, "need alloc length:%zd", GET_NEED_LENGTH(bytes));
83 | if (GET_NEED_LENGTH(bytes) >= kMaxSize) {
84 | addr = PageAlloc(bytes);
85 | } else {
86 | addr = SmallAlloc(bytes);
87 | }
88 | FT_LOG(FT_DEBUG, "want size:%zd, addr:%p", bytes, addr);
89 | return addr;
90 | }
91 |
92 | void * CCacheAllocator::ReAlloc(void * oldptr, size_t bytes)
93 | {
94 | void * addr = NULL;
95 |
96 | if (oldptr == NULL) {
97 | addr = Malloc(bytes);
98 | } else if (bytes == 0) {
99 | addr = NULL;
100 | } else {
101 | void * realAddr = GET_MEM_ADDR(oldptr);
102 | size_t oldsize = 0;
103 | size_t old_size = GET_ALLOC_LENGTH(oldptr);
104 |
105 | if (bytes > oldsize) {
106 | void * addr = Malloc(bytes);
107 | memcpy(addr, oldptr, oldsize);
108 | Free(oldptr);
109 | } else {
110 | addr = oldptr;
111 | }
112 | }
113 |
114 | return addr;
115 | }
116 |
117 | void * CCacheAllocator::Calloc(size_t nmemb, size_t size)
118 | {
119 | size_t needsize = nmemb * size;
120 | if (needsize != 0 && needsize / size != nmemb) {
121 | return NULL;
122 | }
123 |
124 | void * addr = Malloc(needsize);
125 | if (addr == NULL) {
126 | errno = ENOMEM;
127 | } else {
128 | memset(addr, 0, needsize);
129 | }
130 |
131 | return addr;
132 | }
133 |
134 | void CCacheAllocator::Free(void * ptr)
135 | {
136 | if (ptr == NULL) {
137 | FT_LOG(FT_INFO, "Error, _Free invalid ptr:%p\n", ptr);
138 | return;
139 | }
140 |
141 | size_t freeSize = 0;
142 |
143 | void * realAddr = GET_MEM_ADDR(ptr);
144 | FT_LOG(FT_DEBUG, "Get clazz info from memory:addr:%p, real:%p", ptr, realAddr);
145 | FT_LOG(FT_DEBUG, "info:%lx", *(size_t *)realAddr);
146 |
147 | if (IS_PAGE_ALLOC(ptr)) {
148 | size_t pages = GET_PAGE_COUNT(ptr);
149 | FT_LOG(FT_DEBUG, "free pages:%p, pages:%zd", realAddr, pages);
150 |
151 | freeSize = pages << FT_PAGE_BIT;
152 | CCentralCacheMgr::GetInstance().FreePages(realAddr, pages);
153 | } else {
154 | size_t clazz = GET_CLAZZ(ptr);
155 | if (clazz <= 0 || clazz >= kNumClasses) {
156 | return;
157 | }
158 |
159 | freeSize = CSizeMap::GetInstance().class_to_size(clazz);
160 | CFreeList & list = m_cFreeList[clazz];
161 | list.Push(realAddr);
162 | FT_LOG(FT_DEBUG, "free slices:%p, clazz:%zd", realAddr, clazz);
163 |
164 | FT_LOG(FT_DEBUG, "clazz:%zd, length:%zd, max_length:%zd!", clazz, list.length(), list.max_length());
165 |
166 | if (list.length() >= list.max_length()) {
167 | list.set_max_length(list.max_length() >> 1);
168 | ReleaseToCentral(clazz, list.length() - list.max_length());
169 | }
170 | }
171 | }
172 |
173 | void CCacheAllocator::FreeDirect(size_t clazz, void * ptr)
174 | {
175 | }
176 |
177 | void * CCacheAllocator::SmallAlloc(size_t bytes)
178 | {
179 | FT_LOG(FT_DEBUG, "SmallAlloc, want size:%zd", bytes);
180 | CSizeMap sizemap = CSizeMap::GetInstance();
181 |
182 | size_t allocSize = GET_NEED_LENGTH(bytes);
183 | size_t cl = sizemap.SizeClass(allocSize);
184 | size_t size = sizemap.class_to_size(cl);
185 |
186 | FT_LOG(FT_DEBUG, "SmallAlloc, want:%zd, c1:%zd, size:%zd", bytes, cl, size);
187 |
188 | void * allocAddr = NULL;
189 |
190 | CFreeList & list = m_cFreeList[cl];
191 | FT_LOG(FT_DEBUG, "clazz:%zd, freeobj:%zd", cl, list.length());
192 | if (list.empty()) {
193 | allocAddr = FetchMemFromCentral(cl, size);
194 | } else {
195 | allocAddr = list.Pop();
196 | }
197 | FT_LOG(FT_DEBUG, "object addr:%p", allocAddr);
198 |
199 | m_llUsedSize += size;
200 | SET_CLAZZ(allocAddr, cl);
201 |
202 | void * retAddr = RETURN_MEM_ADDR(allocAddr);
203 | FT_LOG(FT_DEBUG, "return addr:%p", retAddr);
204 |
205 | return retAddr;
206 | }
207 |
208 | void * CCacheAllocator::PageAlloc(size_t bytes)
209 | {
210 | size_t allocSize = GET_NEED_LENGTH(bytes);
211 | size_t needPages = (allocSize >> FT_PAGE_BIT) + ((allocSize & (FT_PAGE_BIT - 1)) > 0 ? 1 : 0);
212 |
213 | FT_LOG(FT_DEBUG, "want size:%zd, realsize:%zd, pages:%zd", bytes, allocSize, needPages);
214 |
215 | void * allocAddr = CCentralCacheMgr::GetInstance().AllocPages(needPages);
216 | FT_LOG(FT_DEBUG, "want size:%zd, realsize:%zd, pages:%zd, addr:%p", bytes, allocSize, needPages, allocAddr);
217 |
218 | m_llAllocPages += needPages;
219 |
220 | void * retAddr = allocAddr;
221 |
222 | SET_PAGECOUNT(retAddr, needPages);
223 | retAddr = RETURN_MEM_ADDR(retAddr);
224 |
225 | FT_LOG(FT_DEBUG, "return addr:%p", retAddr);
226 |
227 | return retAddr;
228 | }
229 |
230 | void * CCacheAllocator::FetchMemFromCentral(size_t clazz, size_t size)
231 | {
232 | CFreeList & list = m_cFreeList[clazz];
233 | //ASSERT(list.empty());
234 | FT_LOG(FT_DEBUG, "FetchMemFromCentral, clz:%zd, size:%zd", clazz, size);
235 |
236 | CSizeMap sizemap = CSizeMap::GetInstance();
237 |
238 | const size_t batch_size = sizemap.num_objects_to_move(clazz);
239 | const size_t num_to_move = FT_MIN(list.max_length(), batch_size);
240 | FT_LOG(FT_DEBUG, "FetchMemFromCentral, batchSize:%zd, list.length:%zd, num_to_move:%zd",
241 | batch_size, list.max_length(), num_to_move);
242 |
243 | void *start, *end;
244 | size_t fetch_count = CCentralCacheMgr::GetInstance().RemoveRange(clazz, &start, &end, num_to_move);
245 | FT_LOG(FT_DEBUG, "FetchMemFromCentral, alloc nodes from central:%zd, start:%p, end:%p", fetch_count, start, end);
246 |
247 | m_llAllocSize += fetch_count * size;
248 | if (--fetch_count >= 0) {
249 | list.PushRange(fetch_count, SLL_Next(start), end);
250 | }
251 |
252 | if (list.max_length() < batch_size) {
253 | FT_LOG(FT_DEBUG, "FetchMemFromCentral, set list max size[%zd] = %zd", clazz, list.max_length() << 1);
254 | list.set_max_length(list.max_length() << 1);
255 | }
256 |
257 | return start;
258 | }
259 |
260 | void CCacheAllocator::ReleaseToCentral(size_t clazz, size_t N)
261 | {
262 | FT_LOG(FT_DEBUG, "clz:%zd, returnsize:%zd", clazz, N);
263 | CFreeList & list = m_cFreeList[clazz];
264 |
265 | if (list.length() < N) {
266 | N = list.length();
267 | }
268 |
269 | CSizeMap & sizemap = CSizeMap::GetInstance();
270 |
271 | size_t node_size = sizemap.class_to_size(clazz);
272 | size_t batch_size = CSizeMap::GetInstance().num_objects_to_move(clazz);
273 |
274 | m_llAllocSize -= N * node_size;
275 |
276 | FT_LOG(FT_DEBUG, "clz:%zd, N:%ld, batchsize:%ld", clazz, N, batch_size);
277 | while (N > batch_size) {
278 | void *tail, *head;
279 | list.PopRange(batch_size, &head, &tail);
280 | CCentralCacheMgr::GetInstance().InsertRange(clazz, head, tail, batch_size);
281 | N -= batch_size;
282 | }
283 |
284 | FT_LOG(FT_DEBUG, "clz:%zd, N:%ld, batchsize:%ld", clazz, N, batch_size);
285 | void * start, *end;
286 | list.PopRange(N, &start, &end);
287 | CCentralCacheMgr::GetInstance().InsertRange(clazz, start, end, N);
288 | }
289 |
290 | }
291 |
--------------------------------------------------------------------------------
/src/ft_cache_allocator.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_CACHE_ALLOCATOR_H__
9 | #define __FT_CACHE_ALLOCATOR_H__
10 |
11 | #include "ft_mem_alloc_intf.h"
12 | #include "ft_sizemap.h"
13 | #include "ft_free_list.h"
14 |
15 | namespace ftmalloc
16 | {
17 | class CCacheAllocator : public IMemAlloc
18 | {
19 | friend class CReleaseHelper;
20 | public:
21 | CCacheAllocator();
22 | virtual ~CCacheAllocator();
23 |
24 | virtual void * Malloc(size_t);
25 | virtual void * ReAlloc(void *, size_t);
26 | virtual void * Calloc(size_t, size_t);
27 | virtual void Free(void *);
28 |
29 | protected:
30 | void FreeDirect(size_t clazz, void * ptr);
31 |
32 | private:
33 | void * SmallAlloc(size_t bytes);
34 | void * PageAlloc(size_t bytes);
35 | void * FetchMemFromCentral(size_t clazz, size_t size);
36 | void ReleaseToCentral(size_t clazz, size_t N);
37 | void ShowCacheInfo();
38 |
39 | private:
40 | CCacheAllocator(const CCacheAllocator &);
41 | CCacheAllocator & operator=(const CCacheAllocator &);
42 |
43 | private:
44 | CFreeList m_cFreeList[kNumClasses];
45 | size_t m_llAllocSize;
46 | size_t m_llUsedSize;
47 | size_t m_llAllocPages;
48 | };
49 |
50 | #if 0
51 | template
52 | class CReleaseHelper
53 | {
54 | public:
55 | void Release(void * addr)
56 | {
57 | if (objaddr == 0) {
58 | return;
59 | }
60 |
61 | CCacheAllocator * allocator = (CCacheAllocator *)objaddr;
62 | allocator->FreeDirect(clazz, addr);
63 | }
64 | };
65 | #endif
66 | }
67 |
68 | #endif
--------------------------------------------------------------------------------
/src/ft_central_cache_mgr.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #include "ft_central_cache_mgr.h"
9 | #include "ft_lock.h"
10 | #include "ft_page_mgr.h"
11 | #include "ft_free_list.h"
12 | #include "ft_sbrk_page_allocator.h"
13 | #include "ft_mmap_page_allocator.h"
14 | #include "ft_malloc_slab.h"
15 | #include "ft_malloc_log.h"
16 |
17 | namespace ftmalloc
18 | {
19 | CCentralCacheMgr CCentralCacheMgr::sInstace;
20 |
21 | extern CMmapPageAllocator s_mmap_page_allocator;
22 | static CSlab s_spannode_allocator("span_node", s_mmap_page_allocator);
23 |
24 | static CMutexType s_central_cache_lock = FT_MUTEX_INITIALIZER();
25 |
26 | CCentralCacheMgr & CCentralCacheMgr::GetInstance()
27 | {
28 | return sInstace;
29 | }
30 |
31 | CCentralCacheMgr::CCentralCacheMgr()
32 | : m_pSpanNodeCache(NULL)
33 | , m_iLastClazz(0)
34 | , m_llAllocPages(0)
35 | , m_llAllocBytes(0)
36 | {
37 | for (int i = 0; i < kNumClasses; i++) {
38 | struct SSpanInfo & info = m_sSpanList[i];
39 |
40 | info.span_count = 0;
41 | info.free_object = 0;
42 | RB_ROOT_INIT(info.span_tree);
43 | #ifdef CACHE_ALLOC_TREE
44 | RB_ROOT_INIT(info.alloc_tree);
45 | #else
46 | INIT_LIST_HEAD(&info.alloc_list);
47 | #endif
48 | }
49 | }
50 |
51 | CCentralCacheMgr::~CCentralCacheMgr()
52 | {
53 | }
54 |
55 | void CCentralCacheMgr::InsertRange(int clz, void *start, void *end, int N)
56 | {
57 | CAutoLock lock(s_central_cache_lock);
58 |
59 | while (start != NULL) {
60 | void * next = SLL_Next(start);
61 | ReleaseToSpan(clz, start);
62 | start = next;
63 | }
64 |
65 | ReleaseBytes(N * CSizeMap::GetInstance().class_to_size(clz));
66 | FT_LOG(FT_DEBUG, "Now, allocate out pages:%zd, bytes:%zd", AllocOutPages(), AllocOutBytes());
67 |
68 | }
69 |
70 | int CCentralCacheMgr::RemoveRange(int clz, void **start, void **end, int N)
71 | {
72 | //TODO:: lock sema
73 | FT_LOG(FT_DEBUG, "clz:%d, wantsize:%d", clz, N);
74 |
75 | CAutoLock lock(s_central_cache_lock);
76 |
77 | size_t allocNodes = FetchFromSpan(clz, N, start, end);
78 | AllocBytes(allocNodes * CSizeMap::GetInstance().class_to_size(clz));
79 | FT_LOG(FT_DEBUG, "Now, allocate out pages:%zd, bytes:%zd", AllocOutPages(), AllocOutBytes());
80 |
81 | return allocNodes;
82 | }
83 |
84 | void * CCentralCacheMgr::AllocPages(int wantPages)
85 | {
86 | CAutoLock lock(s_central_cache_lock);
87 |
88 | FT_LOG(FT_DEBUG, "wantpages:%d", wantPages);
89 | void * pageAddr = NULL;
90 |
91 | pageAddr = (void *)CPageMgr::GetInstance().AllocPages(wantPages);
92 |
93 | AddAllocPages(wantPages);
94 | AllocBytes(wantPages << FT_PAGE_BIT);
95 |
96 | FT_LOG(FT_DEBUG, "alloc page addr:%p", pageAddr);
97 | FT_LOG(FT_DEBUG, "Now, allocate out pages:%zd, bytes:%zd", AllocOutPages(), AllocOutBytes());
98 |
99 | return pageAddr;
100 | }
101 |
102 | void CCentralCacheMgr::FreePages(void * pageAddr, int pagesFree)
103 | {
104 | CAutoLock lock(s_central_cache_lock);
105 | FT_LOG(FT_DEBUG, "page address:%p, pages:%d", pageAddr, pagesFree);
106 |
107 | CPageMgr::GetInstance().ReleasePages(pageAddr, pagesFree);
108 |
109 | DecAllocPages(pagesFree);
110 | ReleaseBytes(pagesFree << FT_PAGE_BIT);
111 |
112 | FT_LOG(FT_DEBUG, "Now, allocate out pages:%zd, bytes:%zd", AllocOutPages(), AllocOutBytes());
113 | }
114 |
115 | void CCentralCacheMgr::ShowInfo()
116 | {
117 | }
118 |
119 | int CCentralCacheMgr::FetchFromSpan(int clz, int N, void ** start, void ** end)
120 | {
121 | struct SSpanInfo & spanInfo = m_sSpanList[clz];
122 |
123 | FT_LOG(FT_DEBUG, "clazz:%d, wantsize:%d, %s", clz, N, spanInfo.c_string());
124 |
125 | if (spanInfo.free_object < N) {
126 | AllocSpan(clz);
127 | }
128 |
129 | void *s = NULL, *e = NULL;
130 | int needCount = N;
131 | bool firstTimeAlloc = true;
132 |
133 | int allocsize = 0;
134 | SSpanNode * spanNode = NULL;
135 |
136 | while (needCount > 0) {
137 | #ifdef CACHE_ALLOC_TREE
138 | struct SSpanNode * spanNode = AllocTreeGetObject(rb_first(&spanInfo.alloc_tree));
139 | #else
140 | struct SSpanNode * spanNode = FirstOfAllocList(&spanInfo);
141 | #endif
142 | if (spanNode == NULL) {
143 | FT_LOG(FT_ERROR, "no span_node, list/tree empty! clazz:%d", clz);
144 | break;
145 | }
146 | FT_LOG(FT_DEBUG, "%s", spanNode->c_string());
147 | FT_LOG(FT_DEBUG, "N:%d, needsize:%d", N, needCount);
148 |
149 | size_t can_alloc = FT_MIN(spanNode->free_size, needCount);
150 | SLL_PopRange(&spanNode->object_list, can_alloc, &s, &e);
151 |
152 | spanNode->free_size -= can_alloc;
153 | needCount -= can_alloc;
154 |
155 | FT_LOG(FT_DEBUG, "%s", spanNode->c_string());
156 | FT_LOG(FT_DEBUG, "N:%d, needsize:%d", N, needCount);
157 |
158 | if (spanNode->free_size == 0) {
159 | #ifdef CACHE_ALLOC_TREE
160 | RbRemove(&spanInfo.alloc_tree, spanNode, &CCentralCacheMgr::AllocTreeNode);
161 | #else
162 | RemoveAllocList(spanNode);
163 | #endif
164 | }
165 |
166 | if (firstTimeAlloc) {
167 | firstTimeAlloc = false;
168 |
169 | *start = s;
170 | *end = e;
171 | } else {
172 | SLL_SetNext(*end, s);
173 | *end = e;
174 | }
175 | }
176 |
177 | int allocNum = N - needCount;
178 | spanInfo.free_object -= allocNum;
179 | FT_LOG(FT_DEBUG, "allocnum:%d, wantsize:%d, start:%p, end:%p", allocNum, N, *start, *end);
180 |
181 | return allocNum;
182 | }
183 |
184 | void CCentralCacheMgr::ReleaseToSpan(int iclz, void * object)
185 | {
186 | size_t clz = iclz;
187 | FT_LOG(FT_DEBUG, "clz:%zd, objaddr:%p", clz, object);
188 |
189 | struct SSpanInfo & spanInfo = m_sSpanList[clz];
190 | FT_LOG(FT_DEBUG, "%s", spanInfo.c_string());
191 |
192 | struct SSpanNode * spanNode = m_pSpanNodeCache;
193 | if (spanNode == NULL || m_iLastClazz != clz || SpanTreeSearch(clz, spanNode, object)) { //cache invalid.
194 | FT_LOG(FT_INFO, "span info cache invalid, spanInfo:%p, lastclz:%zd, clz:%zd, %s, search from rbtree", &spanInfo, m_iLastClazz, clz, spanNode == NULL ? "NULL" : spanNode->c_string());
195 | spanNode = RbSearch(clz, &spanInfo.span_tree, object, &CCentralCacheMgr::SpanTreeGetObject, &CCentralCacheMgr::SpanTreeSearch);
196 | m_pSpanNodeCache = spanNode;
197 | m_iLastClazz = clz;
198 | }
199 |
200 | FT_LOG(FT_DEBUG, "spannode:%p", spanNode);
201 | if (spanNode == NULL) {
202 | FT_LOG(FT_ERROR, "Error, can't find spanInfo for obj:%p", object);
203 | return;
204 | }
205 | FT_LOG(FT_DEBUG, "%s", spanNode->c_string());
206 |
207 | bool needInsert = (spanNode->free_size == 0);
208 | SLL_Push(&(spanNode->object_list), object);
209 |
210 | spanNode->free_size++;
211 | spanInfo.free_object++;
212 | FT_LOG(FT_DEBUG, "insert alloc node:%d, %s", needInsert, spanNode->c_string());
213 |
214 | if (needInsert) {
215 | #ifdef CACHE_ALLOC_TREE
216 | RbInsert(&spanInfo.alloc_tree, spanNode, &CCentralCacheMgr::AllocTreeGetObject, &CCentralCacheMgr::AllocTreeNode, &CCentralCacheMgr::AllocTreeInsert);
217 | #else
218 | InsertAllocList(&spanInfo, spanNode);
219 | #endif
220 | }
221 |
222 | ReleaseSpan(clz, spanNode);
223 | }
224 |
225 | int CCentralCacheMgr::AllocSpan(int clz)
226 | {
227 | struct SSpanInfo & spanInfo = m_sSpanList[clz];
228 | FT_LOG(FT_DEBUG, "clz:%d, %s", clz, spanInfo.c_string());
229 |
230 | size_t nodeSize = CSizeMap::GetInstance().class_to_size(clz);
231 | size_t wantPages = CSizeMap::GetInstance().class_to_pages(clz);
232 | size_t allocSize = wantPages << FT_PAGE_BIT;
233 | size_t allocNodes = allocSize / nodeSize;
234 |
235 | void * allocAddr = (void *)CPageMgr::GetInstance().AllocPages(wantPages);
236 | FT_LOG(FT_DEBUG, "alloc new spaninfo, %p", allocAddr);
237 |
238 | struct SSpanNode * spanNode = s_spannode_allocator.AllocNode();
239 | {
240 | spanNode->span_addr = allocAddr;
241 | spanNode->span_size = allocNodes;
242 | spanNode->free_size = allocNodes;
243 | RB_NODE_INIT(spanNode->span_rbnode);
244 | #ifdef CACHE_ALLOC_TREE
245 | RB_NODE_INIT(spanNode->alloc_rbnode);
246 | #else
247 | INIT_LIST_NODE(&spanNode->alloc_listnode);
248 | #endif
249 |
250 | size_t start = (size_t)spanNode->span_addr;
251 | size_t end = start + allocSize;
252 |
253 | size_t curr = start;
254 | size_t next = curr + nodeSize;
255 |
256 | while (next < end) {
257 | SLL_SetNext((void *)curr, (void *)next);
258 | next += nodeSize;
259 | curr += nodeSize;
260 | }
261 | SLL_SetNext((void *)curr, NULL);
262 | SLL_SetNext(&spanNode->object_list, spanNode->span_addr);
263 | FT_LOG(FT_DEBUG, "%s", spanNode->c_string());
264 | }
265 |
266 | InsertSpan(clz, spanNode);
267 | FT_LOG(FT_DEBUG, "End of allocspan, %s", spanInfo.c_string());
268 |
269 | AddAllocPages(wantPages);
270 | FT_LOG(FT_DEBUG, "Now, allocate out pages:%zd, bytes:%zd", AllocOutPages(), AllocOutBytes());
271 | }
272 |
273 | int CCentralCacheMgr::ReleaseSpan(int clz, struct SSpanNode * spanNode)
274 | {
275 | FT_LOG(FT_DEBUG, "clz:%d, %s", clz, spanNode->c_string());
276 | if (spanNode->free_size != spanNode->span_size) {
277 | return -1;
278 | }
279 |
280 | struct SSpanInfo & spanInfo = m_sSpanList[clz];
281 | FT_LOG(FT_DEBUG, "%s", spanInfo.c_string());
282 |
283 | if (m_pSpanNodeCache== spanNode) {
284 | m_pSpanNodeCache = NULL;
285 | m_iLastClazz = -1;
286 | }
287 |
288 | RbRemove(&spanInfo.span_tree, spanNode, &CCentralCacheMgr::SpanTreeNode);
289 |
290 | #ifdef CACHE_ALLOC_TREE
291 | RbRemove(&spanInfo.alloc_tree, spanNode, &CCentralCacheMgr::AllocTreeNode);
292 | #else
293 | RemoveAllocList(spanNode);
294 | #endif
295 |
296 | spanInfo.span_count --;
297 |
298 | CSizeMap & sizemap = CSizeMap::GetInstance();
299 |
300 | size_t pages2free = sizemap.class_to_pages(clz);
301 | spanInfo.free_object -= (pages2free << FT_PAGE_BIT) / sizemap.class_to_size(clz);
302 |
303 | CPageMgr::GetInstance().ReleasePages((void *)spanNode->span_addr, pages2free);
304 |
305 | s_spannode_allocator.ReleaseNode(spanNode);
306 | FT_LOG(FT_DEBUG, "%s", spanInfo.c_string());
307 |
308 | DecAllocPages(pages2free);
309 | FT_LOG(FT_DEBUG, "Now, allocate out pages:%zd, bytes:%zd", AllocOutPages(), AllocOutBytes());
310 |
311 | return 0;
312 | }
313 |
// Register a freshly allocated span with size class 'clz': bump the class
// counters and add the span to the per-class indexes.  Always returns 0.
int CCentralCacheMgr::InsertSpan(int clz, struct SSpanNode * spanNode)
{
    FT_LOG(FT_DEBUG, "clz:%d, %s", clz, spanNode->c_string());

    struct SSpanInfo & spanInfo = m_sSpanList[clz];
    spanInfo.span_count++;

    // Object capacity of one span = span bytes / object size.
    CSizeMap & sizemap = CSizeMap::GetInstance();
    spanInfo.free_object += (sizemap.class_to_pages(clz) << FT_PAGE_BIT) / sizemap.class_to_size(clz);

    // Address-ordered tree, used to map object pointers back to their span.
    RbInsert(&spanInfo.span_tree, spanNode, &CCentralCacheMgr::SpanTreeGetObject, &CCentralCacheMgr::SpanTreeNode, &CCentralCacheMgr::SpanTreeInsert);
#ifdef CACHE_ALLOC_TREE
    RbInsert(&spanInfo.alloc_tree, spanNode, &CCentralCacheMgr::AllocTreeGetObject, &CCentralCacheMgr::AllocTreeNode, &CCentralCacheMgr::AllocTreeInsert);
#else
    InsertAllocList(&spanInfo, spanNode);
#endif
    return 0;
}
332 |
333 | void CCentralCacheMgr::ReleaseBytes(size_t bytes)
334 | {
335 | m_llAllocBytes -= bytes;
336 | }
337 |
338 | void CCentralCacheMgr::AllocBytes(size_t bytes)
339 | {
340 | m_llAllocBytes += bytes;
341 | }
342 |
343 | void CCentralCacheMgr::AddAllocPages(size_t pages)
344 | {
345 | m_llAllocPages += pages;
346 | }
347 |
348 | void CCentralCacheMgr::DecAllocPages(size_t pages)
349 | {
350 | m_llAllocPages -= pages;
351 | }
352 |
353 | size_t CCentralCacheMgr::AllocOutPages()
354 | {
355 | return m_llAllocPages;
356 | }
357 |
358 | size_t CCentralCacheMgr::AllocOutBytes()
359 | {
360 | return m_llAllocBytes;
361 | }
362 |
// Tri-state comparator used when searching the span tree for the span that
// contains 'rhs' (an object address):
//   0          -> rhs lies inside lhs's [start, end) byte range
//   1          -> rhs >= end   (descend right)
//   (size_t)-1 -> rhs <  start (descend left)
// NOTE(review): the -1 arrives wrapped into a huge unsigned value because
// the RbSearchFunc signature returns size_t; the caller must reinterpret
// it as signed for "less than" to be detectable.
size_t CCentralCacheMgr::SpanTreeSearch(size_t clz, const void * lhs, const void * rhs)
{
    size_t object_size = CSizeMap::GetInstance().class_to_size(clz);

    struct SSpanNode & spanInfo = *(struct SSpanNode *)lhs;
    size_t start_addr = (size_t)spanInfo.span_addr;
    // Span byte length = object size * object capacity (span_size).
    size_t end_addr = start_addr + object_size * spanInfo.span_size;

    size_t object_addr = (size_t)rhs;

    FT_LOG(FT_DEBUG, "start:%p, length:%zd, obj:%p", spanInfo.span_addr, object_size * spanInfo.span_size, (void *)object_addr);
    FT_LOG(FT_DEBUG, "start:%zd, end:%zd, obj:%zd", (size_t)start_addr, (size_t)end_addr, (size_t)object_addr);

    if (object_addr >= start_addr && object_addr < end_addr) {
        return 0;
    }
    else if (object_addr >= end_addr) {
        return 1;
    }
    else {
        return -1;
    }
}
386 |
// Orders spans in span_tree by base address.  The signed difference is
// returned through a size_t, so "lhs < rhs" arrives as a wrapped (huge)
// value; the caller reinterprets the result as signed.
size_t CCentralCacheMgr::SpanTreeInsert(const void * lhs, const void * rhs)
{
    struct SSpanNode & lNode = *(struct SSpanNode *)lhs;
    struct SSpanNode & rNode = *(struct SSpanNode *)rhs;

    FT_LOG(FT_DEBUG, "lhs:%p, rhs:%p", lNode.span_addr, rNode.span_addr);
    FT_LOG(FT_DEBUG, "lhs:%zd, rhs:%zd", (size_t)lNode.span_addr, (size_t)rNode.span_addr);

    return (size_t)lNode.span_addr - (size_t)rNode.span_addr;
}
397 |
398 | CCentralCacheMgr::SSpanNode * CCentralCacheMgr::SpanTreeGetObject(rb_node * rbNode)
399 | {
400 | return container_of(rbNode, struct SSpanNode, span_rbnode);
401 | }
402 |
403 | rb_node * CCentralCacheMgr::SpanTreeNode(struct SSpanNode * spanNode)
404 | {
405 | return &spanNode->span_rbnode;
406 | }
407 |
408 | #ifdef CACHE_ALLOC_TREE
// Pointer-identity comparator for the alloc tree: keys are the SSpanNode
// addresses themselves.  Negative differences wrap through size_t; the
// caller reinterprets the result as signed.  'clz' is unused here.
size_t CCentralCacheMgr::AllocTreeSearch(size_t clz, const void * lhs, const void * rhs)
{
    return (size_t)lhs - (size_t)rhs;
}
413 |
// Insert-time comparator for the alloc tree; same pointer-identity
// ordering (and size_t wraparound caveat) as AllocTreeSearch.
size_t CCentralCacheMgr::AllocTreeInsert(const void * lhs, const void * rhs)
{
    return (size_t)lhs - (size_t)rhs;
}
418 |
// Recover the owning SSpanNode from its embedded alloc_rbnode link.
CCentralCacheMgr::SSpanNode * CCentralCacheMgr::AllocTreeGetObject(rb_node * rbNode)
{
    return container_of(rbNode, struct SSpanNode, alloc_rbnode);
}
423 |
// Expose the embedded rb link used by the alloc tree.
rb_node * CCentralCacheMgr::AllocTreeNode(SSpanNode * spanNode)
{
    return &(spanNode->alloc_rbnode);
}
428 | #else
// Head (oldest inserted) span of the class's allocation list.
// NOTE(review): result is meaningless if the list is empty —
// list_first_entry would hand back the list head itself; callers must
// check for emptiness first.
struct CCentralCacheMgr::SSpanNode * CCentralCacheMgr::FirstOfAllocList(struct SSpanInfo * spanInfo)
{
    return list_first_entry(&spanInfo->alloc_list, struct SSpanNode, alloc_listnode);
}
433 |
434 | void CCentralCacheMgr::InsertAllocList(struct SSpanInfo * spanInfo, struct SSpanNode * spanNode)
435 | {
436 | list_add_tail(&spanNode->alloc_listnode, &spanInfo->alloc_list);
437 | }
438 |
// Unlink the span from its class's allocation list (list_del also poisons
// the node's links).
void CCentralCacheMgr::RemoveAllocList(struct SSpanNode * spanNode)
{
    list_del(&spanNode->alloc_listnode);
}
443 | #endif
444 |
445 | struct CCentralCacheMgr::SSpanNode * CCentralCacheMgr::RbSearch(size_t clz, struct rb_root *root,
446 | void * object, RbGetObjectFunc getObject, RbSearchFunc search)
447 | {
448 | struct rb_node *node = root->rb_node;
449 |
450 | while (node) {
451 | struct SSpanNode *spanNode = (this->*getObject)(node);
452 | size_t result = (this->*search)(clz, (void *)spanNode, object);
453 |
454 | if (result < 0)
455 | node = node->rb_left;
456 | else if (result > 0)
457 | node = node->rb_right;
458 | else
459 | return spanNode;
460 | }
461 | return NULL;
462 | }
463 |
464 | size_t CCentralCacheMgr::RbInsert(struct rb_root *root, struct SSpanNode *data,
465 | RbGetObjectFunc getObject, RbGetNodeFunc getNode, RbInsertFunc compare)
466 | {
467 | struct rb_node **newnode = &(root->rb_node), *parent = NULL;
468 |
469 | /* Figure out where to put new node */
470 | while (*newnode) {
471 | struct SSpanNode *thisnode = (this->*getObject)(*newnode);
472 | size_t result = (this->*compare)(data, thisnode);
473 |
474 | parent = *newnode;
475 | if (result < 0)
476 | newnode = &((*newnode)->rb_left);
477 | else if (result > 0)
478 | newnode = &((*newnode)->rb_right);
479 | else
480 | return 0;
481 | }
482 |
483 | /* Add new node and rebalance tree. */
484 | rb_link_node((this->*getNode)(data), parent, newnode);
485 | rb_insert_color((this->*getNode)(data), root);
486 |
487 | return 1;
488 | }
489 |
// Erase 'spanNode' from 'root', using the accessor that yields the node's
// embedded rb link for that particular tree.
void CCentralCacheMgr::RbRemove(rb_root * root, struct SSpanNode * spanNode,
    RbGetNodeFunc getNode)
{
    rb_erase((this->*getNode)(spanNode), root);
}
495 | }
496 |
--------------------------------------------------------------------------------
/src/ft_central_cache_mgr.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_CENTRAL_CACHE_MGR_H__
9 | #define __FT_CENTRAL_CACHE_MGR_H__
10 |
11 | //#define CACHE_ALLOC_TREE
12 |
13 | #ifndef CACHE_ALLOC_TREE
14 | #define CACHE_ALLOC_LIST
15 | #endif
16 |
17 | #include "ft_rb_tree.h"
18 | #include "ft_sizemap.h"
19 |
20 | #ifdef CACHE_ALLOC_LIST
21 | #include "ft_list.h"
22 | #endif
23 |
24 | namespace ftmalloc
25 | {
// Central cache: owns all spans, hands object ranges to thread caches and
// takes them back (tcmalloc-style middle layer between thread caches and
// the page manager).
class CCentralCacheMgr
{
public:
    // A span: a contiguous run of pages dedicated to one size class and
    // carved into equal-size objects.
    struct SSpanNode
    {
        int span_size;                      // object capacity of the span
        int free_size;                      // objects currently free in it

        struct rb_node span_rbnode;         // link: SSpanInfo.span_tree
#ifdef CACHE_ALLOC_TREE
        struct rb_node alloc_rbnode;        // link: SSpanInfo.alloc_tree
#else
        struct list_head alloc_listnode;    // link: SSpanInfo.alloc_list
#endif

        void * span_addr;                   // base address of the pages
        void * object_list;                 // free objects (singly linked)

        // Debug dump.  NOTE(review): shared static buffer — not thread
        // safe, and every call overwrites the previous result.
        const char * c_string() {
            static char buf[128];
#ifdef C_STRING_FUNC
            snprintf(buf, 128, "SSpanNode, span_size:%d, free_size:%d, span_addr:%p, object_list:%p",
                span_size, free_size, span_addr, object_list);
#else
            buf[0] = 'N';
            buf[1] = '\0';
#endif
            return buf;
        }
    };

    // Per-size-class state: every span of the class plus summary counters.
    struct SSpanInfo
    {
        int span_count;                 // spans currently owned
        int free_object;                // free objects across all spans
        struct rb_root span_tree;       // spans keyed by base address

#ifdef CACHE_ALLOC_TREE
        struct rb_root alloc_tree;      // spans keyed by node address
#else
        struct list_head alloc_list;    // spans in insertion order
#endif

        // Debug dump; same static-buffer caveat as SSpanNode::c_string().
        const char * c_string() {
            static char buf[128];
#ifdef C_STRING_FUNC
            snprintf(buf, 128, "SSpanInfo, span_count:%d, free_object:%d",
                span_count, free_object);
#else
            buf[0] = 'N';
            buf[1] = '\0';
#endif
            return buf;
        }
    };

public:
    // Process-wide singleton accessor.
    static CCentralCacheMgr & GetInstance();

    ~CCentralCacheMgr();

    // Object-range transfer with thread caches; semantics follow
    // tcmalloc's InsertRange/RemoveRange — confirm against the .cpp.
    void InsertRange(int clz, void *start, void *end, int N);

    int RemoveRange(int clz, void **start, void **end, int N);

    // Whole-page allocation paths (bypass the size-class machinery).
    void * AllocPages(int wantPages);

    void FreePages(void * pageAddr, int pagesFree);

    void ShowInfo();
private:
    // Singleton: construction and copying are private.
    CCentralCacheMgr();
    CCentralCacheMgr(const CCentralCacheMgr &);
    CCentralCacheMgr & operator=(const CCentralCacheMgr &);

private:
    // Member-function-pointer hooks that parameterize the generic rb-tree
    // helpers below over which embedded node/comparator to use.
    // NOTE(review): comparison results travel through size_t, so negative
    // values arrive wrapped and must be reinterpreted as signed by callers.
    typedef SSpanNode * (CCentralCacheMgr::*RbGetObjectFunc)(rb_node * rbNode);
    typedef rb_node * (CCentralCacheMgr::*RbGetNodeFunc)(struct SSpanNode * spanNode);
    typedef size_t (CCentralCacheMgr::*RbSearchFunc)(size_t clz, const void * lhs, const void * rhs);
    typedef size_t (CCentralCacheMgr::*RbInsertFunc)(const void * lhs, const void * rhs);

private:
    // Byte / page accounting.
    void ReleaseBytes(size_t bytes);
    void AllocBytes(size_t bytes);

    void AddAllocPages(size_t pages);
    void DecAllocPages(size_t pages);

    size_t AllocOutPages();
    size_t AllocOutBytes();

    // Object-level operations on spans of class clz.
    int FetchFromSpan(int clz, int N, void ** start, void ** end);
    void ReleaseToSpan(int clz, void * object);

    // Span lifecycle.
    int AllocSpan(int clz);
    int ReleaseSpan(int clz, struct SSpanNode * spanNode);
    int InsertSpan(int clz, struct SSpanNode * spanInfo);

private:
    // Generic rb-tree plumbing shared by span_tree and alloc_tree.
    struct SSpanNode * RbSearch(size_t clz, struct rb_root *root, void * object,
        RbGetObjectFunc getObject, RbSearchFunc search);
    size_t RbInsert(struct rb_root *root, struct SSpanNode *data,
        RbGetObjectFunc getObject, RbGetNodeFunc getNode, RbInsertFunc compare);
    void RbRemove(rb_root * root, struct SSpanNode * spanNode, RbGetNodeFunc getNode);

private:
    //help function for SSpanInfo.span_tree.
    size_t SpanTreeSearch(size_t clz, const void * lhs, const void * rhs);
    size_t SpanTreeInsert(const void * lhs, const void * rhs);
    struct SSpanNode * SpanTreeGetObject(rb_node * rbNode);
    rb_node * SpanTreeNode(struct SSpanNode * spanNode);

#ifdef CACHE_ALLOC_TREE
    //help function for SSpanInfo.alloc_tree.
    size_t AllocTreeSearch(size_t clz, const void * lhs, const void * rhs);
    size_t AllocTreeInsert(const void * lhs, const void * rhs);
    struct SSpanNode * AllocTreeGetObject(rb_node * rbNode);
    rb_node * AllocTreeNode(struct SSpanNode * spanNode);
#else
    // Allocation-order list variant (the default build).
    struct SSpanNode * FirstOfAllocList(struct SSpanInfo * spanInfo);
    void InsertAllocList(struct SSpanInfo * spanInfo, struct SSpanNode * spanNode);
    void RemoveAllocList(struct SSpanNode * spanNode);
#endif

private:
    struct SSpanInfo m_sSpanList[kNumClasses];  // one entry per size class
    struct SSpanNode * m_pSpanNodeCache;        // 1-entry span lookup cache
    size_t m_iLastClazz;                        // class of the cached span

    size_t m_llAllocPages;  // pages currently drawn from the page manager
    size_t m_llAllocBytes;  // bytes currently accounted as handed out

    // NOTE(review): "sInstace" looks like a typo for "sInstance"; renaming
    // requires updating the definition in the .cpp as well.
    static CCentralCacheMgr sInstace;
};
160 | }
161 |
162 | #endif
163 |
--------------------------------------------------------------------------------
/src/ft_free_list.h:
--------------------------------------------------------------------------------
1 | #ifndef __FREE_LIST_H__
2 | #define __FREE_LIST_H__
3 |
4 | #include "ft_malloc_util.h"
5 |
6 | namespace ftmalloc
7 | {
// Read the embedded "next" pointer stored in the first word of node t.
// FIX: the reinterpret_cast template argument (<void **>) was lost in
// transcription, leaving the cast ill-formed.
inline void *SLL_Next(void *t) {
    return *(reinterpret_cast<void **>(t));
}
11 |
// Store n as the "next" pointer in the first word of node t.
// FIX: the reinterpret_cast template argument (<void **>) was lost in
// transcription, leaving the cast ill-formed.
inline void SLL_SetNext(void *t, void *n) {
    *(reinterpret_cast<void **>(t)) = n;
}
15 |
16 | inline void SLL_Push(void **list, void *element) {
17 | SLL_SetNext(element, *list);
18 | *list = element;
19 | }
20 |
21 | inline void *SLL_Pop(void **list) {
22 | void *result = *list;
23 | *list = SLL_Next(*list);
24 | return result;
25 | }
26 |
// Remove N elements from a linked list to which head points. head will be
// modified to point to the new head. start and end will point to the first
// and last nodes of the range. Note that end will point to NULL after this
// function is called.
// NOTE(review): assumes the list holds at least N elements — walking past
// the tail would dereference NULL inside SLL_Next.
inline void SLL_PopRange(void **head, size_t N, void **start, void **end) {
    // Empty request: return an empty range, leave the list untouched.
    if (N == 0) {
        *start = NULL;
        *end = NULL;
        return;
    }

    // Advance tmp to the N-th node (1-based).
    void *tmp = *head;
    for (size_t i = 1; i < N; ++i) {
        tmp = SLL_Next(tmp);
    }

    *start = *head;
    *end = tmp;
    *head = SLL_Next(tmp);
    // Unlink range from list.
    SLL_SetNext(tmp, NULL);
}
49 |
50 | inline void SLL_PushRange(void **head, void *start, void *end) {
51 | if (!start) return;
52 | SLL_SetNext(end, *head);
53 | *head = start;
54 | }
55 |
56 | inline size_t SLL_Size(void *head) {
57 | size_t count = 0;
58 | while (head) {
59 | count++;
60 | head = SLL_Next(head);
61 | }
62 | return count;
63 | }
64 |
// Per-size-class free list of objects with the adaptive max-length
// bookkeeping used by tcmalloc-style thread caches.
class CFreeList
{
public:
    CFreeList()
        : list_(NULL)
        , length_(0)
        , lowater_(0)
        , max_length_(1)
        , length_overages_(0)
    {

    }
    ~CFreeList() {}

private:
    void* list_; // Linked list of nodes
    // On 64-bit hardware, manipulating 16-bit values may be slightly slow.
    size_t length_; // Current length.
    size_t lowater_; // Low water mark for list length.
    size_t max_length_; // Dynamic max list length based on usage.
    // Tracks the number of times a deallocation has caused
    // length_ > max_length_. After the kMaxOverages'th time, max_length_
    // shrinks and length_overages_ is reset to zero.
    size_t length_overages_;

public:
    // Reset to the freshly-constructed state.
    void Init() {
        list_ = NULL;
        length_ = 0;
        lowater_ = 0;
        max_length_ = 1;
        length_overages_ = 0;
    }

    // Return current length of list
    size_t length() const {
        return length_;
    }

    // Return the maximum length of the list.
    size_t max_length() const {
        return max_length_;
    }

    // Set the maximum length of the list. If 'new_max' > length(), the
    // client is responsible for removing objects from the list.
    void set_max_length(size_t new_max) {
        max_length_ = new_max;
    }

    // Return the number of times that length() has gone over max_length().
    size_t length_overages() const {
        return length_overages_;
    }

    void set_length_overages(size_t new_count) {
        length_overages_ = new_count;
    }

    // Is list empty?
    bool empty() const {
        return list_ == NULL;
    }

    // Low-water mark management
    size_t lowwatermark() const { return lowater_; }
    void clear_lowwatermark() { lowater_ = length_; }

    void Push(void* ptr) {
        SLL_Push(&list_, ptr);
        length_++;
    }

    // NOTE(review): length_ is decremented unconditionally, so calling
    // Pop() on an empty list underflows the counter (the guarding assert
    // is commented out) and SLL_Pop dereferences NULL.
    void* Pop() {
        //ASSERT(list_ != NULL);
        length_--;
        if (length_ < lowater_) lowater_ = length_;
        return SLL_Pop(&list_);
    }

    // NOTE(review): passes &list_, so this returns the FIRST element
    // (list_ itself), not the successor of the first element; tcmalloc's
    // equivalent is SLL_Next(list_) — confirm intent against callers.
    void* Next() {
        return SLL_Next(&list_);
    }

    // Splice a chain of N elements onto the front; N is trusted, not
    // verified against the chain's real length.
    void PushRange(size_t N, void *start, void *end) {
        SLL_PushRange(&list_, start, end);
        length_ += N;
    }

    // Remove the first N elements; precondition length_ >= N (the assert
    // is commented out, so a violation silently underflows length_).
    void PopRange(size_t N, void **start, void **end) {
        SLL_PopRange(&list_, N, start, end);
        //ASSERT(length_ >= N);
        length_ -= N;
        if (length_ < lowater_) lowater_ = length_;
    }
};
161 | }
162 |
163 | #endif //__FREE_LIST_H__
164 |
--------------------------------------------------------------------------------
/src/ft_list.h:
--------------------------------------------------------------------------------
1 | #ifndef __FT_LIST_H__
2 | #define __FT_LIST_H__
3 |
4 | #include <stddef.h>
5 |
6 | namespace ftmalloc
7 | {
8 | #ifdef _MSC_VER
9 | #define container_of(ptr, type, member) ((type *)((char *)ptr - offsetof(type, member)))
10 | #else
11 | #define container_of(ptr, type, member) ({ \
12 | const typeof( ((type *)0)->member ) *__mptr = (ptr); \
13 | (type *)( (char *)__mptr - offsetof(type,member) );})
14 | #endif
15 |
16 | #if defined(offsetof)
17 | #undef offsetof
18 | #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
19 | #endif
20 |
21 | struct list_head {
22 | struct list_head * prev;
23 | struct list_head * next;
24 | };
25 |
26 | #define LIST_HEAD_INIT(name) { &(name), &(name) }
27 |
28 | #define LIST_HEAD(name) \
29 | struct list_head name = LIST_HEAD_INIT(name)
30 |
31 | #define INIT_LIST_NODE(node) \
32 | init_list_head(node)
33 |
34 | #define INIT_LIST_HEAD(head) \
35 | init_list_head(head)
36 |
37 | static inline void init_list_head(struct list_head *list)
38 | {
39 | list->next = list;
40 | list->prev = list;
41 | }
42 |
43 | static inline void __list_add(struct list_head *newnode,
44 | struct list_head *prev,
45 | struct list_head *next)
46 | {
47 | next->prev = newnode;
48 | newnode->next = next;
49 | newnode->prev = prev;
50 | prev->next = newnode;
51 | }
52 |
// Insert newnode right after head (stack-like / LIFO position).
static inline void list_add(struct list_head *newnode, struct list_head *head)
{
    __list_add(newnode, head, head->next);
}
57 |
// Insert newnode right before head (queue-like / FIFO position).
static inline void list_add_tail(struct list_head *newnode, struct list_head *head)
{
    __list_add(newnode, head->prev, head);
}
62 |
63 | static inline void __list_del(struct list_head * prev, struct list_head * next)
64 | {
65 | next->prev = prev;
66 | prev->next = next;
67 | }
68 |
// Remove entry from its list and poison its links so accidental reuse
// faults instead of silently corrupting the list.
static inline void list_del(struct list_head *entry)
{
    __list_del(entry->prev, entry->next);
    entry->next = NULL;
    entry->prev = NULL;
}
75 |
// True if 'list' is the final element before the sentinel 'head'.
static inline int list_is_last(const struct list_head *list,
    const struct list_head *head)
{
    return list->next == head;
}
81 |
// True if the list contains only its sentinel head.
static inline int list_empty(const struct list_head *head)
{
    return head->next == head;
}
86 |
87 | #define list_entry(ptr, type, member) \
88 | container_of(ptr, type, member)
89 |
90 | #define list_first_entry(ptr, type, member) \
91 | list_entry((ptr)->next, type, member)
92 |
93 | }
94 | #endif //__FT_LIST_H__
95 |
--------------------------------------------------------------------------------
/src/ft_lock.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_LOCK_H__
9 | #define __FT_LOCK_H__
10 |
11 | #include <pthread.h>
12 |
13 | namespace ftmalloc
14 | {
15 | typedef pthread_mutex_t CMutexType;
16 |
17 | #define FT_MUTEX_INITIALIZER() \
18 | PTHREAD_MUTEX_INITIALIZER
19 |
20 | #define FT_MUTEX_CREATE(mutex) \
21 | pthread_mutex_init(&mutex, NULL);
22 |
23 | #define FT_MUTEX_DESTROY(mutex) \
24 | pthread_mutex_destroy(&mutex);
25 |
26 | #define FT_MUTEX_LOCK(mutex) \
27 | pthread_mutex_lock(&mutex)
28 |
29 | #define FT_MUTEX_UNLOCK(mutex) \
30 | pthread_mutex_unlock(&mutex)
31 |
// RAII scope guard: acquires the mutex on construction, releases it on
// destruction (exception- and early-return-safe).
class CAutoLock
{
public:
    CAutoLock(CMutexType & mutex)
        : mMutex(mutex)
    {
        FT_MUTEX_LOCK(mMutex);
    }

    ~CAutoLock()
    {
        FT_MUTEX_UNLOCK(mMutex);
    }
private:
    CMutexType & mMutex;    // reference: the mutex must outlive the guard
};
48 | }
49 |
50 | #endif
--------------------------------------------------------------------------------
/src/ft_malloc_alias.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_MALLOC_ALIAS_H__
9 | #define __FT_MALLOC_ALIAS_H__
10 |
11 | #ifndef __THROW
12 | #define __THROW
13 | #endif
14 |
15 | #define ALIAS(name) __attribute__ ((alias (#name)))
16 |
17 | /*
18 | void* operator new(size_t size) throw (std::bad_alloc)
19 | ALIAS(ft_new);
20 | void operator delete(void* p) __THROW
21 | ALIAS(ft_delete);
22 | void* operator new[](size_t size) throw (std::bad_alloc)
23 | ALIAS(ft_newarray);
24 | void operator delete[](void* p) __THROW
25 | ALIAS(ft_deletearray);
26 | void* operator new(size_t size, const std::nothrow_t& nt) __THROW
27 | ALIAS(ft_new_nothrow);
28 | void* operator new[](size_t size, const std::nothrow_t& nt) __THROW
29 | ALIAS(ft_newarray_nothrow);
30 | void operator delete(void* p, const std::nothrow_t& nt) __THROW
31 | ALIAS(ft_delete_nothrow);
32 | void operator delete[](void* p, const std::nothrow_t& nt) __THROW
33 | ALIAS(ft_deletearray_nothrow);
34 | */
35 |
36 | extern "C"
37 | {
38 | void* malloc(size_t size) __THROW ALIAS(ft_malloc);
39 | void free(void* ptr) __THROW ALIAS(ft_free);
40 |
41 | /*
42 | void* realloc(void* ptr, size_t size) __THROW ALIAS(ft_realloc);
43 | void* calloc(size_t n, size_t size) __THROW ALIAS(ft_calloc);
44 | void cfree(void* ptr) __THROW ALIAS(ft_cfree);
45 | void* memalign(size_t align, size_t s) __THROW ALIAS(ft_memalign);
46 | void* valloc(size_t size) __THROW ALIAS(ft_valloc);
47 | void* pvalloc(size_t size) __THROW ALIAS(ft_pvalloc);
48 | int posix_memalign(void** r, size_t a, size_t s) __THROW ALIAS(ft_posix_memalign);
49 | int mallopt(int cmd, int value) __THROW ALIAS(ft_mallopt);
50 | size_t malloc_size(void* p) __THROW ALIAS(ft_malloc_size);
51 | size_t malloc_usable_size(void* p) __THROW ALIAS(ft_malloc_size);
52 | */
53 | }
54 |
55 | #undef ALIAS
56 |
57 | #endif
58 |
--------------------------------------------------------------------------------
/src/ft_malloc_intf.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #include "ft_malloc_intf.h"
9 | #include "ft_thread_cache.h"
10 | #include "ft_mem_alloc_intf.h"
11 |
12 | namespace ftmalloc
13 | {
14 | void * __Malloc(size_t size)
15 | {
16 | IMemAlloc * allocator = CThreadCache::GetInstance().GetAllocator();
17 | return allocator->Malloc(size);
18 | }
19 |
20 | void * __ReAlloc(void * ptr, size_t size)
21 | {
22 | IMemAlloc * allocator = CThreadCache::GetInstance().GetAllocator();
23 | return allocator->ReAlloc(ptr, size);
24 | }
25 |
26 | void * __Calloc(size_t nmemb, size_t size)
27 | {
28 | IMemAlloc * allocator = CThreadCache::GetInstance().GetAllocator();
29 | return allocator->Calloc(nmemb, size);
30 | }
31 |
32 | void __Free(void * ptr)
33 | {
34 | IMemAlloc * allocator = CThreadCache::GetInstance().GetAllocator();
35 | if (allocator != NULL) {
36 | allocator->Free(ptr);
37 | }
38 | }
39 | }
--------------------------------------------------------------------------------
/src/ft_malloc_intf.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_MALLOC_INTF_H__
9 | #define __FT_MALLOC_INTF_H__
10 |
11 | #include <stddef.h>
12 |
13 | namespace ftmalloc
14 | {
15 | void * __Malloc(size_t);
16 |
17 | void * __ReAlloc(void *, size_t);
18 |
19 | void * __Calloc(size_t, size_t);
20 |
21 | void __Free(void *);
22 | }
23 |
24 | #endif
--------------------------------------------------------------------------------
/src/ft_malloc_log.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #include "ft_malloc_log.h"
9 |
// Default runtime log threshold, chosen at compile time by whichever
// FT_LOG_* macro the build defines (falls back to FT_INFO).
// NOTE(review): "FT_LOG_EMRGE" looks like a typo for "FT_LOG_EMERG";
// fixing it would require updating any build flags that use it.
#if defined(FT_LOG_EMRGE)
int s_ft_log_level = FT_EMERG;
#elif defined(FT_LOG_ALERT)
int s_ft_log_level = FT_ALERT;
#elif defined(FT_LOG_CRIT)
int s_ft_log_level = FT_CRIT;
#elif defined(FT_LOG_ERROR)
int s_ft_log_level = FT_ERROR;
#elif defined(FT_LOG_NOTICE)
int s_ft_log_level = FT_NOTICE;
#elif defined(FT_LOG_INFO)
int s_ft_log_level = FT_INFO;
#elif defined(FT_LOG_DEBUG)
int s_ft_log_level = FT_DEBUG;
#else
int s_ft_log_level = FT_INFO;
#endif

// Level names indexed by the FT_* constants (1-based; slot 0 unused).
const char * s_ft_log_level_string[] =
{
    "",
    "EMERG",
    "ALERT",
    "CRIT",
    "ERROR",
    "NOTICE",
    "INFO",
    "DEBUG",
};
39 |
40 |
--------------------------------------------------------------------------------
/src/ft_malloc_log.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_MALLOC_LOG_H__
9 | #define __FT_MALLOC_LOG_H__
10 |
11 | #include <stdio.h>
12 | #include <pthread.h>
13 |
14 | #define FT_EMERG 1
15 | #define FT_ALERT 2
16 | #define FT_CRIT 3
17 | #define FT_ERROR 4
18 | #define FT_NOTICE 5
19 | #define FT_INFO 6
20 | #define FT_DEBUG 7
21 |
22 | extern const char * s_ft_log_level_string[];
23 | extern int s_ft_log_level;
24 |
#ifdef LOG_PRINTF
// FIX: spaces inserted around 'fmt' — "...:"fmt"\n" without whitespace is
// ill-formed in C++11 and later, where an identifier glued to a string
// literal is parsed as a user-defined-literal suffix.
#define FT_LOG(level, fmt, ...) \
    do { \
        if (level <= s_ft_log_level) { \
            printf("%30s%20s:%5d:[TID:%lu][%s]:" fmt "\n", \
                __FILE__, __FUNCTION__, __LINE__, pthread_self(), \
                s_ft_log_level_string[level], ##__VA_ARGS__); \
        } \
    } while (0)
#else
// Logging compiled out entirely.
#define FT_LOG(level, fmt, ...)
#endif
37 |
38 | #endif
39 |
--------------------------------------------------------------------------------
/src/ft_malloc_slab.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_MALLOC_SLAB_H__
9 | #define __FT_MALLOC_SLAB_H__
10 |
11 | #include "ft_sys_alloc_intf.h"
12 | #include "ft_free_list.h"
13 | #include "ft_malloc_log.h"
14 | #include "ft_malloc_util.h"
15 |
16 | #include <string.h>
17 | #include <new>
18 |
19 | namespace ftmalloc
20 | {
21 | template
22 | class CSlab
23 | {
24 | public:
25 | CSlab(const char * name, ISysAlloc & allocator, size_t page_bits = FT_PAGE_BIT)
26 | : _sys_allocator(allocator)
27 | , _freelist(NULL)
28 | , _freenum(0)
29 | , _totalnum(0)
30 | , _page_bits(page_bits)
31 | {
32 | if (_page_bits <= 0) {
33 | _page_bits = FT_PAGE_BIT;
34 | } else if (_page_bits < 12) {
35 | _page_bits = 12; //4// 4k. 1 << 12.
36 | }
37 | strncpy(_name, name, FT_MIN(1023, strlen(name)));
38 | FT_LOG(FT_INFO, "cslab(%s) create, page_bit:%zd, nodesize:%lu",
39 | _name, _page_bits, sizeof(T));
40 | }
41 |
42 | ~CSlab()
43 | {
44 | }
45 |
46 | T * AllocNode()
47 | {
48 | void * node = NULL;
49 | FT_LOG(FT_DEBUG, "cslab(%s), want a node!", _name);
50 |
51 | if (_freelist == NULL || _freenum == 0) {
52 | void * addr = _sys_allocator.SysAlloc(1 << _page_bits);
53 |
54 | FT_LOG(FT_INFO, "cslab(%s), allocator %p, %d", _name, addr, (1 << _page_bits));
55 |
56 | size_t nodesize = sizeof(T);
57 | size_t start = (size_t)addr;
58 | size_t end = (size_t)(start + (1 << _page_bits));
59 | size_t curr = start;
60 | size_t next = curr + nodesize;
61 |
62 | while (next < end) {
63 | SLL_SetNext((void *)curr, (void *)next);
64 | next += nodesize;
65 | curr += nodesize;
66 | _freenum++;
67 | _totalnum++;
68 | }
69 |
70 | SLL_SetNext((void *)curr, _freelist);
71 | SLL_SetNext(&_freelist, addr);
72 | }
73 |
74 | node = SLL_Pop(&_freelist);
75 | _freenum--;
76 |
77 | FT_LOG(FT_DEBUG, "cslab(%s), alloc node:%p", _name, node);
78 |
79 | ::new((void *)node) T();
80 | //T * ptr = (T *)ptr;
81 | //ptr->T::T();
82 | return (T *)node;
83 | }
84 |
85 | void ReleaseNode(T * &node)
86 | {
87 | if (node == NULL) {
88 | FT_LOG(FT_INFO, "cslab(%s), release NULL!", _name);
89 | return;
90 | }
91 |
92 | FT_LOG(FT_DEBUG, "cslab(%s), release node:%p", _name, node);
93 | node->~T();
94 | SLL_Push(&_freelist, (void *)node);
95 | _freenum++;
96 |
97 | node = NULL;
98 | }
99 |
100 | private:
101 | void * _freelist;
102 | size_t _freenum;
103 | size_t _totalnum;
104 | size_t _page_bits;
105 |
106 | ISysAlloc & _sys_allocator;
107 | char _name[1024];
108 | };
109 | }
110 |
111 | #endif
--------------------------------------------------------------------------------
/src/ft_malloc_util.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_MALLOC_UTIL_H__
9 | #define __FT_MALLOC_UTIL_H__
10 |
11 | #define SAFE_FREE(ptr) \
12 | do { \
13 | if (ptr != NULL) { \
14 | free(ptr); \
15 | ptr = NULL; \
16 | } \
17 | } while (0)
18 |
19 | #define SAFE_DELETE(ptr) \
20 | do { \
21 | if (ptr != NULL) { \
22 | delete (ptr); \
23 | ptr = NULL; \
24 | } \
25 | } while (0)
26 |
27 | #define FT_PAGE_BIT 16
28 |
29 | #define FT_MAX(a, b) ((a) > (b) ? (a) : (b))
30 | #define FT_MIN(a, b) ((a) < (b) ? (a) : (b))
31 |
32 | #endif
33 |
--------------------------------------------------------------------------------
/src/ft_mem_alloc_intf.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #include "ft_mem_alloc_intf.h"
9 | #include "ft_malloc_slab.h"
10 | #include "ft_cache_allocator.h"
11 | #include "ft_mmap_page_allocator.h"
12 | #include "ft_lock.h"
13 | #include "ft_malloc_log.h"
14 |
15 | namespace ftmalloc
16 | {
17 | extern CMmapPageAllocator s_mmap_page_allocator;
18 | const size_t s_tc_page_bit = FT_PAGE_BIT;
19 | static CSlab s_mem_alloc_slab("cache_allocator", s_mmap_page_allocator, s_tc_page_bit);
20 | static CMutexType sCacheAllocateLock = FT_MUTEX_INITIALIZER();
21 |
22 | IMemAlloc * IMemAlloc::CreateMemAllocator()
23 | {
24 | FT_LOG(FT_INFO, "create memory allocator!");
25 | CAutoLock lock(sCacheAllocateLock);
26 | CCacheAllocator * allocator = s_mem_alloc_slab.AllocNode();
27 | return allocator;
28 | }
29 |
30 | void IMemAlloc::DestroyMemAllocator(IMemAlloc * &allocator)
31 | {
32 | FT_LOG(FT_INFO, "destroy memory allocator!");
33 | if (allocator != NULL) {
34 | CCacheAllocator * ptr = (CCacheAllocator *)allocator;
35 | s_mem_alloc_slab.ReleaseNode(ptr);
36 | }
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/src/ft_mem_alloc_intf.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_MEM_ALLOC_INTF_H__
9 | #define __FT_MEM_ALLOC_INTF_H__
10 |
11 | #include <stddef.h>
12 |
13 | namespace ftmalloc
14 | {
// Abstract per-thread memory allocator interface plus the factory that
// creates/destroys concrete instances (see ft_mem_alloc_intf.cpp).
class IMemAlloc
{
public:
    virtual ~IMemAlloc() {}

    virtual void * Malloc(size_t) = 0;
    virtual void * ReAlloc(void *, size_t) = 0;
    virtual void * Calloc(size_t, size_t) = 0;
    virtual void Free(void *) = 0;

    // Factory methods; Destroy takes the pointer by reference so the
    // implementation can reset it after release.
    static IMemAlloc * CreateMemAllocator();
    static void DestroyMemAllocator(IMemAlloc * &allocator);
};
28 | }
29 |
30 | #endif
31 |
--------------------------------------------------------------------------------
/src/ft_mmap_page_allocator.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #include "ft_mmap_page_allocator.h"
9 | #include "ft_malloc_log.h"
10 |
11 | #include <errno.h>
12 | #include <string.h>
13 |
14 | #include <sys/mman.h>
15 |
16 | namespace ftmalloc
17 | {
18 | CMmapPageAllocator s_mmap_page_allocator;
19 |
// Nothing to initialize: the allocator is stateless.
CMmapPageAllocator::CMmapPageAllocator()
{
}
23 |
// Nothing to release: mappings are owned by the callers of SysAlloc.
CMmapPageAllocator::~CMmapPageAllocator()
{
}
27 |
28 | void * CMmapPageAllocator::SysAlloc(size_t size)
29 | {
30 | void * ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
31 | if (ptr == NULL) {
32 | FT_LOG(FT_ERROR, "mmap failed, errno:%d, %s", errno, strerror(errno));
33 | }
34 |
35 | FT_LOG(FT_DEBUG, "mmap addr:%p, size:%zd", ptr, size);
36 |
37 | return ptr;
38 | }
39 |
40 | void CMmapPageAllocator::SysRelease(void * ptr, size_t size)
41 | {
42 | FT_LOG(FT_DEBUG, "munmap, addr:%p, size:%zd", ptr, size);
43 | if (!munmap(ptr, size)) {
44 | FT_LOG(FT_ERROR, "munmap failed, errno:%d, %s", errno, strerror(errno));
45 | }
46 | }
47 |
48 | }
--------------------------------------------------------------------------------
/src/ft_mmap_page_allocator.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_MMAP_PAGE_ALLOCATOR_H__
9 | #define __FT_MMAP_PAGE_ALLOCATOR_H__
10 |
11 | #include "ft_sys_alloc_intf.h"
12 |
13 | namespace ftmalloc
14 | {
    // Page-level system allocator backed by anonymous mmap/munmap.
    // A shared instance (s_mmap_page_allocator) is defined in the .cpp.
    class CMmapPageAllocator : public ISysAlloc
    {
    public:
        CMmapPageAllocator();
        virtual ~CMmapPageAllocator();

        // Allocate `size` bytes of fresh pages; NULL on failure.
        virtual void * SysAlloc(size_t size);
        // Return pages previously obtained from SysAlloc.
        virtual void SysRelease(void * ptr, size_t size);
    };
24 | }
25 |
26 | #endif
--------------------------------------------------------------------------------
/src/ft_page_mgr.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #include "ft_page_mgr.h"
9 | #include "ft_sbrk_page_allocator.h"
10 | #include "ft_mmap_page_allocator.h"
11 | #include "ft_malloc_slab.h"
12 | #include "ft_free_list.h"
13 | #include "ft_malloc_log.h"
14 |
15 | #include
16 |
17 | namespace ftmalloc
18 | {
19 | #ifndef FT_PAGE_IDLE_RATE
20 | #define FT_PAGE_IDLE_RATE 30
21 | #endif
22 |
23 | #ifndef FT_PAGE_MIN
24 | #define FT_PAGE_MIN 20
25 | #endif
26 |
27 | extern CMmapPageAllocator s_mmap_page_allocator;
28 | extern CSbrkPageAllocator s_sbrk_page_allocator;
29 |
30 | static CSlab s_pageinfo_slab("page_info", s_mmap_page_allocator);
31 | static CSlab s_indexinfo_slab("index_info", s_mmap_page_allocator);
32 | static CSlab s_countinfo_slab("count_info", s_mmap_page_allocator);
33 |
34 | CPageMgr CPageMgr::sInstance;
35 |
    // Access the process-wide singleton page manager.
    CPageMgr & CPageMgr::GetInstance()
    {
        return sInstance;
    }
40 |
    // Initialize all trees/counters empty and probe the mmap growth
    // direction once at startup.
    CPageMgr::CPageMgr()
        : m_llTopBrkAddress(0)
        , m_iAddressTreeSize(0)
        , m_iMaxContinuePages(0)
        , m_iFreePages(0)
        , m_iSbrkPages(0)
        , m_iMmapPages(0)
        , m_iFlags(0)
    {
        RB_ROOT_INIT(m_cAddressTree);
        RB_ROOT_INIT(m_cCountTree);

        for (int i = 0; i < E_HASH_SIZE; i++) {
            RB_ROOT_INIT(m_cHash[i].hash_tree);
        }

        // mmap direction heuristic: with an unlimited stack the kernel maps
        // bottom-up (legacy layout); otherwise mappings grow top-down.
        // Recorded in bit E_MMAP_DIRECTION_BIT of m_iFlags (set = top-down).
        struct rlimit rlim;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            FT_LOG(FT_INFO, "process mmap direction:%s!", (rlim.rlim_cur == RLIM_INFINITY ? "down-top" : "top-down"));

            if (rlim.rlim_cur == RLIM_INFINITY) {
                m_iFlags &= ~(1 << E_MMAP_DIRECTION_BIT);
            } else {
                m_iFlags |= (1 << E_MMAP_DIRECTION_BIT);
            }
        }
    }
69 |
    // Destructor intentionally releases nothing: the singleton lives for
    // the whole process and pages are reclaimed by the OS at exit.
    CPageMgr::~CPageMgr()
    {
        FT_LOG(FT_DEBUG, "page mgr deconstructor!");
    }
74 |
    // Hand out `wantpages` contiguous pages, growing from the OS when no
    // free run is large enough.  Best-fit: the smallest free run that still
    // satisfies the request is chosen; a larger run is split and its tail
    // (highest addresses) is returned.
    // NOTE(review): the return value of AllocPagesFromSys() is ignored, and
    // GetBestFitIndex()/GetIndexInfo() assume a fitting run now exists —
    // if the system allocation failed this path dereferences NULL; confirm
    // callers can tolerate/never hit that case.
    void * CPageMgr::AllocPages(size_t wantpages)
    {
        void * addr = NULL;

        FT_LOG(FT_DEBUG, "want page:%zd, max continue:%zd", wantpages, m_iMaxContinuePages);

        if (m_iMaxContinuePages < wantpages) {
            // Grow by at least FT_PAGE_MIN pages to amortize syscalls.
            size_t allocpages = FT_MAX(FT_PAGE_MIN, wantpages);
            AllocPagesFromSys(allocpages);
        }

        FT_LOG(FT_DEBUG, "want page:%zd, max continue:%zd", wantpages, m_iMaxContinuePages);

        size_t bestIndex = GetBestFitIndex(wantpages);
        struct SIndexInfo * indexInfo = GetIndexInfo(bestIndex);
        struct SPageInfo * pageInfo = GetPageInfo(indexInfo);

        FT_LOG(FT_DEBUG, "want page:%zd, bestIndex:%zd", wantpages, bestIndex);
        if (pageInfo->page_count == wantpages) {
            // Exact fit: consume the whole run and drop its record.
            addr = (void *)pageInfo->base_address;

            RemovePageInfo(pageInfo);
            m_iAddressTreeSize --;

            FT_LOG(FT_DEBUG, "after alloc, address tree size:%zd", m_iAddressTreeSize);
        } else {
            // Split: serve the tail of the run, shrink the remainder and
            // re-file it under its new size.  Ordering matters: the run must
            // leave all size-keyed trees before page_count changes.
            size_t offset = (pageInfo->page_count - wantpages) << FT_PAGE_BIT;

            addr = (void *)(pageInfo->base_address + offset);
            RemoveFreeTree(pageInfo);
            RemoveCountTreeIfNeed(pageInfo->page_count);
            RemoveIndexTreeIfNeed(pageInfo->page_count);

            pageInfo->page_count -= wantpages;
            InsertIndexTreeIfNeed(pageInfo->page_count);
            InsertCountTreeIfNeed(pageInfo->page_count);
            InsertFreeTree(pageInfo);
        }

        m_iFreePages -= wantpages;
        m_iMaxContinuePages = GetMaxContinuePages();

        FT_LOG(FT_DEBUG, "want page:%zd, max continue:%zd, ret:%p", wantpages, m_iMaxContinuePages, addr);
        return addr;
    }
120 |
121 | void CPageMgr::ReleasePages(void * ptr, size_t pages)
122 | {
123 | FT_LOG(FT_DEBUG, "release, addr:%p, pages:%zd", ptr, pages);
124 |
125 | size_t addr = (size_t)ptr;
126 | bool frommmap = (m_llTopBrkAddress < addr) ? true : false;
127 | InsertPageInfo(ptr, pages, frommmap);
128 |
129 | ReleasePageIfNeed();
130 | }
131 |
    // Return a fraction of the free pages to the OS once the idle ratio
    // exceeds FT_PAGE_IDLE_RATE percent, while keeping at least FT_PAGE_MIN
    // pages cached.  Sbrk memory can only shrink from the brk top; mmap
    // runs are released from the highest addresses downward.
    // NOTE(review): if m_iMmapPages + m_iSbrkPages is 0 the idleRate
    // division faults — presumably this is only called after at least one
    // system allocation; confirm against callers.
    void CPageMgr::ReleasePageIfNeed()
    {
        size_t idleRate = m_iFreePages * 100 / (m_iMmapPages + m_iSbrkPages);
        FT_LOG(FT_DEBUG, "freepages:%zd, mmap:%zd, sbrk:%zd, idle rate:%zd",
            m_iFreePages, m_iMmapPages, m_iSbrkPages, idleRate);

        // Release FT_PAGE_IDLE_RATE percent of the currently free pages.
        size_t freesize = m_iFreePages * FT_PAGE_IDLE_RATE / 100;
        FT_LOG(FT_DEBUG, "need release %zd pages!", freesize);

        if ((m_iFreePages - freesize) < FT_PAGE_MIN || idleRate < FT_PAGE_IDLE_RATE) {
            FT_LOG(FT_INFO, "no need release pages!, idlerate:%d, min_pages:%d",
                FT_PAGE_IDLE_RATE, FT_PAGE_MIN);
            return;
        }

        if (freesize <= 0) {
            return;
        }

        // First try to shrink the brk heap: only possible when the run
        // touching the brk top is free.
        struct SPageInfo * pageInfo = (struct SPageInfo *)GetPageInfoByAddress(m_llTopBrkAddress - 1);
        if (pageInfo == NULL) {
            FT_LOG(FT_INFO, "sbrk top address is inuse, can't brk down!");
        } else {
            FT_LOG(FT_DEBUG, "sbrk top info, addr:%p, pagecount:%zd",
                (void*)pageInfo->base_address, pageInfo->page_count);
            size_t sbrk_freesize = FT_MIN(pageInfo->page_count, freesize);
            if (sbrk_freesize > 0) {
                void * brk_addr = DecPageCount(pageInfo, sbrk_freesize);
                ReleasePagesToSys(brk_addr, sbrk_freesize, false);
            }
            freesize -= sbrk_freesize;
        }

        // Then unmap the highest-addressed mmap runs until the quota is met
        // or only brk runs remain (rb_last walks the address tree).
        if (freesize > 0) {
            rb_node * right = rb_last(&m_cAddressTree);
            while (right) {
                struct SPageInfo * pInfo = (struct SPageInfo *)AddressTreeGetObject(right);
                if (pInfo->FromBrk()) {
                    break;
                }

                size_t mmap_freesize = FT_MIN(freesize, pInfo->page_count);
                void * mmap_addr = DecPageCount(pInfo, mmap_freesize);
                ReleasePagesToSys(mmap_addr, mmap_freesize, true);

                freesize -= mmap_freesize;

                if (freesize == 0) {
                    break;
                }

                right = rb_last(&m_cAddressTree);
            }
        }

        FT_LOG(FT_DEBUG, "freepages:%zd, mmap:%zd, sbrk:%zd", m_iFreePages, m_iMmapPages, m_iSbrkPages);
        FT_LOG(FT_DEBUG, "sbrk top:%p", (void *)m_llTopBrkAddress);

    }
191 |
    // Detach `freesize` pages from the high-address end of `pageInfo` and
    // return the address of the detached tail (the caller gives it back to
    // the OS).  The run is fully unlinked first, then either destroyed
    // (exact fit) or shrunk and re-filed under its new size.
    // Side effect: when the run came from brk, m_llTopBrkAddress is lowered
    // to the start of the detached tail.
    void * CPageMgr::DecPageCount(struct SPageInfo * pageInfo, size_t freesize)
    {
        FT_LOG(FT_DEBUG, "dec pagecount, remove curr page info!");
        RemoveFreeTree(pageInfo);
        RemoveAddressTree(pageInfo);
        RemoveCountTreeIfNeed(pageInfo->page_count);
        RemoveIndexTreeIfNeed(pageInfo->page_count);
        m_iFreePages -= pageInfo->page_count;

        void * addr = NULL;
        // Remember the origin before pageInfo may be released below.
        bool fromBrk = pageInfo->FromBrk();

        FT_LOG(FT_DEBUG, "dec pagecount, pagesize:%zd, sub freesize:%zd!", pageInfo->page_count, freesize);
        if (freesize == pageInfo->page_count) {
            addr = (void *)pageInfo->base_address;
            ReleasePageInfo(pageInfo);
        } else {
            // Keep the low part, release the high part.
            size_t off = pageInfo->page_count - freesize;
            addr = (void *)(pageInfo->base_address + (off << FT_PAGE_BIT));
            pageInfo->page_count -= freesize;
            InsertPageInfo(pageInfo);   // re-file + restore m_iFreePages share
        }

        if (fromBrk) {
            m_llTopBrkAddress = (size_t)addr;
        }

        return addr;
    }
221 |
222 | int CPageMgr::AllocPagesFromSys(size_t pages)
223 | {
224 | void * ptr = NULL;
225 | size_t size = pages << FT_PAGE_BIT;
226 | bool from_mmap = false;
227 |
228 | ptr = s_sbrk_page_allocator.SysAlloc(size);
229 | if (ptr == NULL) {
230 | ptr = s_mmap_page_allocator.SysAlloc(size);
231 | if (ptr != NULL) {
232 | from_mmap = true;
233 | } else {
234 | return -1;
235 | }
236 | }
237 |
238 | if (ptr != NULL) {
239 | if (!from_mmap) {
240 | if ((size_t)ptr + size > m_llTopBrkAddress) {
241 | m_llTopBrkAddress = (size_t)ptr + size;
242 | }
243 | m_iSbrkPages += pages;
244 | } else {
245 | m_iMmapPages += pages;
246 | }
247 | InsertPageInfo(ptr, pages, from_mmap);
248 | }
249 | FT_LOG(FT_DEBUG, "sbrk top:%p", (void *)m_llTopBrkAddress);
250 |
251 | return 0;
252 | }
253 |
254 | int CPageMgr::ReleasePagesToSys(void * addr, size_t pages, bool frommmap)
255 | {
256 | size_t size = pages << FT_PAGE_BIT;
257 |
258 | if (frommmap) {
259 | s_mmap_page_allocator.SysRelease(addr, size);
260 | m_iMmapPages -= pages;
261 | } else {
262 | s_sbrk_page_allocator.SysRelease(addr, size);
263 | m_iSbrkPages -= pages;
264 | }
265 | }
266 |
267 | void CPageMgr::InsertPageInfo(void * addr, size_t pagecount, bool frommmap)
268 | {
269 | struct SPageInfo * pageInfo = AllocPageInfo();
270 | RB_NODE_INIT(pageInfo->address_node);
271 | RB_NODE_INIT(pageInfo->free_node);
272 | pageInfo->base_address = (size_t)addr;
273 | pageInfo->page_count = pagecount;
274 | if (frommmap) {
275 | pageInfo->UnSetFlag(SPageInfo::E_MEM_SOURCE_OFF);
276 | } else {
277 | pageInfo->SetFlag(SPageInfo::E_MEM_SOURCE_OFF);
278 | }
279 |
280 | FT_LOG(FT_DEBUG, "addr:%p, pages:%zd, mmap:%d", addr, pagecount, frommmap);
281 | FT_LOG(FT_DEBUG, "mgr info, address tree size:%zd", m_iAddressTreeSize);
282 |
283 | if (m_iAddressTreeSize == 0) {
284 | InsertPageInfo(pageInfo);
285 | m_iMaxContinuePages = pageInfo->page_count;
286 | m_iAddressTreeSize ++;
287 | } else if (m_iAddressTreeSize == 1) {
288 | rb_node * node = rb_first(&m_cAddressTree);
289 | struct SPageInfo * pInfo = (struct SPageInfo *)AddressTreeGetObject(node);
290 |
291 | if (pInfo->flag == pageInfo->flag &&
292 | (pInfo->BeginAddress() == pageInfo->EndAddress() ||
293 | pageInfo->BeginAddress() == pInfo->EndAddress())) {
294 | RemoveFreeTree(pInfo);
295 | RemoveCountTreeIfNeed(pInfo->page_count);
296 | RemoveIndexTreeIfNeed(pInfo->page_count);
297 |
298 | pInfo->base_address = FT_MIN(pInfo->base_address, pInfo->base_address);
299 | pInfo->page_count += pageInfo->page_count;
300 | InsertIndexTreeIfNeed(pInfo->page_count);
301 | InsertCountTreeIfNeed(pInfo->page_count);
302 | InsertFreeTree(pInfo);
303 |
304 | ReleasePageInfo(pageInfo);
305 |
306 | m_iMaxContinuePages = pInfo->page_count;
307 | } else {
308 | InsertPageInfo(pageInfo);
309 | m_iMaxContinuePages = FT_MAX(pageInfo->page_count, pInfo->page_count);
310 | m_iAddressTreeSize ++;
311 | }
312 | } else {
313 | struct SPageInfo * prevInfo = GetPageInfoByAddress(pageInfo->BeginAddress() - 1);
314 | struct SPageInfo * nextInfo = GetPageInfoByAddress(pageInfo->EndAddress());
315 |
316 | if (prevInfo != NULL && prevInfo->flag == pageInfo->flag) {
317 | pageInfo->base_address = prevInfo->base_address;
318 | pageInfo->page_count += prevInfo->page_count;
319 |
320 | RemovePageInfo(prevInfo);
321 | m_iAddressTreeSize --;
322 | prevInfo = NULL;
323 | }
324 |
325 | if (nextInfo != NULL && nextInfo->flag == pageInfo->flag) {
326 | pageInfo->page_count += nextInfo->page_count;
327 |
328 | RemovePageInfo(nextInfo);
329 | m_iAddressTreeSize --;
330 | nextInfo = NULL;
331 | }
332 |
333 | InsertPageInfo(pageInfo);
334 | m_iMaxContinuePages = FT_MAX(pageInfo->page_count, m_iMaxContinuePages);
335 | m_iAddressTreeSize ++;
336 | }
337 |
338 | m_iFreePages += pagecount;
339 | FT_LOG(FT_INFO, "max continut:%zd, address tree node:%zd, free:%zd",
340 | m_iMaxContinuePages, m_iAddressTreeSize, m_iFreePages);
341 | }
342 |
    // Link an already-initialized run into all bookkeeping structures.
    // Order matters: the size-class index node must exist before
    // InsertFreeTree() files the run under it.
    void CPageMgr::InsertPageInfo(struct SPageInfo * pageInfo)
    {
        InsertIndexTreeIfNeed(pageInfo->page_count);
        InsertCountTreeIfNeed(pageInfo->page_count);
        InsertAddressTree(pageInfo);
        InsertFreeTree(pageInfo);
    }
350 |
    // Ensure a SIndexInfo (per-size free-run index) exists for `pagecount`
    // in its hash bucket; create and link one if missing.
    void CPageMgr::InsertIndexTreeIfNeed(size_t pagecount)
    {
        struct SHashNode * hashNode = GetHashNode(pagecount);
        struct SIndexInfo * indexInfo = (struct SIndexInfo *)RbSearch(&hashNode->hash_tree,
            &pagecount, &CPageMgr::HashTreeGetObject, &CPageMgr::HashTreeSearch);

        if (indexInfo == NULL) {
            indexInfo = AllocIndexInfo();

            RB_ROOT_INIT(indexInfo->free_tree);
            RB_NODE_INIT(indexInfo->hash_node);
            indexInfo->page_count = pagecount;

            RbInsert(&hashNode->hash_tree, indexInfo, &CPageMgr::HashTreeGetObject,
                &CPageMgr::HashTreeGetRbNode, &CPageMgr::HashTreeInsert);
        }
    }

    // Ensure `pagecount` is registered in the sorted tree of available run
    // sizes (used by best-fit and max-contiguous queries).
    void CPageMgr::InsertCountTreeIfNeed(size_t pagecount)
    {
        struct SCountInfo * countInfo = (struct SCountInfo *)RbSearch(&m_cCountTree,
            &pagecount, &CPageMgr::CountTreeGetObject, &CPageMgr::CountTreeSearch);

        if (countInfo == NULL) {
            countInfo = AllocCountInfo();

            countInfo->page_count = pagecount;
            RB_NODE_INIT(countInfo->count_node);

            RbInsert(&m_cCountTree, countInfo, &CPageMgr::CountTreeGetObject,
                &CPageMgr::CountTreeGetRbNode, &CPageMgr::CountTreeInsert);
        }
    }
384 |
    // Index the run by base address (enables neighbour lookup/coalescing).
    void CPageMgr::InsertAddressTree(struct SPageInfo * pageInfo)
    {
        RbInsert(&m_cAddressTree, pageInfo, &CPageMgr::AddressTreeGetObject,
            &CPageMgr::AddressTreeGetRbNode, &CPageMgr::AddressTreeInsert);
    }
390 |
391 | void CPageMgr::InsertFreeTree(struct SPageInfo * pageInfo)
392 | {
393 | struct SHashNode * hashNode = GetHashNode(pageInfo->page_count);
394 | struct SIndexInfo * indexInfo = (struct SIndexInfo *)RbSearch(&hashNode->hash_tree,
395 | &pageInfo->page_count, &CPageMgr::HashTreeGetObject, &CPageMgr::HashTreeSearch);
396 |
397 | if (indexInfo == NULL) {
398 | //ERROR!
399 | }
400 |
401 | RbInsert(&indexInfo->free_tree, pageInfo, &CPageMgr::FreeTreeGetObject,
402 | &CPageMgr::FreeTreeGetRbNode, &CPageMgr::FreeTreeInsert);
403 | }
404 |
    // Fully unlink a run from every structure and recycle its record.
    // Order matters: the free tree must be left before the size-keyed
    // index/count cleanup checks for emptiness.
    void CPageMgr::RemovePageInfo(struct SPageInfo * pageInfo)
    {
        RemoveFreeTree(pageInfo);
        RemoveAddressTree(pageInfo);
        RemoveCountTreeIfNeed(pageInfo->page_count);
        RemoveIndexTreeIfNeed(pageInfo->page_count);

        ReleasePageInfo(pageInfo);
    }
414 |
415 | void CPageMgr::RemoveIndexTreeIfNeed(size_t pagecount)
416 | {
417 | struct SHashNode * hashNode = GetHashNode(pagecount);
418 | struct SIndexInfo * indexInfo = (struct SIndexInfo *)RbSearch(&hashNode->hash_tree,
419 | &pagecount, &CPageMgr::HashTreeGetObject, &CPageMgr::HashTreeSearch);
420 |
421 | if (RB_EMPTY_ROOT(&indexInfo->free_tree)) {
422 | RbRemove(&hashNode->hash_tree, indexInfo, &CPageMgr::HashTreeGetRbNode);
423 | ReleaseIndexInfo(indexInfo);
424 | }
425 | }
426 |
427 | void CPageMgr::RemoveCountTreeIfNeed(size_t pagecount)
428 | {
429 | struct SHashNode * hashNode = GetHashNode(pagecount);
430 | struct SIndexInfo * indexInfo = (struct SIndexInfo *)RbSearch(&hashNode->hash_tree,
431 | &pagecount, &CPageMgr::HashTreeGetObject, &CPageMgr::HashTreeSearch);
432 |
433 | if (RB_EMPTY_ROOT(&indexInfo->free_tree)) {
434 | struct SCountInfo * countInfo = (struct SCountInfo *)RbSearch(&m_cCountTree,
435 | &pagecount, &CPageMgr::CountTreeGetObject, &CPageMgr::CountTreeSearch);
436 |
437 | RbRemove(&m_cCountTree, countInfo, &CPageMgr::CountTreeGetRbNode);
438 | ReleaseCountInfo(countInfo);
439 | }
440 | }
441 |
    // Unlink the run from the by-address index.
    void CPageMgr::RemoveAddressTree(struct SPageInfo * pageInfo)
    {
        RbRemove(&m_cAddressTree, pageInfo, &CPageMgr::AddressTreeGetRbNode);
    }
446 |
447 | void CPageMgr::RemoveFreeTree(struct SPageInfo * pageInfo)
448 | {
449 | struct SHashNode * hashNode = GetHashNode(pageInfo->page_count);
450 | struct SIndexInfo * indexInfo = (struct SIndexInfo *)RbSearch(&hashNode->hash_tree,
451 | &pageInfo->page_count, &CPageMgr::HashTreeGetObject, &CPageMgr::HashTreeSearch);
452 |
453 | RbRemove(&indexInfo->free_tree, pageInfo, &CPageMgr::FreeTreeGetRbNode);
454 | }
455 |
    // Find the free run containing `address`, or NULL if that address is
    // not inside any free run (AddressTreeSearch matches by range).
    struct CPageMgr::SPageInfo * CPageMgr::GetPageInfoByAddress(size_t address)
    {
        return (struct SPageInfo *)RbSearch(&m_cAddressTree, &address,
            &CPageMgr::AddressTreeGetObject, &CPageMgr::AddressTreeSearch);
    }

    // Find the size-class index node for `pagecount`, or NULL if none.
    struct CPageMgr::SIndexInfo * CPageMgr::GetIndexInfo(size_t pagecount)
    {
        struct SHashNode * hashNode = GetHashNode(pagecount);
        return (struct SIndexInfo *)RbSearch(&hashNode->hash_tree, &pagecount,
            &CPageMgr::HashTreeGetObject, &CPageMgr::HashTreeSearch);
    }

    // Take the first run filed under a size class.
    // NOTE(review): assumes the free tree is non-empty — rb_first returns
    // NULL otherwise and FreeTreeGetObject would misbehave on it.
    struct CPageMgr::SPageInfo * CPageMgr::GetPageInfo(struct SIndexInfo * indexInfo)
    {
        rb_node * first = rb_first(&indexInfo->free_tree);
        return (struct SPageInfo *)FreeTreeGetObject(first);
    }
474 |
    // Best-fit search over the sorted size tree: return the smallest
    // available run size that is >= wantpages.
    // NOTE(review): if no size satisfies the request, bestCountInfo stays
    // NULL and the final dereference faults — callers must guarantee
    // capacity (AllocPages grows from the OS first); confirm.
    size_t CPageMgr::GetBestFitIndex(size_t wantpages)
    {
        struct rb_node * node = m_cCountTree.rb_node;
        struct SCountInfo * bestCountInfo = NULL;

        while (node) {
            struct SCountInfo * countInfo = (struct SCountInfo *)CountTreeGetObject(node);

            if (countInfo->page_count >= wantpages) {
                // Candidate found; keep looking left for a tighter fit.
                bestCountInfo = countInfo;
                node = node->rb_left;
            } else {
                if (bestCountInfo != NULL) {
                    break;
                }
                node = node->rb_right;
            }
        }

        return bestCountInfo->page_count;
    }
496 |
497 | size_t CPageMgr::GetMaxContinuePages()
498 | {
499 | struct rb_node * node = m_cCountTree.rb_node;
500 | struct rb_node * find = node;
501 |
502 | while (node) {
503 | find = node;
504 | node = node->rb_right;
505 | }
506 |
507 | if (find == NULL) {
508 | return 0;
509 | }
510 |
511 | struct SCountInfo * countInfo = (struct SCountInfo *)CountTreeGetObject(find);
512 | return countInfo->page_count;
513 | }
514 |
    // Thin wrappers around the three dedicated slabs so bookkeeping
    // records never go through the allocator they themselves implement.
    CPageMgr::SPageInfo * CPageMgr::AllocPageInfo()
    {
        return s_pageinfo_slab.AllocNode();
    }

    CPageMgr::SIndexInfo * CPageMgr::AllocIndexInfo()
    {
        return s_indexinfo_slab.AllocNode();
    }

    CPageMgr::SCountInfo * CPageMgr::AllocCountInfo()
    {
        return s_countinfo_slab.AllocNode();
    }

    // Release counterparts; the pointer is passed by reference so the slab
    // can clear the caller's copy.
    void CPageMgr::ReleasePageInfo(struct SPageInfo * &pageInfo)
    {
        s_pageinfo_slab.ReleaseNode(pageInfo);
    }

    void CPageMgr::ReleaseIndexInfo(struct SIndexInfo * &indexInfo)
    {
        s_indexinfo_slab.ReleaseNode(indexInfo);
    }

    void CPageMgr::ReleaseCountInfo(struct SCountInfo * &countInfo)
    {
        s_countinfo_slab.ReleaseNode(countInfo);
    }
544 |
    // True when the kernel maps new regions top-down (bit set in the
    // constructor from the RLIMIT_STACK probe).
    bool CPageMgr::TopDownMMap()
    {
        return (m_iFlags & (1 << E_MMAP_DIRECTION_BIT));
    }

    // Bucket index for a page count; E_HASH_SIZE is a power of two, so the
    // mask is equivalent to modulo.
    size_t CPageMgr::Hash(size_t pagecount)
    {
        return (pagecount) & (E_HASH_SIZE - 1);
    }
554 |
555 | struct CPageMgr::SHashNode * CPageMgr::GetHashNode(size_t pagecount)
556 | {
557 | size_t bucket = Hash(pagecount);
558 | return &m_cHash[bucket];
559 | }
560 |
    // rb-tree callbacks for the hash buckets (SIndexInfo keyed by
    // page_count).  GetObject/GetRbNode convert between node and record.
    void * CPageMgr::HashTreeGetObject(void * node)
    {
        return (void *)rb_entry((rb_node *)node, struct SIndexInfo, hash_node);
    }

    rb_node * CPageMgr::HashTreeGetRbNode(void * object)
    {
        return &(((struct SIndexInfo *)object)->hash_node);
    }

    // Comparators return the key difference; although typed size_t, the
    // value is reinterpreted as signed by RbSearch/RbInsert, so a wrapped
    // (negative) difference still means "less than".
    size_t CPageMgr::HashTreeSearch(const void * lhs, const void * rhs)
    {
        struct SIndexInfo * lInfo = (struct SIndexInfo *)lhs;
        size_t & pagecount = *(size_t *)rhs;

        return lInfo->page_count - pagecount;
    }

    size_t CPageMgr::HashTreeInsert(const void * lhs, const void * rhs)
    {
        struct SIndexInfo * lInfo = (struct SIndexInfo *)lhs;
        struct SIndexInfo * rInfo = (struct SIndexInfo *)rhs;

        return lInfo->page_count - rInfo->page_count;
    }
586 |
    // rb-tree callbacks for the by-address index (SPageInfo keyed by
    // base_address).
    void * CPageMgr::AddressTreeGetObject(void * node)
    {
        return (void *)rb_entry((rb_node *)node, struct SPageInfo, address_node);
    }

    rb_node * CPageMgr::AddressTreeGetRbNode(void * object)
    {
        return &(((struct SPageInfo *)object)->address_node);
    }

    // Range match: 0 when `address` falls inside the run, otherwise the
    // direction to keep searching.  The -1 wraps in the size_t return and
    // is reinterpreted as signed by RbSearch.
    size_t CPageMgr::AddressTreeSearch(const void * lhs, const void * rhs)
    {
        struct SPageInfo * lInfo = (struct SPageInfo *)lhs;
        size_t start = lInfo->base_address;
        size_t end = lInfo->base_address + (lInfo->page_count << FT_PAGE_BIT);
        size_t & address = *(size_t *)rhs;

        if (start <= address && address < end) {
            return 0;
        } else if (address >= end) {
            return 1;
        } else {
            return -1;
        }
    }

    size_t CPageMgr::AddressTreeInsert(const void * lhs, const void * rhs)
    {
        struct SPageInfo * lInfo = (struct SPageInfo *)lhs;
        struct SPageInfo * rInfo = (struct SPageInfo *)rhs;

        return lInfo->base_address - rInfo->base_address;
    }
620 |
    // rb-tree callbacks for the sorted size tree (SCountInfo keyed by
    // page_count).
    void * CPageMgr::CountTreeGetObject(void * node)
    {
        return (void *)rb_entry((rb_node *)node, struct SCountInfo, count_node);
    }

    rb_node * CPageMgr::CountTreeGetRbNode(void * object)
    {
        return &(((struct SCountInfo *)object)->count_node);
    }

    // Difference comparators; sign recovered by the caller (see RbSearch).
    size_t CPageMgr::CountTreeSearch(const void * lhs, const void * rhs)
    {
        struct SCountInfo * lInfo = (struct SCountInfo *)lhs;
        size_t & pagecount = *(size_t *)rhs;

        return lInfo->page_count - pagecount;
    }

    size_t CPageMgr::CountTreeInsert(const void * lhs, const void * rhs)
    {
        struct SCountInfo * lInfo = (struct SCountInfo *)lhs;
        struct SCountInfo * rInfo = (struct SCountInfo *)rhs;

        return lInfo->page_count - rInfo->page_count;
    }
646 |
    // rb-tree callbacks for a size class's free tree (SPageInfo linked via
    // free_node, ordered by base_address to keep the tree deterministic).
    void * CPageMgr::FreeTreeGetObject(void * node)
    {
        return (void *)rb_entry((rb_node *)node, struct SPageInfo, free_node);
    }

    rb_node * CPageMgr::FreeTreeGetRbNode(void * object)
    {
        return &(((struct SPageInfo *)object)->free_node);
    }

    size_t CPageMgr::FreeTreeSearch(const void * lhs, const void * rhs)
    {
        struct SPageInfo * lInfo = (struct SPageInfo *)lhs;
        struct SPageInfo * rInfo = (struct SPageInfo *)rhs;

        return lInfo->base_address - rInfo->base_address;
    }

    size_t CPageMgr::FreeTreeInsert(const void * lhs, const void * rhs)
    {
        struct SPageInfo * lInfo = (struct SPageInfo *)lhs;
        struct SPageInfo * rInfo = (struct SPageInfo *)rhs;

        return lInfo->base_address - rInfo->base_address;
    }
672 |
673 |
674 | void * CPageMgr::RbSearch(struct rb_root *root,
675 | void * object, RbGetObjectFunc getObject, RbSearchFunc search)
676 | {
677 | struct rb_node *node = root->rb_node;
678 |
679 | while (node) {
680 | void * datanode = (this->*getObject)(node);
681 | size_t result = (this->*search)(datanode, object);
682 |
683 | if (result < 0)
684 | node = node->rb_left;
685 | else if (result > 0)
686 | node = node->rb_right;
687 | else
688 | return datanode;
689 | }
690 | return NULL;
691 | }
692 |
693 | int CPageMgr::RbInsert(struct rb_root *root, void *data,
694 | RbGetObjectFunc getObject, RbGetNodeFunc getNode, RbInsertFunc compare)
695 | {
696 | struct rb_node **newnode = &(root->rb_node), *parent = NULL;
697 |
698 | /* Figure out where to put new node */
699 | while (*newnode) {
700 | void * thisnode = (this->*getObject)(*newnode);
701 | size_t result = (this->*compare)(thisnode, data);
702 |
703 | parent = *newnode;
704 | if (result < 0)
705 | newnode = &((*newnode)->rb_left);
706 | else if (result > 0)
707 | newnode = &((*newnode)->rb_right);
708 | else
709 | return 0;
710 | }
711 |
712 | /* Add new node and rebalance tree. */
713 | rb_link_node((this->*getNode)(data), parent, newnode);
714 | rb_insert_color((this->*getNode)(data), root);
715 |
716 | return 1;
717 | }
718 |
    // Generic rb-tree removal: erase the record's embedded node.
    void CPageMgr::RbRemove(rb_root * root, void * object,
        RbGetNodeFunc getNode)
    {
        rb_erase((this->*getNode)(object), root);
    }
724 | }
725 |
--------------------------------------------------------------------------------
/src/ft_page_mgr.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_PAGE_MGR_H__
9 | #define __FT_PAGE_MGR_H__
10 |
11 | #include "ft_malloc_util.h"
12 | #include "ft_rb_tree.h"
13 |
14 | #include
15 |
16 | namespace ftmalloc
17 | {
    // Process-wide page manager: obtains raw pages from sbrk/mmap and hands
    // out / recycles contiguous runs of pages.  Free runs are indexed three
    // ways:
    //   - m_cAddressTree: by base address (for coalescing on release);
    //   - m_cHash[] of SIndexInfo: page count -> rb-tree of runs that size;
    //   - m_cCountTree: sorted set of the sizes currently available
    //     (best-fit lookup and max-contiguous queries).
    class CPageMgr
    {
    public:
        // One contiguous run of free pages.
        struct SPageInfo
        {
            enum {
                E_MEM_SOURCE_OFF = 0,   // bit index: memory origin (brk/mmap)
            };

            enum {
                E_SRC_MMAP = 0,
                E_SRC_SBRK = 1,
            };

            void SetFlag(size_t offset)
            {
                flag |= (1 << offset);
            }

            void UnSetFlag(size_t offset)
            {
                flag &= ~(1 << offset);
            }

            // True when the run was carved from the brk heap (the source
            // bit is set by CPageMgr::InsertPageInfo for sbrk memory).
            bool FromBrk()
            {
                return (flag & (1 << E_MEM_SOURCE_OFF)) == E_SRC_SBRK;
            }

            size_t BeginAddress()
            {
                return base_address;
            }

            // One past the last byte of the run.
            size_t EndAddress()
            {
                return base_address + (page_count << FT_PAGE_BIT);
            }

            size_t base_address;    // start address of the run
            size_t page_count;      // run length in pages
            size_t flag;            // origin bits (see enums above)

            struct rb_node address_node;    // link in m_cAddressTree
            struct rb_node free_node;       // link in the per-size free tree
        };

        // Per-size-class index: roots the tree of all free runs of exactly
        // page_count pages.
        struct SIndexInfo
        {
            size_t page_count;
            struct rb_node hash_node;
            struct rb_root free_tree;
        };

        // Membership record in the sorted tree of available run sizes.
        struct SCountInfo
        {
            size_t page_count;
            struct rb_node count_node;
        };

        // Hash bucket: tree of SIndexInfo whose sizes hash here.
        struct SHashNode
        {
            struct rb_root hash_tree;
        };

    public:
        static CPageMgr & GetInstance();

        ~CPageMgr();
        // Allocate / release `pages` contiguous pages.
        void * AllocPages(size_t wantpages);
        void ReleasePages(void * ptr, size_t pages);

    private:
        // System back-ends: grow via sbrk (preferred) or mmap; shrink back.
        int AllocPagesFromSys(size_t pages);
        int ReleasePagesToSys(void * addr, size_t pages, bool frommmap);

        // Bookkeeping: create/merge a run and link it into all structures.
        void InsertPageInfo(void * addr, size_t pagecount, bool frommmap = false);
        void InsertPageInfo(struct SPageInfo * pageInfo);
        void InsertIndexTreeIfNeed(size_t pagecount);
        void InsertCountTreeIfNeed(size_t pagecount);
        void InsertAddressTree(struct SPageInfo * pageInfo);
        void InsertFreeTree(struct SPageInfo * pageInfo);

        void RemovePageInfo(struct SPageInfo * pageInfo);
        void RemoveIndexTreeIfNeed(size_t pagecount);
        void RemoveCountTreeIfNeed(size_t pagecount);
        void RemoveAddressTree(struct SPageInfo * pageInfo);
        void RemoveFreeTree(struct SPageInfo * pageInfo);

        // Lookups over the three indexes.
        struct SPageInfo * GetPageInfoByAddress(size_t address);
        struct SPageInfo * GetPageInfo(struct SIndexInfo * indexInfo);
        struct SIndexInfo * GetIndexInfo(size_t pagecount);
        size_t GetBestFitIndex(size_t wantpages);
        size_t GetMaxContinuePages();

        // Idle-memory trimming.
        void ReleasePageIfNeed();
        void * DecPageCount(struct SPageInfo * pageInfo, size_t freesize);
    private:
        // Record allocation from dedicated slabs (never from this manager).
        struct SPageInfo * AllocPageInfo();
        struct SIndexInfo * AllocIndexInfo();
        struct SCountInfo * AllocCountInfo();

        void ReleasePageInfo(struct SPageInfo * &pageInfo);
        void ReleaseIndexInfo(struct SIndexInfo * &indexInfo);
        void ReleaseCountInfo(struct SCountInfo * &countInfo);

        bool TopDownMMap();
        size_t Hash(size_t pagecount);
        struct SHashNode * GetHashNode(size_t pagecount);

    private:
        // Member-function-pointer callbacks used by the generic rb helpers.
        typedef void * (CPageMgr::*RbGetObjectFunc)(void * node);
        typedef rb_node * (CPageMgr::*RbGetNodeFunc)(void * object);
        typedef size_t (CPageMgr::*RbSearchFunc)(const void * lhs, const void * rhs);
        typedef size_t (CPageMgr::*RbInsertFunc)(const void * lhs, const void * rhs);

    private:
        void * RbSearch(struct rb_root *root, void * object,
            RbGetObjectFunc getObject, RbSearchFunc search);
        int RbInsert(struct rb_root *root, void *data,
            RbGetObjectFunc getObject, RbGetNodeFunc getNode, RbInsertFunc compare);
        void RbRemove(rb_root * root, void * object, RbGetNodeFunc getNode);

    private:
        void * HashTreeGetObject(void * node);
        rb_node * HashTreeGetRbNode(void * object);
        size_t HashTreeSearch(const void * lhs, const void * rhs);
        size_t HashTreeInsert(const void * lhs, const void * rhs);

    private:
        void * AddressTreeGetObject(void * node);
        rb_node * AddressTreeGetRbNode(void * object);
        size_t AddressTreeSearch(const void * lhs, const void * rhs);
        size_t AddressTreeInsert(const void * lhs, const void * rhs);

    private:
        void * CountTreeGetObject(void * node);
        rb_node * CountTreeGetRbNode(void * object);
        size_t CountTreeSearch(const void * lhs, const void * rhs);
        size_t CountTreeInsert(const void * lhs, const void * rhs);

    private:
        void * FreeTreeGetObject(void * node);
        rb_node * FreeTreeGetRbNode(void * object);
        size_t FreeTreeSearch(const void * lhs, const void * rhs);
        size_t FreeTreeInsert(const void * lhs, const void * rhs);
    private:
        // Singleton: construction and copying are private.
        CPageMgr();
        CPageMgr(const CPageMgr &);
        CPageMgr & operator=(const CPageMgr &);

        static CPageMgr sInstance;

    private:
        enum {
            E_HASH_SIZE = 4096,             // power of two (masked in Hash)

            E_MMAP_DIRECTION_BIT = 0,       // bit in m_iFlags
        };
        struct SHashNode m_cHash[E_HASH_SIZE];
        struct rb_root m_cAddressTree;
        struct rb_root m_cCountTree;

        size_t m_llTopBrkAddress;           // highest brk address seen

        size_t m_iAddressTreeSize;          // number of free runs tracked
        size_t m_iMaxContinuePages;         // largest free run, in pages
        size_t m_iFreePages;                // total free pages cached
        size_t m_iSbrkPages;                // pages currently from sbrk
        size_t m_iMmapPages;                // pages currently from mmap
        size_t m_iFlags;
    };
190 | }
191 |
192 | #endif
193 |
--------------------------------------------------------------------------------
/src/ft_rb_tree.cpp:
--------------------------------------------------------------------------------
1 | #include "ft_rb_tree.h"
2 |
3 | namespace ftmalloc
4 | {
    // Standard red-black left rotation (linux-rbtree style): `node`'s right
    // child takes its place; node becomes that child's left child.  Colors
    // are untouched — callers fix them up.
    static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
    {
        struct rb_node *right = node->rb_right;
        struct rb_node *parent = rb_parent(node);

        // Move right's left subtree under node.
        if ((node->rb_right = right->rb_left))
            rb_set_parent(right->rb_left, node);
        right->rb_left = node;

        rb_set_parent(right, parent);

        // Re-point the parent (or the root) at the new subtree head.
        if (parent)
        {
            if (node == parent->rb_left)
                parent->rb_left = right;
            else
                parent->rb_right = right;
        }
        else
            root->rb_node = right;
        rb_set_parent(node, right);
    }
27 |
28 |
    // Mirror image of __rb_rotate_left: `node`'s left child takes its
    // place; node becomes that child's right child.
    static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
    {
        struct rb_node *left = node->rb_left;
        struct rb_node *parent = rb_parent(node);

        // Move left's right subtree under node.
        if ((node->rb_left = left->rb_right))
            rb_set_parent(left->rb_right, node);
        left->rb_right = node;

        rb_set_parent(left, parent);

        // Re-point the parent (or the root) at the new subtree head.
        if (parent)
        {
            if (node == parent->rb_right)
                parent->rb_right = left;
            else
                parent->rb_left = left;
        }
        else
            root->rb_node = left;
        rb_set_parent(node, left);
    }
51 |
    // Rebalance after inserting `node` (already linked red via
    // rb_link_node).  Classic red-black insertion fixup: while the parent
    // is red, either recolor (red uncle) and continue from the grandparent,
    // or rotate (black uncle) to restore the invariants; finally force the
    // root black.
    void rb_insert_color(struct rb_node *node, struct rb_root *root)
    {
        struct rb_node *parent, *gparent;

        while ((parent = rb_parent(node)) && rb_is_red(parent))
        {
            gparent = rb_parent(parent);

            if (parent == gparent->rb_left)
            {
                {
                    // Case 1: red uncle — recolor and move the violation up.
                    register struct rb_node *uncle = gparent->rb_right;
                    if (uncle && rb_is_red(uncle))
                    {
                        rb_set_black(uncle);
                        rb_set_black(parent);
                        rb_set_red(gparent);
                        node = gparent;
                        continue;
                    }
                }

                // Case 2: node is an inner child — rotate to the outside.
                if (parent->rb_right == node)
                {
                    register struct rb_node *tmp;
                    __rb_rotate_left(parent, root);
                    tmp = parent;
                    parent = node;
                    node = tmp;
                }

                // Case 3: outer child — recolor and rotate the grandparent.
                rb_set_black(parent);
                rb_set_red(gparent);
                __rb_rotate_right(gparent, root);
            }
            else {
                // Mirror of the three cases for a right-side parent.
                {
                    register struct rb_node *uncle = gparent->rb_left;
                    if (uncle && rb_is_red(uncle))
                    {
                        rb_set_black(uncle);
                        rb_set_black(parent);
                        rb_set_red(gparent);
                        node = gparent;
                        continue;
                    }
                }

                if (parent->rb_left == node)
                {
                    register struct rb_node *tmp;
                    __rb_rotate_right(parent, root);
                    tmp = parent;
                    parent = node;
                    node = tmp;
                }

                rb_set_black(parent);
                rb_set_red(gparent);
                __rb_rotate_left(gparent, root);
            }
        }

        rb_set_black(root->rb_node);
    }
117 |
/*
 * Rebalance after removing a black node.  @node (possibly NULL) carries
 * a "double black" deficit and @parent is its parent.  Standard delete
 * fix-up with symmetric left/right branches.
 */
static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
			 struct rb_root *root)
{
	struct rb_node *other;	/* sibling of node */

	while ((!node || rb_is_black(node)) && node != root->rb_node)
	{
		if (parent->rb_left == node)
		{
			other = parent->rb_right;
			if (rb_is_red(other))
			{
				/* Case 1: red sibling - rotate to obtain a black sibling. */
				rb_set_black(other);
				rb_set_red(parent);
				__rb_rotate_left(parent, root);
				other = parent->rb_right;
			}
			if ((!other->rb_left || rb_is_black(other->rb_left)) &&
			    (!other->rb_right || rb_is_black(other->rb_right)))
			{
				/* Case 2: both nephews black - push the deficit up. */
				rb_set_red(other);
				node = parent;
				parent = rb_parent(node);
			}
			else
			{
				if (!other->rb_right || rb_is_black(other->rb_right))
				{
					/* Case 3: only the near nephew is red - rotate sibling. */
					rb_set_black(other->rb_left);
					rb_set_red(other);
					__rb_rotate_right(other, root);
					other = parent->rb_right;
				}
				/* Case 4: far nephew red - one rotation finishes. */
				rb_set_color(other, rb_color(parent));
				rb_set_black(parent);
				rb_set_black(other->rb_right);
				__rb_rotate_left(parent, root);
				node = root->rb_node;
				break;
			}
		}
		else
		{
			/* Mirror image of the cases above. */
			other = parent->rb_left;
			if (rb_is_red(other))
			{
				rb_set_black(other);
				rb_set_red(parent);
				__rb_rotate_right(parent, root);
				other = parent->rb_left;
			}
			if ((!other->rb_left || rb_is_black(other->rb_left)) &&
			    (!other->rb_right || rb_is_black(other->rb_right)))
			{
				rb_set_red(other);
				node = parent;
				parent = rb_parent(node);
			}
			else
			{
				if (!other->rb_left || rb_is_black(other->rb_left))
				{
					rb_set_black(other->rb_right);
					rb_set_red(other);
					__rb_rotate_left(other, root);
					other = parent->rb_left;
				}
				rb_set_color(other, rb_color(parent));
				rb_set_black(parent);
				rb_set_black(other->rb_left);
				__rb_rotate_right(parent, root);
				node = root->rb_node;
				break;
			}
		}
	}
	if (node)
		rb_set_black(node);
}
197 |
/*
 * Unlink @node from the tree.  A node with two children is replaced by
 * its in-order successor; the fix-up pass runs only when the physically
 * removed node was black.
 */
void rb_erase(struct rb_node *node, struct rb_root *root)
{
	struct rb_node *child, *parent;
	int color;

	if (!node->rb_left)
		child = node->rb_right;
	else if (!node->rb_right)
		child = node->rb_left;
	else
	{
		/* Two children: the successor is the leftmost node of the
		 * right subtree; it has no left child by construction. */
		struct rb_node *old = node, *left;

		node = node->rb_right;
		while ((left = node->rb_left) != NULL)
			node = left;

		/* Point old's parent (or the root) at the successor. */
		if (rb_parent(old)) {
			if (rb_parent(old)->rb_left == old)
				rb_parent(old)->rb_left = node;
			else
				rb_parent(old)->rb_right = node;
		}
		else
			root->rb_node = node;

		/* The successor's right child replaces it in its old spot. */
		child = node->rb_right;
		parent = rb_parent(node);
		color = rb_color(node);

		if (parent == old) {
			/* Successor was old's direct right child. */
			parent = node;
		}
		else {
			if (child)
				rb_set_parent(child, parent);
			parent->rb_left = child;

			node->rb_right = old->rb_right;
			rb_set_parent(old->rb_right, node);
		}

		/* Successor inherits old's parent link and color wholesale. */
		node->rb_parent_color = old->rb_parent_color;
		node->rb_left = old->rb_left;
		rb_set_parent(old->rb_left, node);

		goto color;
	}

	/* Zero/one child: splice the child into node's place. */
	parent = rb_parent(node);
	color = rb_color(node);

	if (child)
		rb_set_parent(child, parent);
	if (parent)
	{
		if (parent->rb_left == node)
			parent->rb_left = child;
		else
			parent->rb_right = child;
	}
	else
		root->rb_node = child;

 color:
	/* Removing a black node may violate the black-height invariant. */
	if (color == RB_BLACK)
		__rb_erase_color(child, parent, root);
}
266 |
267 | /*
268 | * This function returns the first node (in sort order) of the tree.
269 | */
270 | struct rb_node *rb_first(const struct rb_root *root)
271 | {
272 | struct rb_node *n;
273 |
274 | n = root->rb_node;
275 | if (!n)
276 | return NULL;
277 | while (n->rb_left)
278 | n = n->rb_left;
279 | return n;
280 | }
281 |
282 | struct rb_node *rb_last(const struct rb_root *root)
283 | {
284 | struct rb_node *n;
285 |
286 | n = root->rb_node;
287 | if (!n)
288 | return NULL;
289 | while (n->rb_right)
290 | n = n->rb_right;
291 | return n;
292 | }
293 |
/* In-order successor of @node, or NULL if @node is the last node. */
struct rb_node *rb_next(const struct rb_node *node)
{
	struct rb_node *parent;

	/* A self-parented node is "empty" (RB_CLEAR_NODE) - not in a tree. */
	if (rb_parent(node) == node)
		return NULL;

	/* If we have a right-hand child, go down and then left as far
	   as we can. */
	if (node->rb_right) {
		node = node->rb_right;
		while (node->rb_left)
			node = node->rb_left;
		return (struct rb_node *)node;
	}

	/* No right-hand children. Everything down and left is
	   smaller than us, so any 'next' node must be in the general
	   direction of our parent. Go up the tree; any time the
	   ancestor is a right-hand child of its parent, keep going
	   up. First time it's a left-hand child of its parent, said
	   parent is our 'next' node. */
	while ((parent = rb_parent(node)) && node == parent->rb_right)
		node = parent;

	return parent;
}
321 |
/* In-order predecessor of @node, or NULL if @node is the first node. */
struct rb_node *rb_prev(const struct rb_node *node)
{
	struct rb_node *parent;

	/* A self-parented node is "empty" (RB_CLEAR_NODE) - not in a tree. */
	if (rb_parent(node) == node)
		return NULL;

	/* If we have a left-hand child, go down and then right as far
	   as we can. */
	if (node->rb_left) {
		node = node->rb_left;
		while (node->rb_right)
			node = node->rb_right;
		return (struct rb_node *)node;
	}

	/* No left-hand children. Go up till we find an ancestor which
	   is a right-hand child of its parent */
	while ((parent = rb_parent(node)) && node == parent->rb_left)
		node = parent;

	return parent;
}
345 |
346 | void rb_replace_node(struct rb_node *victim, struct rb_node *newnode, struct rb_root *root)
347 | {
348 | struct rb_node *parent = rb_parent(victim);
349 |
350 | /* Set the surrounding nodes to point to the replacement */
351 | if (parent) {
352 | if (victim == parent->rb_left)
353 | parent->rb_left = newnode;
354 | else
355 | parent->rb_right = newnode;
356 | }
357 | else {
358 | root->rb_node = newnode;
359 | }
360 | if (victim->rb_left)
361 | rb_set_parent(victim->rb_left, newnode);
362 | if (victim->rb_right)
363 | rb_set_parent(victim->rb_right, newnode);
364 |
365 | /* Copy the pointers/colour from the victim to the replacement */
366 | *newnode = *victim;
367 | }
368 | }
--------------------------------------------------------------------------------
/src/ft_rb_tree.h:
--------------------------------------------------------------------------------
#ifndef __FT_RB_TREE_H__
#define __FT_RB_TREE_H__

/* Restored: the include argument was lost during extraction.
 * size_t and offsetof require <stddef.h>. */
#include <stddef.h>

namespace ftmalloc
{
#ifdef _MSC_VER
#define container_of(ptr, type, member) ((type *)((char *)ptr - offsetof(type, member)))
#else
#define container_of(ptr, type, member) ({ \
    const typeof( ((type *)0)->member ) *__mptr = (ptr); \
    (type *)( (char *)__mptr - offsetof(type,member) );})
#endif

#if defined(offsetof)
#undef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

/* Node color is stored in bit 0 of rb_parent_color; the aligned parent
 * pointer occupies the remaining bits. */
#define RB_RED 0
#define RB_BLACK 1

#ifdef _MSC_VER
#pragma pack(push)
#pragma pack(8)
struct rb_node {
    size_t rb_parent_color;
    struct rb_node *rb_right;
    struct rb_node *rb_left;
};
#pragma pack(pop)
#else
struct rb_node {
    size_t rb_parent_color;
    struct rb_node *rb_right;
    struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */
#endif

struct rb_root {
    struct rb_node * rb_node;
};

#define rb_parent(r)   ((struct rb_node *)((r)->rb_parent_color & ~3))
#define rb_color(r)   ((r)->rb_parent_color & 1)
#define rb_is_red(r)   (!rb_color(r))
#define rb_is_black(r) rb_color(r)
#define rb_set_red(r)  do { (r)->rb_parent_color &= ~1; } while (0)
#define rb_set_black(r)  do { (r)->rb_parent_color |= 1; } while (0)

static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
    rb->rb_parent_color = (rb->rb_parent_color & 3) | (size_t)p;
}
static inline void rb_set_color(struct rb_node *rb, int color)
{
    rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
}

#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)


#define RB_ROOT_INIT(root) do { root.rb_node = NULL;} while (0)
#define RB_NODE_INIT(node) do { node.rb_left = node.rb_right = NULL; node.rb_parent_color = 0;} while (0)

#define RB_EMPTY_ROOT(root)  ((root)->rb_node == NULL)
#define RB_EMPTY_NODE(node)  (rb_parent(node) == node)
#define RB_CLEAR_NODE(node)  (rb_set_parent(node, node))

extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);

/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *newnode, struct rb_root *root);

/* Link @node under @parent at child slot @rb_link; the new node starts
 * red (color bit 0) and must be fixed up with rb_insert_color(). */
static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, struct rb_node ** rb_link)
{
    node->rb_parent_color = (size_t)parent;
    node->rb_left = node->rb_right = NULL;

    *rb_link = node;
}
}
#endif //__FT_RB_TREE_H__
94 |
--------------------------------------------------------------------------------
/src/ft_sbrk_page_allocator.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #include "ft_sbrk_page_allocator.h"
9 | #include "ft_malloc_util.h"
10 | #include "ft_malloc_log.h"
11 |
#include <unistd.h>
#include <errno.h>
#include <string.h>
15 |
16 | namespace ftmalloc
17 | {
18 | CSbrkPageAllocator s_sbrk_page_allocator;
19 |
/* Start unaligned; the first SysAlloc() call aligns the program break. */
CSbrkPageAllocator::CSbrkPageAllocator()
    : m_bAlign(false)
{
}
24 |
/* Nothing to release: the program break is left wherever it is. */
CSbrkPageAllocator::~CSbrkPageAllocator()
{
}
28 |
29 | void * CSbrkPageAllocator::SysAlloc(size_t size)
30 | {
31 | if (!m_bAlign) {
32 | void * paddr = sbrk(0);
33 | size_t addr = (size_t)paddr;
34 | FT_LOG(FT_INFO, "algin, addr:%p", paddr);
35 |
36 | if (addr & ((1 << FT_PAGE_BIT) - 1)) {
37 | addr = addr + (1 << FT_PAGE_BIT);
38 | addr = addr & (~((1 << FT_PAGE_BIT) - 1));
39 |
40 | size_t align_size = addr - (size_t)paddr;
41 | sbrk(align_size);
42 | FT_LOG(FT_INFO, "after algin, addr:%p, alignsize:%zd, newaddr:%p", paddr, align_size, sbrk(0));
43 | }
44 | m_bAlign = true;
45 | }
46 |
47 | void * addr = sbrk(size);
48 | if (addr == NULL) {
49 | FT_LOG(FT_ERROR, "sbrk failed, errno:%d, %s", errno, strerror(errno));
50 | }
51 | FT_LOG(FT_DEBUG, "sbrk, size:%zd, addr:%p", size, addr);
52 |
53 | return addr;
54 | }
55 |
/*
 * Return memory to the OS by lowering the program break to `ptr`.
 * NOTE(review): brk(ptr) releases everything from ptr to the current
 * break - this is only correct if [ptr, ptr+size) is the topmost
 * allocation; confirm callers release in LIFO order.
 */
void CSbrkPageAllocator::SysRelease(void * ptr, size_t size)
{
    FT_LOG(FT_DEBUG, "brk size:%zd, addr:%p", size, ptr);
    if (brk(ptr)) {
        FT_LOG(FT_ERROR, "brk failed, errno:%d, %s", errno, strerror(errno));
    }
}
63 | }
--------------------------------------------------------------------------------
/src/ft_sbrk_page_allocator.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_SBRK_PAGE_ALLOCATOR_H__
9 | #define __FT_SBRK_PAGE_ALLOCATOR_H__
10 |
11 | #include "ft_sys_alloc_intf.h"
12 |
13 | namespace ftmalloc
14 | {
/*
 * ISysAlloc backend that grows/shrinks the heap with sbrk()/brk().
 * A process-wide instance is defined in ft_sbrk_page_allocator.cpp.
 */
class CSbrkPageAllocator : public ISysAlloc
{
public:
    CSbrkPageAllocator();
    virtual ~CSbrkPageAllocator();

    /* Grow the break by `size` bytes; page-aligned after the first call. */
    virtual void * SysAlloc(size_t size);
    /* Lower the break to `ptr` (LIFO release only - see .cpp). */
    virtual void SysRelease(void * ptr, size_t size);

private:
    bool m_bAlign;  /* true once the break has been page-aligned */
};
27 | }
28 | #endif
--------------------------------------------------------------------------------
/src/ft_sizemap.cpp:
--------------------------------------------------------------------------------
1 | #include "ft_sizemap.h"
2 | #include "ft_malloc_log.h"
3 |
4 | namespace ftmalloc
5 | {
6 | size_t CSizeMap::FLAGS_tcmalloc_transfer_num_objects = kPageSize >> 3;
7 |
8 | // Note: the following only works for "n"s that fit in 32-bits, but
9 | // that is fine since we only use it for small sizes.
// Note: the following only works for "n"s that fit in 32-bits, but
// that is fine since we only use it for small sizes.
//
// floor(log2(n)): binary-search the highest set bit with probe
// shifts of 16, 8, 4, 2, 1.
static inline int LgFloor(size_t n) {
    int log = 0;
    for (int shift = 16; shift >= 1; shift >>= 1) {
        if ((n >> shift) != 0) {
            n >>= shift;
            log += shift;
        }
    }
    return log;
}
23 |
24 | int AlignmentForSize(size_t size) {
25 | int alignment = kAlignment;
26 | if (size > kMaxSize) {
27 | // Cap alignment at kPageSize for large sizes.
28 | alignment = kPageSize;
29 | }
30 | else if (size >= 128) {
31 | // Space wasted due to alignment is at most 1/8, i.e., 12.5%.
32 | alignment = (1 << LgFloor(size)) / 8;
33 | }
34 | else if (size >= kMinAlign) {
35 | // We need an alignment of at least 16 bytes to satisfy
36 | // requirements for some SSE types.
37 | alignment = kMinAlign;
38 | }
39 | // Maximum alignment allowed is page size alignment.
40 | if (alignment > kPageSize) {
41 | alignment = kPageSize;
42 | }
43 | //CHECK_CONDITION(size < kMinAlign || alignment >= kMinAlign);
44 | //CHECK_CONDITION((alignment & (alignment - 1)) == 0);
45 | return alignment;
46 | }
47 |
/*
 * Lazily-initialized singleton accessor.
 *
 * NOTE(review): the bInit guard is not thread-safe - two threads racing
 * here could observe a partially Init()ed map, and bInit is set before
 * Init() completes.  Confirm first use happens single-threaded, or
 * protect this with pthread_once/locking.
 */
CSizeMap & CSizeMap::GetInstance()
{
    static CSizeMap sInstance;
    static bool bInit = false;
    if (!bInit) {
        bInit = true;
        sInstance.Init();
        sInstance.print();
    }

    return sInstance;
}
60 |
61 | // Initialize the mapping arrays
// Initialize the mapping arrays: compute the size-class table
// (class_to_size_/class_to_pages_), the size->class lookup array
// (class_array_), and the per-class transfer batch sizes.
// NOTE(review): the empty if-blocks below are sanity checks whose
// assertion bodies were stripped; they are kept as markers.
void CSizeMap::Init() {
    //InitTCMallocTransferNumObjects();

    // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
    if (ClassIndex(0) != 0) {
    }
    if (ClassIndex(kMaxSize) >= sizeof(class_array_)) {
    }

    // Compute the size classes we want to use
    int sc = 1; // Next size class to assign
    int alignment = kAlignment;
    //CHECK_CONDITION(kAlignment <= kMinAlign);
    for (size_t size = kAlignment; size <= kMaxSize; size += alignment) {
        alignment = AlignmentForSize(size);
        //CHECK_CONDITION((size % alignment) == 0);

        // Objects needed per central->thread transfer for this size.
        int blocks_to_move = NumMoveSize(size) / 4;
        size_t psize = 0;
        do {
            psize += kPageSize;
            // Allocate enough pages so leftover is less than 1/8 of total.
            // This bounds wasted space to at most 12.5%.
            while ((psize % size) > (psize >> 3)) {
                psize += kPageSize;
            }
            // Continue to add pages until there are at least as many objects in
            // the span as are needed when moving objects from the central
            // freelists and spans to the thread caches.
        } while ((psize / size) < (blocks_to_move));
        const size_t my_pages = psize >> kPageShift;

        if (sc > 1 && my_pages == class_to_pages_[sc - 1]) {
            // See if we can merge this into the previous class without
            // increasing the fragmentation of the previous class.
            const size_t my_objects = (my_pages << kPageShift) / size;
            const size_t prev_objects = (class_to_pages_[sc - 1] << kPageShift)
                / class_to_size_[sc - 1];
            if (my_objects == prev_objects) {
                // Adjust last class to include this size
                class_to_size_[sc - 1] = size;
                continue;
            }
        }

        // Add new class
        class_to_pages_[sc] = my_pages;
        class_to_size_[sc] = size;
        sc++;
    }
    // Stripped check: sc should equal kNumClasses at this point.
    if (sc != kNumClasses) {
    }

    // Initialize the mapping arrays
    int next_size = 0;
    for (int c = 1; c < kNumClasses; c++) {
        const int max_size_in_class = class_to_size_[c];
        // Every 8-byte step up to the class's max maps to class c.
        for (int s = next_size; s <= max_size_in_class; s += kAlignment) {
            class_array_[ClassIndex(s)] = c;
        }
        next_size = max_size_in_class + kAlignment;
    }

    // Double-check sizes just to be safe
    for (size_t size = 0; size <= kMaxSize;) {
        const int sc = SizeClass(size);
        if (sc <= 0 || sc >= kNumClasses) {
        }
        if (sc > 1 && size <= class_to_size_[sc - 1]) {
        }
        const size_t s = class_to_size_[sc];
        if (size > s || s == 0) {
        }
        if (size <= kMaxSmallSize) {
            size += 8;
        }
        else {
            size += 128;
        }
    }

    // Initialize the num_objects_to_move array.
    for (size_t cl = 1; cl < kNumClasses; ++cl) {
        num_objects_to_move_[cl] = class_to_pages_[cl] * kPageSize / class_to_size_[cl];
        //num_objects_to_move_[cl] = NumMoveSize(ByteSizeForClass(cl));
    }
}
149 |
150 | int CSizeMap::NumMoveSize(size_t size) {
151 | if (size == 0) return 0;
152 | // Use approx 64k transfers between thread and central caches.
153 | int num = static_cast(64.0 * 1024.0 / size);
154 | if (num < 2) num = 2;
155 |
156 | // Avoid bringing too many objects into small object free lists.
157 | // If this value is too large:
158 | // - We waste memory with extra objects sitting in the thread caches.
159 | // - The central freelist holds its lock for too long while
160 | // building a linked list of objects, slowing down the allocations
161 | // of other threads.
162 | // If this value is too small:
163 | // - We go to the central freelist too often and we have to acquire
164 | // its lock each time.
165 | // This value strikes a balance between the constraints above.
166 |
167 | #if 1
168 | if (num > FLAGS_tcmalloc_transfer_num_objects)
169 | num = FLAGS_tcmalloc_transfer_num_objects;
170 | #else
171 |
172 | #endif
173 | return num;
174 | }
175 |
/* Dump the computed size-class tables through the debug log. */
void CSizeMap::print()
{
    size_t length = sizeof(class_to_size_) / sizeof(class_to_size_[0]);
    for (size_t i = 0; i < length; i++) {
        FT_LOG(FT_DEBUG, "class_to_size_[%zd] = %zd, class_to_pages_[%zd] = %zd, number:%d",
            i, class_to_size_[i], i, class_to_pages_[i], num_objects_to_move_[i]);
    }
}
184 | }
185 |
--------------------------------------------------------------------------------
/src/ft_sizemap.h:
--------------------------------------------------------------------------------
1 | #ifndef __FT_SIZEMAP_H__
2 | #define __FT_SIZEMAP_H__
3 |
#include <stddef.h>
#include <stdint.h>
6 |
7 | namespace ftmalloc
8 | {
9 | #if 1//defined(TCMALLOC_ALIGN_8BYTES)
10 | // Unless we force to use 8 bytes alignment we use an alignment of
11 | // at least 16 bytes to statisfy requirements for some SSE types.
12 | // Keep in mind when using the 16 bytes alignment you can have a space
13 | // waste due alignment of 25%. (eg malloc of 24 bytes will get 32 bytes)
14 | static const size_t kMinAlign = 8;
15 | // Number of classes created until reach page size 128.
16 | static const size_t kBaseClasses = 16;
17 | #else
18 | static const size_t kMinAlign = 16;
19 | static const size_t kBaseClasses = 9;
20 | #endif
21 |
22 | static const size_t kPageShift = 16;
23 | static const size_t kNumClasses = kBaseClasses + 73;
24 |
25 | static const size_t kMaxThreadCacheSize = 4 << 20;
26 |
27 | static const size_t kPageSize = 1 << kPageShift;
28 | static const size_t kMaxSize = 256 * 1024;
29 | static const size_t kAlignment = 8;
30 | static const size_t kLargeSizeClass = 0;
31 |
32 | int AlignmentForSize(size_t size);
33 |
34 | // Size-class information + mapping
35 | class CSizeMap
36 | {
37 | private:
38 | // Number of objects to move between a per-thread list and a central
39 | // list in one shot. We want this to be not too small so we can
40 | // amortize the lock overhead for accessing the central list. Making
41 | // it too big may temporarily cause unnecessary memory wastage in the
42 | // per-thread free list until the scavenger cleans up the list.
43 | int num_objects_to_move_[kNumClasses];
44 |
45 | //-------------------------------------------------------------------
46 | // Mapping from size to size_class and vice versa
47 | //-------------------------------------------------------------------
48 |
49 | // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
50 | // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
51 | // So for these larger sizes we have an array indexed by ceil(size/128).
52 | //
53 | // We flatten both logical arrays into one physical array and use
54 | // arithmetic to compute an appropriate index. The constants used by
55 | // ClassIndex() were selected to make the flattening work.
56 | //
57 | // Examples:
58 | // Size Expression Index
59 | // -------------------------------------------------------
60 | // 0 (0 + 7) / 8 0
61 | // 1 (1 + 7) / 8 1
62 | // ...
63 | // 1024 (1024 + 7) / 8 128
64 | // 1025 (1025 + 127 + (120<<7)) / 128 129
65 | // ...
66 | // 32768 (32768 + 127 + (120<<7)) / 128 376
67 | static const int kMaxSmallSize = 1024;
68 | static const size_t kClassArraySize =
69 | ((kMaxSize + 127 + (120 << 7)) >> 7) + 1;
70 | unsigned char class_array_[kClassArraySize];
71 |
72 | // Compute index of the class_array[] entry for a given size
73 | static inline size_t ClassIndex(int s) {
74 | // Use unsigned arithmetic to avoid unnecessary sign extensions.
75 | //ASSERT(0 <= s);
76 | //ASSERT(s <= kMaxSize);
77 | //if (LIKELY(s <= kMaxSmallSize)) {
78 | if ((s <= kMaxSmallSize)) {
79 | return (static_cast(s) + 7) >> 3;
80 | }
81 | else {
82 | return (static_cast(s) + 127 + (120 << 7)) >> 7;
83 | }
84 | }
85 |
86 | int NumMoveSize(size_t size);
87 |
88 | // Mapping from size class to max size storable in that class
89 | size_t class_to_size_[kNumClasses];
90 |
91 | // Mapping from size class to number of pages to allocate at a time
92 | size_t class_to_pages_[kNumClasses];
93 |
94 | static size_t FLAGS_tcmalloc_transfer_num_objects;
95 |
96 | public:
97 | // Constructor should do nothing since we rely on explicit Init()
98 | // call, which may or may not be called before the constructor runs.
99 | CSizeMap() { }
100 |
101 | static CSizeMap & GetInstance();
102 |
103 | // Initialize the mapping arrays
104 | void Init();
105 |
106 | inline int SizeClass(int size) {
107 | return class_array_[ClassIndex(size)];
108 | }
109 |
110 | // Get the byte-size for a specified class
111 | inline size_t ByteSizeForClass(size_t cl) {
112 | return class_to_size_[cl];
113 | }
114 |
115 | // Mapping from size class to max size storable in that class
116 | inline size_t class_to_size(size_t cl) {
117 | return class_to_size_[cl];
118 | }
119 |
120 | // Mapping from size class to number of pages to allocate at a time
121 | inline size_t class_to_pages(size_t cl) {
122 | return class_to_pages_[cl];
123 | }
124 |
125 | // Number of objects to move between a per-thread list and a central
126 | // list in one shot. We want this to be not too small so we can
127 | // amortize the lock overhead for accessing the central list. Making
128 | // it too big may temporarily cause unnecessary memory wastage in the
129 | // per-thread free list until the scavenger cleans up the list.
130 | inline int num_objects_to_move(size_t cl) {
131 | return num_objects_to_move_[cl];
132 | }
133 |
134 | void print();
135 | };
136 | }
137 |
138 | #endif //__SIZEMAP_H__
--------------------------------------------------------------------------------
/src/ft_sys_alloc_intf.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_SYS_ALLOC_INTF_H__
9 | #define __FT_SYS_ALLOC_INTF_H__
10 |
#include <stddef.h>
12 |
13 | namespace ftmalloc
14 | {
/*
 * Abstract interface for raw page-level system allocators (implemented
 * by the sbrk- and mmap-based backends in this project).
 */
class ISysAlloc
{
public:
    virtual ~ISysAlloc() {}

    /* Obtain `size` bytes from the OS. */
    virtual void * SysAlloc(size_t size) = 0;
    /* Give the region starting at `ptr` (of `size` bytes) back to the OS. */
    virtual void SysRelease(void * ptr, size_t size) = 0;
};
23 | }
24 |
25 | #endif
26 |
--------------------------------------------------------------------------------
/src/ft_thread_cache.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #include "ft_thread_cache.h"
9 | #include "ft_mem_alloc_intf.h"
10 | #include "ft_malloc_log.h"
11 | #include "ft_malloc_util.h"
12 |
13 | namespace ftmalloc
14 | {
15 | static void TLSDestructor(void * object)
16 | {
17 | IMemAlloc * pAllcator = static_cast(object);
18 | FT_LOG(FT_INFO, "obj:%p", object);
19 |
20 | if (pAllcator != NULL) {
21 | IMemAlloc::DestroyMemAllocator(pAllcator);
22 | }
23 | FT_LOG(FT_INFO, "obj:%p, end!", object);
24 | }
25 |
26 | CThreadCache CThreadCache::sInstance;
27 | pthread_mutex_t CThreadCache::sMutex = PTHREAD_MUTEX_INITIALIZER;
28 |
/* Process-wide singleton (sInstance is a static member, built at load). */
CThreadCache & CThreadCache::GetInstance()
{
    return sInstance;
}
33 |
34 | CThreadCache::CThreadCache()
35 | : m_cKey()
36 | {
37 | FT_LOG(FT_DEBUG, "create CThreadCache");
38 | pthread_key_create(&m_cKey, TLSDestructor);
39 | }
40 |
/* The TLS key is intentionally not deleted: per-thread destructors may
 * still need to run at process shutdown. */
CThreadCache::~CThreadCache()
{
    FT_LOG(FT_DEBUG, "destroy CThreadCache");
}
45 |
46 | IMemAlloc * CThreadCache::GetAllocator()
47 | {
48 | IMemAlloc * pAllocator = static_cast(pthread_getspecific(m_cKey));
49 | if(pAllocator == NULL) {
50 | pAllocator = IMemAlloc::CreateMemAllocator();
51 | FT_LOG(FT_INFO, "object:%p", pAllocator);
52 | pthread_setspecific(m_cKey, static_cast(pAllocator));
53 | }
54 | return pAllocator;
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/src/ft_thread_cache.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | **/
7 |
8 | #ifndef __FT_THREAD_CACHE_H__
9 | #define __FT_THREAD_CACHE_H__
10 |
#include <pthread.h>
12 |
13 | namespace ftmalloc
14 | {
15 | class IMemAlloc;
16 |
/*
 * Per-thread allocator registry: hands out one IMemAlloc per thread via
 * pthread TLS (see ft_thread_cache.cpp).
 */
class CThreadCache
{
public:
    ~CThreadCache();

    /* Process-wide singleton. */
    static CThreadCache & GetInstance();

    /* Allocator bound to the calling thread (created on first use). */
    IMemAlloc * GetAllocator();

private:
    /* Singleton: construction and copying are private. */
    CThreadCache();
    CThreadCache(const CThreadCache &);
    CThreadCache & operator=(const CThreadCache &);

    static CThreadCache sInstance;
    /* NOTE(review): declared but not used in the visible .cpp. */
    static pthread_mutex_t sMutex;

    pthread_key_t m_cKey;   /* TLS slot holding the per-thread IMemAlloc */
};
36 | }
37 |
38 | #endif
39 |
--------------------------------------------------------------------------------
/src/ftmalloc.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
#include "ftmalloc.h"
#include "ft_malloc_intf.h"

#include "ft_malloc_alias.h"

#include <cerrno>
12 |
13 | extern "C"
14 | {
15 | const char * ft_version(int* major, int* minor, const char** patch) __THROW
16 | {
17 | if (major) *major = HK_VERSION_MAJOR;
18 | if (minor) *minor = HK_VERSION_MINOR;
19 | if (patch) *patch = HK_VERSION_PATCH;
20 |
21 | return HK_VERSION_STRING;
22 | }
23 |
24 | void * ft_malloc(size_t size) __THROW
25 | {
26 | void * result = ftmalloc::__Malloc(size);
27 | return result;
28 | }
29 |
30 | void * ft_malloc_skip_new_handler(size_t size) __THROW
31 | {
32 | return NULL;
33 | }
34 |
35 | void ft_free(void* ptr) __THROW
36 | {
37 | /*
38 | * I found that call pthread_join, will free TCB after tls_decontructor.
39 | * It will recreate tls-allocator again.
40 | */
41 | if (ptr == NULL) {
42 | return;
43 | }
44 |
45 | ftmalloc::__Free(ptr);
46 | }
47 |
48 | void * ft_realloc(void* ptr, size_t size) __THROW
49 | {
50 | void * result = ftmalloc::__ReAlloc(ptr, size);
51 | return result;
52 | }
53 |
54 | void * ft_calloc(size_t nmemb, size_t size) __THROW
55 | {
56 | void * result = ftmalloc::__Calloc(nmemb, size);
57 | return result;
58 | }
59 |
60 | void ft_cfree(void* ptr) __THROW
61 | {
62 | ftmalloc::__Free(ptr);
63 | }
64 |
/*
 * Aligned-allocation entry points are not implemented yet.  They
 * previously fell off the end of value-returning functions (undefined
 * behavior); make the failure explicit instead.
 */
void * ft_memalign(size_t __alignment, size_t __size) __THROW
{
    (void)__alignment;
    (void)__size;
    return NULL;    /* not implemented */
}

int ft_posix_memalign(void** ptr, size_t align, size_t size) __THROW
{
    (void)ptr;
    (void)align;
    (void)size;
    /* posix_memalign reports errors via its return value, not errno. */
    return ENOMEM;  /* not implemented */
}

void * ft_valloc(size_t __size) __THROW
{
    (void)__size;
    return NULL;    /* not implemented */
}

void * ft_pvalloc(size_t __size) __THROW
{
    (void)__size;
    return NULL;    /* not implemented */
}
80 |
81 | void ft_malloc_stats(void) __THROW
82 | {
83 | }
84 |
85 | int ft_mallopt(int cmd, int value) __THROW
86 | {
87 | }
88 |
89 | int ft_set_new_mode(int flag) __THROW
90 | {
91 | }
92 |
93 | void * ft_new(size_t size)
94 | {
95 | }
96 |
97 | void * ft_new_nothrow(size_t size, const std::nothrow_t&) __THROW
98 | {
99 | }
100 |
101 | void ft_delete(void* p) __THROW
102 | {
103 | }
104 |
105 | void ft_delete_nothrow(void* p, const std::nothrow_t&) __THROW
106 | {
107 | }
108 |
109 | void * ft_newarray(size_t size)
110 | {
111 | }
112 |
113 | void * ft_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW
114 | {
115 | }
116 |
117 | void ft_deletearray(void* p) __THROW
118 | {
119 | }
120 |
121 | void ft_deletearray_nothrow(void* p, const std::nothrow_t&) __THROW
122 | {
123 | }
124 |
125 | }
--------------------------------------------------------------------------------
/src/ftmalloc.h:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #ifndef __FT_MALLOC_H__
9 | #define __FT_MALLOC_H__
10 |
11 | #ifndef __THROW
12 | #define __THROW
13 | #endif
14 |
#include <stddef.h>
16 |
17 | #define HK_VERSION_MAJOR 1
18 | #define HK_VERSION_MINOR 0
19 | #define HK_VERSION_PATCH ""
20 | #define HK_VERSION_STRING "sudoku.huang's fast malloc 1.0"
21 |
22 | #ifdef __cplusplus
23 | namespace std {
24 | struct nothrow_t;
25 | }
26 |
27 | extern "C" {
28 | #endif
29 |
30 | const char * ft_version(int* major, int* minor, const char** patch) __THROW;
31 |
32 | void * ft_malloc(size_t size) __THROW;
33 | void * ft_malloc_skip_new_handler(size_t size) __THROW;
34 | void ft_free(void* ptr) __THROW;
35 | void * ft_realloc(void* ptr, size_t size) __THROW;
36 | void * ft_calloc(size_t nmemb, size_t size) __THROW;
37 | void ft_cfree(void* ptr) __THROW;
38 |
39 | void * ft_memalign(size_t __alignment, size_t __size) __THROW;
40 | int ft_posix_memalign(void** ptr, size_t align, size_t size) __THROW;
41 | void * ft_valloc(size_t __size) __THROW;
42 | void * ft_pvalloc(size_t __size) __THROW;
43 |
44 | void ft_malloc_stats(void) __THROW;
45 | int ft_mallopt(int cmd, int value) __THROW;
46 | #if 0
47 | struct mallinfo ft_mallinfo(void) __THROW;
48 | #endif
49 |
50 | size_t ft_malloc_size(void* ptr) __THROW;
51 |
52 | #ifdef __cplusplus
53 | int ft_set_new_mode(int flag) __THROW;
54 | void * ft_new(size_t size);
55 | void * ft_new_nothrow(size_t size, const std::nothrow_t&) __THROW;
56 | void ft_delete(void* p) __THROW;
57 | void ft_delete_nothrow(void* p, const std::nothrow_t&) __THROW;
58 | void * ft_newarray(size_t size);
59 | void * ft_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW;
60 | void ft_deletearray(void* p) __THROW;
61 | void ft_deletearray_nothrow(void* p, const std::nothrow_t&) __THROW;
62 | }
63 | #endif
64 |
65 | #endif
66 |
--------------------------------------------------------------------------------
/test/Makefile:
--------------------------------------------------------------------------------
1 | cxx=g++
2 | SERVER = test
3 | #SOURCES = $(wildcard *.cpp)
4 | # Builds the `test` binary from every local .cpp, linked against libftmalloc from ../src.
5 | SOURCES = $(wildcard *.cpp)
6 |
7 | DEP_SOURCES = $(notdir $(SOURCES))
8 | DEP_OBJECTS = $(patsubst %.cpp, %.o, $(DEP_SOURCES))
9 |
10 | INC_PATH = $(shell pwd)
11 | LIB_PATH = $(shell pwd)
12 | LIBRARYS= -pthread -lftmalloc
13 | #LIBRARYS= -pthread -ltcmalloc
14 |
15 | MACROS = -DC_STRING_FUNC -DLOG_PRINTF -DFT_LOG_DEBUG
16 | SHARED = #-shared -fPIC
17 | CPPFLAGS += -g -O0 -Wno-deprecated -Wnonnull
18 | # Extend search paths with the allocator sources and the system library dir.
19 | INC_PATH += ../src
20 | LIB_PATH += ../src /usr/lib/
21 |
22 | INC_DIR = $(patsubst %, -I%, $(INC_PATH))
23 | LIB_DIR = $(patsubst %, -L%, $(LIB_PATH))
24 |
25 | # Default goal: link all objects into the test binary.
26 | all : $(SERVER)
27 |
28 | $(SERVER) : $(DEP_OBJECTS)
29 | $(cxx) $(MACROS) $(CPPFLAGS) $(SHARED) -o $@ $^ $(INC_DIR) $(LIB_DIR) $(LIBRARYS)
30 | # Remove the binary, objects, and generated dependency files.
31 | .PHONY : clean
32 | clean:
33 | -rm -f $(SERVER)
34 | -rm -f *.o
35 | -rm -f *.d
36 | # Skip (re)generating .d files when the goal is `clean`.
37 | ifneq "$(MAKECMDGOALS)" "clean"
38 | include $(DEP_SOURCES:.cpp=.d)
39 | endif
40 | # %.d rule: find the .cpp matching the stem, then write a .d containing the compiler-generated header dependencies plus an explicit compile command for the .o.
41 | %.d :
42 | @t1=$*; t2=$${t1##*/}; \
43 | rely_file=""; for file in $(SOURCES); \
44 | do \
45 | find=$${file%%$$t2.cpp}; \
46 | if [ $${#find} != $${#file} ]; then \
47 | rely_file=$$file; \
48 | fi; \
49 | done; \
50 | set -e; rm -f $@; \
51 | $(cxx) -MM $(CPPFLAGS) $(INC_DIR) $$rely_file > $@.; \
52 | cat $@. > $@; \
53 | echo " $(cxx) $(CPPFLAGS) $(MACROS) $(SHARED) -c -o $$t2.o $(INC_DIR) $$rely_file" >> $@; \
54 | sed 's/\.o/\.d/g' < $@. >> $@; \
55 | rm -f $@. ;
56 |
57 |
--------------------------------------------------------------------------------
/test/main.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * @Author: kun huang
3 | * @Email: sudoku.huang@gmail.com
4 | * @Desc: reference google-tcmalloc,
5 | * memory allocator and manager.
6 | */
7 |
8 | #include <stdio.h>
9 | #include <stdlib.h>
10 | #include <pthread.h>
11 |
12 | #if 0
13 | #include "ftmalloc.h"
14 |
15 | void * thread_func(void * args) /* pthread start routine (args unused); paced by getchar() */
16 | {
17 |     printf(" T1 begin!, test ft_malloc!!\n");
18 |     getchar();
19 |
20 |     printf("T1 alloc int!\n");
21 |     int * p = (int *)ft_malloc(sizeof(int));
22 |     getchar();
23 |
24 |     printf("T1 assign value to int!\n");
25 |     *p = 10;
26 |     getchar();
27 |
28 |     printf("T1 free int!\n");
29 |     ft_free((void *)p);
30 |     getchar();
31 |     return NULL; /* was missing: falling off a value-returning function is UB */
32 | }
33 | #else
34 | void * thread_func(void * args) /* pthread start routine (args unused); paced by getchar() */
35 | {
36 |     printf(" T1 begin!, test malloc!!\n");
37 |     getchar();
38 |
39 |     printf("T1 alloc int!\n");
40 |     int * p = (int *)malloc(sizeof(int));
41 |     getchar();
42 |
43 |     printf("T1 assign value to int!\n");
44 |     *p = 10;
45 |     getchar();
46 |
47 |     printf("T1 free int!\n");
48 |     free((void *)p);
49 |     getchar();
50 |
51 |     printf("T1 alloc page.\n");
52 |     char * page = (char *)malloc(1024 * 1024);
53 |     getchar();
54 |
55 |     printf("T1, free page\n");
56 |     free((void *)page);
57 |     getchar();
58 |     printf("thread exit!\n");
59 |     getchar();
60 |     return NULL; /* was missing: falling off a value-returning function is UB */
61 | }
62 | #endif
63 |
64 | int main()
65 | {
66 |     char * byte = (char *)malloc(1); /* exercise the allocator once on the main thread */
67 |     free(byte);
68 |
69 |     pthread_t pt;
70 |     if (pthread_create(&pt, NULL, thread_func, NULL) != 0) { fprintf(stderr, "pthread_create failed\n"); return 1; } /* was unchecked: joining an uncreated thread is UB */
71 |     pthread_join(pt, NULL);
72 |     printf("main thread exit\n");
73 |     return 0;
74 | }
75 |
--------------------------------------------------------------------------------