├── .astylerc
├── .github
│   └── workflows
│       └── build_docs.yml
├── .gitignore
├── CONTRIBUTING.md
├── DEVELOP.md
├── LICENSE
├── README.md
├── TODO.md
├── docs
│   ├── .doxygen
│   ├── .gitignore
│   ├── Makefile
│   ├── build_docs.py
│   ├── requirements.txt
│   └── source
│       ├── conf.py
│       └── index.rst
├── examples
│   └── bw_tester
│       ├── bw_tester.c
│       └── meson.build
├── fio-ioengine
│   ├── flexalloc.c
│   ├── flexalloc.fio
│   └── meson.build
├── flan
│   └── meson.build
├── meson.build
├── meson_options.txt
├── scripts
│   ├── _set_env.sh
│   ├── build_python_bindings.sh
│   ├── run
│   ├── run_tests.sh
│   └── style.sh
├── src
│   ├── flexalloc.c
│   ├── flexalloc.h
│   ├── flexalloc_bits.c
│   ├── flexalloc_bits.h
│   ├── flexalloc_cli_common.c
│   ├── flexalloc_cli_common.h
│   ├── flexalloc_cs.c
│   ├── flexalloc_cs.h
│   ├── flexalloc_cs_cns.c
│   ├── flexalloc_cs_cns.h
│   ├── flexalloc_cs_zns.c
│   ├── flexalloc_cs_zns.h
│   ├── flexalloc_daemon.c
│   ├── flexalloc_daemon_base.c
│   ├── flexalloc_daemon_base.h
│   ├── flexalloc_dp.c
│   ├── flexalloc_dp.h
│   ├── flexalloc_dp_fdp.c
│   ├── flexalloc_dp_fdp.h
│   ├── flexalloc_dp_noop.h
│   ├── flexalloc_freelist.c
│   ├── flexalloc_freelist.h
│   ├── flexalloc_hash.c
│   ├── flexalloc_hash.h
│   ├── flexalloc_inspect.c
│   ├── flexalloc_introspection.c
│   ├── flexalloc_introspection.h
│   ├── flexalloc_ll.c
│   ├── flexalloc_ll.h
│   ├── flexalloc_mkfs.c
│   ├── flexalloc_mm.c
│   ├── flexalloc_mm.h
│   ├── flexalloc_pool.c
│   ├── flexalloc_pool.h
│   ├── flexalloc_shared.h
│   ├── flexalloc_slabcache.c
│   ├── flexalloc_slabcache.h
│   ├── flexalloc_test_client.c
│   ├── flexalloc_uthash.h
│   ├── flexalloc_util.c
│   ├── flexalloc_util.h
│   ├── flexalloc_utlist.h
│   ├── flexalloc_xnvme_env.c
│   ├── flexalloc_xnvme_env.h
│   ├── libflexalloc.c
│   └── libflexalloc.h
└── tests
    ├── flexalloc_rt_hash.c
    ├── flexalloc_rt_lib_open_close.c
    ├── flexalloc_rt_mkfs.c
    ├── flexalloc_rt_multi_pool_read_write.c
    ├── flexalloc_rt_object_allocate.c
    ├── flexalloc_rt_object_overread_overwrite.c
    ├── flexalloc_rt_object_read_write.c
    ├── flexalloc_rt_object_unaligned_write.c
    ├── flexalloc_rt_pool.c
    ├── flexalloc_rt_strp_object_read_write.c
    ├── flexalloc_rt_xnvme_to_from.c
    ├── flexalloc_tests_common.c
    ├── flexalloc_tests_common.h
    ├── flexalloc_ut_bits.c
    ├── flexalloc_ut_freelist.c
    ├── flexalloc_ut_hash.c
    └── flexalloc_ut_slab.c
/.astylerc:
--------------------------------------------------------------------------------
1 | --mode=c
2 | --style=allman
3 | --indent=spaces=2
4 | --max-code-length=100
5 | --min-conditional-indent=0
6 | --suffix=none
7 | --break-return-type
8 |
--------------------------------------------------------------------------------
/.github/workflows/build_docs.yml:
--------------------------------------------------------------------------------
1 | name: Build docs
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | - jwd/sphinx
8 |
9 | defaults:
10 | run:
11 | shell: bash
12 |
13 | jobs:
14 | generate-docs:
15 | runs-on: ubuntu-20.04
16 |
17 | steps:
18 | - name: Get source
19 | uses: actions/checkout@v2
20 |
21 | - name: Install doxygen
22 | run: sudo apt-get install -y doxygen
23 |
24 | - name: Prepare python
25 | uses: actions/setup-python@v2
26 | with:
27 | python-version: '>=3.9'
28 |
29 | - name: Prepare Python virtual environment
30 | uses: syphar/restore-virtualenv@v1
31 | id: cache-docs-venv
32 | with:
33 | requirement_files: docs/requirements.txt
34 |
35 | - name: Install requirements
36 | run: pip install -r docs/requirements.txt
37 | if: steps.cache-docs-venv.outputs.cache-hit != 'true'
38 |
39 | - name: Generate docs
40 | run: python docs/build_docs.py
41 |
42 | - name: Publish docs
43 | uses: peaceiris/actions-gh-pages@v3
44 | with:
45 | github_token: ${{ secrets.GITHUB_TOKEN }}
46 | publish_dir: ./docs/build
47 | user_name: 'github-actions[bot]'
48 | user_email: 'github-actions[bot]@users.noreply.github.com'
49 | force_orphan: true
50 | publish_branch: gh-pages
51 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Environment
2 | .cache
3 |
4 | # build directory
5 | build
6 | *.build/
7 | debug
8 |
9 | # Prerequisites
10 | *.d
11 |
12 | # Object files
13 | *.o
14 | *.ko
15 | *.obj
16 | *.elf
17 |
18 | # Linker output
19 | *.ilk
20 | *.map
21 | *.exp
22 |
23 | # Precompiled Headers
24 | *.gch
25 | *.pch
26 |
27 | # Libraries
28 | *.lib
29 | *.a
30 | *.la
31 | *.lo
32 |
33 | # Shared objects (inc. Windows DLLs)
34 | *.dll
35 | *.so
36 | *.so.*
37 | *.dylib
38 |
39 | # Executables
40 | *.exe
41 | *.out
42 | *.app
43 | *.i*86
44 | *.x86_64
45 | *.hex
46 |
47 | # Debug files
48 | *.dSYM/
49 | *.su
50 | *.idb
51 | *.pdb
52 |
53 | # Kernel Module Compile Results
54 | *.mod*
55 | *.cmd
56 | .tmp_versions/
57 | modules.order
58 | Module.symvers
59 | Mkfile.old
60 | dkms.conf
61 |
62 | #gdb
63 | .gdbinit
64 |
65 | #ignore IDE files
66 | .idea
67 | .vscode
68 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | We're glad you want to contribute! This document will help answer common
4 | questions you may have during your first contribution.
5 |
6 | ## Contribution Process
7 |
8 | We have a 3 step process for contributions:
9 |
10 | 1. Commit changes to a git branch, making sure to sign-off those changes for the
11 | [Developer Certificate of Origin](#developer-certification-of-origin-dco).
12 | 2. Create a GitHub Pull Request for your change, following the instructions in
13 | the pull request template.
14 | 3. Perform a [Code Review](#code-review-process) with the project maintainers on
15 | the pull request.
16 |
17 | ### Developer Certification of Origin (DCO)
18 |
19 | Licensing is very important to open source projects. It helps ensure the
20 | software continues to be available under the terms that the author desired.
21 |
22 | FlexAlloc uses the Apache 2.0 LICENSE to strike a balance between open contribution
23 | and allowing you to use the software however you would like to.
24 |
25 | The license tells you what rights you have that are provided by the copyright
26 | holder. It is important that the contributor fully understands what rights they
27 | are licensing and agrees to them. Sometimes the copyright holder isn't the
28 | contributor, such as when the contributor is doing work on behalf of a company.
29 |
30 | To make a good faith effort to ensure these criteria are met, FlexAlloc requires
31 | the Developer Certificate of Origin (DCO) process to be followed.
32 |
33 | The DCO is an attestation attached to every contribution made by every
34 | developer. In the commit message of the contribution, the developer simply adds
35 | a Signed-off-by statement and thereby agrees to the DCO, which you can find
36 | below or at <https://developercertificate.org/>.
37 |
38 | ```
39 | Developer's Certificate of Origin 1.1
40 |
41 | By making a contribution to this project, I certify that:
42 |
43 | (a) The contribution was created in whole or in part by me and I
44 | have the right to submit it under the open source license
45 | indicated in the file; or
46 |
47 | (b) The contribution is based upon previous work that, to the
48 | best of my knowledge, is covered under an appropriate open
49 | source license and I have the right under that license to
50 | submit that work with modifications, whether created in whole
51 | or in part by me, under the same open source license (unless
52 | I am permitted to submit under a different license), as
53 | indicated in the file; or
54 |
55 | (c) The contribution was provided directly to me by some other
56 | person who certified (a), (b) or (c) and I have not modified
57 | it.
58 |
59 | (d) I understand and agree that this project and the contribution
60 | are public and that a record of the contribution (including
61 | all personal information I submit with it, including my
62 | sign-off) is maintained indefinitely and may be redistributed
63 | consistent with this project or the open source license(s)
64 | involved.
65 | ```
66 |
67 | #### DCO Sign-Off Methods
68 |
69 | The DCO requires a sign-off message in the following format appear on each
70 | commit in the pull request:
71 |
72 | ```
73 | Signed-off-by: John Doe <john.doe@example.com>
74 | ```
75 |
76 | The DCO text can either be manually added to your commit body, or you can add
77 | either **-s** or **--signoff** to your usual git commit commands. If you forget
78 | to add the sign-off you can also amend a previous commit with the sign-off by
79 | running **git commit --amend -s**. If you've pushed your changes to GitHub
80 | already you'll need to force push your branch after this with **git push -f**.
81 |
82 |
83 |
--------------------------------------------------------------------------------
/DEVELOP.md:
--------------------------------------------------------------------------------
1 | # Building
2 | **Dependencies**
3 | * FlexAlloc depends on the following projects:
4 | - [xnvme](https://github.com/OpenMPDK/xNVMe) - a library for flexible access to storage devices
5 | - [meson](https://mesonbuild.com) - build system leveraged by flexalloc. [How to install](#installmesonninja)
6 | - [ninja](https://pypi.org/project/ninja/) - small build system leveraged by meson. [How to install](#installmesonninja)
7 |
8 |
9 | **Obtain Source**
10 | ```shell
11 | git clone https://github.com/OpenMPDK/FlexAlloc
12 | cd FlexAlloc
13 | ```
14 |
15 | **Build**
16 | ```shell
17 | meson build
18 | meson compile -C build
19 | ```
20 |
21 | **Install**
22 | ```shell
23 | meson install -C build
24 | ```
25 |
26 | **Running tests**
27 | ```shell
28 | meson test -C build
29 | ```
30 |
31 | # Conventions:
32 | **Code Formatting**
33 | * Execute the astyle script to format your code properly.
34 |
35 | **Return Value**
36 | * Always return int as error: In general, methods should return an int where
37 | zero means that no errors occurred and any other value means an error (see the sketch below this list).
38 | - If errno is detected or set within the function, it should be returned as -errno.
39 | - If an error different from what errno defines is detected or set, it should be
40 | returned as a positive number.
41 |
42 | * Return void when there are no errors to handle: Use void when there are no
43 | errors to handle within the method. This is quite rare but happens when you,
44 | for example, wrap a simple free.
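* A minimal sketch of this convention; the `fla_example_*` names and the error code
are hypothetical and not part of the FlexAlloc sources:
```c
#include <errno.h>
#include <stdlib.h>

// a project-specific error that errno does not describe: returned as a positive value
#define FLA_EXAMPLE_ERR_ARG 1

static int
fla_example_buf_alloc(size_t nbytes, void **buf)
{
  if (nbytes == 0 || !buf)
    return FLA_EXAMPLE_ERR_ARG; // custom error: positive value

  *buf = malloc(nbytes);
  if (!*buf)
    return -ENOMEM;             // errno-style failure: returned as -errno

  return 0;                     // zero: no error occurred
}

// a thin wrapper around free() has nothing to report, so it returns void
static void
fla_example_buf_free(void *buf)
{
  free(buf);
}
```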
45 |
46 | **Naming**
47 | * Prefix "fla_" for functions and variables : In general, functions that might
48 | involve a name clash must start with "fla_". This includes API functions.
49 |
50 | * When creating a regression tests you should include "_rt_" in the name, in the
51 | same way include "_ut_" when creating unit tests.
52 |
53 | **Error handling**
54 | * For every error encountered, the `FLA_ERR_*` macros should be used. These will
55 | print an error message to stderr and set the appropriate return value.
56 | Using these macros allows us to control the message format and how messages get
57 | output (see the sketch below this list).
58 | * Regular execution of libflexalloc should NOT output anything to
59 | std{out,err,in}. Only when there is an error should stderr be used. This
60 | is true only when FLEXALLOC_VERBOSITY is set to 0.
61 | * Where desired, use the `FLA_DBG_*` macros. Their output is only shown when
62 | FlexAlloc is compiled with debugging enabled, as is the case for the unit and regression tests.
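* A sketch of the pattern, modeled on the `FLA_ERR` usage in `src/flexalloc_cs.c`;
`fla_example_setup()` is a hypothetical caller and `FLA_ERR` is assumed to be provided
by `src/flexalloc_util.h`:
```c
#include "flexalloc_cs.h"
#include "flexalloc_util.h"

static int
fla_example_setup(struct flexalloc *fs)
{
  int err;

  // FLA_ERR() is silent when err is zero; on error it prints to stderr and is truthy
  err = fla_init_cs(fs);
  if (FLA_ERR(err, "fla_init_cs()"))
    return err;

  return 0;
}
```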
63 |
64 | **Feature Test Macros**
65 | * No thank you
66 |
67 | **Documentation**
68 | * We use doxygen for all comments in source. It is preferred for the documentation
69 | to include all subsections: general comments, arguments, variables, return
70 | values, etc. Documentation can be forgone or reduced when what is being
71 | documented has a high probability of changing (avoid documenting in vain). Reduced
72 | documentation means that only a small doxygen-formatted comment is included (see the example below).
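* A small example of the kind of comment meant here; the function is hypothetical and
the tags follow common doxygen practice rather than a project template:
```c
/**
 * Compute how many fixed-size objects fit in one slab.
 *
 * Integer division; any remainder is unused slab space.
 *
 * @param slab_nlb number of logical blocks in a slab
 * @param obj_nlb  number of logical blocks per object
 * @return number of objects that fit in the slab
 */
uint32_t
fla_example_objs_in_slab(uint32_t slab_nlb, uint32_t obj_nlb);
```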
73 |
74 | **Versioning**
75 | * We follow semantic versioning. https://semver.org/.
76 |
77 | **Compile Options**
78 | * Options can be set either at setup or at configure time. In both cases you need
79 | to add `-Doption=value` to the command line in order to set the value.
80 | * FLEXALLOC_VERBOSITY is an integer option that can be either 0 or 1. 0 shows no extra output;
81 | 1 means that messages printed with FLA_VBS_PRINTF will be shown. It defaults to 0.
82 |
83 | # Troubleshooting
84 |
85 | **Cannot install meson or ninja**
86 | * You will need the [meson](https://mesonbuild.com) build system. The easiest way to
87 | get the latest meson package is through [pipx](https://pipxproject.github.io/pipx/) like so:
88 | ```shell
89 | # (replace 'python3' with 'python' if this is your Python 3 binary)
90 | python3 -m pip install --user pipx
91 | python3 -m pipx ensurepath
92 |
93 | # Afterward, install `meson` and `ninja` into their own virtual environment:
94 | pipx install meson
95 | pipx runpip meson install ninja
96 | ```
97 |
98 | * If you see errors indicating that you are missing `venv` or `virtualenv`: in some distributions
99 | this Python module is shipped separately. For Ubuntu/Debian you will need to install the
100 | `python3-venv` package.
101 |
102 | **Cannot open loopback-control for writing**
103 | * Most tests create a loopback device in lieu of using a real hardware device.
104 | To do this, your user must be able to create loopback devices. Try the following and if
105 | you don't see `permission denied`, you are ready to run the tests.
106 | ```shell
107 | # Create an image file to test with:
108 | dd if=/dev/zero of=/tmp/loop1.img bs=1M count=10
109 |
110 | # Mount the image as a loopback device:
111 | losetup -f /tmp/loop1.img
112 | ```
113 |
114 | * If you see `permission denied`, check the permissions of the `/dev/loop-control` file.
115 | ```shell
116 | $ ls -la /dev/loop-control
117 | crw-rw---- 1 root disk 10, 237 Mar 17 13:06 /dev/loop-control
118 | ```
119 | - In this case, the `disk` group also has read-write access, so run the
120 | following command to add your user to the `disk` group:
121 | ```shell
122 | sudo usermod -aG disk `whoami`
123 | ```
124 | - Log out and then back into your desktop environment or run `newgrp disk` in the
125 | relevant terminal window for the permissions change to take effect.
126 |
127 |
128 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FlexAlloc
2 |
3 | FlexAlloc is a lean, nameless object allocator that forms the basis of
4 | open-source user-space storage management software focused on bridging the gap
5 | between raw block device access and data management through traditional file
6 | system interfaces.
7 |
8 | **Key Features**
9 | * Flexible object allocation based on a fixed object size per pool
10 | * Multiple pools per device
11 | * Pools backed by slabs which can be released and obtained dynamically
12 |
13 | **Benefits**
14 | * No external fragmentation, thanks to the fixed-object-size design
15 | * Performant, since object handles translate directly to device offsets
16 | * Lightweight: metadata updates are not on the fast path
17 |
18 | **Design**
19 | * FlexAlloc is implemented as a slab allocator. The disk is divided into a series
20 | of uniformly sized block ranges (slabs) which are then distributed to pools
21 | (Fig 1). A slab, when owned by a pool, is partitioned into an array of
22 | uniformly sized objects whose size is defined at run time (see the usage sketch after the figure).
23 | ```
24 | +----------+-------------------+-------------------+-------------------+-------------------+
25 | | Metadata | Slab 1 (Pool 1) | Slab 2 (Pool 2) | .... | Slab N |
26 | +----------+----+----+----+----+----+----+----+----+-------------------+-------------------+
27 | | | | | | | | | | |
28 | | |Obj1|... |... |Obj2| .... | Obj3 | .... | Empty |
29 | | | | | | | | | | |
30 | +----------+----+----+----+----+----+----+----+----+-------------------+-------------------+
31 | Fig 1. Metadata is at the start of the device. Pool 1 is made up of Slab 1 and contains 2
32 | objects. Pool 2 is made up of Slab 2 and contains 1 object.
33 | ```
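* A minimal usage sketch of the public API, modeled on `examples/bw_tester/bw_tester.c`.
The header names, device URI, and object size are illustrative assumptions; error
handling, striping options, and a separate metadata device are omitted for brevity:
```c
#include <string.h>
#include <libflexalloc.h>   // assumed install name of the public header (src/libflexalloc.h)
#include <libxnvme.h>       // assumed xNVMe header providing xnvme_opts_default()

int main(void)
{
  struct flexalloc *fs;
  struct fla_pool *pool;
  struct fla_object obj;
  struct xnvme_opts x_opts = xnvme_opts_default();
  struct fla_open_opts open_opts = { .dev_uri = "/dev/nvme0n1", .opts = &x_opts };

  if (fla_open(&open_opts, &fs))        // attach to a device formatted with mkfs.flexalloc
    return 1;

  struct fla_pool_create_arg pool_arg =
  {
    .name = "my_pool",
    .name_len = strlen("my_pool"),
    .obj_nlb = 64,                      // every object in this pool spans 64 logical blocks
  };

  if (fla_pool_create(fs, &pool_arg, &pool) == 0)
    fla_object_create(fs, pool, &obj);  // the object handle maps directly to a device offset

  fla_close(fs);
  return 0;
}
```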
34 |
35 | **Build**
36 | * [DEVELOP.md](DEVELOP.md)
37 |
38 | **Preliminary Benchmark Results**
39 | * We benchmark on a Samsung PM9A3 SSD and find that FlexAlloc's write amplification
40 | factor (WAF) is 0.38 lower than that of XFS and that it writes 6 times faster than XFS.
41 | We execute 3 concurrent random-write workloads at queue depth 1 with fio (10000 10 MiB
42 | files, 10000 100 MiB files, 2000 1 GiB files).
43 |
44 | **More Information**
45 | * We usually hang out on Discord: [FlexAlloc Discord Channel](https://discord.gg/qx8yME4Dcd)
46 | * Feel free to drop us a mail:
47 | - j.granados@samsung.com
48 | - j.devantier@samsung.com
49 | - a.manzanares@samsung.com
50 | * Find our 2021 SDC presentation [FlexAlloc SDC presentation](https://www.youtube.com/watch?v=Hf23EQQv7g8)
51 |
52 |
53 |
--------------------------------------------------------------------------------
/TODO.md:
--------------------------------------------------------------------------------
1 | # Bugs
2 | - [ ] We removed wlist.txt and its related tests from the sources because of license issues. Re-enable those tests.
3 | # Enhancements
4 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | doxygen.build/
2 | build/
3 | .venv
4 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/build_docs.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import subprocess
3 | from contextlib import contextmanager
4 | from typing import Union
5 | import os
6 | import sys
7 |
8 |
9 | DOCS_ROOT = Path(__file__).resolve().parent
10 |
11 |
12 | class CommandNotFoundError(Exception):
13 | def __init__(self, command: str):
14 | super().__init__(f"command '{command}' not found")
15 | self.command = command
16 |
17 |
18 | @contextmanager
19 | def cwd(path: Union[str, Path]):
20 | current_path = Path.cwd()
21 |
22 | if not isinstance(path, Path):
23 | path = Path(path)
24 | try:
25 | os.chdir(path)
26 | yield
27 | finally:
28 | os.chdir(current_path)
29 |
30 |
31 | def run(cmd, **kwargs):
32 | if "check" not in kwargs:
33 | kwargs["check"] = True
34 | print(f"""> {" ".join(cmd)}""")
35 | try:
36 | return subprocess.run(cmd, **kwargs)
37 | except FileNotFoundError as e:
38 | raise CommandNotFoundError(cmd[0]) from e
39 |
40 |
41 | def main():
42 | if not os.environ.get("VIRTUAL_ENV", ""):
43 | print("ERROR> script not running in ")
44 | try:
45 | import sphinx
46 | except ImportError:
47 | print("ERROR> 'sphinx' package not found")
48 | print("")
49 | print("To run this script, set up a virtual environment with all dependencies:")
50 | print("-------------------------")
51 | print("$ python3 -m venv .venv")
52 | print("$ source .venv/bin/activate")
53 | print("(.venv) $ pip install -r requirements.txt")
54 | print("-------------------------")
55 | print("")
56 | print("To run the script, enter the virtual environment, then run the script")
57 | print("-------------------------")
58 | print("$ source .venv/bin/activate")
59 | print("(.venv) $ python build_docs.py")
60 | sys.exit(1)
61 |
62 | print("generate API docs with doxygen...")
63 | print(f"DOCS ROOT: {DOCS_ROOT}")
64 |
65 | with cwd(DOCS_ROOT):
66 | try:
67 | run(["doxygen", ".doxygen"],
68 | cwd=DOCS_ROOT)
69 |
70 | run(["sphinx-build", "-b", "html", "source", "build"])
71 | except CommandNotFoundError as e:
72 | print(str(e))
73 | sys.exit(1)
74 | except subprocess.CalledProcessError as e:
75 | print("script aborted due to non-zero exit code from program")
76 | sys.exit(e.returncode)
77 |
78 |
79 | if __name__ == "__main__":
80 | main()
81 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | docutils>=0.17.1,<0.18
2 | breathe>=4.32,<=4.4
3 | kmdo>=0.0.5,<0.1
4 | pydata-sphinx-theme>=0.8,<0.9
5 | Sphinx>=4.4,<4.5
6 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | # import os
14 | # import sys
15 | # sys.path.insert(0, os.path.abspath('.'))
16 | from pathlib import Path
17 |
18 |
19 | # -- Project information -----------------------------------------------------
20 |
21 | project = 'FlexAlloc'
22 | copyright = '2022, Samsung'
23 | author = 'Adam Manzanares, Jesper Wendel Devantier, Joel Andres Granados'
24 |
25 |
26 | # -- General configuration ---------------------------------------------------
27 |
28 | # Add any Sphinx extension module names here, as strings. They can be
29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
30 | # ones.
31 | extensions = [
32 | 'breathe'
33 | ]
34 |
35 | # '/path/to/flexalloc/docs/source/conf.py' -> '/path/to/flexalloc/docs'
36 | docs_root = Path(__file__).resolve().parent.parent
37 |
38 | breathe_projects = {
39 | "flexalloc": str(docs_root / "doxygen.build" / "xml")
40 | }
41 | breathe_default_project = "flexalloc"
42 |
43 | # Add any paths that contain templates here, relative to this directory.
44 | templates_path = ['_templates']
45 |
46 | # List of patterns, relative to source directory, that match files and
47 | # directories to ignore when looking for source files.
48 | # This pattern also affects html_static_path and html_extra_path.
49 | exclude_patterns = []
50 |
51 |
52 | # -- Options for HTML output -------------------------------------------------
53 |
54 | # The theme to use for HTML and HTML Help pages. See the documentation for
55 | # a list of builtin themes.
56 | #
57 | html_theme = 'pydata_sphinx_theme'
58 |
59 | # Add any paths that contain custom static files (such as style sheets) here,
60 | # relative to this directory. They are copied after the builtin static files,
61 | # so a file named "default.css" will overwrite the builtin "default.css".
62 | html_static_path = ['_static']
63 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. FlexAlloc documentation master file, created by
2 | sphinx-quickstart on Thu Feb 3 13:07:32 2022.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to FlexAlloc's documentation!
7 | =====================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 |
13 | .. doxygenindex::
14 |
15 | Indices and tables
16 | ==================
17 |
18 | * :ref:`genindex`
19 | * :ref:`modindex`
20 | * :ref:`search`
21 |
--------------------------------------------------------------------------------
/examples/bw_tester/bw_tester.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2022 Adam Manzanares
2 | #include <libflexalloc.h> /* assumed: public flexalloc API used below */
3 | #include <libxnvme.h>      /* assumed: xnvme_opts / xnvme_timer helpers */
4 | #include <stdio.h>
5 | #include <stdlib.h>
6 | #include <string.h>
7 | #include <unistd.h>
8 |
9 | #define POOL_NAME "TEST_POOL"
10 | #define USAGE "./bw_tester fla_dev fla_md_dev num_rw obj_size_blks strp_objs strp_nbytes wrt_nbytes verify num_strp_objs"
11 |
12 | int r_w_obj(struct flexalloc *fs, struct fla_pool *pool, struct fla_object *obj,
13 | uint64_t num, uint64_t wrt_nbytes, bool write, bool verify, int rand_num)
14 | {
15 | int ret;
16 | char *buf;
17 | struct xnvme_timer time;
18 |
19 | buf = fla_buf_alloc(fs, wrt_nbytes);
20 | if (!buf) {
21 | printf("Allocating object buffer fails\n");
22 | return -ENOMEM;
23 | }
24 |
25 | if (write)
26 | {
27 | for (uint64_t cur_byte = 0; cur_byte < wrt_nbytes; cur_byte++)
28 | buf[cur_byte] = (char)cur_byte + rand_num;
29 | }
30 |
31 | xnvme_timer_start(&time);
32 | for (uint64_t cur = 0; cur < num; cur++) {
33 | ret = write
34 | ? fla_object_write(fs, pool, obj, buf, cur * wrt_nbytes, wrt_nbytes)
35 | : fla_object_read(fs, pool, obj, buf, cur * wrt_nbytes, wrt_nbytes);
36 |
37 | if (ret) {
38 | printf("Object %s fails. Cur:%lu\n", write ? "write" : "read", cur);
39 | return ret;
40 | }
41 |
42 | if (!write && verify)
43 | {
44 | for (uint64_t cur_byte = 0; cur_byte < wrt_nbytes; cur_byte++)
45 | {
46 | if (buf[cur_byte] != (char)(cur_byte + rand_num))
47 | {
48 | printf("Data mismatch cur:%lu offset:%lu,expected:[%c],got:[%c]\n", cur, cur_byte,
49 | (char)(cur_byte + rand_num), buf[cur_byte]);
50 | return -1;
51 | }
52 | }
53 | }
54 | }
55 |
56 | xnvme_timer_stop(&time);
57 |
58 | if (write)
59 | xnvme_timer_bw_pr(&time, "wall-clock write", wrt_nbytes * num);
60 | else
61 | xnvme_timer_bw_pr(&time, "wall-clock read", wrt_nbytes * num);
62 |
63 | return ret;
64 | }
65 |
66 | int main(int argc, char **argv)
67 | {
68 | struct flexalloc *fs;
69 | struct fla_pool *pool;
70 | struct fla_object obj;
71 | int ret;
72 | char *dev, *md_dev;
73 | uint64_t num_writes, obj_nlb, strp_nobjs, strp_nbytes, wrt_nbytes, num_strp_objs;
74 | bool verify;
75 | struct fla_open_opts open_opts = {0};
76 | struct xnvme_opts x_opts = xnvme_opts_default();
77 |
78 | if (argc != 10) {
79 | printf("Usage:%s\n", USAGE);
80 | return -1;
81 | }
82 |
83 | dev = argv[1];
84 | md_dev = argv[2];
85 | num_writes = atoi(argv[3]);
86 | obj_nlb = atoi(argv[4]);
87 | strp_nobjs = atoi(argv[5]);
88 | strp_nbytes = atoi(argv[6]);
89 | wrt_nbytes = atoi(argv[7]);
90 | verify = atoi(argv[8]);
91 | num_strp_objs = atoi(argv[9]);
92 |
93 | if (num_strp_objs == 0) // Fill until failure when num_strp_objs == 0
94 | num_strp_objs = num_strp_objs - 1;
95 |
96 | open_opts.dev_uri = dev;
97 | open_opts.md_dev_uri = md_dev;
98 | open_opts.opts = &x_opts;
99 | open_opts.opts->async = "io_uring_cmd";
100 | ret = fla_open(&open_opts, &fs);
101 | if (ret) {
102 | printf("Error on open\n");
103 | goto exit;
104 | }
105 |
106 | struct fla_pool_create_arg pool_arg =
107 | {
108 | .flags = 0,
109 | .name = POOL_NAME,
110 | .name_len = strlen(POOL_NAME),
111 | .obj_nlb = obj_nlb,
112 | .strp_nobjs = strp_nobjs,
113 | .strp_nbytes = strp_nbytes,
114 | };
115 |
116 |
117 | ret = fla_pool_create(fs, &pool_arg, &pool);
118 | if (ret) {
119 | printf("Error on pool create\n");
120 | goto close;
121 | }
122 |
123 | srand(getpid());
124 | for(int i = 0 ; i < num_strp_objs ; ++i)
125 | {
126 | int rand_num = rand();
127 | ret = fla_object_create(fs, pool, &obj);
128 | if (ret) {
129 | printf("Object create fails\n");
130 | goto close;
131 | }
132 |
133 | ret = r_w_obj(fs, pool, &obj, num_writes, wrt_nbytes, true, false, rand_num);
134 | if(ret)
135 | goto close;
136 |
137 | ret = r_w_obj(fs, pool, &obj, num_writes, wrt_nbytes, false, verify, rand_num);
138 | if(ret)
139 | goto close;
140 |
141 | ret = fla_object_seal(fs , pool, &obj);
142 | if(ret)
143 | goto close;
144 | }
145 |
146 | close:
147 | fla_close(fs);
148 |
149 | exit:
150 | return ret;
151 | }
152 |
--------------------------------------------------------------------------------
/examples/bw_tester/meson.build:
--------------------------------------------------------------------------------
1 | executable('bw_tester', 'bw_tester.c',
2 | fla_common_files, xnvme_env_files, libflexalloc_files, fla_util_files,
3 | dependencies : xnvme_deps,
4 | include_directories : libflexalloc_header_dirs)
5 |
--------------------------------------------------------------------------------
/fio-ioengine/flexalloc.fio:
--------------------------------------------------------------------------------
1 | # this assumes that a flexalloc file system
2 | # has been created on /dev/ram0
3 | [test]
4 | ioengine=external:/usr/local/lib/x86_64-linux-gnu/libflexalloc-fio-engine.so
5 | rw=randwrite
6 | verify=crc32c
7 | direct=1 # required for buffer alignment
8 | thread=1 # required for sharing FS handles
9 | # threads not required for daemon mode
10 | dev_uri=/dev/ram0 # device where FS resides
11 | #daemon_uri=/tmp/flexalloc.socket # use this instead for daemon mode
12 | filesize=2M # size of each object
13 | nrfiles=100 # number of objects
14 | poolname=testpool
15 |
--------------------------------------------------------------------------------
/fio-ioengine/meson.build:
--------------------------------------------------------------------------------
1 | cc = meson.get_compiler('c')
2 | rt_dep = cc.find_library('rt', required: true)
3 | pconf = import('pkgconfig')
4 | fs = import('fs')
5 |
6 | fio_dir = get_option('fio_source_dir')
7 | if not fs.exists(fio_dir)
8 | error('fio source directory does not exist!')
9 | elif not fs.is_dir(fio_dir)
10 | error('fio_source_dir does not point to a directory!')
11 | elif not fs.exists(fio_dir / 'config-host.h')
12 | error('no `config-host.h` file in fio_source_directory, run `./configure` in fio dir and re-try!')
13 | endif
14 |
15 | flexallocfioe = shared_module(
16 | meson.project_name() + '-fio-engine',
17 | ['flexalloc.c'],
18 | override_options: ['c_std=gnu11'],
19 | include_directories: [
20 | libflexalloc_header_dirs,
21 | include_directories(fio_dir),
22 | ],
23 | dependencies: [rt_dep],
24 | link_with: library.get_static_lib(),
25 | install: true,
26 | c_args: [
27 | '-D_GNU_SOURCE',
28 | '-include', 'config-host.h'
29 | ]
30 | )
31 |
32 | pconf.generate(
33 | libraries: [ flexallocfioe ],
34 | version: meson.project_version(),
35 | variables: [
36 | 'datadir=' + get_option('prefix') / get_option('datadir') / meson.project_name()
37 | ],
38 | name: 'flexalloc-fioe',
39 | filebase: meson.project_name() + '-fio-engine',
40 | description : 'flexalloc fio I/O engine'
41 | )
--------------------------------------------------------------------------------
/flan/meson.build:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SamsungDS/FlexAlloc/137d41eaf2369ac723064be36ceb66aeade3e1c0/flan/meson.build
--------------------------------------------------------------------------------
/meson.build:
--------------------------------------------------------------------------------
1 | project('flexalloc', 'c', default_options : ['c_std=c11'])
2 | cc = meson.get_compiler('c')
3 | pymod = import('python')
4 | #py = pymod.find_installation('python3', modules: ['pip', 'venv'], required: true)
5 |
6 |
7 | add_project_arguments('-DFLEXALLOC_VERBOSITY=' + get_option('FLEXALLOC_VERBOSITY').to_string(), language : 'c')
8 | if get_option('buildtype').startswith('debug')
9 | add_project_arguments('-DDEBUG', language : 'c')
10 | endif
11 | if get_option('FLEXALLOC_XNVME_IGNORE_MDTS')
12 | add_project_arguments('-DFLEXALLOC_XNVME_IGNORE_MDTS', language : 'c')
13 | endif
14 |
15 | ### Dependencies ###
16 | xnvme_deps = dependency('xnvme', version : '>=0.6.0' )
17 |
18 | ### Files ###
19 | libflexalloc_header_dirs = include_directories('./src')
20 | fla_util_files = files('src/flexalloc_util.c')
21 | fla_introspect_files = files('src/flexalloc_introspection.c')
22 | xnvme_env_files = files('src/flexalloc_xnvme_env.c', 'src/flexalloc_xnvme_env.h')
23 | fla_common_files = files('src/flexalloc.c', 'src/flexalloc_mm.c', 'src/flexalloc_hash.c', 'src/flexalloc_bits.c',
24 | 'src/flexalloc_freelist.c', 'src/flexalloc_ll.c', 'src/flexalloc_pool.c', 'src/flexalloc_slabcache.c',
25 | 'src/flexalloc_dp.c', 'src/flexalloc_cs.c', 'src/flexalloc_cs_zns.c', 'src/flexalloc_cs_cns.c',
26 | 'src/flexalloc_dp_fdp.c')
27 | fla_common_set = [fla_common_files, xnvme_env_files, fla_util_files]
28 |
29 | flexalloc_daemon_files = ['src/flexalloc_daemon_base.c']
30 | libflexalloc_files = files('src/libflexalloc.c')
31 | libflexalloc_set = [libflexalloc_files, fla_common_set]
32 |
33 | ### Executables ###
34 | executable('mkfs.flexalloc',
35 | ['src/flexalloc_mkfs.c', 'src/flexalloc_cli_common.c', fla_common_set],
36 | dependencies: xnvme_deps, install : true)
37 | executable('flexalloc_inspect',
38 | ['src/flexalloc_inspect.c', 'src/flexalloc_cli_common.c',
39 | fla_introspect_files, fla_common_set],
40 | dependencies: xnvme_deps)
41 | daemon_exe = executable('flexalloc_daemon',
42 | ['src/flexalloc_daemon.c', fla_common_set, 'src/flexalloc_cli_common.c',
43 | 'src/flexalloc_daemon_base.c', libflexalloc_set],
44 | dependencies: xnvme_deps)
45 | executable('flexalloc_client',
46 | ['src/flexalloc_test_client.c', fla_common_set,
47 | 'src/flexalloc_daemon_base.c', libflexalloc_set],
48 | dependencies: [xnvme_deps])
49 |
50 | ### Libraries ###
51 | library = both_libraries('flexalloc', [libflexalloc_set, flexalloc_daemon_files],
52 | dependencies: xnvme_deps, install : true)
53 |
54 | foreach header_file: [
55 | 'libflexalloc.h',
56 | 'flexalloc_shared.h',
57 | 'flexalloc_daemon_base.h',
58 | ]
59 | install_headers('src' / header_file)
60 | endforeach
61 |
62 | if get_option('fio_source_dir') != ''
63 | subdir('fio-ioengine')
64 | else
65 | message('*** fio engine build: NO ****\n\t `meson --reconfigure -Dfio_source_dir=<path/to/fio/source>` to enable')
66 | endif
67 |
68 | subdir('examples/bw_tester')
69 | ### Tests ###
70 | flexalloc_testing = ['tests/flexalloc_tests_common.c', 'tests/flexalloc_tests_common.h']
71 | libflexalloc_t_files = ['src/libflexalloc_t.c', 'src/libflexalloc_t.h']
72 |
73 | utils_tests = {
74 | # 'rt_hash'
75 | #: {'sources': 'tests/flexalloc_rt_hash.c',
76 | # 'is_parallel': true,
77 | # 'data_files' : ['wlist.txt'],
78 | # 'suite': 'utils'},
79 | 'ut_hash'
80 | : {'sources': 'tests/flexalloc_ut_hash.c',
81 | 'suite': 'utils'},
82 | 'ut_bits'
83 | : {'sources': 'tests/flexalloc_ut_bits.c',
84 | 'suite': 'utils'},
85 | 'ut_freelist'
86 | : {'sources': 'tests/flexalloc_ut_freelist.c',
87 | 'suite': 'utils'}
88 | }
89 |
90 | xnvme_tests = {
91 | 'from_to_storage'
92 | : {'sources': 'tests/flexalloc_rt_xnvme_to_from.c',
93 | 'suite' : 'xnvme'}
94 | }
95 |
96 | core_tests = {
97 | 'ut_slab'
98 | : {'sources': 'tests/flexalloc_ut_slab.c',
99 | 'suite': 'core'},
100 | 'rt_pool'
101 | : {'sources': 'tests/flexalloc_rt_pool.c',
102 | 'suite': 'core'},
103 | 'rt_object_allocate'
104 | : {'sources': 'tests/flexalloc_rt_object_allocate.c',
105 | 'suite': 'core'},
106 | 'rt_object_read_write'
107 | : {'sources': 'tests/flexalloc_rt_object_read_write.c',
108 | 'suite' : 'core'},
109 | 'rt_strp_object_read_write'
110 | : {'sources': 'tests/flexalloc_rt_strp_object_read_write.c',
111 | 'suite' : 'core'},
112 | 'rt_object_unaligned_write'
113 | : {'sources': 'tests/flexalloc_rt_object_unaligned_write.c',
114 | 'suite': 'core'},
115 | 'rt_object_overread_overwrite'
116 | : {'sources': 'tests/flexalloc_rt_object_overread_overwrite.c',
117 | 'suite': 'core'},
118 | 'rt_multi_pool_read_write'
119 | : {'sources': 'tests/flexalloc_rt_multi_pool_read_write.c',
120 | 'suite': 'core'},
121 | }
122 |
123 | lib_tests = {
124 | 'rt_open_close'
125 | : {'sources': 'tests/flexalloc_rt_lib_open_close.c',
126 | 'suite' : 'lib'}
127 | ,'rt_mkfs'
128 | : {'sources': 'tests/flexalloc_rt_mkfs.c',
129 | 'suite' : 'lib'}
130 | }
131 |
132 | suites = [utils_tests, xnvme_tests, core_tests, lib_tests]
133 | c_test_progs = []
134 |
135 | foreach suite : suites
136 | foreach t_name, opts : suite
137 | assert('sources' in opts, 'error in "' + t_name
138 | + '" test entry must set key \'sources\' to a string or array of strings to C source files')
139 |
140 | t_deps = xnvme_deps
141 | t_sources = opts.get('sources', [])
142 | t_opts = {'timeout': 60, 'is_parallel': false, 'suite' : opts.get('suite', 'Default')} \
143 | + opts.get('test_opts', {})
144 | t_link_args = opts.get('link_args', [])
145 | t_exec = 'test_' + opts.get('executable', t_name)
146 | t_exec_files = opts.get('exec_files', [fla_common_set, flexalloc_testing, libflexalloc_set])
147 | t_data_files = opts.get('data_files', [])
148 |
149 | if t_data_files.length() > 0
150 | foreach data_file : t_data_files
151 | configure_file(input: 'tests/'+data_file, output : data_file, copy : 'true')
152 | endforeach
153 | endif
154 | prog = executable(t_exec, [t_exec_files, t_sources],
155 | dependencies: t_deps, link_args : t_link_args, include_directories : 'src')
156 | test(t_name, prog, kwargs : t_opts)
157 | c_test_progs += prog
158 |
159 | test_data = configuration_data()
160 | test_data.set('flex_alloc_test_executable_name', t_exec)
161 |
162 | endforeach
163 | endforeach
164 |
--------------------------------------------------------------------------------
/meson_options.txt:
--------------------------------------------------------------------------------
1 | option('FLEXALLOC_VERBOSITY', type : 'integer', min : 0, max : 1, value : 0)
2 | option('FLEXALLOC_XNVME_IGNORE_MDTS', type : 'boolean', value : false)
3 | option('fio_source_dir', type: 'string', value: '')
--------------------------------------------------------------------------------
/scripts/_set_env.sh:
--------------------------------------------------------------------------------
1 | if [ -z "${MESON_SOURCE_ROOT}" ] || [ -z "${MESON_BUILD_ROOT}" ]; then
2 | echo "variables MESON_BUILD_ROOT and MESON_SOURCE_ROOT must be set before running this script"
3 | exit 1
4 | fi
5 |
6 | PKG_CONFIG_BIN="${PKG_CONFIG_BIN:-pkg-config}"
7 | if [ -z "${PKG_CONFIG_BIN}" ] ; then
8 | echo "Must have pkg-config installed"
9 | exit 1
10 | fi
11 |
12 | XNVME_LIBDIR=$(${PKG_CONFIG_BIN} --variable=libdir xnvme)
13 | if [ $? -ne 0 ] ; then
14 | echo "You must install xnvme with pkg-config support"
15 | exit 1
16 | fi
17 | if [ -n "${XNVME_LIBDIR}" ] ; then
18 | XNVME_LIBDIR="${XNVME_LIBDIR}:"
19 | fi
20 |
21 | # resolve MESON_{SOURCE,BUILD}_ROOT to absolute paths
22 | # this in turn renders all paths derived from these* absolute.
23 | # This, in turn, avoids errors where we change the CWD and execute a command, only to have it
24 | # fail because the (relative) paths to the various commands are then invalid.
25 | # (derived path vars: PY_{SOURCE,BUILD}_ROOT, VIRTUAL_ENV, VENV_{BIN,PY,PIP})
26 | export MESON_SOURCE_ROOT="`readlink -e $MESON_SOURCE_ROOT`"
27 | export MESON_BUILD_ROOT="`readlink -e $MESON_BUILD_ROOT`"
28 |
29 | export PY_SOURCE_ROOT="${MESON_SOURCE_ROOT}/pyflexalloc"
30 | export PY_BUILD_ROOT="${MESON_BUILD_ROOT}/pyflexalloc"
31 | export VIRTUAL_ENV="${PY_SOURCE_ROOT}/.venv"
32 |
33 | PYTHON_BIN="${PYTHON_BIN:-python3}"
34 |
35 | export VENV_BIN="${VIRTUAL_ENV}/bin"
36 | export VENV_PY="${VENV_BIN}/python3"
37 | export VENV_PIP="${VENV_BIN}/pip"
38 |
39 | create_env() {
40 | ${PYTHON_BIN} -m venv "${VIRTUAL_ENV}"
41 | # TODO: move out, install dependencies iff requirements.txt changed
42 | # (possibly nuke and re-create, TBH)
43 | $VENV_PIP install -r "${PY_SOURCE_ROOT}/requirements.txt"
44 | cp "${PY_SOURCE_ROOT}/requirements.txt" "${VIRTUAL_ENV}/requirements.txt"
45 | $VENV_PIP install -r "${PY_SOURCE_ROOT}/requirements.dev.txt"
46 | cp "${PY_SOURCE_ROOT}/requirements.dev.txt" "${VIRTUAL_ENV}/requirements.dev.txt"
47 | }
48 |
49 | # Create virtual environment if:
50 | # * does not exist
51 | # * requirements.txt or requirements.dev.txt have changed since venv was created.
52 | if [ ! -d "${VIRTUAL_ENV}" ]; then
53 | create_env
54 | elif ! diff "${PY_SOURCE_ROOT}/requirements.txt" "${VIRTUAL_ENV}/requirements.txt" >/dev/null 2>&1; then
55 | rm -rf "${VIRTUAL_ENV}"
56 | create_env
57 | elif ! diff "${PY_SOURCE_ROOT}/requirements.dev.txt" "${VIRTUAL_ENV}/requirements.dev.txt"; then
58 | rm -rf "${VIRTUAL_ENV}"
59 | create_env
60 | fi
61 |
62 | export LD_LIBRARY_PATH="${XNVME_LIBDIR}${MESON_BUILD_ROOT}:$LD_LIBRARY_PATH"
63 | export LIBRARY_PATH="${MESON_BUILD_ROOT}:$LIBRARY_PATH"
64 | export PY_BUILD_DIR="${PY_BUILD_ROOT}/build"
65 | export PY_DIST_DIR="${PY_BUILD_ROOT}/dist"
66 |
--------------------------------------------------------------------------------
/scripts/build_python_bindings.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
4 |
5 | set -x
6 | set -e
7 | export MESON_SOURCE_ROOT="${SCRIPT_DIR}/../"
8 | source "${SCRIPT_DIR}/_set_env.sh"
9 |
10 | cd "${PY_SOURCE_ROOT}"
11 |
12 | # dist dir won't automatically be cleaned.
13 | rm -rf "${PY_DIST_DIR}"
14 |
15 | # only clean build dir now before rebuild
16 | # (using it otherwise for running tests etc.)
17 | rm -rf "${PY_BUILD_DIR}"
18 |
19 | # build source package
20 | ${VENV_PY} setup.py sdist -d "${PY_DIST_DIR}"
21 |
22 | # build binary (wheel) package
23 | ${VENV_PY} setup.py bdist_wheel -d "${PY_DIST_DIR}" -b "${PY_BUILD_DIR}" -k
24 |
--------------------------------------------------------------------------------
/scripts/run:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
4 |
5 | set -x
6 | set -e
7 | export MESON_SOURCE_ROOT="${SCRIPT_DIR}/../"
8 | source "${SCRIPT_DIR}/_set_env.sh"
9 |
10 | export PYTHONPATH="${PY_BUILD_DIR}"
11 | export PATH="${VENV_BIN}:${PATH}"
12 | exec "$@"
13 |
--------------------------------------------------------------------------------
/scripts/run_tests.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
4 |
5 | set -x
6 | set -e
7 |
8 | export MESON_SOURCE_ROOT="${SCRIPT_DIR}/../"
9 | source "${SCRIPT_DIR}/_set_env.sh"
10 |
11 | cd "${PY_SOURCE_ROOT}"
12 |
13 | # modify PYTHONPATH to use build directory copy of the module
14 | # Run all tests in:
15 | # MESON_BUILD_ROOT: contains the wrapped C tests
16 | # (pyflexalloc/)tests: contains the python bindings tests
17 | PYTHONPATH="${PY_BUILD_DIR}" ${VENV_BIN}/pytest "${MESON_BUILD_ROOT}" tests
18 |
--------------------------------------------------------------------------------
/scripts/style.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright (C) 2021 Joel Granados
3 | set -euo pipefail
4 |
5 | root_dir=$(dirname $(realpath $0))
6 | pushd ${root_dir} > /dev/null
7 |
8 | hash astyle 2>/dev/null || { echo >&2 "Please install astyle."; exit 1; }
9 |
10 | FILES=$(find ../src ../tests -type f \( -name '*.h' -o -name '*.c' \))
11 |
12 | astyle --options="../.astylerc" ${FILES} | grep "^Formatted"
13 |
14 | popd > /dev/null
15 | exit 0
16 |
--------------------------------------------------------------------------------
/src/flexalloc.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Joel Granados
2 | // Copyright (C) 2021 Jesper Devantier
3 |
--------------------------------------------------------------------------------
/src/flexalloc.h:
--------------------------------------------------------------------------------
1 | /**
2 | * flexalloc disk structures.
3 | *
4 | * Copyright (C) 2021 Jesper Devantier
5 | * Copyright (C) 2021 Joel Granados
6 | * Copyright (C) 2021 Adam Manzanares
7 | *
8 | * @file flexalloc.h
9 | */
10 | #ifndef __FLEXALLOC_H_
11 | #define __FLEXALLOC_H_
12 | #include <stdint.h>
13 | #include <stdbool.h>
14 | #include "flexalloc_shared.h"
15 | #include "flexalloc_freelist.h"
16 | #include "flexalloc_hash.h"
17 | #include "flexalloc_pool.h"
18 | #include "flexalloc_dp_fdp.h"
19 | #include "flexalloc_dp.h"
20 | #include "flexalloc_cs.h"
21 |
22 | /// flexalloc device handle
23 | struct fla_dev
24 | {
25 | struct xnvme_dev *dev;
26 | char * dev_uri;
27 | struct xnvme_dev *md_dev;
28 | char * md_dev_uri;
29 | uint32_t lb_nbytes;
30 | };
31 |
32 | struct fla_slab_flist_cache
33 | {
34 | /// flexalloc system handle
35 | struct flexalloc *_fs;
36 | /// Head of cache array, entry at offset n corresponds to slab with id n
37 | struct fla_slab_flist_cache_elem *_head;
38 | };
39 |
40 |
41 | struct fla_geo_slab_sgmt
42 | {
43 | // # LBs for the whole slab segment
44 | uint32_t slab_sgmt_nlb;
45 | };
46 |
47 | /// Describes flexalloc disk geometry
48 | struct fla_geo
49 | {
50 | /// number of LBAs / disk size
51 | uint64_t nlb;
52 | /// LBA width, in bytes, read from disk
53 | uint32_t lb_nbytes;
54 | /// size of slabs in number of LBAs
55 | uint32_t slab_nlb;
56 | /// number of pools to reserve space for
57 | uint32_t npools;
58 | /// number of slabs
59 | uint32_t nslabs;
60 |
61 | /// number of blocks for fixed portion of metadata
62 | uint32_t md_nlb;
63 | /// blocks needed for pool segment
64 | struct fla_geo_pool_sgmt pool_sgmt;
65 | /// blocks needed for slab segment
66 | struct fla_geo_slab_sgmt slab_sgmt;
67 | };
68 |
69 | struct fla_pools
70 | {
71 | /// pointer to pools freelist bit array
72 | freelist_t freelist;
73 | /// htbl data structure
74 | struct fla_htbl htbl;
75 | /// buffer into which to update the htbl data
76 | struct fla_pool_htbl_header *htbl_hdr_buffer;
77 | /// array of pool entries
78 | struct fla_pool_entry *entries;
79 | /// array of pool_entry functions in memory
80 | struct fla_pool_entry_fnc *entrie_funcs;
81 |
82 | };
83 |
84 | struct fla_slabs
85 | {
86 | struct fla_slab_header *headers;
87 |
88 | uint32_t *fslab_num;
89 | uint32_t *fslab_head;
90 | uint32_t *fslab_tail;
91 | };
92 |
93 | struct fla_dp_fncs
94 | {
95 | int (*init_dp)(struct flexalloc *fs, uint64_t flags);
96 | int (*fini_dp)(struct flexalloc *fs);
97 | int (*prep_dp_ctx)(struct fla_xne_io *xne_io, struct xnvme_cmd_ctx *ctx);
98 | };
99 |
100 | struct fla_dp
101 | {
102 | enum fla_dp_t dp_type;
103 | union
104 | {
105 | struct fla_dp_fdp *fla_dp_fdp;
106 | struct fla_dp_zns *fla_dp_zns;
107 | };
108 |
109 | struct fla_dp_fncs fncs;
110 | };
111 |
112 | /// flexalloc handle
113 | struct flexalloc
114 | {
115 | struct fla_dev dev;
116 | unsigned int state;
117 | /// buffer holding all the disk-wide flexalloc metadata
118 | ///
119 | /// NOTE: allocated as an IO buffer.
120 | void *fs_buffer;
121 |
122 | struct fla_slab_flist_cache slab_cache;
123 |
124 | struct fla_geo geo;
125 |
126 | struct fla_super *super;
127 | struct fla_pools pools;
128 | struct fla_slabs slabs;
129 | struct fla_dp fla_dp;
130 | struct fla_cs fla_cs;
131 |
132 | struct fla_fns fns;
133 |
134 | /// pointer for the application to associate additional data
135 | void *user_data;
136 | };
137 |
138 | uint32_t
139 | fla_calc_objs_in_slab(struct flexalloc const * fs, uint32_t const obj_nlb);
140 | #endif // __FLEXALLOC_H_
141 |
--------------------------------------------------------------------------------
/src/flexalloc_bits.c:
--------------------------------------------------------------------------------
1 | #include "flexalloc_bits.h"
2 |
3 | uint32_t
4 | ntz(uint32_t x)
5 | {
6 | /*
7 | * Count number of trailing zero bits.
8 | * Aside from the easy case (x == 0, -> 32 'trailing' zeros), we count the
9 | * trailing bits in decreasing batch sizes (16, 8, 4, 2, 1). Note we shift out
10 | * matched sequence before proceeding to match on the smaller batch.
11 | * We first check the easy case (x == 0), then proceed to count.
12 | */
13 | uint32_t n;
14 | if (x == 0) return 32u;
15 | n = 1u;
16 | if ((x & 0x0000FFFF) == 0)
17 | {
18 | n += 16u;
19 | x >>= 16;
20 | }
21 | if ((x & 0x000000FF) == 0)
22 | {
23 | n += 8u;
24 | x >>= 8;
25 | }
26 | if ((x & 0x0000000F) == 0)
27 | {
28 | n += 4u;
29 | x >>= 4;
30 | }
31 | if ((x & 0x00000003) == 0)
32 | {
33 | n += 2u;
34 | x >>= 2;
35 | }
36 | // count last bit
37 | return n - (x & 1);
38 | }
39 |
40 | uint32_t
41 | count_set_bits(uint32_t val)
42 | {
43 | // Count number of set bits, following algorithm from 'Hacker's Delight' book.
44 | val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
45 | val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
46 | val = (val & 0x0F0F0F0F) + ((val >> 4) & 0x0F0F0F0F);
47 | val = (val & 0x00FF00FF) + ((val >> 8) & 0x00FF00FF);
48 | return (val & 0x0000FFFF) + ((val >> 16) & 0x0000FFFF);
49 | }
50 |
--------------------------------------------------------------------------------
/src/flexalloc_bits.h:
--------------------------------------------------------------------------------
1 | #ifndef __FLEXALLOC_BITS_H_
2 | #define __FLEXALLOC_BITS_H_
3 |
4 | #include <stdint.h>
5 |
6 | /**
7 | * Count number of trailing zero bits.
8 | *
9 | * Given a 4B/32b value, count the number of trailing zero bits before
10 | * encountering the first 1-bit.
11 | *
12 | * Examples:
13 | * - ntz(0) == 32
14 | * - ntz(~0) == 0
15 | * - ntz(4) == 2
16 | */
17 | uint32_t
18 | ntz(uint32_t x);
19 |
20 | /**
21 | * Count number of set bits.
22 | *
23 | * Given a 4B/32b value, count the number of set bits.
24 | *
25 | * Examples:
26 | * - count_set_bits(0) == 0
27 | * - count_set_bits(~0) == 32
28 | * - count_set_bits(7) == 3
29 | */
30 | uint32_t
31 | count_set_bits(uint32_t val);
32 |
33 | #endif // __FLEXALLOC_BITS_H_
34 |
--------------------------------------------------------------------------------
/src/flexalloc_cli_common.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include "flexalloc_cli_common.h"
3 |
4 | #define ARG_BUFLEN 384
5 |
6 | static void
7 | fmt_arg(char *buf, size_t buflen, struct cli_option *o)
8 | {
9 | if (o->base.has_arg)
10 | {
11 | sprintf(buf, "--%s=%s", o->base.name, o->arg_ex);
12 | }
13 | else
14 | {
15 | sprintf(buf, "--%s", o->base.name);
16 | }
17 | }
18 |
19 | void
20 | print_options(struct cli_option *options)
21 | {
22 | char buf[ARG_BUFLEN];
23 | int longest_arg = 0;
24 | struct cli_option *o;
25 |
26 | fprintf(stdout, "Options:\n");
27 |
28 | for(o = options; o->base.name; o++)
29 | {
30 | char *end = buf;
31 | int arg_len;
32 |
33 | fmt_arg(buf, ARG_BUFLEN, o);
34 | while (*end != '\0')
35 | end++;
36 | arg_len = end - buf;
37 |
38 | if (arg_len > longest_arg)
39 | longest_arg = arg_len;
40 | }
41 |
42 | for(o = options; o->base.name; o++)
43 | {
44 | fmt_arg(buf, ARG_BUFLEN, o);
45 | fprintf(stdout, " -%c, %-*s\t%s\n", o->base.val, longest_arg, buf, o->description);
46 | }
47 | fprintf(stdout, "\n\n");
48 | }
49 |
--------------------------------------------------------------------------------
/src/flexalloc_cli_common.h:
--------------------------------------------------------------------------------
1 | #ifndef __FLEXALLOC_CLI_COMMON_H_
2 | #define __FLEXALLOC_CLI_COMMON_H_
3 | #include <getopt.h>
4 | #include <stddef.h>
5 |
6 | struct cli_option
7 | {
8 | struct option base;
9 | char *description;
10 | char *arg_ex;
11 | };
12 |
13 | void
14 | print_options(struct cli_option *options);
15 |
16 | #endif // __FLEXALLOC_CLI_COMMON_H_
17 |
--------------------------------------------------------------------------------
/src/flexalloc_cs.c:
--------------------------------------------------------------------------------
1 | #include "flexalloc_cs.h"
2 | #include "flexalloc_util.h"
3 | #include "flexalloc_cs_cns.h"
4 | #include "flexalloc_cs_zns.h"
5 | #include "flexalloc.h"
6 | #include <libxnvme.h>
7 |
8 | int
9 | fla_cs_type(struct xnvme_dev const *dev, enum fla_cs_t *cs_t)
10 | {
11 | struct xnvme_geo const *geo = xnvme_dev_get_geo(dev);
12 | switch (geo->type)
13 | {
14 | case XNVME_GEO_ZONED:
15 | *cs_t = FLA_CS_ZNS;
16 | return 0;
17 | case XNVME_GEO_CONVENTIONAL:
18 | *cs_t = FLA_CS_CNS;
19 | return 0;
20 | default:
21 | FLA_ERR(1, "Unsuported Command Set %d\n", geo->type);
22 | return 1;
23 | }
24 | }
25 |
26 | int
27 | fla_init_cs(struct flexalloc *fs)
28 | {
29 | int err = fla_cs_type(fs->dev.dev, &fs->fla_cs.cs_t);
30 | if (FLA_ERR(err, "fla_cs_type()"))
31 | return err;
32 |
33 | switch (fs->fla_cs.cs_t)
34 | {
35 | case FLA_CS_CNS:
36 | err = fla_cs_cns_init(fs, 0);
37 | FLA_ERR(err, "fla_cs_cns_init()");
38 | break;
39 | case FLA_CS_ZNS:
40 | err = fla_cs_zns_init(fs, 0);
41 | FLA_ERR(err, "fla_cs_zns_init()");
42 | break;
43 | default:
44 | err = 1;
45 | FLA_ERR(err, "Unsuported Command set %d\n", fs->fla_cs.cs_t);
46 | }
47 |
48 | return err;
49 | }
50 |
51 | int
52 | fla_cs_geo_check(struct xnvme_dev const *dev, struct fla_geo const *geo)
53 | {
54 | uint64_t nzsect = fla_xne_dev_znd_sect(dev);
55 |
56 | enum xnvme_geo_type geo_type = fla_xne_dev_type(dev);
57 | if (geo_type == XNVME_GEO_ZONED && geo->slab_nlb % nzsect)
58 | {
59 | FLA_ERR_PRINTF("Slab size :%"PRIu32" not multiple of zone sz:%"PRIu64"\n",
60 | geo->slab_nlb, nzsect);
61 | return -1;
62 | }
63 |
64 | return 0;
65 | }
66 |
67 | bool
68 | fla_cs_is_type(struct flexalloc const *fs, enum fla_cs_t const cs_t)
69 | {
70 | return fs->fla_cs.cs_t == cs_t;
71 | }
72 |
--------------------------------------------------------------------------------
/src/flexalloc_cs.h:
--------------------------------------------------------------------------------
1 | #ifndef __FLEXALLOC_CS_H_
2 | #define __FLEXALLOC_CS_H_
3 | #include <stdint.h>
4 | #include <stdbool.h>
5 | #include <libxnvme.h>
6 | struct flexalloc;
7 | struct fla_geo;
8 | struct fla_pool;
9 | struct fla_object;
10 |
11 | struct fla_cs_fncs
12 | {
13 | int (*init_cs)(struct flexalloc *fs, const uint64_t flags);
14 | int (*fini_cs)(struct flexalloc *fs, const uint64_t flags);
15 | int (*check_pool)(struct flexalloc *fs, uint32_t const obj_nlb);
16 | int (*slab_offset)(struct flexalloc const *fs, uint32_t const slab_id,
17 | uint64_t const slabs_base, uint64_t *slab_offset);
18 | int (*object_seal)(struct flexalloc *fs, struct fla_pool const *pool_handle,
19 | struct fla_object *obj);
20 | int (*object_destroy)(struct flexalloc *fs, struct fla_pool const *pool_handle,
21 | struct fla_object *obj);
22 | };
23 |
24 | enum fla_cs_t
25 | {
26 | FLA_CS_ZNS,
27 | FLA_CS_CNS
28 | };
29 |
30 | struct fla_cs
31 | {
32 | enum fla_cs_t cs_t;
33 | union
34 | {
35 | struct fla_cs_cns *fla_cs_cns;
36 | struct fla_cs_zns *fla_cs_zns;
37 | };
38 |
39 | struct fla_cs_fncs fncs;
40 | };
41 |
42 | int
43 | fla_init_cs(struct flexalloc *fs);
44 |
45 | int
46 | fla_cs_geo_check(struct xnvme_dev const *dev, struct fla_geo const *geo);
47 |
48 | bool
49 | fla_cs_is_type(struct flexalloc const *fs, enum fla_cs_t const cs_t);
50 |
51 | #endif // __FLEXALLOC_CS_H_
52 |
--------------------------------------------------------------------------------
/src/flexalloc_cs_cns.c:
--------------------------------------------------------------------------------
1 | #include "flexalloc_cs_cns.h"
2 |
3 | int
4 | fla_cs_cns_pool_check(struct flexalloc *fs, uint32_t const obj_nlb)
5 | {
6 | return 0;
7 | }
8 |
9 | int
10 | fla_cs_cns_slab_offset(struct flexalloc const *fs, uint32_t const slab_id,
11 | uint64_t const slabs_base, uint64_t *slab_offset)
12 | {
13 | *slab_offset = slabs_base + (slab_id * fs->geo.slab_nlb);
14 | return 0;
15 | }
16 |
17 | int
18 | fla_cs_cns_object_seal(struct flexalloc *fs, struct fla_pool const *pool_handle,
19 | struct fla_object *obj)
20 | {
21 | return 0;
22 | }
23 |
24 | int
25 | fla_cs_cns_object_destroy(struct flexalloc *fs, struct fla_pool const *pool_handle,
26 | struct fla_object *obj)
27 | {
28 | return 0;
29 | }
30 |
31 | int
32 | fla_cs_cns_fini(struct flexalloc *fs, const uint64_t flags)
33 | {
34 | return 0;
35 | }
36 |
37 | int
38 | fla_cs_cns_init(struct flexalloc *fs, const uint64_t flags)
39 | {
40 | fs->fla_cs.fncs.init_cs = fla_cs_cns_init;
41 | fs->fla_cs.fncs.fini_cs = fla_cs_cns_fini;
42 | fs->fla_cs.fncs.check_pool = fla_cs_cns_pool_check;
43 | fs->fla_cs.fncs.slab_offset = fla_cs_cns_slab_offset;
44 | fs->fla_cs.fncs.object_seal = fla_cs_cns_object_seal;
45 | fs->fla_cs.fncs.object_destroy = fla_cs_cns_object_destroy;
46 | return 0;
47 | }
48 |
49 |
50 |
--------------------------------------------------------------------------------
/src/flexalloc_cs_cns.h:
--------------------------------------------------------------------------------
1 | #ifndef __FLEXALLOC_CS_CNS_H
2 | #define __FLEXALLOC_CS_CNS_H
3 |
4 | #include "flexalloc.h"
5 | struct fla_cs_cns
6 | {
7 | int dummy;
8 | };
9 |
10 | int fla_cs_cns_pool_check(struct flexalloc *fs, uint32_t const obj_nlb);
11 | int fla_cs_cns_slab_offset(struct flexalloc const *fs, uint32_t const slab_id,
12 | uint64_t const slabs_base, uint64_t *slab_offset);
13 | int fla_cs_cns_object_seal(struct flexalloc *fs, struct fla_pool const *pool_handle,
14 | struct fla_object *obj);
15 | int fla_cs_cns_object_destroy(struct flexalloc *fs, struct fla_pool const *pool_handle,
16 | struct fla_object *obj);
17 | int fla_cs_cns_init(struct flexalloc *fs, const uint64_t flags);
18 | int fla_cs_cns_fini(struct flexalloc *fs, const uint64_t flags);
19 |
20 | #endif // __FLEXALLOC_CS_CNS_H
21 |
22 |
--------------------------------------------------------------------------------
/src/flexalloc_cs_zns.c:
--------------------------------------------------------------------------------
1 | #include "flexalloc_cs_zns.h"
2 | #include "flexalloc_mm.h"
3 |
4 | static int
5 | fla_znd_manage_zones_object_finish(struct flexalloc *fs, struct fla_pool const *pool_handle,
6 | struct fla_object *obj)
7 | {
8 | int err = 0;
9 | uint64_t obj_slba = fla_object_slba(fs, obj, pool_handle);
10 | struct fla_pool_entry *pool_entry = &fs->pools.entries[pool_handle->ndx];
11 | struct fla_pool_entry_fnc const * pool_entry_fnc = (fs->pools.entrie_funcs + pool_handle->ndx);
12 | uint32_t num_fla_objs = pool_entry_fnc->fla_pool_num_fla_objs(pool_entry);
13 |
14 | for (uint32_t fla_obj = 0; fla_obj < num_fla_objs; fla_obj++)
15 | {
16 | err |= fla_xne_dev_znd_send_mgmt(fs->dev.dev,
17 | obj_slba + (fs->fla_cs.fla_cs_zns->nzsect * fla_obj),
18 | XNVME_SPEC_ZND_CMD_MGMT_SEND_FINISH, false);
19 | }
20 | FLA_ERR(err, "fla_xne_dev_znd_send_mgmt_finish()");
21 | return err;
22 | }
23 |
24 | static int
25 | fla_znd_manage_zones_object_reset(struct flexalloc *fs, struct fla_pool const *pool_handle,
26 | struct fla_object * obj)
27 | {
28 | int err = 0;
29 | uint64_t obj_slba = fla_object_slba(fs, obj, pool_handle);
30 | struct fla_pool_entry *pool_entry = &fs->pools.entries[pool_handle->ndx];
31 | struct fla_pool_entry_fnc const * pool_entry_fnc = (fs->pools.entrie_funcs + pool_handle->ndx);
32 | uint32_t num_fla_objs = pool_entry_fnc->fla_pool_num_fla_objs(pool_entry);
33 |
34 | for (uint32_t fla_obj = 0; fla_obj < num_fla_objs; fla_obj++)
35 | {
36 | err |= fla_xne_dev_znd_send_mgmt(fs->dev.dev,
37 | obj_slba + (fs->fla_cs.fla_cs_zns->nzsect * fla_obj),
38 | XNVME_SPEC_ZND_CMD_MGMT_SEND_RESET, false);
39 | }
40 | FLA_ERR(err, "fla_xne_dev_znd_send_mgmt_reset()");
41 | return err;
42 | }
43 |
44 | int
45 | fla_cs_zns_slab_offset(struct flexalloc const *fs, uint32_t const slab_id,
46 | uint64_t const slabs_base, uint64_t *slab_offset)
47 | {
48 | int err = fla_cs_cns_slab_offset(fs, slab_id, slabs_base, slab_offset);
49 |   if (FLA_ERR(err, "fla_cs_cns_slab_offset()"))
50 | return err;
51 |
52 | if (*slab_offset % fs->fla_cs.fla_cs_zns->nzsect)
53 | *slab_offset += (*slab_offset % fs->fla_cs.fla_cs_zns->nzsect);
54 |
55 | return 0;
56 | }
57 |
58 | int
59 | fla_cs_zns_init(struct flexalloc *fs, uint64_t const flags)
60 | {
61 | fs->fla_cs.fla_cs_zns = malloc(sizeof(struct fla_cs_zns));
62 | if (FLA_ERR(!fs->fla_cs.fla_cs_zns, "malloc()"))
63 | return -ENOMEM;
64 | fs->fla_cs.fla_cs_zns->nzones = fla_xne_dev_znd_zones(fs->dev.dev);
65 | fs->fla_cs.fla_cs_zns->nzsect = fla_xne_dev_znd_sect(fs->dev.dev);
66 |
67 | fs->fla_cs.fncs.init_cs = fla_cs_zns_init;
68 | fs->fla_cs.fncs.fini_cs = fla_cs_zns_fini;
69 | fs->fla_cs.fncs.check_pool = fla_cs_zns_pool_check;
70 | fs->fla_cs.fncs.slab_offset = fla_cs_zns_slab_offset;
71 | fs->fla_cs.fncs.object_seal = fla_cs_zns_object_seal;
72 | fs->fla_cs.fncs.object_destroy = fla_cs_zns_object_destroy;
73 | return 0;
74 | }
75 |
76 | int
77 | fla_cs_zns_fini(struct flexalloc *fs, uint64_t const flags)
78 | {
79 | free(fs->fla_cs.fla_cs_zns);
80 | return 0;
81 | }
82 |
83 | int
84 | fla_cs_zns_pool_check(struct flexalloc *fs, uint32_t const obj_nlb)
85 | {
86 | if (FLA_ERR(obj_nlb != fs->fla_cs.fla_cs_zns->nzsect,
87 | "object size != formated zone size"))
88 | return 1;
89 | return 0;
90 | }
91 |
92 | int
93 | fla_cs_zns_object_seal(struct flexalloc *fs, struct fla_pool const *pool_handle,
94 | struct fla_object *obj)
95 | {
96 | return fla_znd_manage_zones_object_finish(fs, pool_handle, obj);
97 | }
98 |
99 | int
100 | fla_cs_zns_object_destroy(struct flexalloc *fs, struct fla_pool const *pool_handle,
101 | struct fla_object *obj)
102 | {
103 | return fla_znd_manage_zones_object_reset(fs, pool_handle, obj);
104 | }
105 |
--------------------------------------------------------------------------------
/src/flexalloc_cs_zns.h:
--------------------------------------------------------------------------------
1 | #ifndef __FLEXALLOC_CS_ZNS_H
2 | #define __FLEXALLOC_CS_ZNS_H
3 |
4 | #include "flexalloc.h"
5 | #include "flexalloc_util.h"
6 | #include "flexalloc_cs_cns.h"
7 |
8 | struct fla_cs_zns
9 | {
10 | /// Number of zones
11 | uint32_t nzones;
12 | /// Number of sectors in zone
13 | uint64_t nzsect;
14 | };
15 |
16 | int fla_cs_zns_init(struct flexalloc *fs, uint64_t const flags);
17 | int fla_cs_zns_fini(struct flexalloc *fs, uint64_t const flags);
18 | int fla_cs_zns_pool_check(struct flexalloc *fs, uint32_t const obj_nlb);
19 | int fla_cs_zns_slab_offset(struct flexalloc const *fs, uint32_t const slab_id,
20 | uint64_t const slabs_base, uint64_t *slab_offset);
21 | int fla_cs_zns_object_seal(struct flexalloc *fs, struct fla_pool const *pool_handle,
22 | struct fla_object *obj);
23 | int fla_cs_zns_object_destroy(struct flexalloc *fs, struct fla_pool const *pool_handle,
24 | struct fla_object *obj);
25 |
26 | #endif // __FLEXALLOC_CS_ZNS_H
27 |
--------------------------------------------------------------------------------
/src/flexalloc_daemon.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Jesper Devantier
2 | #include "flexalloc_daemon_base.h"
3 | #include "src/flexalloc_cli_common.h"
4 | #include "src/flexalloc_util.h"
5 | #include "libflexalloc.h"
6 | #include <getopt.h>
7 | #include <inttypes.h>
8 | #include <signal.h>
9 | #include <stdio.h>
10 |
11 | #define MAX_CLIENTS 110
12 | #define MAX_CONN_QUEUE 100
13 |
14 | volatile sig_atomic_t keep_running = 1;
15 |
16 | // TODO: refactor fla_open() to do what fla_open_common does (not allocate)
17 | int
18 | fla_open_common(char const *dev_uri, struct flexalloc *fs);
19 |
20 | // TODO: install other signal handler for next CTRL-C
21 | static void
22 | sigint_handler(int _)
23 | {
24 | (void)_;
25 | keep_running = 0;
26 | fprintf(stderr, "SIGINT caught, gracefully shutting down daemon...\n");
27 | }
28 |
29 | int
30 | msg_handler(struct fla_daemon *d, int client_fd, struct fla_msg const * const recv,
31 | struct fla_msg const * const send)
32 | {
33 | FLA_DBG_PRINTF("received msg hdr: {cmd: %"PRIu32", len: %"PRIu32"}\n", recv->hdr->cmd,
34 | recv->hdr->len);
35 | switch (recv->hdr->cmd)
36 | {
37 | case FLA_MSG_CMD_OBJECT_OPEN:
38 | if (FLA_ERR(fla_daemon_object_open_rsp(d, client_fd, recv, send), "fla_daemon_object_open()"))
39 | return -1;
40 | break;
41 | case FLA_MSG_CMD_OBJECT_CREATE:
42 | if (FLA_ERR(fla_daemon_object_create_rsp(d, client_fd, recv, send), "fla_daemon_object_create()"))
43 | return -1;
44 | break;
45 | case FLA_MSG_CMD_OBJECT_DESTROY:
46 | if (FLA_ERR(fla_daemon_object_destroy_rsp(d, client_fd, recv, send), "fla_daemon_object_destroy()"))
47 | return -1;
48 | break;
49 | case FLA_MSG_CMD_POOL_OPEN:
50 | if (FLA_ERR(fla_daemon_pool_open_rsp(d, client_fd, recv, send), "fla_daemon_pool_open()"))
51 | return -1;
52 | break;
53 | case FLA_MSG_CMD_POOL_CREATE:
54 | if (FLA_ERR(fla_daemon_pool_create_rsp(d, client_fd, recv, send), "fla_daemon_pool_create()"))
55 | return -1;
56 | break;
57 | case FLA_MSG_CMD_POOL_DESTROY:
58 | if (FLA_ERR(fla_daemon_pool_destroy_rsp(d, client_fd, recv, send), "fla_daemon_pool_destroy()"))
59 | return -1;
60 | break;
61 | case FLA_MSG_CMD_SYNC:
62 | if (FLA_ERR(fla_daemon_sync_rsp(d, client_fd, recv, send), "fla_daemon_sync_rsp()"))
63 | return -1;
64 | break;
65 | case FLA_MSG_CMD_SYNC_NO_RSPS:
66 | if (FLA_ERR(fla_daemon_sync_rsp(d, client_fd, recv, NULL), "fla_daemon_sync_rsp()"))
67 | return -1;
68 | break;
69 | case FLA_MSG_CMD_POOL_GET_ROOT_OBJECT:
70 | if (FLA_ERR(fla_daemon_pool_get_root_object_rsp(d, client_fd, recv, send),
71 | "fla_daemon_pool_get_root_object()"))
72 | return -1;
73 | break;
74 | case FLA_MSG_CMD_POOL_SET_ROOT_OBJECT:
75 | if (FLA_ERR(fla_daemon_pool_set_root_object_rsp(d, client_fd, recv, send),
76 | "fla_daemon_pool_set_root_object()"))
77 | return -1;
78 | break;
79 | case FLA_MSG_CMD_IDENTIFY:
80 | if (FLA_ERR(fla_daemon_identify_rsp(d, client_fd, recv, send), "fla_daemon_identify_rsp()"))
81 | return -1;
82 | break;
83 | case FLA_MSG_CMD_INIT_INFO:
84 | if (FLA_ERR(fla_daemon_fs_init_rsp(d, client_fd, recv, send), "fla_daemon_init_info()"))
85 | return -1;
86 | break;
87 | default:
88 | FLA_ERR_PRINTF("socket %d: malformed message, msg cmd %"PRIu32"\n", client_fd, recv->hdr->cmd);
89 | return -1;
90 | }
91 | return 0;
92 | }
93 |
94 | static struct cli_option options[] =
95 | {
96 | {
97 | .base = {"socket", required_argument, NULL, 's'},
98 | .description = "path of where to create the UNIX socket",
99 | .arg_ex = "PATH"
100 | },
101 | {
102 | .base = {"device", required_argument, NULL, 'd'},
103 | .description = "path of device containing the FlexAlloc system",
104 | .arg_ex = "DEVICE"
105 | },
106 | {
107 | .base = {"md_device", optional_argument, NULL, 'm'},
108 | .description = "path to metadata device\n",
109 | .arg_ex = "DEVICE"
110 | },
111 | {
112 | .base = {NULL, 0, NULL, 0}
113 | }
114 | };
115 |
116 | void
117 | fla_daemon_usage()
118 | {
119 | fprintf(stdout, "Usage: flexalloc_daemon [options]\n\n");
120 | fprintf(stdout, "Provide mediated access to a FlexAlloc system via a daemon\n\n");
121 | print_options(options);
122 | }
123 |
124 | int
125 | main(int argc, char **argv)
126 | {
127 | int err = 0;
128 | int c;
129 | int opt_idx = 0;
130 | char *socket_path = NULL;
131 | char *device = NULL;
132 | char *md_device = NULL;
133 | struct fla_daemon daemon;
134 | int const n_opts = sizeof(options)/sizeof(struct cli_option);
135 | struct option long_options[n_opts];
136 | struct fla_open_opts fla_oopts = {0};
137 |
138 |   for (int i=0; i<n_opts; i++)
--------------------------------------------------------------------------------
/src/flexalloc_dp.c:
--------------------------------------------------------------------------------
12 |   err = fla_dp_type(fs, &fs->fla_dp.dp_type);
13 | if (FLA_ERR(err, "fla_dp_type()"))
14 | return err;
15 |
16 | switch (fs->fla_dp.dp_type)
17 | {
18 | case FLA_DP_FDP:
19 | err = fla_dp_fdp_init(fs, 0);
20 | break;
21 | case FLA_DP_DEFAULT:
22 | err = fla_dp_noop_init(fs, 0);
23 | FLA_ERR(err, "fla_dp_noop_init()");
24 |
25 | break;
26 | case FLA_DP_ZNS:
27 | default:
28 | err = 1;
29 | FLA_ERR(err, "Invalid data placement type.");
30 | }
31 |
32 | return err;
33 | }
34 |
35 | static bool
36 | fla_dp_fdp_supported(struct xnvme_spec_idfy_ctrlr *idfy_ctrl)
37 | {
38 | return idfy_ctrl->ctratt.val & (1 << 16);
39 | }
40 |
41 | static bool
42 | fla_dp_fdp_enabled(uint32_t const dw0)
43 | {
44 | return dw0 & (1 << 0);
45 | }
46 |
47 | int
48 | fla_dp_type(struct flexalloc *fs, enum fla_dp_t *dp_t)
49 | {
50 | int err;
51 | struct xnvme_spec_idfy idfy_ctrl = {0};
52 | const struct xnvme_spec_idfy_ns * idfy_ns = NULL;
53 | uint32_t dw0;
54 |
55 | err = fla_xne_ctrl_idfy(fs->dev.dev, &idfy_ctrl);
56 | if (FLA_ERR(err, "fla_xne_ctrl_idfy()"))
57 | return err;
58 |
59 | if (fla_dp_fdp_supported(&idfy_ctrl.ctrlr))
60 | {
61 | idfy_ns = xnvme_dev_get_ns(fs->dev.dev);
62 |     if ((err = FLA_ERR(idfy_ns == NULL, "xnvme_dev_get_ns()")))
63 | return err;
64 | err = fla_xne_feat_idfy(fs->dev.dev, idfy_ns->endgid, &dw0);
65 | if (FLA_ERR(err, "fla_xne_feat_idfy()"))
66 | return err;
67 | if(fla_dp_fdp_enabled(dw0))
68 | {
69 | *dp_t = FLA_DP_FDP;
70 | return 0;
71 | }
72 | }
73 |
74 |   /* If no placement scheme is detected, fall back to the default */
75 | *dp_t = FLA_DP_DEFAULT;
76 | return 0;
77 | }
78 |
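For reference, the two capability tests fla_dp_type() builds on are plain bit checks: bit 16 of the controller CTRATT field advertises FDP support, and bit 0 of the FDP feature's dword 0 reports whether it is enabled for the namespace's endurance group. A standalone illustration with fabricated register values:

/* Standalone illustration of the bit tests used by fla_dp_type(). The ctratt
 * and dw0 values below are invented; real code reads them via xNVMe. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool fdp_supported(uint32_t ctratt) { return ctratt & (1u << 16); }
static bool fdp_enabled(uint32_t dw0)      { return dw0 & 1u; }

int
main(void)
{
  uint32_t ctratt = 1u << 16; /* pretend the controller sets the FDP attribute bit */
  uint32_t dw0 = 0x1;         /* pretend FDP is enabled for the endurance group */

  if (fdp_supported(ctratt) && fdp_enabled(dw0))
    printf("would select FLA_DP_FDP\n");
  else
    printf("would fall back to FLA_DP_DEFAULT\n");
  return 0;
}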
--------------------------------------------------------------------------------
/src/flexalloc_dp.h:
--------------------------------------------------------------------------------
1 | #ifndef __FLEXALLOC_DP_H
2 | #define __FLEXALLOC_DP_H
3 | #include "flexalloc_xnvme_env.h"
4 |
5 | struct flexalloc;
6 |
7 | enum fla_dp_t
8 | {
9 | FLA_DP_FDP,
10 | FLA_DP_ZNS,
11 | FLA_DP_DEFAULT
12 | };
13 |
14 | int fla_dp_type(struct flexalloc *fs, enum fla_dp_t *dp_t);
15 | int fla_init_dp(struct flexalloc *fs);
16 |
17 | #endif // __FLEXALLOC_DP_H
18 |
--------------------------------------------------------------------------------
/src/flexalloc_dp_fdp.c:
--------------------------------------------------------------------------------
1 | #include "flexalloc_dp_fdp.h"
2 | #include "flexalloc_util.h"
3 | #include "flexalloc.h"
4 | #include "flexalloc_xnvme_env.h"
5 | #include "flexalloc_freelist.h"
6 | #include <errno.h>
7 | #include <stdlib.h>
8 |
9 | int
10 | fla_fdp_get_placement_identifier(uint32_t *pid, struct fla_dp_fdp *fdp)
11 | {
12 | int err = fla_flist_entries_alloc(fdp->free_pids, 1);
13 | if(FLA_ERR(err < 0, "fla_flist_entries_alloc()"))
14 | return err;
15 |
16 | //pid = fdp->pids + err;
17 |
18 | return err;
19 | }
20 |
21 | int
22 | fla_fdp_get_pid_n(struct xnvme_dev * dev, uint32_t *pid, const int npid)
23 | {
24 | int err;
25 | uint32_t *pids;
26 |
27 | pids = fla_xne_alloc_buf(dev, sizeof(uint32_t) * npid);
28 | if (FLA_ERR(!pids, "fla_xne_alloc_buf()"))
29 | return -errno;
30 |
31 | err = fla_xne_get_usable_pids(dev, npid, &pids);
32 | if (FLA_ERR(err, "fla_xne_get_usable_pids()"))
33 | return err;
34 |
35 | for (int i = 0; i < npid; ++i)
36 | *(pid + i) = *(pids + i);
37 |
38 | fla_xne_free_buf(dev, pids);
39 | return 0;
40 | }
41 |
42 | static int
43 | fla_fdp_onwrite_md_prep_ctx(struct fla_xne_io *xne_io, struct xnvme_cmd_ctx *ctx)
44 | {
45 | struct fla_dp_fdp* fla_dp_fdp = xne_io->fla_dp->fla_dp_fdp;
46 | ctx->cmd.nvm.cdw13.dspec = fla_dp_fdp->md_pid;
47 | return 0;
48 | }
49 |
50 | static int
51 | fla_fdp_onwrite_prep_ctx(struct fla_xne_io *xne_io, struct xnvme_cmd_ctx *ctx)
52 | {
53 | uint32_t pid;
54 | int err = fla_fdp_get_pid_n(xne_io->dev, &pid, 1);
55 | if (FLA_ERR(err, "fla_fdp_get_pid_n()"))
56 | return err;
57 | ctx->cmd.nvm.cdw13.dspec = pid;
58 | ctx->cmd.nvm.dtype = 2;
59 |
60 | return 0;
61 | }
62 |
63 | static int
64 | fla_fdp_get_id(const uint32_t ndx, va_list ag)
65 | {
66 | uint32_t *pid = va_arg(ag, uint32_t*);
67 | uint32_t *fla_id = va_arg(ag, uint32_t*);
68 | struct fla_dp_fdp * fdp = va_arg(ag, struct fla_dp_fdp*);
69 |
70 | if(*fla_id == (fdp->pids + ndx)->fla_id)
71 | {
72 | *pid = (fdp->pids + ndx)->pid;
73 | return 1;
74 | }
75 |
76 | return 0;
77 | }
78 |
79 | static int
80 | fla_fdp_cached_prep_ctx(struct fla_xne_io *xne_io, struct xnvme_cmd_ctx *ctx)
81 | {
82 | struct fla_dp_fdp* fdp = xne_io->fla_dp->fla_dp_fdp;
83 | struct fla_dp_fdp_pid_to_id *pid_to_id;
84 | uint32_t fla_id, pid, found = 0;
85 | int ret;
86 |
87 | switch (xne_io->io_type)
88 | {
89 | case FLA_IO_DATA_READ:
90 | case FLA_IO_MD_READ:
91 | return 0;
92 | case FLA_IO_MD_WRITE:
93 | return fla_fdp_onwrite_md_prep_ctx(xne_io, ctx);
94 | case FLA_IO_DATA_WRITE:
95 | switch (fdp->ctx_set)
96 | {
97 | case FLA_DP_FDP_ON_SLAB:
98 | fla_id = xne_io->obj_handle->slab_id;
99 | break;
100 | case FLA_DP_FDP_ON_POOL:
101 | fla_id = xne_io->pool_handle->ndx;
102 | break;
103 | case FLA_DP_FDP_ON_OBJECT:
104 | fla_id = xne_io->obj_handle->entry_ndx;
105 | break;
106 | case FLA_DP_FDP_ON_WRITE:
107 | /* ctx should be handled by fla_fdp_onwrite_prep_ctx */
108 | default:
109 | FLA_ERR(1, "fla_fdp_cached_prep_ctx()");
110 | return -EINVAL;
111 | }
112 | }
113 |
114 | ret = fla_flist_search_wfunc(fdp->free_pids, FLA_FLIST_SEARCH_EXEC_FIRST,
115 | &found, fla_fdp_get_id, &pid, &fla_id, fdp);
116 | if (FLA_ERR(ret, "fla_flist_search_wfunc()"))
117 | return ret;
118 |
119 | if (found == 1)
120 | ctx->cmd.nvm.cdw13.dspec = pid;
121 |
122 | else if (found == 0)
123 | {
124 | ret = fla_flist_entries_alloc(fdp->free_pids, 1);
125 | if (FLA_ERR(ret < 0, "fla_fdp_cached_prep_ctx()"))
126 | return -ENOSPC;
127 |
128 | pid_to_id = fdp->pids + ret;
129 | pid_to_id->fla_id = fla_id;
130 |
131 | ret = fla_fdp_get_pid_n(xne_io->dev, &pid_to_id->pid, 1);
132 | if (FLA_ERR(ret, "fla_fdp_get_pid_n()"))
133 | return ret;
134 | }
135 | ctx->cmd.nvm.dtype = 2;
136 | return 0;
137 | }
138 |
139 | int
140 | fla_noop_prep_ctx(struct fla_xne_io *xne_io, struct xnvme_cmd_ctx *ctx)
141 | {
142 | return 0;
143 | }
144 |
145 | static void
146 | fla_fdp_set_prep_ctx(struct flexalloc const *fs,
147 | int (**prep_ctx)(struct fla_xne_io *xne_io, struct xnvme_cmd_ctx *ctx))
148 | {
149 | struct fla_dp_fdp* fla_dp_fdp = fs->fla_dp.fla_dp_fdp;
150 |
151 | switch (fla_dp_fdp->ctx_set)
152 | {
153 | case FLA_DP_FDP_ON_SLAB:
154 | case FLA_DP_FDP_ON_POOL:
155 | case FLA_DP_FDP_ON_OBJECT:
156 | *prep_ctx = fla_fdp_cached_prep_ctx;
157 | break;
158 | case FLA_DP_FDP_ON_WRITE:
159 | *prep_ctx = fla_fdp_onwrite_prep_ctx;
160 | break;
161 | default:
162 | *prep_ctx = fla_noop_prep_ctx;
163 | }
164 | }
165 |
166 | static uint16_t
167 | fla_fdp_get_max_pids()
168 | {
169 | return 60;
170 | }
171 |
172 | static int
173 | fla_fdp_init_pid_to_id(struct flexalloc const *fs)
174 | {
175 | int err;
176 | struct fla_dp_fdp *fla_dp_fdp = fs->fla_dp.fla_dp_fdp;
177 |
178 | uint16_t pid_to_id_cache_size;
179 | switch (fla_dp_fdp->ctx_set)
180 | {
181 | case FLA_DP_FDP_ON_SLAB:
182 | pid_to_id_cache_size = fla_min(fs->geo.nslabs, fla_fdp_get_max_pids());
183 | break;
184 | case FLA_DP_FDP_ON_POOL:
185 | pid_to_id_cache_size = fla_min(fs->geo.npools, fla_fdp_get_max_pids());
186 | break;
187 | case FLA_DP_FDP_ON_OBJECT:
188 | pid_to_id_cache_size = fla_fdp_get_max_pids();
189 | break;
190 | case FLA_DP_FDP_ON_WRITE:
191 | /* Fall through: We look for a new pid every time we write */
192 | default:
193 | return 0;
194 | }
195 |
196 | fla_dp_fdp->pids
197 | = malloc(sizeof(struct fla_dp_fdp_pid_to_id) * pid_to_id_cache_size);
198 | if (FLA_ERR(!fs->fla_dp.fla_dp_fdp->pids, "malloc()"))
199 | return -ENOMEM;
200 |
201 | if ((err = FLA_ERR(fla_flist_new(pid_to_id_cache_size, &fla_dp_fdp->free_pids),
202 | "fla_flist_new()")))
203 | return err;
204 |
205 | return 0;
206 | }
207 |
208 | static int
209 | fla_fdp_init_md_pid(struct flexalloc const *fs)
210 | {
211 | fs->fla_dp.fla_dp_fdp->md_pid = 0;
212 | return 0;
213 | }
214 |
215 | int
216 | fla_dp_fdp_init(struct flexalloc *fs, uint64_t flags)
217 | {
218 | int err;
219 | fs->fla_dp.dp_type = FLA_DP_FDP;
220 | fs->fla_dp.fla_dp_fdp = malloc(sizeof(struct fla_dp_fdp));
221 | if (FLA_ERR(!fs->fla_dp.fla_dp_fdp, "malloc()"))
222 | return -ENOMEM;
223 |
224 | fs->fla_dp.fla_dp_fdp->ctx_set = FLA_DP_FDP_ON_WRITE;
225 | fs->fla_dp.fncs.init_dp = fla_dp_fdp_init;
226 | fs->fla_dp.fncs.fini_dp = fla_dp_fdp_fini;
227 |
228 | fla_fdp_set_prep_ctx(fs, &fs->fla_dp.fncs.prep_dp_ctx);
229 |
230 | if ((err = FLA_ERR(fla_fdp_init_md_pid(fs), "fla_fdp_init_md_pid()")))
231 | return err;
232 |
233 | if((err = FLA_ERR(fla_fdp_init_pid_to_id(fs), "fla_fdp_init_pid_to_id()")))
234 | return err;
235 |
236 | return 0;
237 | }
238 |
239 | int
240 | fla_dp_fdp_fini(struct flexalloc *fs)
241 | {
242 | free(fs->fla_dp.fla_dp_fdp);
243 | return 0;
244 | }
245 |
246 |
247 |
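fla_fdp_cached_prep_ctx() above keeps a small cache that binds a flexalloc id (slab, pool or object index, depending on ctx_set) to a placement identifier, allocating a fresh cache slot the first time an id is seen. A simplified, self-contained sketch of that lookup-or-assign pattern; it uses a plain array and invented names instead of the real fla_dp_fdp state and freelist:

/* Simplified stand-in for the id -> placement-identifier cache maintained by
 * fla_fdp_cached_prep_ctx(). Real code stores the mapping in fla_dp_fdp->pids
 * and tracks free slots with a flexalloc freelist; a linear scan stands in
 * for both here. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_SLOTS 4

struct id_to_pid { uint32_t fla_id; uint32_t pid; int used; };

static uint32_t
lookup_or_assign(struct id_to_pid *cache, uint32_t fla_id, uint32_t next_pid)
{
  for (int i = 0; i < CACHE_SLOTS; i++)
    if (cache[i].used && cache[i].fla_id == fla_id)
      return cache[i].pid;                 /* cache hit: reuse the bound pid */

  for (int i = 0; i < CACHE_SLOTS; i++)
    if (!cache[i].used)
    {
      cache[i] = (struct id_to_pid){ .fla_id = fla_id, .pid = next_pid, .used = 1 };
      return next_pid;                     /* cache miss: bind a fresh pid */
    }
  return 0;                                /* cache full; real code reports -ENOSPC */
}

int
main(void)
{
  struct id_to_pid cache[CACHE_SLOTS] = {0};
  printf("pool 7 -> pid %"PRIu32"\n", lookup_or_assign(cache, 7, 100));
  printf("pool 7 -> pid %"PRIu32"\n", lookup_or_assign(cache, 7, 101)); /* same pid as before */
  return 0;
}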
--------------------------------------------------------------------------------
/src/flexalloc_dp_fdp.h:
--------------------------------------------------------------------------------
1 | #ifndef __FLEXALLOC_FDP_H
2 | #define __FLEXALLOC_FDP_H
3 | #include <stdint.h>
4 | #include "flexalloc_freelist.h"
5 | #include "flexalloc_shared.h"
6 | #include "flexalloc_xnvme_env.h"
7 |
8 | enum fla_dp_fdp_t
9 | {
10 | FLA_DP_FDP_ON_SLAB,
11 | FLA_DP_FDP_ON_POOL,
12 | FLA_DP_FDP_ON_OBJECT,
13 | FLA_DP_FDP_ON_WRITE
14 | };
15 |
16 | struct fla_dp_fdp_pid_to_id
17 | {
18 | uint32_t pid;
19 | uint32_t fla_id;
20 | };
21 |
22 | struct fla_dp_fdp
23 | {
24 | enum fla_dp_fdp_t ctx_set;
25 | struct fla_dp_fdp_pid_to_id *pids;
26 | freelist_t free_pids;
27 | uint32_t md_pid;
28 | };
29 |
30 | int fla_dp_fdp_init(struct flexalloc *fs, const uint64_t flags);
31 | int fla_dp_fdp_fini(struct flexalloc *fs);
32 |
33 | #endif // __FLEXALLOC_FDP_H
34 |
--------------------------------------------------------------------------------
/src/flexalloc_dp_noop.h:
--------------------------------------------------------------------------------
1 | #ifndef __FLEXALLOC_DP_NOOP_H
2 | #define __FLEXALLOC_DP_NOOP_H
3 |
4 | #include "flexalloc.h"
5 | #include "flexalloc_xnvme_env.h"
6 |
7 | int
8 | fla_dp_noop_prep_ctx(struct fla_xne_io *xne_io, struct xnvme_cmd_ctx *ctx)
9 | {
10 | return 0;
11 | }
12 |
13 | int
14 | fla_dp_noop_fini(struct flexalloc *fs)
15 | {
16 | return 0;
17 | }
18 |
19 | int
20 | fla_dp_noop_init(struct flexalloc *fs, const uint64_t flags)
21 | {
22 | fs->fla_dp.fncs.init_dp = fla_dp_noop_init;
23 | fs->fla_dp.fncs.fini_dp = fla_dp_noop_fini;
24 | fs->fla_dp.fncs.prep_dp_ctx = fla_dp_noop_prep_ctx;
25 |
26 | return 0;
27 | }
28 | #endif // __FLEXALLOC_DP_NOOP_H
29 |
--------------------------------------------------------------------------------
/src/flexalloc_freelist.c:
--------------------------------------------------------------------------------
1 | #include <stdlib.h>
2 | #include <errno.h>
3 | #include <limits.h>
4 | #include "flexalloc_freelist.h"
5 | #include "flexalloc_bits.h"
6 | #include "flexalloc_util.h"
7 |
8 | size_t
9 | fla_flist_size(uint32_t len)
10 | {
11 | // the leading uint32_t element keeps the length of the freelist
12 | return sizeof(uint32_t) * (1 + FLA_FREELIST_U32_ELEMS(len));
13 | }
14 |
15 | uint32_t
16 | fla_flist_len(freelist_t flist)
17 | {
18 | return *flist;
19 | }
20 |
21 | uint32_t
22 | fla_flist_num_reserved(freelist_t flist)
23 | {
24 | uint32_t *ptr = flist + 1;
25 | uint32_t *end = flist + fla_flist_size(*flist) / sizeof(uint32_t);
26 | uint32_t free_entries = 0;
27 |
28 | for (; ptr != end; ptr++)
29 | {
30 | free_entries += count_set_bits(*ptr);
31 | }
32 | return *flist - free_entries;
33 | }
34 |
35 | void
36 | fla_flist_reset(freelist_t flist)
37 | {
38 | uint32_t *len = flist;
39 | uint32_t elems = FLA_FREELIST_U32_ELEMS(*flist);
40 | uint32_t *elem = flist + 1;
41 | uint32_t *elem_last = elem + elems - 1;
42 | uint32_t unused_spots;
43 |
44 | /*
45 | * Initialize freelist by writing 1's for every empty space.
46 | * Because the freelist is backed by an array of uint32_t values, it is likely
47 |    * that the sum of available bits (capacity) exceeds the desired size of the
48 | * freelist.
49 | * That is why we compute the number of 'unused_spots' and ensure these bits
50 | * start as 0 (reserved).
51 | */
52 | for (; elem != elem_last; elem++)
53 | {
54 | *elem = ~0;
55 | }
56 | unused_spots = elems * sizeof(uint32_t) * 8 - *len;
57 | *elem = (~0u) >> unused_spots;
58 | }
59 |
60 | void
61 | fla_flist_init(freelist_t flist, uint32_t len)
62 | {
63 | flist[0] = len;
64 | fla_flist_reset(flist);
65 | }
66 |
67 | int
68 | fla_flist_new(uint32_t len, freelist_t *flist)
69 | {
70 | *flist = malloc(fla_flist_size(len));
71 | if (FLA_ERR(!(*flist), "malloc()"))
72 | {
73 | return -ENOMEM;
74 | }
75 |
76 | fla_flist_init(*flist, len);
77 |
78 | return 0;
79 | }
80 |
81 | void
82 | fla_flist_free(freelist_t flist)
83 | {
84 | free(flist);
85 | }
86 |
87 | freelist_t
88 | fla_flist_load(void *data)
89 | {
90 | return (freelist_t)data;
91 | }
92 |
93 | // find and take a spot in the freelist, returning its index
94 | int
95 | fla_flist_entry_alloc(freelist_t flist, uint32_t elems)
96 | {
97 | uint32_t *elem;
98 | uint32_t wndx = 0;
99 |
100 | for (uint32_t i = 0; i < elems; i++)
101 | {
102 | elem = &flist[1 + i];
103 | // fully booked
104 | if (*elem == 0)
105 | continue;
106 |
107 |     // isolate the rightmost 1-bit and store it in `wndx` so we can calculate the
108 |     // entry's index, then clear that bit in the freelist to mark the entry reserved.
109 | wndx = *elem & (- *elem);
110 | *elem &= ~wndx;
111 | return i * sizeof(uint32_t) * 8 + ntz(wndx);
112 | }
113 | return -1;
114 | }
115 |
116 | int
117 | fla_flist_entries_alloc(freelist_t flist, unsigned int num)
118 | {
119 | uint32_t elems = FLA_FREELIST_U32_ELEMS(*flist);
120 | uint32_t alloc_count;
121 | int alloc_ret;
122 |
123 | alloc_ret = fla_flist_entry_alloc(flist, elems);
124 |
125 | if (num == 1)
126 | return alloc_ret;
127 |
128 | for(alloc_count = 1; alloc_count != num; ++alloc_count)
129 | {
130 | if(fla_flist_entry_alloc(flist, elems) == -1)
131 | return -1;
132 | }
133 |
134 | return alloc_ret;
135 | }
136 |
137 | // release a taken element from freelist
138 | int
139 | fla_flist_entry_free(freelist_t flist, uint32_t ndx)
140 | {
141 | uint32_t *elem = flist + 1;
142 | if (ndx > *flist)
143 | return -1;
144 |
145 | while (ndx >= sizeof(uint32_t) * CHAR_BIT)
146 | {
147 | elem++;
148 | ndx -= sizeof(uint32_t) * CHAR_BIT;
149 | }
150 | *elem |= 1 << ndx;
151 | return 0;
152 | }
153 |
154 | int
155 | fla_flist_entries_free(freelist_t flist, uint32_t ndx, unsigned int num)
156 | {
157 | for(uint32_t i = 0 ; i < num ; ++i)
158 | {
159 | if(fla_flist_entry_free(flist, ndx+i))
160 | return -1;
161 | }
162 | return 0;
163 | }
164 |
165 | int
166 | fla_flist_search_wfunc(freelist_t flist, uint64_t flags, uint32_t *found,
167 | int(*f)(const uint32_t, va_list), ...)
168 | {
169 | uint32_t elem_cpy, wndx = 0, ret;
170 | uint32_t u32_elems = FLA_FREELIST_U32_ELEMS(*flist);
171 | uint32_t len = *flist;
172 | va_list ap;
173 |
174 | if ((flags & FLA_FLIST_SEARCH_EXEC_FIRST) == 0)
175 | return -EINVAL;
176 |
177 | *found = 0;
178 |
179 | for (uint32_t u32_elem = 0 ; u32_elem < u32_elems; u32_elem++)
180 | {
181 | elem_cpy = flist[1 + u32_elem];
182 |
183 | /*
184 | * There is a special case when we reach the end element where all the
185 | * unused bits are NOT 1s but 0s. Here we need to set the unused 0s to ones
186 | * so our stopping condition is valid.
187 | */
188 | if (u32_elem + 1 == u32_elems)
189 | {
190 | uint32_t used_spots = len % 32;
191 | elem_cpy = elem_cpy | (~0 << used_spots);
192 | }
193 |
194 | // All free
195 | if (elem_cpy == 0xFFFFFFFF)
196 | continue;
197 |
198 | /* For all the zero bits: isolate and exec f on the index. */
199 | for (uint8_t j = 0; j < 32 && elem_cpy != 0xFFFFFFFF; ++j)
200 | {
201 | wndx = ~elem_cpy & (elem_cpy + 1);
202 |
203 | va_start(ap, f);
204 | ret = f(u32_elem * sizeof(uint32_t) * 8 + ntz(wndx), ap);
205 | va_end(ap);
206 |
207 | switch (ret)
208 | {
209 | case 1:
210 | *found += ret;
211 | //fall through
212 | case 0:
213 | elem_cpy |= wndx;
214 | continue;
215 | default:
216 | return ret;
217 | }
218 | }
219 | }
220 | return 0;
221 | }
222 |
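The allocation path in fla_flist_entry_alloc() relies on a classic bit trick: `w & -w` isolates the lowest set (free) bit, clearing that bit reserves the slot, and the count of trailing zeros gives the slot index. A standalone illustration; the ntz() helper below is only a stand-in for the project's own ntz() (declared alongside count_set_bits() in flexalloc_bits.h):

/* Worked example of the rightmost-free-bit trick used by
 * fla_flist_entry_alloc(). Bit convention matches the freelist: 1 = free,
 * 0 = reserved. __builtin_ctz is a GCC/Clang builtin used here only as a
 * stand-in for the project's ntz(). */
#include <stdint.h>
#include <stdio.h>

static unsigned ntz(uint32_t x) { return x ? (unsigned)__builtin_ctz(x) : 32; }

int
main(void)
{
  uint32_t word = 0xFFFFFFF4u;                  /* bits 0, 1 and 3 reserved; bit 2 is the first free slot */
  uint32_t lowest_free = word & (uint32_t)-word; /* isolates bit 2 -> 0x4 */

  printf("first free slot: %u\n", ntz(lowest_free)); /* prints 2 */
  word &= ~lowest_free;                          /* reserve it (clear the bit) */
  printf("word after reservation: 0x%08x\n", word);
  return 0;
}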
--------------------------------------------------------------------------------
/src/flexalloc_freelist.h:
--------------------------------------------------------------------------------
1 | #ifndef __FLEXALLOC_FREELIST_H_
2 | #define __FLEXALLOC_FREELIST_H_
3 |
4 | #include <stdint.h>
5 | #include <stddef.h>
6 | #include <stdarg.h>
7 |
8 | typedef uint32_t * freelist_t;
9 |
10 | #define FLA_FREELIST_U32_ELEMS(b) FLA_CEIL_DIV(b, sizeof(uint32_t) * 8)
11 |
12 | /**
13 | * Return size, in bytes, needed to support a freelist of `len` entries.
14 | *
15 | * @param len the number of entries to support.
16 | * @return the size required for the freelist, in bytes.
17 | */
18 | size_t
19 | fla_flist_size(uint32_t len);
20 |
21 | /**
22 | * Return length of freelist.
23 | *
24 | * Returns the length of the freelist, defined as the number of slots
25 | * which can be reserved or taken.
26 | *
27 | * @param flist freelist handle
28 | * @return length of the freelist
29 | */
30 | uint32_t
31 | fla_flist_len(freelist_t flist);
32 |
33 | /**
34 | * Return number of reserved entries in freelist.
35 | *
36 | * Return number of entries already reserved by the freelist.
37 | * Number of free entries can be determined by subtracting this value from
38 | * fla_flist_len().
39 | *
40 | * @param flist freelist handle
41 | * @return number of entries already reserved.
42 | */
43 | uint32_t
44 | fla_flist_num_reserved(freelist_t flist);
45 |
46 | /**
47 | * Reset freelist, setting all `len` entries to free.
48 | *
49 | * Resets the freelist by freeing every one of the `len` entries.
50 | *
51 | * @param flist freelist handle
52 | */
53 | void
54 | fla_flist_reset(freelist_t flist);
55 |
56 | /**
57 | * Initialize the freelist.
58 | *
59 |  * Use this routine in case you allocate a buffer by other means
60 | * (use fla_flist_size() to determine the required size) and wish to initialize
61 | * the freelist.
62 | * Note: if you wish to re-use an existing freelist, calling fla_flist_reset()
63 | * will suffice.
64 | *
65 | * @param flist freelist handle
66 | * @param len length of the freelist. This should be the same length as provided
67 | * when using fla_flist_size() to calculate the required buffer size.
68 | */
69 | void
70 | fla_flist_init(freelist_t flist, uint32_t len);
71 |
72 | /**
73 | * Create a new freelist.
74 | *
75 | * Allocates and initializes a new freelist.
76 | * Note that you can accomplish the same by using fla_flist_size() to calculate
77 | * the required buffer size for a list of `len` entries. Allocate the buffer
78 | * by whatever means you wish and call fla_flist_init() to initialize the
79 | * freelist before use.
80 | *
81 | * @param len length of the freelist to create
82 | * @param flist pointer to a freelist handle
83 | * @return On success, 0 is returned and flist points to an initialized freelist.
84 | * Otherwise, non-zero is returned and flist is uninitialized.
85 | */
86 | int
87 | fla_flist_new(uint32_t len, freelist_t *flist);
88 |
89 | /**
90 | * Free buffer backing the freelist.
91 | *
92 | * NOTE: *only* use this if freelist is allocated using fla_flist_new().
93 | *
94 | * @param flist freelist handle
95 | */
96 | void
97 | fla_flist_free(freelist_t flist);
98 |
99 | /**
100 | * Treat memory at data as an initialized freelist.
101 | *
102 | * Use data pointed at by `data` as a freelist. This expects the buffer to
103 | * be of sufficient length and its contents to be a freelist. That is, at
104 | * some point it was initialized with fla_freelist_init() and since then
105 | * only operated on using the fla_flist_* functions.
106 | *
107 |  * NOTE: no additional allocations are made and the memory is still
108 |  * owned by you; do *not* use fla_flist_free() on the freelist handle!
109 | *
110 | * @param data a buffer with data laid out by the freelist routines.
111 | * @return a freelist handle
112 | * */
113 | freelist_t
114 | fla_flist_load(void *data);
115 |
116 | /**
117 |  * Allocate one or more entries from the freelist (if possible).
118 |  *
119 |  * Allocates `num` entries from the freelist, if possible, and returns the
120 |  * index of the first entry. The allocation fails if the freelist cannot
121 |  * satisfy the request, in which case -1 is returned.
122 |  *
123 |  * @param flist freelist handle
124 |  * @param num number of entries to allocate
125 |  * @return On success, a value between 0 and `len` (the freelist length)
126 |  * indicating the first element of the freelist that has been reserved.
127 |  * On error, -1.
128 | */
129 | int
130 | fla_flist_entries_alloc(freelist_t flist, unsigned int num);
131 |
132 | /**
133 | * Free an entry from the freelist.
134 | *
135 | * Frees the element identified by `ndx` in the freelist.
136 | *
137 | * NOTE: the free is idempotent, it is possible to free an already
138 | * freed element.
139 | *
140 | * NOTE: attempting to free an entry at an index outside the bounds
141 | * of the freelist range returns -1.
142 | *
143 | * @param flist freelist handle
144 | * @param ndx index within the freelist of the element to free
145 | * @return On success, 0 is returned. On error, -1 is returned.
146 | * */
147 | int
148 | fla_flist_entry_free(freelist_t flist, uint32_t ndx);
149 |
150 |
151 | int
152 | fla_flist_entries_free(freelist_t flist, uint32_t ndx, unsigned int num);
153 |
154 | /**
155 |  * Search all the used elements by executing a function.
156 | *
157 | *
158 | * @param flist freelist handle
159 | * @param flags modifies how the function is executed.
160 | * When FLA_FLIST_SEARCH_EXEC_FIRST is set returns on first find
161 | * @param found is the number of times f returned 1
162 |  * @param f The function that will be executed. Must return < 0
163 | * on error, must return 0 when an element was not "found",
164 | * must return 1 when an element was "found".
165 | * @param ... These are the variadic arguments that will be
166 | * forwarded to the f function.
167 | * @return <0 if there is an error. 0 otherwise.
168 | */
169 | int
170 | fla_flist_search_wfunc(freelist_t flist, uint64_t flags, uint32_t *found,
171 | int(*f)(const uint32_t, va_list), ...);
172 | #define FLA_FLIST_SEARCH_EXEC_FIRST (1 << 0)
173 |
174 | #endif // __FLEXALLOC_FREELIST_H_
175 |
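A short usage sketch for the freelist API declared above, using only the calls documented in this header (error handling abbreviated):

/* Usage sketch: create a 100-slot freelist, reserve one entry, inspect the
 * occupancy, then release the entry and the list again. */
#include <inttypes.h>
#include <stdio.h>
#include "flexalloc_freelist.h"

int
main(void)
{
  freelist_t flist;
  int ndx;

  if (fla_flist_new(100, &flist))          /* allocate + initialize 100 slots */
    return 1;

  ndx = fla_flist_entries_alloc(flist, 1); /* reserve one slot, get its index */
  if (ndx < 0)
    return 1;

  printf("reserved slot %d, %"PRIu32" of %"PRIu32" slots in use\n",
         ndx, fla_flist_num_reserved(flist), fla_flist_len(flist));

  fla_flist_entry_free(flist, (uint32_t)ndx); /* release the slot again */
  fla_flist_free(flist);                      /* only valid for fla_flist_new() lists */
  return 0;
}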
--------------------------------------------------------------------------------
/src/flexalloc_hash.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 | #include <string.h>
4 | #include <errno.h>
5 | #include <stdint.h>
6 | #include "flexalloc_util.h"
7 | #include "flexalloc_hash.h"
8 |
9 | uint64_t
10 | fla_hash_djb2(const char *key)
11 | {
12 | uint64_t hash = 5381;
13 | unsigned int key_len = strlen(key);
14 |
15 | for (unsigned int i = 0; i < key_len; i++)
16 | {
17 | hash = ((hash << 5) + hash) + key[i]; // hash * 33 + c
18 | }
19 |
20 | return hash;
21 | }
22 |
23 | uint64_t
24 | fla_hash_sdbm(char const *key)
25 | {
26 | uint64_t hash = 0;
27 | int c;
28 |
29 | while ((c = *key++))
30 | hash = c + (hash << 6) + (hash << 16) - hash;
31 |
32 | return hash;
33 | }
34 |
35 | uint64_t
36 | fla_mad_compression(uint64_t key, uint64_t a, uint64_t b, uint64_t n)
37 | {
38 | // n should be the table size (and ideally a prime number)
39 | // a, b should be non-negative integers
40 | // a % n != 0
41 |
42 | // map `key` to some value within [0;N[
43 | // (abs is implied as all values are unsigned)
44 | return (a * key + b) % n;
45 | }
46 |
47 | void
48 | fla_htbl_entries_init(struct fla_htbl_entry *tbl, uint32_t tbl_size)
49 | {
50 | memset(tbl, 0, sizeof(struct fla_htbl_entry) * tbl_size);
51 |
52 | for (unsigned int i = 0; i < tbl_size; i++)
53 | {
54 | // indicate that slot is unset
55 | tbl[i].h2 = FLA_HTBL_ENTRY_UNSET;
56 | tbl[i].psl = 0;
57 | }
58 | }
59 |
60 | int
61 | htbl_init(struct fla_htbl *htbl, unsigned int tbl_size)
62 | {
63 | struct fla_htbl_entry *table;
64 | int err = 0;
65 |
66 | if ((err = FLA_ERR(htbl->tbl != NULL, "cannot initialize twice")))
67 | goto exit;
68 |
69 | table = calloc(sizeof(struct fla_htbl_entry), tbl_size);
70 | if ((err = FLA_ERR(!table, "failed to allocate table for entries")))
71 | {
72 | err = -ENOMEM;
73 | goto exit;
74 | }
75 |
76 | htbl->tbl = table;
77 | htbl->tbl_size = tbl_size;
78 |
79 | fla_htbl_entries_init(htbl->tbl, htbl->tbl_size);
80 |
81 | htbl->len = 0;
82 |
83 | htbl->stat_insert_calls = 0;
84 | htbl->stat_insert_failed = 0;
85 | htbl->stat_insert_tries = 0;
86 |
87 | exit:
88 | return err;
89 | }
90 |
91 | int
92 | htbl_new(unsigned int tbl_size, struct fla_htbl **htbl)
93 | {
94 | int err = 0;
95 | *htbl = malloc(sizeof(struct fla_htbl));
96 | if (FLA_ERR(!(*htbl), "failed to allocate hash table"))
97 | {
98 | err = -ENOMEM;
99 | return err;
100 | }
101 | (*htbl)->tbl = NULL;
102 |
103 | err = htbl_init(*htbl, tbl_size);
104 | if (FLA_ERR(err, "htbl_init()"))
105 | goto free_table;
106 |
107 | return 0;
108 |
109 | free_table:
110 | free(*htbl);
111 | return err;
112 | }
113 |
114 | void
115 | htbl_free(struct fla_htbl *htbl)
116 | {
117 | if (!htbl)
118 | return;
119 | free(htbl->tbl);
120 | free(htbl);
121 | }
122 |
123 | int
124 | htbl_insert(struct fla_htbl *htbl, char const *key, uint32_t val)
125 | {
126 | unsigned int n_tries = 0;
127 | uint64_t ndx = FLA_HTBL_COMPRESS(FLA_HTBL_H1(key), htbl->tbl_size);
128 |
129 | struct fla_htbl_entry *entry;
130 | struct fla_htbl_entry tmp, current;
131 | current.h2 = FLA_HTBL_H2(key);
132 | current.psl = 0;
133 | current.val = val;
134 |
135 | htbl->stat_insert_calls++;
136 |
137 | if (htbl->len == htbl->tbl_size)
138 | // table is full
139 | return 2;
140 |
141 | while (1)
142 | {
143 | entry = &htbl->tbl[ndx];
144 | n_tries++;
145 |
146 | if (entry->h2 == FLA_HTBL_ENTRY_UNSET)
147 | {
148 | // found empty slot, insert and quit
149 | *entry = current;
150 | htbl->len++;
151 | htbl->stat_insert_tries += n_tries;
152 | return 0;
153 | }
154 | else if (entry->psl < current.psl)
155 | {
156 | // richer element, swap out and continue insert
157 | tmp = *entry;
158 |
159 | *entry = current;
160 |
161 | current = tmp;
162 | current.psl += 1;
163 | }
164 | else if (entry->h2 == current.h2)
165 | {
166 | // entry with same key (or *very* unlikely collision)
167 | entry->val = current.val;
168 | return 0;
169 | }
170 | else
171 | {
172 | // continue search (inc psl and ndx)
173 | current.psl++;
174 | }
175 | ndx++;
176 | if (ndx == htbl->tbl_size)
177 | {
178 | ndx = 0;
179 | }
180 | }
181 | }
182 |
183 | // lookup - based on the robinhood placement strategy
184 | struct fla_htbl_entry *
185 | __htbl_lookup(struct fla_htbl *htbl, uint64_t h2, uint64_t ndx)
186 | {
187 | unsigned int psl = 0;
188 | struct fla_htbl_entry *entry;
189 |
190 | while(1)
191 | {
192 | entry = &htbl->tbl[ndx++];
193 | if (entry->h2 == h2)
194 | return entry;
195 | else if (entry->h2 == FLA_HTBL_ENTRY_UNSET || entry->psl < psl)
196 | // empty entry OR an entry whose placement would make it richer than the one
197 | // we're looking for, which would be impossible given our placement strategy.
198 | break;
199 |
200 | if (ndx == htbl->tbl_size)
201 | // wrap around
202 | ndx = 0;
203 | psl++;
204 | }
205 | return NULL;
206 | }
207 |
208 | struct fla_htbl_entry *
209 | htbl_lookup(struct fla_htbl *htbl, const char *key)
210 | {
211 | return __htbl_lookup(htbl, FLA_HTBL_H2(key), FLA_HTBL_COMPRESS(FLA_HTBL_H1(key),
212 | htbl->tbl_size));
213 | }
214 |
215 | // remove - based on the robinhood placement strategy
216 | void
217 | htbl_remove(struct fla_htbl *htbl, char *key)
218 | {
219 | struct fla_htbl_entry *next;
220 | struct fla_htbl_entry *end = htbl->tbl + htbl->tbl_size;
221 | uint64_t h2 = FLA_HTBL_H2(key);
222 | uint64_t ndx = FLA_HTBL_COMPRESS(FLA_HTBL_H1(key), htbl->tbl_size);
223 | struct fla_htbl_entry *entry = __htbl_lookup(htbl, h2, ndx);
224 |
225 | if (!entry)
226 | return;
227 |
228 | while(1)
229 | {
230 | next = entry + 1;
231 | if (next == end)
232 | next = htbl->tbl;
233 |
234 | if (!next->psl)
235 | break;
236 |
237 | *entry = *next;
238 | entry->psl--;
239 |
240 | entry = next;
241 | }
242 |
243 | entry->h2 = FLA_HTBL_ENTRY_UNSET;
244 | entry->psl = 0;
245 | htbl->len--;
246 | }
247 |
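A small worked example of the hash-to-bucket pipeline used above: hash the key with djb2, then compress the 64-bit value into the table range with the multiply-add-divide step (a=31, b=5745 as in FLA_HTBL_COMPRESS; the table size 101 and the key are arbitrary values picked for the example):

/* Worked example: key -> djb2 hash -> MAD compression into [0; tbl_size[. */
#include <inttypes.h>
#include <stdio.h>
#include "flexalloc_hash.h"

int
main(void)
{
  uint64_t h = fla_hash_djb2("pool-A");
  uint64_t bucket = fla_mad_compression(h, 31, 5745, 101);

  printf("h1(\"pool-A\") = %"PRIu64", bucket = %"PRIu64" (always < 101)\n",
         h, bucket);
  return 0;
}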
--------------------------------------------------------------------------------
/src/flexalloc_hash.h:
--------------------------------------------------------------------------------
1 | /**
2 | * flexalloc hash functions and hash table implementation
3 | *
4 | * Copyright (C) 2021 Jesper Devantier
5 | *
6 | * @file flexalloc_hash.h
7 | */
8 | #ifndef __FLEXALLOC_HASH_H_
9 | #define __FLEXALLOC_HASH_H_
10 | #include <stdint.h>
11 |
12 | #define FLA_HTBL_ENTRY_UNSET ~0
13 | #define FLA_HTBL_H1(key) fla_hash_djb2(key)
14 | #define FLA_HTBL_H2(key) fla_hash_sdbm(key)
15 | #define FLA_HTBL_COMPRESS(hval, tbl_size) fla_mad_compression(hval, 31, 5745, tbl_size)
16 |
17 | /**
18 | * DJB2 hash function.
19 | *
20 | * @param key text value to hash
21 | * @return hash value of key
22 | */
23 | uint64_t
24 | fla_hash_djb2(const char *key);
25 |
26 | /**
27 | * SDBM hash function.
28 | *
29 | * @param key text value to hash
30 | * @return hash value of key
31 | */
32 | uint64_t
33 | fla_hash_sdbm(char const *key);
34 |
35 | /**
36 | * (M)ultiply (a)dd (d)ivide compression algorithm.
37 | *
38 | * The compression function is used to compress hash values (2^64
39 | * range of values) to some smaller range, [0;n[.
40 | *
41 | * Note: it should hold that: a % n != 0
42 | *
43 |  * @param hash some hash value
44 | * @param a some non-negative integer
45 | * @param b some non-negative integer
46 | * @param n maximum permitted value (your hash table's size)
47 | *
48 | * @return some value X where 0 <= x < n
49 | */
50 | uint64_t
51 | fla_mad_compression(uint64_t hash, uint64_t a, uint64_t b, uint64_t n);
52 |
53 | /// Hash table data structure
54 | struct fla_htbl
55 | {
56 | /// The underlying array of table entries
57 | struct fla_htbl_entry *tbl;
58 | /// Hash table size
59 | unsigned int tbl_size;
60 | /// Number of items presently in hash table
61 | unsigned int len;
62 |
63 | unsigned int stat_insert_calls;
64 | unsigned int stat_insert_failed;
65 | unsigned int stat_insert_tries;
66 | };
67 |
68 | /// Hash table entry
69 | struct fla_htbl_entry
70 | {
71 | /// Secondary hash value
72 | ///
73 | /// The secondary hash value is used to distinguish collisions where two
74 | /// distinct input values yield the same hash value when using our primary
75 | /// hash function.
76 | /// Inserting a secondary hash into the table makes genuine collisions
77 | /// highly improbable.
78 | uint64_t h2;
79 | /// The hash table entry value
80 | ///
81 | /// In case of
82 | uint32_t val;
83 | /// Probe sequence length value
84 | ///
85 | /// The probe sequence length value effectively tracks how many
86 | /// places the element is away from its ideal hash table position.
87 | ///
88 | /// We use this value with Robin Hood hashing during insertion to give
89 | /// priority to elements with a higher PSL - inserting our element if it
90 |   /// has a higher PSL (already farther from its ideal entry) than the currently
91 | /// examined element, continuing insertion by then trying to find a spot for
92 | /// the displaced element.
93 | uint16_t psl;
94 | };
95 |
96 | /**
97 | * Allocates (& initializes) a new hash table with `tbl_size` entries.
98 | *
99 | * @param tbl_size desired size of the backing table
100 |  *                 NOTE: the table size should be 2-3 times the expected
101 |  *                 number of elements. Hash tables perform
102 |  *                 very poorly as they fill up.
103 | * @param htbl pointer to hash table, will contain a reference to
104 | * the constructed hash table
105 | *
106 | * @return On success 0 and *htbl pointing to an allocated and initialized
107 | * hash table. On error, non-zero and *htbl being undefined.
108 | */
109 | int htbl_new(unsigned int tbl_size, struct fla_htbl **htbl);
110 |
111 | void fla_htbl_entries_init(struct fla_htbl_entry *tbl, uint32_t tbl_size);
112 |
113 | /**
114 | * Initialize hash table.
115 | *
116 | * NOTE: automatically called by htbl_new
117 | */
118 | int htbl_init(struct fla_htbl *htbl, unsigned int tbl_size);
119 |
120 | /**
121 | * Free hash table.
122 | *
123 | * @param htbl hash table
124 | */
125 | void htbl_free(struct fla_htbl *htbl);
126 |
127 | /**
128 | * Insert entry into hash table.
129 | *
130 | * NOTE: htbl_insert will update the existing entry if present.
131 | *
132 | * @return 0 if entry inserted or updated, otherwise an error.
133 | */
134 | int htbl_insert(struct fla_htbl *htbl, char const *key, uint32_t val);
135 |
136 | /**
137 | * Find entry in hash table.
138 | *
139 | * @param htbl hash table
140 | * @param key key of entry to find
141 | *
142 | * @return NULL if no entry was found, otherwise the entry
143 | */
144 | struct fla_htbl_entry *htbl_lookup(struct fla_htbl *htbl, const char *key);
145 |
146 | /**
147 | * Remove entry from hash table.
148 | *
149 | * @param htbl the hash table
150 | * @param key key of the entry to remove
151 | */
152 | void htbl_remove(struct fla_htbl *htbl, char *key);
153 | #endif // __FLEXALLOC_HASH_H_
154 |
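A short usage sketch of the hash table API declared above: size the table 2-3x the expected element count, then insert, look up and remove (the key/value pair is invented and error handling is abbreviated):

/* Usage sketch: map a pool name to a pool index, look it up, remove it. */
#include <inttypes.h>
#include <stdio.h>
#include "flexalloc_hash.h"

int
main(void)
{
  struct fla_htbl *htbl;
  struct fla_htbl_entry *entry;

  if (htbl_new(64, &htbl))              /* room for ~20-30 entries comfortably */
    return 1;

  if (htbl_insert(htbl, "pool-A", 7))   /* map name -> pool index */
    return 1;

  entry = htbl_lookup(htbl, "pool-A");
  if (entry)
    printf("pool-A -> %"PRIu32"\n", entry->val);

  htbl_remove(htbl, (char *)"pool-A");  /* htbl_remove takes a non-const key */
  htbl_free(htbl);
  return 0;
}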
--------------------------------------------------------------------------------
/src/flexalloc_introspection.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <inttypes.h>
3 | #include "flexalloc_freelist.h"
4 | #include "flexalloc_introspection.h"
5 | #include "flexalloc_hash.h"
6 | #include "flexalloc_mm.h"
7 | #include "flexalloc_util.h"
8 |
9 | #define PTR_OFFSETOF(from, to) ((char*)to - (char*)from)
10 |
11 | int
12 | __flist_bit_reserved(freelist_t flist, uint32_t ndx)
13 | {
14 | /* check if bit at offset `ndx` of freelist is reserved */
15 | uint32_t *start;
16 |   for (start = flist + 1; ndx >= 32; ndx -= 32, start++)
17 | continue;
18 |
19 | /* remember, 1->free, 0->reserved */
20 | return !((*start >> ndx) & 1U);
21 | }
22 |
23 | void
24 | print_htbl_entry(struct fla_htbl_entry *e)
25 | {
26 | fprintf(stdout, "{h2: %"PRIx64", val: %"PRIu32", psl: %"PRIu32"}",
27 | e->h2, e->val, e->psl);
28 | }
29 |
30 | uint32_t
31 | pool_htbl_num_reserved(struct flexalloc *fs)
32 | {
33 |
34 | struct fla_htbl_entry *entry = fs->pools.htbl.tbl;
35 | struct fla_htbl_entry *end = fs->pools.htbl.tbl + fs->pools.htbl.tbl_size;
36 | uint32_t num_reserved = 0;
37 |
38 | for (; entry != end; entry++)
39 | {
40 | if (entry->h2 != FLA_HTBL_ENTRY_UNSET)
41 | num_reserved++;
42 | }
43 | return num_reserved;
44 | }
45 |
46 | /*
47 | * Check that segments aren't overlapping.
48 | *
49 | * The md_ptr_check_* functions check that the distance between a given
50 | * pointer to the next matches the distance which the segment is supposed
51 | * to fill as described by fla_geo.
52 | *
53 |  * This only checks that the distances between pointers are as expected.
54 |  * It cannot prevent writes from spilling over into other segments.
55 | * It will, however, double-check that offsets reflect the geometry indicated
56 | * by fla_geo.
57 | *
58 | */
59 |
60 | int
61 | mdr_ptr_check_super_offset(struct flexalloc *fs)
62 | {
63 | // super block should be at the very start of the buffer
64 | return !(fs->super == fs->fs_buffer);
65 | }
66 |
67 | int
68 | md_ptr_check_super_size(struct flexalloc *fs)
69 | {
70 | return (PTR_OFFSETOF(fs->super, fs->pools.freelist)
71 | != fs->geo.md_nlb * fs->geo.lb_nbytes);
72 | }
73 | int
74 | md_ptr_check_pool_freelist_size(struct flexalloc *fs)
75 | {
76 | return (PTR_OFFSETOF(fs->pools.freelist, fs->pools.htbl_hdr_buffer)
77 | != fs->geo.pool_sgmt.freelist_nlb * fs->geo.lb_nbytes);
78 | }
79 |
80 | int
81 | md_ptr_check_pool_htbl_size(struct flexalloc *fs)
82 | {
83 | return (PTR_OFFSETOF(fs->pools.htbl_hdr_buffer, fs->pools.entries)
84 | != fs->geo.pool_sgmt.htbl_nlb * fs->geo.lb_nbytes);
85 | }
86 |
87 | int
88 | md_ptr_check_pool_entries_size(struct flexalloc *fs)
89 | {
90 | return (PTR_OFFSETOF(fs->pools.entries, fs->slabs.headers)
91 | != fs->geo.pool_sgmt.entries_nlb * fs->geo.lb_nbytes);
92 | }
93 |
94 | // TODO how to validate slab ptr ?
95 |
96 |
97 | unsigned int
98 | check_pool_entries(struct flexalloc *fs, uint32_t *offset)
99 | {
100 | struct fla_htbl_entry *entry = fs->pools.htbl.tbl + *offset;
101 | struct fla_htbl_entry *end = fs->pools.htbl.tbl + fs->pools.htbl.tbl_size;
102 | struct fla_pool_entry *pool_entry;
103 | uint32_t delta = 0;
104 | uint32_t npools = fla_flist_len(fs->pools.freelist);
105 | unsigned int err = 0;
106 |
107 | for (; err == 0 && entry != end; entry++, delta++)
108 | {
109 | if (entry->h2 == FLA_HTBL_ENTRY_UNSET)
110 | continue;
111 |
112 | // ensure entry is correspondingly set in freelist
113 | if (!__flist_bit_reserved(fs->pools.freelist, entry->val))
114 | err |= POOL_ENTRY_NO_FLIST_ENTRY;
115 |
116 | // ensure index pointed to by entry should exist
117 | if (entry->val >= npools)
118 | {
119 | err |= POOL_ENTRY_HTBL_VAL_OUT_OF_BOUNDS;
120 |       // exit, should not attempt to read unrelated memory
121 | goto exit;
122 | }
123 |
124 | pool_entry = &fs->pools.entries[entry->val];
125 | // check that name is set.
126 | if (pool_entry->name[0] == '\0')
127 | err |= POOL_ENTRY_NAME_UNSET;
128 |
129 | if (fla_strnlen(pool_entry->name, FLA_NAME_SIZE_POOL) == FLA_NAME_SIZE_POOL)
130 | err |= POOL_ENTRY_NAME_NO_NULLTERM;
131 |
132 | // check that htbl h2 value matches with hashing the pool entry name
133 | if (err & POOL_ENTRY_NAME_UNSET
134 | || err & POOL_ENTRY_NAME_NO_NULLTERM
135 | || FLA_HTBL_H2(pool_entry->name) != entry->h2)
136 | err |= POOL_ENTRY_H2_DISCREPANCY;
137 |
138 | // ensure entry has a specified object size
139 | if (!pool_entry->obj_nlb)
140 | err |= POOL_ENTRY_INVALID_OBJ_SIZE;
141 | if (pool_entry->obj_nlb == 10)
142 | err |= POOL_ENTRY_INVALID_OBJ_SIZE;
143 | }
144 |
145 | exit:
146 | if (err)
147 | // point to problematic entry
148 | *offset += (delta - 1);
149 | else
150 | *offset = 0;
151 | return err;
152 | }
153 |
154 | int
155 | check_pools_num_entries(struct flexalloc *fs)
156 | {
157 | uint32_t flist_reserved = fla_flist_num_reserved(fs->pools.freelist);
158 | uint32_t flist_len = fla_flist_len(fs->pools.freelist);
159 | uint32_t htbl_reserved = 0;
160 | struct fla_htbl_entry *entry;
161 | struct fla_htbl_entry *htbl_end = fs->pools.htbl.tbl + fs->pools.htbl.tbl_size;
162 | unsigned int err = 0;
163 |
164 | if (fs->super->npools != flist_len)
165 | err |= POOLS_SUPER_FLIST_DISCREPANCY;
166 |
167 | for (entry = fs->pools.htbl.tbl; entry != htbl_end; entry++)
168 | {
169 | if (entry->h2 == FLA_HTBL_ENTRY_UNSET)
170 | continue;
171 |
172 | htbl_reserved++;
173 | }
174 |
175 | if (flist_reserved != htbl_reserved)
176 | /*
177 |    * The number of hash table entries does not match the number of reserved
178 | * freelist entries. There should always be a 1-to-1 correspondence as
179 | * each item reserved on the freelist should have *exactly* one corresponding
180 | * hash table entry.
181 | */
182 | err |= POOLS_FLIST_HTBL_RESERVED_DISCREPANCY;
183 |
184 | return err;
185 | }
186 |
--------------------------------------------------------------------------------
/src/flexalloc_introspection.h:
--------------------------------------------------------------------------------
1 | /**
2 | * flexalloc disk structures.
3 | *
4 | * Copyright (C) 2021 Jesper Devantier
5 | *
6 | * @file flexalloc_introspection.h
7 | */
8 | #ifndef __FLEXALLOC_INTROSPECTION_H_
9 | #define __FLEXALLOC_INTROSPECTION_H_
10 | #include <stdint.h>
11 | #include "flexalloc_mm.h"
12 |
13 | #define POOLS_SUPER_FLIST_DISCREPANCY 1U
14 | #define POOLS_FLIST_HTBL_RESERVED_DISCREPANCY (1U << 1U)
15 |
16 | #define POOL_ENTRY_NO_FLIST_ENTRY 1U
17 | #define POOL_ENTRY_HTBL_VAL_OUT_OF_BOUNDS (1U << 1U)
18 | #define POOL_ENTRY_NAME_UNSET (1U << 2U)
19 | #define POOL_ENTRY_NAME_NO_NULLTERM (1U << 3U)
20 | #define POOL_ENTRY_H2_DISCREPANCY (1U << 4U)
21 | #define POOL_ENTRY_INVALID_OBJ_SIZE (1U << 5U)
22 |
23 | uint32_t
24 | pool_htbl_num_reserved(struct flexalloc *fs);
25 |
26 | unsigned int
27 | check_pool_entries(struct flexalloc *fs, uint32_t *offset);
28 |
29 | int
30 | check_pools_num_entries(struct flexalloc *fs);
31 |
32 | int
33 | mdr_ptr_check_super_offset(struct flexalloc *fs);
34 |
35 | int
36 | md_ptr_check_super_size(struct flexalloc *fs);
37 |
38 | int
39 | md_ptr_check_pool_freelist_size(struct flexalloc *fs);
40 |
41 | int
42 | md_ptr_check_pool_htbl_size(struct flexalloc *fs);
43 |
44 | int
45 | md_ptr_check_pool_entries_size(struct flexalloc *fs);
46 | #endif // __FLEXALLOC_INTROSPECTION_H_
47 |
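A sketch of how a caller might combine check_pool_entries() with the POOL_ENTRY_* flags above to report inconsistencies; `fs` is assumed to be an opened flexalloc handle and fla_example_report_pool_issues is a hypothetical helper (only a few of the flags are decoded here):

/* Sketch: walk the pool hash table and decode the returned bitmask. On error,
 * check_pool_entries() leaves *offset pointing at the problematic entry. */
#include <inttypes.h>
#include <stdio.h>
#include "flexalloc_introspection.h"

static void
fla_example_report_pool_issues(struct flexalloc *fs)
{
  uint32_t offset = 0;
  unsigned int err = check_pool_entries(fs, &offset);

  if (!err)
    return;                                             /* all entries consistent */
  fprintf(stderr, "pool htbl entry %"PRIu32":\n", offset);
  if (err & POOL_ENTRY_NO_FLIST_ENTRY)
    fprintf(stderr, "  not reserved in the pool freelist\n");
  if (err & POOL_ENTRY_NAME_NO_NULLTERM)
    fprintf(stderr, "  name is not NULL-terminated\n");
  if (err & POOL_ENTRY_H2_DISCREPANCY)
    fprintf(stderr, "  stored h2 does not match the entry name\n");
}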
--------------------------------------------------------------------------------
/src/flexalloc_ll.c:
--------------------------------------------------------------------------------
1 | //Copyright (C) 2021 Joel Granados
2 |
3 | #include "flexalloc_ll.h"
4 | #include "flexalloc_util.h"
5 |
6 | int
7 | fla_hdll_prepend(struct flexalloc * fs, struct fla_slab_header * slab, uint32_t *head)
8 | {
9 | int err;
10 | uint32_t slab_id;
11 | struct fla_slab_header * head_slab = NULL;
12 |
13 | err = fla_slab_id(slab, fs, &slab_id);
14 | if(FLA_ERR(err, "fla_slab_id()"))
15 | {
16 | goto exit;
17 | }
18 |
19 | if(*head != FLA_LINKED_LIST_NULL)
20 | {
21 | head_slab = fla_slab_header_ptr(*head, fs);
22 | if((err = FLA_ERR(!head_slab, "fla_slab_header_ptr()")))
23 | {
24 | goto exit;
25 | }
26 | head_slab->prev = slab_id;
27 | }
28 |
29 | slab->next = *head;
30 | slab->prev = FLA_LINKED_LIST_NULL;
31 | *head = slab_id;
32 |
33 | exit:
34 | return err;
35 | }
36 |
37 | int
38 | fla_hdll_remove(struct flexalloc * fs, struct fla_slab_header * slab, uint32_t * head)
39 | {
40 | int err = 0;
41 | struct fla_slab_header * temp_slab;
42 |
43 | // Remove from head
44 | if(slab->prev == FLA_LINKED_LIST_NULL)
45 | {
46 | *head = slab->next;
47 | }
48 | else
49 | {
50 | temp_slab = fla_slab_header_ptr(slab->prev, fs);
51 | if((err = FLA_ERR(!temp_slab, "fla_slab_header_ptr()")))
52 | {
53 | goto exit;
54 | }
55 | temp_slab->next = slab->next;
56 | }
57 |
58 | if(slab->next != FLA_LINKED_LIST_NULL)
59 | {
60 | temp_slab = fla_slab_header_ptr(slab->next, fs);
61 | if((err = FLA_ERR(!temp_slab, "fla_slab_header_ptr()")))
62 | {
63 | goto exit;
64 | }
65 | temp_slab->prev = slab->prev;
66 | }
67 |
68 | exit:
69 | return err;
70 | }
71 |
72 | int
73 | fla_hdll_remove_all(struct flexalloc *fs, uint32_t *head,
74 | int (*execute_on_release)(struct flexalloc *fs, struct fla_slab_header*))
75 | {
76 | int err = 0;
77 | struct fla_slab_header * curr_slab;
78 |
79 | for(uint32_t i = 0 ; i < fs->geo.nslabs && *head != FLA_LINKED_LIST_NULL; ++i)
80 | {
81 | curr_slab = fla_slab_header_ptr(*head, fs);
82 | if((err = FLA_ERR(!curr_slab, "fla_slab_header_ptr()")))
83 | {
84 | goto exit;
85 | }
86 |
87 | *head = curr_slab->next;
88 |
89 | err = execute_on_release(fs, curr_slab);
90 | if(FLA_ERR(err, "execute_on_release()"))
91 | {
92 | goto exit;
93 | }
94 | }
95 |
96 | // FIXME: This should probably be an assert
97 | err = FLA_ERR(*head != FLA_LINKED_LIST_NULL, "fla_hdll_remove_all()");
98 |
99 | exit:
100 | return err;
101 | }
102 |
103 |
104 | int
105 | fla_edll_remove_head(struct flexalloc * fs, uint32_t * head, uint32_t * tail,
106 | struct fla_slab_header ** a_slab)
107 | {
108 | int err;
109 | struct fla_slab_header * new_head;
110 |
111 | err = !head || !tail;
112 | if(FLA_ERR(err, "fla_edll_remove()"))
113 | {
114 | goto exit;
115 | }
116 |
117 | *a_slab = fla_slab_header_ptr(*head, fs);
118 | if((err = -FLA_ERR(!(*a_slab), "fla_slab_header_ptr()")))
119 | {
120 | goto exit;
121 | }
122 |
123 | if (*head == *tail)
124 | {
125 | *head = FLA_LINKED_LIST_NULL;
126 | *tail = FLA_LINKED_LIST_NULL;
127 | }
128 | else
129 | {
130 | new_head = fla_slab_header_ptr((*a_slab)->next, fs);
131 | if((err = -FLA_ERR(!(new_head), "fla_slab_header_ptr()")))
132 | {
133 | goto exit;
134 | }
135 |
136 | *head = (*a_slab)->next;
137 | new_head->prev = FLA_LINKED_LIST_NULL;
138 | }
139 |
140 | exit:
141 | return err;
142 | }
143 |
144 | int
145 | fla_edll_add_tail(struct flexalloc *fs, uint32_t * head, uint32_t * tail,
146 | struct fla_slab_header * r_slab)
147 | {
148 | int err;
149 | uint32_t r_slab_id;
150 |
151 | err = fla_slab_id(r_slab, fs, &r_slab_id);
152 | if(FLA_ERR(err, "fla_slab_id()"))
153 | {
154 | goto exit;
155 | }
156 |
157 | r_slab->next = FLA_LINKED_LIST_NULL;
158 | if(*head == *tail && *head == FLA_LINKED_LIST_NULL)
159 | {
160 | *tail = r_slab_id;
161 | *head = r_slab_id;
162 | r_slab->prev = FLA_LINKED_LIST_NULL;
163 | }
164 | else
165 | {
166 | struct fla_slab_header * tail_slab;
167 | tail_slab = fla_slab_header_ptr(*tail, fs);
168 | if((err = FLA_ERR(!(tail_slab), "fla_slab_header_ptr()")))
169 | {
170 | goto exit;
171 | }
172 |
173 | tail_slab->next = r_slab_id;
174 | r_slab->prev = *tail;
175 | *tail = r_slab_id;
176 | }
177 |
178 | exit:
179 | return err;
180 | }
181 |
--------------------------------------------------------------------------------
/src/flexalloc_ll.h:
--------------------------------------------------------------------------------
1 | //Copyright (C) 2021 Joel Granados
2 |
3 | #ifndef __FLEXALLOC_LL_H_
4 | #define __FLEXALLOC_LL_H_
5 | #include "flexalloc_mm.h"
6 |
7 | #define FLA_LINKED_LIST_NULL INT32_MAX
8 |
9 | /**
10 | * @brief Prepend a new slab header to a list that has only one head pointer
11 | *
12 | * The prepended slab header will always be first and head pointer
13 | * will always point to it.
14 | *
15 | * @param fs flexalloc system handle
16 | * @param slab slab header to prepend
17 | * @param head pointer to slab head list id
18 | * @return zero on success. non zero otherwise
19 | */
20 | int
21 | fla_hdll_prepend(struct flexalloc * fs, struct fla_slab_header * slab, uint32_t *head);
22 |
23 | /**
24 | * @brief Remove slab from a list that has only one head pointer
25 | *
26 | * @param fs flexalloc system handle
27 | * @param slab slab header to be removed
28 | * @param head pointer to slab head list id
29 |  * @return zero on success. non zero otherwise
30 | */
31 | int
32 | fla_hdll_remove(struct flexalloc * fs, struct fla_slab_header * slab, uint32_t * head);
33 |
34 | /**
35 | * @brief Remove all slabs from list starting at head
36 | *
37 | * @param fs flexalloc system handle
38 | * @param head pointer to first ID in list.
39 | * Gets modified to FLA_LINKED_LIST_NULL on success
40 | * @param execute_on_release Function to be executed once the slab is removed
41 | * @return zero on success. non zero otherwise
42 | */
43 | int
44 | fla_hdll_remove_all(struct flexalloc *fs, uint32_t *head,
45 | int (*execute_on_release)(struct flexalloc *fs, struct fla_slab_header*));
46 |
47 | /**
48 | * @brief Remove slab from a list that has head and tail pointers
49 | *
50 | * Remove slab from the head of the doubly linked list
51 | *
52 | * @param fs flexalloc system handle
53 | * @param head pointer to where the head ID is
54 | * @param tail pointer to where the tail ID is
55 |  * @param a_slab slab header where to put the slab that is being removed
56 | * @return zero on success. non zero otherwise
57 | */
58 | int
59 | fla_edll_remove_head(struct flexalloc * fs, uint32_t * head, uint32_t * tail,
60 | struct fla_slab_header ** a_slab);
61 |
62 | /**
63 |  * @brief Append slab to a list that has head and tail pointers
64 | *
65 | * Always adds to tail
66 | *
67 | * @param fs flexalloc system handle
68 | * @param head pointer to where the head ID is
69 | * @param tail pointer to where the tail ID is
70 | * @param a_slab slab header to return to list
71 | * @return zero on success. non zero otherwise
72 | */
73 | int
74 | fla_edll_add_tail(struct flexalloc *fs, uint32_t * head, uint32_t * tail,
75 | struct fla_slab_header * a_slab);
76 |
77 |
78 | #endif // __FLEXALLOC_LL_H_
79 |
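A minimal usage sketch of the list helpers declared above, in the spirit of the slab free-list handling in flexalloc_mm.c; it assumes a valid `struct flexalloc *fs`, and the helper name `recycle_slab` is made up for illustration:

#include "flexalloc_ll.h"
#include "flexalloc_shared.h"
#include "flexalloc_util.h"

// sketch: take one slab off a head+tail ("edll") list and prepend it
// to a head-only ("hdll") list
static int
recycle_slab(struct flexalloc *fs, uint32_t *free_head, uint32_t *free_tail,
             uint32_t *partial_head)
{
  struct fla_slab_header *slab;
  int err;

  if (*free_head == FLA_LINKED_LIST_NULL)
    return FLA_ERR_ALL_SLABS_USED; // nothing left to hand out

  err = fla_edll_remove_head(fs, free_head, free_tail, &slab);
  if (FLA_ERR(err, "fla_edll_remove_head()"))
    return err;

  return fla_hdll_prepend(fs, slab, partial_head);
}
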
--------------------------------------------------------------------------------
/src/flexalloc_mkfs.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Joel Granados
2 | // Copyright (C) 2021 Jesper Devantier
3 |
4 | #include <stdio.h>
5 | #include <stdlib.h>
6 | #include <stdint.h>
7 | #include <string.h>
8 | #include <getopt.h>
9 | #include <limits.h>
10 | #include <inttypes.h>
11 | #include <errno.h>
12 |
13 | #include "flexalloc_xnvme_env.h"
14 | #include "flexalloc_util.h"
15 | #include "flexalloc_mm.h"
16 | #include "flexalloc_cli_common.h"
17 |
18 | #define FLA_MKFS_SHOW_USAGE 100
19 |
20 | int fla_mkfs_parse_args(int, char**, struct fla_mkfs_p*);
21 |
22 | static struct cli_option options[] =
23 | {
24 | {
25 | .base = {"slba-nlb", required_argument, NULL, 's'},
26 | .description = "size of slab in logical blocks",
27 | .arg_ex = "NUM"
28 | },
29 | {
30 | .base = {"pools", required_argument, NULL, 'p'},
31 | .description = "number of pools",
32 | .arg_ex = "NUM"
33 | },
34 | {
35 | .base = {"help", no_argument, NULL, 'h'},
36 | .description = "display this help",
37 | .arg_ex = NULL
38 | },
39 | {
40 | .base = {"verbose", no_argument, NULL, 'v'},
41 | .description = "display additional information..!",
42 | .arg_ex = NULL
43 | },
44 | {
45 | .base = {"md_dev", optional_argument, NULL, 'm'},
46 | .description = "MD device to use\n",
47 | .arg_ex = "/path/to/md_device"
48 | },
49 | {
50 | .base = {NULL, 0, NULL, 0}
51 | }
52 | };
53 |
54 | void
55 | fla_mkfs_help()
56 | {
57 | fprintf(stdout, "Usage: mkfs.flexalloc [options] device\n\n");
58 | fprintf(stdout, "Initialize device for use with flexalloc\n\n");
59 | print_options(options);
60 | }
61 |
62 | int
63 | fla_mkfs_parse_args(int argc, char ** argv, struct fla_mkfs_p * p)
64 | {
65 | int err = 0;
66 | int c;
67 | int opt_idx = 0;
68 | // getopt_long requires an actual, contiguous array of `struct option` entries
69 | int n_opts = sizeof(options)/sizeof(struct cli_option);
70 | struct option long_options[n_opts];
71 | char *arg_end;
72 | long arg_long;
73 |
74 | for (int i=0; i < n_opts; i++)
75 | {
76 | memcpy(long_options + i, &options[i].base, sizeof(struct option));
77 | }
78 |
79 | while ((c = getopt_long(argc, argv, "s:p:m:hv", long_options, &opt_idx)) != -1)
80 | {
81 | switch (c)
82 | {
83 | case 'v':
84 | p->verbose = 1;
85 | break;
86 | case 'h':
87 | fla_mkfs_help();
88 | return FLA_MKFS_SHOW_USAGE;
89 | case 's':
90 | arg_long = strtol(optarg, &arg_end, 0);
91 | if ((arg_end == optarg) || (arg_long > INT_MAX) || (arg_long <= 0))
92 |
93 | {
94 | fprintf(stderr, "slab-nlb: invalid argument, '%s'\n", optarg);
95 | err |= -1;
96 | }
97 | p->slab_nlb = (int)arg_long;
98 | break;
99 | case 'p':
100 | arg_long = strtol(optarg, &arg_end, 0);
101 | if ((arg_end == optarg) || (arg_long > INT_MAX) || (arg_long <= 0))
102 | {
103 | fprintf(stderr, "pools: invalid argument, '%s'\n", optarg);
104 | err = -1;
105 | }
106 | p->npools = (int)arg_long;
107 | break;
108 | case 'm':
109 | p->open_opts.md_dev_uri = optarg;
110 | break;
111 | default:
112 | break;
113 | }
114 | }
115 |
116 | // expect 1 positional argument - the device to format
117 | if (optind == argc)
118 | {
119 | fprintf(stderr, "mkfs.flexalloc: not enough arguments\n");
120 | err = -1;
121 | goto exit;
122 | }
123 | p->open_opts.dev_uri = argv[optind++];
124 |
125 | // TODO: ensure dev uri exists
126 |
127 | // further positional arguments must be a mistake, print them and exit
128 | if(optind < argc)
129 | {
130 | err = -1;
131 | fprintf(stderr, "One or more unrecognized arguments:\n");
132 | for(int i = optind ; i < argc; ++i)
133 | {
134 | fprintf(stderr, " * %s\n", argv[i]);
135 | }
136 | fprintf(stderr, "\n");
137 | goto exit;
138 | }
139 |
140 | // Exit now if any invalid arguments were passed.
141 | // The user will receive one or more error messages indicating what to change.
142 | if (err)
143 | goto exit;
144 |
145 | // ensure we got told how large to make the slabs
146 | if (!p->slab_nlb)
147 | {
148 | fprintf(stderr,
149 | "slab-nlb is 0 and this is invalid, either you gave a non-integer value or forgot to set it.\n");
150 | err = -1;
151 | goto exit;
152 | }
153 |
154 | exit:
155 | return err;
156 | }
157 |
158 | int
159 | main(int argc, char ** argv)
160 | {
161 | int err = 0;
162 |
163 | struct fla_mkfs_p mkfs_params =
164 | {
165 | .open_opts.dev_uri = NULL,
166 | .open_opts.md_dev_uri = NULL,
167 | .slab_nlb = 0,
168 | .verbose = 0,
169 | };
170 | err = fla_mkfs_parse_args(argc, argv, &mkfs_params);
171 | if (err)
172 | {
173 | if (err == FLA_MKFS_SHOW_USAGE)
174 | err = 0;
175 | else
176 | fprintf(stderr, "Try 'mkfs.flexalloc --help' for more information.\n");
177 |
178 | goto exit;
179 | }
180 |
181 | fprintf(stderr, "Opts:\n");
182 | fprintf(stderr, " dev_uri: %s\n", mkfs_params.open_opts.dev_uri);
183 | fprintf(stderr, " slab_nlb: %"PRIu32"\n", mkfs_params.slab_nlb);
184 | fprintf(stderr, " verbose: %"PRIu8"\n", mkfs_params.verbose);
185 | if (mkfs_params.open_opts.md_dev_uri)
186 | fprintf(stderr, " md_dev_uri: %s\n", mkfs_params.open_opts.md_dev_uri);
187 |
188 | err = fla_mkfs(&mkfs_params);
189 |
190 | exit:
191 | exit(err);
192 | }
193 |
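Given the option table and usage string above, a typical invocation would look something like `mkfs.flexalloc -s 4000 -p 2 /dev/nvme0n1` (slab size in logical blocks, number of pools, and the device as the single positional argument), with `-m /path/to/md_device` optionally placing the flexalloc metadata on a separate device; the device paths here are placeholders.
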
--------------------------------------------------------------------------------
/src/flexalloc_mm.h:
--------------------------------------------------------------------------------
1 | /**
2 | * flexalloc disk structures.
3 | *
4 | * Copyright (C) 2021 Jesper Devantier
5 | * Copyright (C) 2021 Joel Granados
6 | * Copyright (C) 2021 Adam Manzanares
7 | *
8 | * @file flexalloc_mm.h
9 | */
10 | #ifndef __FLEXALLOC_MM_H_
11 | #define __FLEXALLOC_MM_H_
12 | #include <stdint.h>
13 | #include "flexalloc.h"
14 | #include "flexalloc_pool.h"
15 |
16 | #define FLA_MAGIC 0x00534621 // 'flexalloc'
17 | #define FLA_FMT_VER 1
18 | #define FLA_SUPER_SLBA 0UL
19 |
20 | #define FLA_STATE_OPEN 1U
21 |
22 | #define FLA_NAME_SIZE 128
23 |
24 | #define FLA_ROOT_OBJ_NONE UINT64_MAX
25 |
26 | #define FLA_MDTS_MIN_NBYTES 512
27 |
28 | /// mkfs file system initialization parameters
29 | struct fla_mkfs_p
30 | {
31 | /// has dev URI and md dev URI.
32 | struct fla_open_opts open_opts;
33 | /// size of each slab, in LBA's
34 | uint32_t slab_nlb;
35 | /// number of pools to support
36 | uint32_t npools;
37 | /// whether to be verbose during initialization
38 | uint8_t verbose;
39 | };
40 |
41 | /// an item representing a particular object in the freelist
42 | struct fla_obj_list_item
43 | {
44 | // both entries should be NULL if not in freelist.
45 | struct fla_obj_freelist_entry *prev, *next;
46 | };
47 |
48 | /// per-object metadata
49 | /// (presently only the file name)
50 | struct fla_obj_meta
51 | {
52 | /// human-readable name for object, use object ID otherwise
53 | char name[FLA_NAME_SIZE];
54 | };
55 |
56 | /// Describes the layout and state of the slab itself.
57 | struct fla_slab_header
58 | {
59 | /// backpointer to parent pool
60 | uint64_t pool;
61 | uint32_t prev;
62 | uint32_t next;
63 |
64 | /// number of objects allocated from slab
65 | uint32_t refcount; // TODO: should have a var in cache structure describing n_entries/slab
66 | };
67 |
68 |
69 | /// flexalloc super block structure
70 | ///
71 | /// The super block structure contains general information of the overall flexalloc
72 | /// system and provides the LBA addresses for the cache and data segments.
73 | struct fla_super
74 | {
75 |   /// magic identifier - used to determine if the device is formatted with flexalloc
76 | uint64_t magic;
77 |
78 | /// Number of slab entries
79 | uint32_t nslabs;
80 | /// slab size, in LBA's
81 | uint32_t slab_nlb;
82 |
83 | /// Number of pool entries
84 | uint32_t npools;
85 |
86 | /// Blocks reserved for the super
87 | uint32_t md_nlb;
88 |
89 | /// flexalloc disk format version - permits backward compatibility
90 | uint8_t fmt_version;
91 | };
92 |
93 | /// calculate disk offset, in logical blocks, of the start of the slab identified by slab_id
94 | /**
95 | * Calculate disk offset, in logical blocks, of the slab with id slab_id
96 | *
97 |  * @param fs flexalloc system handle
98 | * @param slab_id slab ID, a number from 0..N
99 | *
100 | * @return The logical block offset of the slab.
101 | */
102 | uint64_t
103 | fla_geo_slab_lb_off(struct flexalloc const *fs, uint32_t slab_id);
104 |
105 | uint64_t
106 | fla_geo_slab_sgmt_lb_off(struct fla_geo const *geo);
107 |
108 | uint64_t
109 | fla_geo_slabs_lb_off(struct fla_geo const *geo);
110 |
111 | /**
112 | * Create new flexalloc system on disk
113 | * @param p parameters supplied (and inferred) from mkfs program
114 | *
115 | * @return On success 0.
116 | */
117 | int
118 | fla_mkfs(struct fla_mkfs_p *p);
119 |
120 | /**
121 | * Flush flexalloc metadata to disk.
122 | *
123 |  * Flush writes flexalloc metadata to disk, persisting any changes affecting the pools
124 |  * and slabs themselves.
125 | * NOTE: sync is NOT necessary to persist object writes.
126 | *
127 | * @return On success 0.
128 | */
129 | int
130 | fla_flush(struct flexalloc *fs);
131 |
132 | /**
133 | * Close flexalloc system *without* writing changes to disk.
134 | *
135 | * Closes the flexalloc system without flushing changes to the metadata to the disk.
136 | * This is typically intended for read-only parsing of the flexalloc system state
137 | * and as an escape-hatch in case there is no time to wait for the write to succeed.
138 | *
139 |  * NOTE: you will, by definition, lose changes if the metadata has changed since the
140 | * last flush to disk.
141 | *
142 | * @param fs flexalloc system handle
143 | */
144 | void
145 | fla_close_noflush(struct flexalloc *fs);
146 |
147 |
148 | /**
149 | * @brief Acquire the next free slab
150 | *
151 | * The next free slab gets assigned to slab_header and it is removed from the
152 | * free slab list. It is removed from the head of the free slab list.
153 | *
154 | * @param fs flexalloc system handle
155 | * @param obj_nlb size of object in logical blocks
156 | * @param slab_header Pointer that will get set to the next available slab
157 | * @return 0 on success. not zero otherwise.
158 | */
159 | int
160 | fla_acquire_slab(struct flexalloc *fs, const uint32_t obj_nlb,
161 | struct fla_slab_header ** slab_header);
162 |
163 | /**
164 | * @brief Add to the free slabs list
165 | *
166 | * It is appended to the tail of the free slab list.
167 | *
168 | * @param fs flexalloc system handle
169 | * @param slab_header pointer to slab that is to be released
170 | * @return zero on success. not zero otherwise.
171 | */
172 | int
173 | fla_release_slab(struct flexalloc *fs, struct fla_slab_header * slab_header);
174 |
175 | /**
176 | * @brief Slab header pointer from slab ID
177 | *
178 | * Use the slab ID as an offset to find the slab header pointer.
179 | *
180 | * @param s_id Slab ID to search
181 | * @param fs flexalloc system handle
182 | * @return slab header pointer corresponding to the s_id
183 | */
184 | struct fla_slab_header *
185 | fla_slab_header_ptr(const uint32_t s_id, const struct flexalloc * fs);
186 |
187 | /**
188 |  * @brief Calculate slab id from slab header pointer
189 | *
190 | * @param slab Slab header pointer
191 | * @param fs flexalloc system handle
192 | * @param slab_id Where the resulting id is placed
193 | * @return zero on success. non zero otherwise.
194 | */
195 | int
196 | fla_slab_id(const struct fla_slab_header * slab, const struct flexalloc * fs,
197 | uint32_t * slab_id);
198 |
199 | /**
200 | * @brief Initializes all in memory variables related to a slab header
201 | *
202 | * @param fs flexalloc system handle
203 | * @param slab slab header to initialize
204 | * @param obj_nlb size of the slab objects in logical blocks
205 | * @return zero on success. non zero otherwise.
206 | */
207 | int
208 | fla_format_slab(struct flexalloc *fs, struct fla_slab_header * slab, uint32_t obj_nlb);
209 |
210 | uint64_t
211 | fla_object_slba(struct flexalloc const * fs, struct fla_object const * obj,
212 | const struct fla_pool * pool_handle);
213 |
214 | /**
215 | * @brief Opens a flexalloc device
216 | *
217 | * @param dev_uri path of the device to open
218 | * @param fs (uninitialized) flexalloc system handle
219 | * @return on success, 0 with *fs being initialized, non-zero otherwise.
220 | */
221 | int
222 | fla_open_common(char const *dev_uri, struct flexalloc *fs);
223 |
224 | #endif // __FLEXALLOC_MM_H_
225 |
226 |
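As a rough illustration of the mkfs entry point declared above (mirroring what src/flexalloc_mkfs.c ends up doing), formatting a device programmatically could look like the sketch below; the device path and sizes are placeholders:

#include "flexalloc_mm.h"
#include "flexalloc_util.h"

// sketch: format a device with 4000-LBA slabs and room for 2 pools
static int
format_example(void)
{
  struct fla_mkfs_p p =
  {
    .open_opts = {.dev_uri = "/dev/nvme0n1", .md_dev_uri = NULL},
    .slab_nlb = 4000,
    .npools = 2,
    .verbose = 1,
  };

  int err = fla_mkfs(&p);
  return FLA_ERR(err, "fla_mkfs()");
}
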
--------------------------------------------------------------------------------
/src/flexalloc_pool.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (C) 2021 Joel Granados
3 | */
4 |
5 | #ifndef __FLEXALLOC_POOL_H_
6 | #define __FLEXALLOC_POOL_H_
7 |
8 | #include <stdint.h>
9 | #include "flexalloc_shared.h"
10 |
11 | #define FLA_NAME_SIZE_POOL 112
12 |
13 | struct fla_geo_pool_sgmt
14 | {
15 | /// number of logical blocks for the freelist
16 | uint32_t freelist_nlb;
17 | /// number of logical blocks to contain the hash table
18 | uint32_t htbl_nlb;
19 | /// number of slots in the hash table
20 | uint32_t htbl_tbl_size;
21 | /// number of logical blocks to contain the pool entries
22 | uint32_t entries_nlb;
23 | };
24 |
25 | struct fla_pool_htbl_header
26 | {
27 | /// number of slots in the hash table
28 | uint32_t size;
29 | /// number of elements presently inserted
30 | uint32_t len;
31 | };
32 |
33 | struct fla_pool_strp
34 | {
35 | /// Num of objects to stripe across
36 | ///
37 | /// Pools may optionally hand out striped objects
38 | uint32_t strp_nobjs;
39 |
40 | /// Number of bytes of each stripe chunk
41 | ///
42 | /// When striped, each fla object is subdivided into stripe
43 |   /// subsections, or chunks.
44 |   /// Must be set if strp_nobjs is set
45 | uint32_t strp_nbytes;
46 | };
47 |
48 | struct fla_pool_entry
49 | {
50 |   /// head IDs of the slab lists, grouped by "fullness"
51 | uint32_t empty_slabs;
52 | uint32_t full_slabs;
53 | uint32_t partial_slabs;
54 |
55 | /// Number of LBA's used for each object in the cache
56 | ///
57 | /// Note that all objects in a cache have the same size.
58 | /// Also note that this is rounded up to fit alignment requirements.
59 | uint32_t obj_nlb;
60 |
61 | /// Number of Objects that fit in each slab
62 | uint32_t slab_nobj;
63 |
64 | /// Root object that is optionally set
65 | ///
66 | /// Pools can have any valid flexalloc object set as a root object
67 | uint64_t root_obj_hndl;
68 |
69 | uint64_t flags;
70 | uint64_t usable;
71 |
72 | /// Human-readable cache identifier
73 | ///
74 | /// The cache name is primarily used for debugging statistics
75 | /// and when creating the read-only file system view.
76 | // TODO get rid of 512B block assumption
77 | char name[FLA_NAME_SIZE_POOL]; // maximize use of 512B block while having entries aligned by 8B
78 |
79 | //TODO : add a struct nested inside fla_pool_entry which holds just the metadata
80 | // (everything but the name and name length). This will allow us to ignore the
81 | // name when we need to.
82 | };
83 |
84 | struct fla_pool_entry_fnc
85 | {
86 | uint64_t (*get_slab_elba)(struct fla_pool_entry const * pool_entry,
87 | uint32_t const obj_ndx);
88 |
89 | int (*fla_pool_entry_reset)(struct fla_pool_entry *pool_entry,
90 | struct fla_pool_create_arg const *arg,
91 | uint32_t const slab_nobj);
92 | uint32_t (*fla_pool_num_fla_objs)(struct fla_pool_entry const * pool_entry);
93 | };
94 |
95 | void
96 | fla_geo_pool_sgmt_calc(uint32_t npools, uint32_t lb_nbytes,
97 | struct fla_geo_pool_sgmt *geo);
98 |
99 | struct fla_geo;
100 |
101 | void
102 | fla_mkfs_pool_sgmt_init(struct flexalloc *fs, struct fla_geo *geo);
103 |
104 | int
105 | fla_pool_init(struct flexalloc *fs, struct fla_geo *geo, uint8_t *pool_sgmt_base);
106 |
107 | void
108 | fla_print_pool_entries(struct flexalloc *fs);
109 |
110 | int
111 | fla_base_pool_open(struct flexalloc *fs, const char *name, struct fla_pool **handle);
112 |
113 | int
114 | fla_pool_release_all_slabs(struct flexalloc *fs, struct fla_pool_entry * pool_entry);
115 |
116 | int
117 | fla_base_pool_create(struct flexalloc *fs, struct fla_pool_create_arg const *,
118 | struct fla_pool **handle);
119 |
120 | int
121 | fla_base_pool_destroy(struct flexalloc *fs, struct fla_pool * handle);
122 |
123 | int
124 | fla_base_pool_set_root_object(struct flexalloc const * const fs,
125 | struct fla_pool const * pool_handle,
126 | struct fla_object const *obj, fla_root_object_set_action act);
127 |
128 | int
129 | fla_base_pool_get_root_object(struct flexalloc const * const fs,
130 | struct fla_pool const * pool_handle,
131 | struct fla_object *obj);
132 | #endif // __FLEXALLOC_POOL_H_
133 |
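The strp_nobjs/strp_nbytes pair in fla_pool_strp above describes a RAID-0 style layout in which consecutive strp_nbytes chunks rotate across strp_nobjs backing objects. Purely as an illustration of that layout (the real mapping is implemented by the striped data path, not by this header), the conventional chunk arithmetic looks like this:

#include <stdint.h>

// sketch of conventional striping arithmetic: locate the backing object
// and intra-object offset for a byte offset into the striped object
struct strp_loc
{
  uint32_t obj_ndx;        // which backing object the byte lands in
  uint64_t obj_off_nbytes; // byte offset within that backing object
};

static struct strp_loc
strp_locate(uint64_t offset, uint32_t strp_nobjs, uint32_t strp_nbytes)
{
  uint64_t chunk = offset / strp_nbytes;
  struct strp_loc loc =
  {
    .obj_ndx = chunk % strp_nobjs,
    .obj_off_nbytes = (chunk / strp_nobjs) * strp_nbytes + offset % strp_nbytes,
  };
  return loc;
}
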
--------------------------------------------------------------------------------
/src/flexalloc_shared.h:
--------------------------------------------------------------------------------
1 | #ifndef FLEXALLOC_SHARED_H_
2 | #define FLEXALLOC_SHARED_H_
3 | #include <stdint.h>
4 | #include <stdbool.h>
5 |
6 | #ifdef __cplusplus
7 | extern "C" {
8 | #endif
9 |
10 | #define FLA_ERR_ERROR 1001
11 | #define FLA_ERR_ALL_SLABS_USED 2001
12 |
13 | struct flexalloc;
14 |
15 | struct fla_pool;
16 | struct flexalloc;
17 |
18 | /// flexalloc open options
19 | ///
20 | /// Minimally the dev_uri needs to be set
21 | /// If the md_dev is set then flexalloc md will be stored on this device
22 | /// The xnvme open options are optionally set at open time as well
23 | struct fla_open_opts
24 | {
25 | char const * dev_uri;
26 | char const *md_dev_uri;
27 | struct xnvme_opts *opts;
28 | };
29 |
30 | /// flexalloc object handle
31 | ///
32 | /// The object handle is created from the slab id and the index of the object entry
33 | /// within the slab.
34 | struct fla_object
35 | {
36 | /// ID of the parent slab (0..nslab-1)
37 | uint32_t slab_id;
38 | /// offset of object within the slab
39 | uint32_t entry_ndx;
40 | };
41 |
42 | struct fla_pool_create_arg
43 | {
44 | uint64_t flags;
45 | char *name;
46 | int name_len;
47 | uint32_t obj_nlb;
48 | uint32_t strp_nobjs;
49 | uint32_t strp_nbytes;
50 | };
51 |
52 | struct fla_pool
53 | {
54 | /// h2 hash of entry.
55 | /// Can be used to check that acquired pool entry's name matches the name of
56 | /// the pool when the handle was made. This may prevent following stale
57 | /// handles to repurposed pool entries.
58 | uint64_t h2;
59 | /// offset of entry in the pool entries table
60 | uint32_t ndx;
61 | };
62 |
63 | typedef enum
64 | {
65 | ROOT_OBJ_SET_DEF = 0,
66 | ROOT_OBJ_SET_FORCE = 1 << 0,
67 | ROOT_OBJ_SET_CLEAR = 1 << 1
68 | } fla_root_object_set_action;
69 |
70 | /**
71 | * @brief Return the number of bytes in a block in fs
72 | *
73 |  * @param fs flexalloc system handle
74 | * @return number of bytes in a block
75 | */
76 | int32_t
77 | fla_fs_lb_nbytes(struct flexalloc const * const fs);
78 |
79 | struct fla_fns
80 | {
81 | int (*close)(struct flexalloc *fs);
82 | int (*sync)(struct flexalloc *fs);
83 | int (*pool_open)(struct flexalloc *fs, const char *name, struct fla_pool **pool);
84 | void (*pool_close)(struct flexalloc *fs, struct fla_pool *pool);
85 | int (*pool_create)(struct flexalloc *fs, struct fla_pool_create_arg const*,
86 | struct fla_pool **pool);
87 | int (*pool_destroy)(struct flexalloc *fs, struct fla_pool *pool);
88 | int (*object_open)(struct flexalloc *fs, struct fla_pool *pool, struct fla_object *object);
89 | int (*object_create)(struct flexalloc *fs, struct fla_pool *pool, struct fla_object *object);
90 | int (*object_destroy)(struct flexalloc *fs, struct fla_pool *pool, struct fla_object *object);
91 | int (*pool_set_root_object)(struct flexalloc const * const fs, struct fla_pool const * pool,
92 | struct fla_object const *object, fla_root_object_set_action act);
93 | int (*pool_get_root_object)(struct flexalloc const * const fs, struct fla_pool const * pool,
94 | struct fla_object *object);
95 | int (*fla_action)();
96 | };
97 |
98 | struct flexalloc *
99 | fla_fs_alloc();
100 |
101 | void
102 | fla_fs_free(struct flexalloc *fs);
103 |
104 | int
105 | fla_fs_set_user(void *user_data);
106 |
107 | void *
108 | fla_fs_get_user();
109 |
110 | enum fla_pool_entry_flags
111 | {
112 | FLA_POOL_ENTRY_STRP
113 | };
114 |
115 | #ifdef __cplusplus
116 | }
117 | #endif
118 |
119 | #endif // FLEXALLOC_SHARED_H_
120 |
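Following the fla_open_opts comment above, and the way tests/flexalloc_rt_multi_pool_read_write.c opens a system, a minimal open could look like the sketch below; the device paths are placeholders and the xnvme options are left at their defaults:

#include "flexalloc_shared.h"
#include "libflexalloc.h"

// sketch: open a flexalloc system, optionally with metadata on a second device
static int
open_example(struct flexalloc **fs)
{
  struct fla_open_opts opts =
  {
    .dev_uri = "/dev/nvme0n1",
    .md_dev_uri = NULL, // or e.g. "/dev/nvme1n1" to keep md on a separate device
    .opts = NULL,       // optional struct xnvme_opts
  };
  return fla_open(&opts, fs);
}
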
--------------------------------------------------------------------------------
/src/flexalloc_test_client.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Jesper Devantier
2 | #include "flexalloc_daemon_base.h"
3 | #include "flexalloc_util.h"
4 | #include "libflexalloc.h"
5 | #include <stdio.h>
6 | #include <string.h>
7 | #include <inttypes.h>
8 |
9 | #define SOCKET_PATH "/tmp/flexalloc.socket"
10 |
11 |
12 | int
13 | main(int argc, char **argv)
14 | {
15 | int err = 0;
16 | struct fla_daemon_client client;
17 | struct fla_pool *pool;
18 |
19 | memset(&client, 0, sizeof(struct fla_daemon_client));
20 |
21 | if (FLA_ERR((err = fla_daemon_open(SOCKET_PATH, &client)), "fla_daemon_open()"))
22 | return -1;
23 |
24 | // TODO: create variant program / option to open existing pool, check if it works
25 | struct fla_pool_create_arg pool_args = { .flags = 0, .name = "hello", .name_len = 6, .obj_nlb = 10 };
26 |
27 | err = fla_pool_create(client.flexalloc, &pool_args, &pool);
28 | if (FLA_ERR(err, "fla_pool_create()"))
29 | {
30 | return -1;
31 | }
32 | fprintf(stderr, "pool{h2: %"PRIu64", ndx: %"PRIu32"}\n", pool->h2, pool->ndx);
33 |
34 | err = fla_pool_destroy(client.flexalloc, pool);
35 | if (FLA_ERR(err, "fla_pool_destroy()"))
36 | return -1;
37 |
38 | pool_args.name_len = 5;
39 | err = fla_pool_create(client.flexalloc, &pool_args, &pool);
40 | if (FLA_ERR(err, "fla_pool_create() 2"))
41 | return -1;
42 | fprintf(stderr, "pool{h2: %"PRIu64", ndx: %"PRIu32"}\n", pool->h2, pool->ndx);
43 |
44 | fla_pool_close(client.flexalloc, pool);
45 |
46 | // TODO: TEST OPERATION
47 | //err = fla_pool_open(client.flexalloc, pool);
48 |
49 | getchar();
50 | err = fla_close(client.flexalloc);
51 | if (FLA_ERR(err, "fla_close()"))
52 | err = 1;
53 |
54 | return err;
55 | }
56 |
--------------------------------------------------------------------------------
/src/flexalloc_util.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Jesper Devantier
2 | #include "flexalloc_util.h"
3 | #include <stdlib.h>
4 |
5 | size_t
6 | fla_strnlen(char *s, size_t maxlen)
7 | {
8 | // otherwise provided by POSIX
9 | for (size_t off = 0; off < maxlen; off++, s++)
10 | if (*s == '\0')
11 | return off;
12 | return maxlen;
13 | }
14 |
15 | char *
16 | fla_strndup(char const *s, size_t const len)
17 | {
18 | char *new_s = malloc(len + 1);
19 | if (new_s == NULL)
20 | {
21 | return NULL;
22 | }
23 |
24 | memcpy(new_s, s, len);
25 | new_s[len] = '\0';
26 |
27 | return new_s;
28 | }
29 |
30 | char *
31 | fla_strdup(char const *s)
32 | {
33 | size_t len = strlen(s);
34 | return fla_strndup(s, len);
35 | }
36 |
37 | uint32_t
38 | fla_nelems_max(uint64_t units_total, uint32_t elem_sz_nunit,
39 | uint32_t (*calc_md_size_nunits)(uint32_t nelems, va_list), ...)
40 | {
41 | uint32_t nelems = units_total / elem_sz_nunit;
42 | uint32_t md_nunits;
43 | va_list ap;
44 |
45 | while(nelems)
46 | {
47 | va_start(ap, calc_md_size_nunits);
48 | md_nunits = calc_md_size_nunits(nelems, ap);
49 | va_end(ap);
50 | if(units_total - (nelems * elem_sz_nunit) >= md_nunits)
51 | break;
52 | nelems--;
53 | }
54 | return nelems;
55 | }
56 |
57 |
58 |
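fla_nelems_max() above searches downward from the naive maximum until both the element data and the metadata reported by the callback fit within units_total. A small illustration with a made-up metadata sizing callback (real callers supply their own):

#include <stdarg.h>
#include <stdint.h>
#include "flexalloc_util.h"

// hypothetical metadata cost: one unit per 64 elements, rounded up
static uint32_t
example_md_nunits(uint32_t nelems, va_list ap)
{
  (void)ap; // any extra arguments passed through fla_nelems_max are unused here
  return FLA_CEIL_DIV(nelems, 64);
}

// with 1000 units and 4-unit elements this returns 249: 996 units of element
// data plus 4 units of metadata exactly fill the budget, whereas the naive
// maximum of 250 would leave no room for metadata
static uint32_t
example(void)
{
  return fla_nelems_max(1000, 4, example_md_nunits);
}
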
--------------------------------------------------------------------------------
/src/flexalloc_util.h:
--------------------------------------------------------------------------------
1 | /**
2 | * Assorted utility functions and error/debug macros
3 | *
4 | * Copyright (C) 2021 Joel Granados
5 | * Copyright (C) 2021 Jesper Devantier
6 | *
7 | * @file flexalloc_util.h
8 | */
9 | #ifndef __FLEXALLOC_UTIL_H
10 | #define __FLEXALLOC_UTIL_H
11 |
12 | #include <stdint.h>
13 | #include <stdio.h>
14 | #include <stdarg.h>
15 | #include <string.h>
16 | #include <errno.h>
17 |
18 | #define FLA_ERR_MIN_NOPRINT 2001
19 | #define FLA_ERR_PRX fprintf(stderr, "flexalloc ERR ");
20 |
21 | #define FLA_ERR_PRINT(s) \
22 | FLA_ERR_PRX; \
23 | fprintf(stderr, s); \
24 | fflush(stderr);
25 |
26 | #define FLA_ERR_PRINTF(f, ...) \
27 | FLA_ERR_PRX; \
28 | fprintf(stderr, f, __VA_ARGS__); \
29 | fflush(stderr);
30 |
31 | #define FLA_ERRNO_VAL(e) e == 0 ? 0 : e > 0 ? -e : -1;
32 |
33 | /**
34 | * @brief Activates error handling if condition is non zero.
35 | *
36 | * @param condition zero means no error
37 |  * @param f The file name where the error occurred
38 | * @param l The line where the error occurred
39 | * @param fmt The message to use in case there is an error
40 |  * @param ... variadic args for fmt. Can be empty.
41 | * @return condition is returned.
42 | */
43 | static inline int
44 | fla_err_fl(const int condition, const char * f, const int l, const char * fmt, ...)
45 | {
46 | if(condition && condition < FLA_ERR_MIN_NOPRINT)
47 | {
48 | FLA_ERR_PRINTF(" %s(%d) ", f, l);
49 | va_list arglist;
50 | va_start(arglist, fmt);
51 | vfprintf(stderr, fmt, arglist);
52 | va_end(arglist);
53 | fprintf(stderr, "\n");
54 | }
55 | return condition;
56 | }
57 | #define FLA_ERR(condition, ...) fla_err_fl(condition, __FILE__, __LINE__, __VA_ARGS__)
58 |
59 | /**
60 | * @brief Prints errno message if present.
61 | *
62 | * Activates error handling if condition is non zero. Will append errno string
63 |  * to message if errno is set. Ignores errno if it is zero. We do not use errno to
64 | * detect an error, only to describe it once it has been detected.
65 | *
66 | * @param condition zero means no error
67 | * @param message The message to use in case there is an error
68 |  * @param f The file name where the error occurred
69 | * @param l The line where the error occurred
70 | * @return condition is returned when errno is 0, otherwise errno is returned
71 | */
72 | static inline int
73 | fla_err_errno_fl(const int condition, const char * message, const char * f,
74 | const int l)
75 | {
76 | // capture errno to avoid changing it when executing condition
77 | int __errno = FLA_ERRNO_VAL(errno);
78 | if(condition)
79 | {
80 | if(__errno)
81 | {
82 | FLA_ERR_PRINTF(" %s(%d) %s errno:%s\n", f, l, message, strerror(-__errno));
83 | return __errno;
84 | }
85 | else
86 | {
87 | FLA_ERR_PRINTF(" %s(%d) %s\n", f, l, message);
88 | }
89 | }
90 | return condition;
91 | }
92 | #define FLA_ERR_ERRNO(condition, message) fla_err_errno_fl(condition, message, __FILE__, __LINE__)
93 |
94 | /**
95 | * Miscellaneous stuff
96 | */
97 | #define fla_min(a, b) \
98 | ({ __typeof__ (a) _a = (a); \
99 | __typeof__ (b) _b = (b); \
100 | _a < _b ? _a : _b; })
101 |
102 | #if FLA_VERBOSITY > 0
103 | #define FLA_VBS_PRINTF(f, ...) fprintf(stderr, f, __VA_ARGS__);
104 | #else
105 | #define FLA_VBS_PRINTF(f, ...);
106 | #endif
107 |
108 | /**
109 | * Debugging messages
110 | */
111 | #ifdef DEBUG
112 | #define FLA_DBG_PRX fprintf(stderr, "flexalloc DBG ");
113 |
114 | #define FLA_DBG_PRINT(s) \
115 | FLA_DBG_PRX; \
116 | fprintf(stderr, s); \
117 | fflush(stderr);
118 |
119 | #define FLA_DBG_PRINTF(f, ...) \
120 | FLA_DBG_PRX; \
121 | fprintf(stderr, f, __VA_ARGS__); \
122 | fflush(stderr);
123 |
124 | #define FLA_DBG_EXEC(statement) statement;
125 | #else
126 | #define FLA_DBG_PRX() ;
127 | #define FLA_DBG_PRINT(s) ;
128 | #define FLA_DBG_PRINTF(f, ...) ;
129 | #define FLA_DBG_EXEC(statement) ;
130 | #endif
131 |
132 | /* Ceiling division provided:
133 | * - x and y are unsigned integers
134 | * - x is non-zero
135 | */
136 | #define FLA_CEIL_DIV(x,y) (((x)+(y)-1)/(y))
137 |
138 | /**
139 | * determine length of a fixed-size string.
140 | *
141 | * Returns the length of the string, defined as the number of leading
142 | * characters *before* the terminating null byte.
143 | * If the string has no null-terminating byte within the first maxlen bytes
144 |  * fla_strnlen() returns maxlen.
145 | *
146 | * @param s the string
147 | * @param maxlen maximum length of string
148 | * @return On success, length of string. On error, maxlen, meaning the string
149 | * is not null-terminated within the first maxlen bytes.
150 | */
151 | size_t
152 | fla_strnlen(char *s, size_t maxlen);
153 |
154 | /**
155 | * Create a duplicate of the provided string.
156 | *
157 | * Returns a pointer to a new string which is a duplicate of the provided string.
158 | * Memory is allocated by `malloc` and should be freed after use.
159 | */
160 | char *
161 | fla_strdup(char const *s);
162 |
163 | char *
164 | fla_strndup(char const *s, size_t const len);
165 |
166 | uint32_t
167 | fla_nelems_max(uint64_t units_total, uint32_t elem_sz_nunit,
168 | uint32_t (*calc_md_size_nunits)(uint32_t nelems, va_list), ...);
169 | #endif /* __FLEXALLOC_UTIL_H */
170 |
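The FLA_ERR and FLA_ERR_ERRNO macros above are built for the err/goto cleanup pattern used throughout the sources and tests; a condensed, self-contained sketch of that pattern:

#include <stdio.h>
#include <stdlib.h>
#include "flexalloc_util.h"

static int
example(const char *path)
{
  int err = 0;
  char *line = NULL;

  // FLA_ERR_ERRNO appends strerror(errno) to the message when errno is set
  FILE *fp = fopen(path, "r");
  if ((err = FLA_ERR_ERRNO(!fp, "fopen()")))
    goto exit;

  // FLA_ERR prints file/line plus the message and returns the condition
  line = malloc(128);
  if ((err = FLA_ERR(!line, "malloc()")))
    goto close_fp;

  free(line);

close_fp:
  fclose(fp);
exit:
  return err;
}
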
--------------------------------------------------------------------------------
/src/flexalloc_xnvme_env.h:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Joel Granados
2 | // Copyright (C) 2021 Jesper Devantier
3 | // Copyright (C) 2021 Adam Manzanares
4 |
5 | #ifndef __XNVME_ENV_H
6 | #define __XNVME_ENV_H
7 |
8 | #include
9 | #include
10 | #include
11 | #include
12 | #include
13 |
14 | struct fla_strp_params
15 | {
16 | /// Num of objects to stripe across
17 | uint32_t strp_nobjs;
18 |
19 | /// Number of bytes of each stripe chunk
20 | uint32_t strp_chunk_nbytes;
21 |
22 | /// Number of lbs in a non-striped object
23 | uint64_t faobj_nlbs;
24 |
25 | /// Start offset within object (slba == 0)
26 | uint64_t xfer_snbytes;
27 |
28 | /// Number of bytes to transfer
29 | uint64_t xfer_nbytes;
30 |
31 | /// Total bytes in a striped object
32 | uint64_t strp_obj_tnbytes;
33 |
34 | /// striped object offset within device
35 | uint64_t strp_obj_start_nbytes;
36 |
37 | /// the device logical block size
38 | uint32_t dev_lba_nbytes;
39 |
40 |   /// Whether to write or not
41 | bool write;
42 | };
43 |
44 | enum fla_xne_io_type
45 | {
46 | FLA_IO_DATA_WRITE = 0,
47 | FLA_IO_MD_WRITE,
48 | FLA_IO_MD_READ,
49 | FLA_IO_DATA_READ,
50 | };
51 |
52 | struct fla_xne_io
53 | {
54 | enum fla_xne_io_type io_type;
55 | struct fla_dp const *fla_dp;
56 | struct xnvme_dev *dev;
57 | void *buf;
58 | union
59 | {
60 | struct xnvme_lba_range * lba_range;
61 | struct fla_strp_params * strp_params;
62 | };
63 | struct fla_pool const * pool_handle;
64 | struct fla_object const * obj_handle;
65 |
66 | int (*prep_ctx)(struct fla_xne_io *xne_io, struct xnvme_cmd_ctx *ctx);
67 | };
68 |
69 | struct xnvme_lba_range
70 | fla_xne_lba_range_from_offset_nbytes(struct xnvme_dev *dev, uint64_t offset, uint64_t nbytes);
71 |
72 | /**
73 | * @brief Synchronous sequential write
74 | *
75 |  * @param xne_io contains the device, buffer and LBA range to transfer
76 | * @return Zero on success. non-zero on error.
77 | */
78 | int
79 | fla_xne_sync_seq_w_xneio(struct fla_xne_io *xne_io);
80 |
81 | /**
82 |  * @brief Asynchronous striped sequential write
83 | *
84 |  * @param xne_io contains the device, buffer and striping parameters to transfer
85 | * @return Zero on success. non-zero on error.
86 | */
87 | int
88 | fla_xne_async_strp_seq_xneio(struct fla_xne_io *xne_io);
89 |
90 | /**
91 | * @brief Synchronous sequential read from storage
92 | *
93 |  * @param xne_io contains the device, buffer and LBA range to transfer
94 | * @return Zero on success. non-zero on error
95 | */
96 | int
97 | fla_xne_sync_seq_r_xneio(struct fla_xne_io *xne_io);
98 |
99 | /**
100 | * @brief Allocate a buffer with xnvme allocate
101 | *
102 | * @param dev xnvme device
103 | * @param nbytes Number of bytes to allocate
104 | */
105 | void *
106 | fla_xne_alloc_buf(const struct xnvme_dev *dev, size_t nbytes);
107 |
108 | /**
109 | * @brief Reallocate a buffer with xnvme reallocate
110 | *
111 | * @param dev xnvme device
112 | * @param buf Buffer to reallocate
113 | * @param nbytes Size of the allocated buffer in bytes
114 | */
115 | void *
116 | fla_xne_realloc_buf(const struct xnvme_dev *dev, void *buf,
117 | size_t nbytes);
118 | /**
119 | * @brief Free a buffer with xnvme free
120 | *
121 | * @param dev xnvme device
122 | * @param buf Buffer to free
123 | */
124 | void
125 | fla_xne_free_buf(const struct xnvme_dev * dev, void * buf);
126 |
127 | uint64_t
128 | fla_xne_dev_tbytes(const struct xnvme_dev * dev);
129 |
130 | uint32_t
131 | fla_xne_dev_lba_nbytes(const struct xnvme_dev * dev);
132 |
133 | uint32_t
134 | fla_xne_dev_znd_zones(const struct xnvme_dev *dev);
135 |
136 | uint64_t
137 | fla_xne_dev_znd_sect(const struct xnvme_dev *dev);
138 |
139 | enum xnvme_geo_type
140 | fla_xne_dev_type(const struct xnvme_dev *dev);
141 |
142 | uint32_t
143 | fla_xne_dev_mdts_nbytes(const struct xnvme_dev *dev);
144 |
145 | int
146 | fla_xne_dev_open(const char *dev_uri, struct xnvme_opts *opts, struct xnvme_dev **dev);
147 |
148 | void
149 | fla_xne_dev_close(struct xnvme_dev *dev);
150 |
151 | int
152 | fla_xne_ctrl_idfy(struct xnvme_dev *dev, struct xnvme_spec_idfy *idfy_ctrlr);
153 |
154 | int
155 | fla_xne_feat_idfy(struct xnvme_dev *dev, uint32_t const endgid, uint32_t *dw0);
156 |
157 | int
158 | fla_xne_dev_znd_send_mgmt(struct xnvme_dev *dev, uint64_t slba,
159 | enum xnvme_spec_znd_cmd_mgmt_send_action act, bool);
160 |
161 | uint32_t
162 | fla_xne_dev_get_znd_mar(struct xnvme_dev *dev);
163 |
164 | uint32_t
165 | fla_xne_dev_get_znd_mor(struct xnvme_dev *dev);
166 |
167 | int
168 | fla_xne_dev_mkfs_prepare(struct xnvme_dev *dev, char const *md_dev_uri, struct xnvme_dev **md_dev);
169 | /**
170 | * @brief Check if parameters are within range
171 | *
172 | * @param dev xnvme device
173 | * @return zero if everything is ok. Non-zero if there is a problem
174 | */
175 | int
176 | fla_xne_dev_sanity_check(struct xnvme_dev const * dev, struct xnvme_dev const * md_dev);
177 |
178 | struct xnvme_lba_range
179 | fla_xne_lba_range_from_slba_naddrs(struct xnvme_dev *dev, uint64_t slba, uint64_t naddrs);
180 |
181 | int
182 | fla_xne_get_usable_pids(struct xnvme_dev *dev, uint32_t npids, uint32_t **pids);
183 | #endif /*__XNVME_ENV_H */
184 |
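A rough sketch of how the declarations above combine for a plain synchronous read. Whether the data-placement, pool/object handle and prep_ctx fields may be left unset depends on the callers in the .c files, so treat this as illustrative only; offset and nbytes are assumed to be suitably aligned:

#include "flexalloc_xnvme_env.h"
#include "flexalloc_util.h"

// sketch: read nbytes at byte offset `offset` from the device into a
// buffer allocated through xnvme
static int
read_example(struct xnvme_dev *dev, uint64_t offset, uint64_t nbytes)
{
  int err;
  void *buf = fla_xne_alloc_buf(dev, nbytes);
  if (FLA_ERR(!buf, "fla_xne_alloc_buf()"))
    return -1;

  struct xnvme_lba_range range
    = fla_xne_lba_range_from_offset_nbytes(dev, offset, nbytes);

  struct fla_xne_io xne_io =
  {
    .io_type = FLA_IO_DATA_READ,
    .dev = dev,
    .buf = buf,
    .lba_range = &range,
  };

  err = fla_xne_sync_seq_r_xneio(&xne_io);
  fla_xne_free_buf(dev, buf);
  return FLA_ERR(err, "fla_xne_sync_seq_r_xneio()");
}
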
--------------------------------------------------------------------------------
/src/libflexalloc.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Joel Granados
2 | // Copyright (C) 2021 Jesper Devantier
3 |
4 | #include "libflexalloc.h"
5 | #include "flexalloc_xnvme_env.h"
6 | #include "flexalloc.h"
7 |
8 | int
9 | fla_close(struct flexalloc *fs)
10 | {
11 | return fs->fns.close(fs);
12 | }
13 |
14 | int
15 | fla_sync(struct flexalloc *fs)
16 | {
17 | return fs->fns.sync(fs);
18 | }
19 |
20 | int
21 | fla_pool_create(struct flexalloc *fs, struct fla_pool_create_arg const *arg, struct fla_pool **pool)
22 | {
23 | return fs->fns.pool_create(fs, arg, pool);
24 | }
25 |
26 | int
27 | fla_pool_destroy(struct flexalloc *fs, struct fla_pool * pool)
28 | {
29 | return fs->fns.pool_destroy(fs, pool);
30 | }
31 |
32 | int
33 | fla_pool_open(struct flexalloc *fs, const char *name, struct fla_pool **pool)
34 | {
35 | return fs->fns.pool_open(fs, name, pool);
36 | }
37 |
38 | void
39 | fla_pool_close(struct flexalloc *fs, struct fla_pool * pool)
40 | {
41 | fs->fns.pool_close(fs, pool);
42 | }
43 |
44 | int
45 | fla_object_create(struct flexalloc * fs, struct fla_pool * pool,
46 | struct fla_object * object)
47 | {
48 | return fs->fns.object_create(fs, pool, object);
49 | }
50 |
51 | int
52 | fla_object_open(struct flexalloc * fs, struct fla_pool * pool,
53 | struct fla_object * object)
54 | {
55 | return fs->fns.object_open(fs, pool, object);
56 | }
57 |
58 | int
59 | fla_object_destroy(struct flexalloc *fs, struct fla_pool * pool,
60 | struct fla_object * object)
61 | {
62 | return fs->fns.object_destroy(fs, pool, object);
63 | }
64 |
65 | void *
66 | fla_buf_alloc(struct flexalloc const *fs, size_t nbytes)
67 | {
68 | return fla_xne_alloc_buf(fs->dev.dev, nbytes);
69 | }
70 |
71 | void
72 | fla_buf_free(struct flexalloc const * fs, void *buf)
73 | {
74 | fla_xne_free_buf(fs->dev.dev, buf);
75 | }
76 |
77 | int
78 | fla_pool_set_root_object(struct flexalloc const * const fs,
79 | struct fla_pool const * pool,
80 | struct fla_object const *object, fla_root_object_set_action act)
81 | {
82 | return fs->fns.pool_set_root_object(fs, pool, object, act);
83 | }
84 |
85 | int
86 | fla_pool_get_root_object(struct flexalloc const * const fs,
87 | struct fla_pool const * pool,
88 | struct fla_object *object)
89 | {
90 | return fs->fns.pool_get_root_object(fs, pool, object);
91 | }
92 |
93 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_hash.c:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include "flexalloc_hash.h"
7 | #include "flexalloc_util.h"
8 |
9 | char default_file[] = "wlist.txt";
10 |
11 | struct linereader
12 | {
13 | FILE *fp;
14 | char *buf;
15 | unsigned int buf_len;
16 | };
17 |
18 | int
19 | linereader_init(struct linereader *lr, char *fname, char *buf, unsigned int buf_len)
20 | {
21 | FILE *fp = fopen(fname, "r");
22 | int err = 0;
23 |
24 | if ((err = FLA_ERR(!fp, "fopen() failed to open file")))
25 | {
26 | err = -1;
27 | goto exit;
28 | }
29 |
30 | lr->fp = fp;
31 | lr->buf = buf;
32 | lr->buf_len = buf_len;
33 |
34 | exit:
35 | return err;
36 | }
37 |
38 | void
39 | linereader_destroy(struct linereader *lr)
40 | {
41 | if (!lr)
42 | return;
43 |
44 | fclose(lr->fp);
45 | }
46 |
47 | void
48 | linereader_seek_to_start(struct linereader *lr)
49 | {
50 | fseek(lr->fp, 0, SEEK_SET);
51 | }
52 |
53 | int
54 | linereader_next(struct linereader *lr)
55 | {
56 | unsigned int i = 0;
57 |
58 | for (; i < lr->buf_len; i++)
59 | {
60 | int c = fgetc(lr->fp);
61 | lr->buf[i] = (char)c;
62 | if (c == '\n')
63 | {
64 | lr->buf[i] = '\0';
65 | break;
66 | }
67 | else if (c == EOF)
68 | {
69 | lr->buf[i] = '\0';
70 | if (i == 0)
71 | {
72 | // indicate that nothing was read
73 | return -1;
74 | }
75 | break;
76 | }
77 | }
78 | return 0;
79 | }
80 |
81 | // 0 > RET, failed some other step
82 | // 0 == success
83 | // N, where N > 0, failed to insert N entries
84 | int
85 | test_insert(struct fla_htbl *htbl, struct linereader *lr)
86 | {
87 | // ensure test files have no duplicate strings
88 | // lay out table with all entries to unset
89 | unsigned int lineno = 0;
90 | int err = 0;
91 |
92 | linereader_seek_to_start(lr);
93 |
94 | while (linereader_next(lr) >= 0)
95 | {
96 | if (htbl_insert(htbl, lr->buf, lineno++))
97 | {
98 | err++;
99 | }
100 | }
101 | if(err)
102 | {
103 | fprintf(stdout, "========================================\n");
104 | fprintf(stdout, "test_insert\n");
105 | fprintf(stdout, "========================================\n");
106 | }
107 |
108 | return err;
109 | }
110 |
111 | // 0 > RET if initialization failed
112 | // 0 == success
113 | // N, where N > 0 -- had N failed lookups
114 | int
115 | test_lookup(struct fla_htbl *htbl, struct linereader *reader)
116 | {
117 | // ensure test files have no duplicate strings
118 | // lay out table with all entries to unset
119 | struct fla_htbl_entry *entry = NULL;
120 | int err = 0;
121 |
122 | linereader_seek_to_start(reader);
123 |
124 | while (linereader_next(reader) >= 0)
125 | {
126 | entry = htbl_lookup(htbl, reader->buf);
127 | if (!entry)
128 | {
129 | err++;
130 | }
131 | else if (entry->h2 != FLA_HTBL_H2(reader->buf))
132 | {
133 | err++;
134 | }
135 | }
136 |
137 | if(err)
138 | {
139 | fprintf(stdout, "========================================\n");
140 | fprintf(stdout, "test_lookup\n");
141 | fprintf(stdout, "========================================\n");
142 | }
143 |
144 | return err;
145 | }
146 |
147 | // 0 > RET if initialization failed
148 | // 0 == success
149 | // N, where N > 0 -- had N instances of failing to remove entry
150 | int
151 | test_remove(struct fla_htbl *htbl, struct linereader *reader)
152 | {
153 | // ensure test files have no duplicate strings
154 | // lay out table with all entries to unset
155 | struct fla_htbl_entry *entry = NULL;
156 | int err = 0;
157 |
158 | linereader_seek_to_start(reader);
159 |
160 | while (linereader_next(reader) >= 0)
161 | {
162 | htbl_remove(htbl, reader->buf);
163 |
164 | entry = htbl_lookup(htbl, reader->buf);
165 | if (entry)
166 | err++;
167 | }
168 | if(err)
169 | {
170 | fprintf(stdout, "========================================\n");
171 | fprintf(stdout, "test_remove\n");
172 | fprintf(stdout, "========================================\n");
173 | }
174 |
175 | return err;
176 | }
177 |
178 | void
179 | htbl_stats_print(struct fla_htbl *htbl)
180 | {
181 | unsigned int entries = 0;
182 | unsigned int psl_max = 0;
183 | for (unsigned int i = 0; i < htbl->tbl_size; i++)
184 | {
185 | if (htbl->tbl[i].h2 != FLA_HTBL_ENTRY_UNSET)
186 | {
187 | entries++;
188 | if (htbl->tbl[i].psl > psl_max)
189 | psl_max = htbl->tbl[i].psl;
190 | }
191 | }
192 |
193 | FLA_VBS_PRINTF("psl max: %d\n", psl_max);
194 | FLA_VBS_PRINTF("lines: %d\n", htbl->stat_insert_calls);
195 | FLA_VBS_PRINTF("entries: %d\n", entries);
196 | FLA_VBS_PRINTF("table size: %d\n", htbl->tbl_size);
197 | FLA_VBS_PRINTF("avg placement tries: %f\n",
198 | (double)htbl->stat_insert_tries / (double)htbl->stat_insert_calls);
199 | FLA_VBS_PRINTF("failed placements: %d\n", htbl->stat_insert_failed);
200 | }
201 |
202 | int
203 | main(int argc, char **argv)
204 | {
205 | char *fname;
206 | struct fla_htbl *htbl = NULL;
207 | unsigned int tbl_size = 83922 * 2;
208 | struct linereader reader;
209 | char linebuf[150];
210 | int err = 0;
211 | if (argc > 2)
212 | {
213 | fprintf(stderr, "usage: $0 []\n");
214 | err = -1;
215 | goto exit;
216 | }
217 | else if (argc == 1)
218 | {
219 | fname = default_file;
220 | }
221 | else
222 | {
223 | fname = argv[1];
224 | }
225 | err = linereader_init(&reader, fname, linebuf, 150);
226 | if (FLA_ERR(err, "failed to initialize line reader"))
227 | {
228 | goto exit;
229 | }
230 |
231 | err = htbl_new(tbl_size, &htbl);
232 | if (FLA_ERR(err, "failed to allocate table"))
233 | {
234 | err = -1;
235 | goto reader_destroy;
236 | }
237 |
238 | err = test_insert(htbl, &reader);
239 | if (FLA_ERR(err, "failed during insert"))
240 | {
241 | fprintf(stderr, "failed %d inserts\n", err);
242 | }
243 | htbl_stats_print(htbl);
244 |
245 | err = test_lookup(htbl, &reader);
246 | if (FLA_ERR(err, "failed during lookup"))
247 | {
248 | fprintf(stdout, "failed %d lookups\n", err);
249 | goto table_free;
250 | }
251 |
252 | err = test_remove(htbl, &reader);
253 | if (FLA_ERR(err, "failed to remove entries"))
254 | {
255 | goto table_free;
256 | }
257 |
258 | htbl_stats_print(htbl);
259 |
260 | table_free:
261 | htbl_free(htbl);
262 |
263 | reader_destroy:
264 | linereader_destroy(&reader);
265 |
266 | exit:
267 | return err;
268 | }
269 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_lib_open_close.c:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include "libflexalloc.h"
7 | #include "flexalloc_mm.h"
8 | #include "flexalloc_util.h"
9 | #include "flexalloc_xnvme_env.h"
10 | #include "tests/flexalloc_tests_common.h"
11 |
12 | int
13 | main(int argc, char **argv)
14 | {
15 | int err, ret;
16 | char * pool_handle_name, * write_msg, *write_buf, *read_buf;
17 | size_t write_msg_len, buf_len;
18 | struct flexalloc *fs = NULL;
19 | struct fla_pool *pool_handle;
20 | struct fla_object obj;
21 | struct fla_ut_dev tdev = {0};
22 |
23 | err = fla_ut_dev_init(2000, &tdev);
24 | if (FLA_ERR(err, "fla_ut_dev_init()"))
25 | {
26 |
27 | goto exit;
28 | }
29 |
30 | if (!tdev._is_zns)
31 | err = fla_ut_fs_create(500, 1, &tdev, &fs);
32 | else
33 | err = fla_ut_fs_create(tdev.nsect_zn, 1, &tdev, &fs);
34 |
35 | if (FLA_ERR(err, "fla_ut_fs_create()"))
36 | {
37 | goto teardown_ut_dev;
38 | }
39 |
40 | pool_handle_name = "mypool";
41 | struct fla_pool_create_arg pool_arg =
42 | {
43 | .flags = 0,
44 | .name = pool_handle_name,
45 | .name_len = strlen(pool_handle_name),
46 | .obj_nlb = tdev.nsect_zn
47 | };
48 |
49 | if (!tdev._is_zns)
50 | pool_arg.obj_nlb = 2;
51 |
52 | err = fla_pool_create(fs, &pool_arg, &pool_handle);
53 |
54 | if(FLA_ERR(err, "fla_pool_create()"))
55 | {
56 | goto teardown_ut_fs;
57 | }
58 |
59 | err = fla_object_create(fs, pool_handle, &obj);
60 | if(FLA_ERR(err, "fla_object_create()"))
61 | {
62 | goto release_pool;
63 | }
64 |
65 | write_msg = "hello, world";
66 | write_msg_len = strlen(write_msg);
67 | buf_len = FLA_CEIL_DIV(write_msg_len, tdev.lb_nbytes) * tdev.lb_nbytes;
68 |
69 | read_buf = fla_buf_alloc(fs, buf_len);
70 | if((err = FLA_ERR(!read_buf, "fla_buf_alloc()")))
71 | {
72 | goto release_object;
73 | }
74 |
75 | write_buf = fla_buf_alloc(fs, buf_len);
76 | if((err = FLA_ERR(!write_buf, "fla_buf_alloc()")))
77 | {
78 | goto free_read_buffer;
79 | }
80 | memcpy(write_buf, write_msg, write_msg_len);
81 | write_buf[write_msg_len] = '\0';
82 |
83 | err = fla_object_write(fs, pool_handle, &obj, write_buf, 0, buf_len);
84 | if(FLA_ERR(err, "fla_object_write()"))
85 | {
86 | goto free_write_buffer;
87 | }
88 |
89 | err = fla_object_read(fs, pool_handle, &obj, read_buf, 0, buf_len);
90 | if(FLA_ERR(err, "fla_obj_read()"))
91 | {
92 | goto free_write_buffer;
93 | }
94 |
95 | // compare that the string (and terminating NULL) was read back
96 | err = memcmp(write_buf, read_buf, write_msg_len + 1);
97 | if(FLA_ERR(err, "memcmp() - failed to read back the written value"))
98 | {
99 | goto free_write_buffer;
100 | }
101 |
102 | free_write_buffer:
103 | fla_buf_free(fs, write_buf);
104 |
105 | free_read_buffer:
106 | fla_buf_free(fs, read_buf);
107 |
108 | release_object:
109 | ret = fla_object_destroy(fs, pool_handle, &obj);
110 | if(FLA_ERR(ret, "fla_object_destroy()"))
111 | {
112 | err = ret;
113 | }
114 |
115 | release_pool:
116 | ret = fla_pool_destroy(fs, pool_handle);
117 | if(FLA_ERR(ret, "fla_pool_destroy()"))
118 | {
119 | err = ret;
120 | }
121 |
122 | teardown_ut_fs:
123 | ret = fla_ut_fs_teardown(fs);
124 | if (FLA_ERR(ret, "fla_ut_fs_teardown()"))
125 | {
126 | err = ret;
127 | }
128 |
129 | teardown_ut_dev:
130 | ret = fla_ut_dev_teardown(&tdev);
131 | if (FLA_ERR(ret, "fla_ut_dev_teardown()"))
132 | {
133 | err = ret;
134 | }
135 |
136 | exit:
137 | return err;
138 | }
139 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_mkfs.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Joel Granados
2 | #include "tests/flexalloc_tests_common.h"
3 | #include "flexalloc_util.h"
4 | #include "flexalloc_mm.h"
5 | #include "libflexalloc.h"
6 | #include <inttypes.h>
7 |
8 | struct test_vals
9 | {
10 | /* Initial variables */
11 | uint64_t blk_num;
12 | uint32_t npools;
13 | uint32_t slab_nlb;
14 | char *dev_path;
15 | char *md_dev_path;
16 | };
17 |
18 | int test_mkfs(struct test_vals * test_vals);
19 |
20 | int
21 | main(int argc, char ** argv)
22 | {
23 | int err = 0;
24 | struct test_vals test_vals [4] =
25 | {
26 | {.blk_num = 40000, .npools = 2, .slab_nlb = 4000}
27 | , {.blk_num = 80000, .npools = 2, .slab_nlb = 4000},
28 | };
29 |
30 | for(int i = 0 ; i < 2 ; ++i)
31 | {
32 |
33 | err = test_mkfs(&test_vals[i]);
34 | if(FLA_ERR(err, "test_mkfs()"))
35 | goto exit;
36 | }
37 |
38 | exit:
39 | return err;
40 | }
41 |
42 | int
43 | test_mkfs(struct test_vals * test_vals)
44 | {
45 | int err, ret = 0;
46 | struct fla_ut_dev dev = {0};
47 | struct flexalloc *fs;
48 |
49 | err = fla_ut_dev_init(test_vals->blk_num, &dev);
50 | if (FLA_ERR(err, "fla_ut_dev_init()"))
51 | goto exit;
52 |
53 | if (dev._is_zns)
54 | {
55 | test_vals->blk_num = dev.nblocks;
56 | test_vals->slab_nlb = dev.nsect_zn;
57 | }
58 |
59 | err = fla_ut_fs_create(test_vals->slab_nlb, test_vals->npools, &dev, &fs);
60 | if(FLA_ERR(err, "fla_ut_fs_create()"))
61 | goto teardown_ut_dev;
62 |
63 |
64 | err |= FLA_ASSERTF(fs->super->npools == test_vals->npools,
65 | "Unexpected number of pools (%"PRIu32" == %"PRIu32")", fs->super->npools, test_vals->npools);
66 | err |= FLA_ASSERTF(fs->super->slab_nlb == test_vals->slab_nlb,
67 | "Unexpected size of slab (%"PRIu64" == %"PRIu32")", fs->super->slab_nlb
68 | ,test_vals->slab_nlb);
69 | err |= FLA_ASSERTF(fs->geo.nlb == dev.nblocks,
70 | "Unexpected number of lbas, (%"PRIu64" == %"PRIu64")", fs->geo.nlb, dev.nblocks);
71 | err |= FLA_ASSERTF(fs->geo.lb_nbytes == dev.lb_nbytes,
72 | "Unexpected lba width, (%"PRIu32" == %"PRIu64")", fs->geo.lb_nbytes, dev.lb_nbytes);
73 |
74 | ret = fla_ut_fs_teardown(fs);
75 | if(FLA_ERR(ret, "fla_ut_fs_teardown()"))
76 | err = ret;
77 |
78 | teardown_ut_dev:
79 | ret = fla_ut_dev_teardown(&dev);
80 | if (FLA_ERR(ret, "fla_ut_dev_teardown()"))
81 | err = ret;
82 |
83 | exit:
84 | return err;
85 | }
86 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_multi_pool_read_write.c:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include "libflexalloc.h"
5 | #include "flexalloc.h"
6 | #include "flexalloc_util.h"
7 | #include "tests/flexalloc_tests_common.h"
8 |
9 | #define NUM_POOLS 2
10 |
11 | struct test_vals
12 | {
13 | uint64_t blk_size;
14 | uint64_t blk_num;
15 | uint32_t npools;
16 | uint32_t slab_nlb;
17 | uint32_t obj_nlb;
18 | };
19 |
20 | int
21 | main(int argc, char **argv)
22 | {
23 | int err = 0;
24 | char *pool_handle_names[NUM_POOLS] = {"mypool0", "mypool1"};
25 | char* write_msgs[NUM_POOLS] = {"hello, world pool0", "hello, world pool1"};
26 | char *write_bufs[NUM_POOLS] = {NULL, NULL}, *read_buf = NULL;
27 | size_t write_msg_lens[NUM_POOLS] = {strlen(write_msgs[0]), strlen(write_msgs[1])};
28 | size_t buf_len;
29 | struct flexalloc *fs = NULL;
30 | struct fla_pool *pool_handle[NUM_POOLS] = {NULL, NULL};
31 | struct fla_object obj[NUM_POOLS] = {0};
32 | struct test_vals t_val = {0};
33 | struct fla_ut_dev tdev = {0};
34 | struct fla_open_opts open_opts = {0};
35 |
36 | open_opts.opts = NULL;
37 | err = fla_ut_dev_init(t_val.blk_num, &tdev);
38 | if (FLA_ERR(err, "fla_ut_dev_init()"))
39 | goto exit;
40 |
41 | // If we are not using a ZNS device we aren't really interested in this test.
42 | // This test is to demonstrate that one zone will be left opened by flexalloc
43 | if (!tdev._is_zns)
44 | {
45 | goto teardown_ut_dev;
46 | }
47 |
48 | t_val.blk_size = tdev.lb_nbytes;
49 | t_val.blk_num = tdev.nblocks;
50 | t_val.slab_nlb = tdev.nsect_zn;
51 | t_val.obj_nlb = tdev.nsect_zn;
52 | t_val.npools = NUM_POOLS;
53 |
54 | err = fla_ut_fs_create(t_val.slab_nlb, t_val.npools, &tdev, &fs);
55 | if(FLA_ERR(err, "fla_ut_fs_create()"))
56 | goto teardown_ut_dev;
57 |
58 | for (int pn = 0; pn < NUM_POOLS; pn++)
59 | {
60 | struct fla_pool_create_arg pool_arg =
61 | {
62 | .flags = 0,
63 | .name = pool_handle_names[pn],
64 | .name_len = strlen(pool_handle_names[pn]),
65 | .obj_nlb = t_val.obj_nlb,
66 | };
67 |
68 | buf_len = FLA_CEIL_DIV(write_msg_lens[pn], t_val.blk_size) * t_val.blk_size;
69 | err = fla_pool_create(fs, &pool_arg, &pool_handle[pn]);
70 | if(FLA_ERR(err, "fla_pool_create()"))
71 | goto teardown_ut_fs;
72 |
73 | err = fla_object_create(fs, pool_handle[pn], &obj[pn]);
74 | if(FLA_ERR(err, "fla_object_create()"))
75 | goto release_pool;
76 |
77 | write_bufs[pn] = fla_buf_alloc(fs, buf_len);
78 | if((err = FLA_ERR(!write_bufs[pn], "fla_buf_alloc()")))
79 | goto release_object;
80 |
81 | memset(write_bufs[pn], 0, buf_len);
82 | memcpy(write_bufs[pn], write_msgs[pn], write_msg_lens[pn]);
83 | write_bufs[pn][write_msg_lens[pn]] = '\0';
84 |
85 | err = fla_object_write(fs, pool_handle[pn], &obj[pn], write_bufs[pn], 0, buf_len);
86 | if(FLA_ERR(err, "fla_object_write()"))
87 | goto free_write_buffer;
88 | }
89 |
90 | err = fla_close(fs);
91 | if(FLA_ERR(err, "fla_close()"))
92 | goto free_write_buffer;
93 |
94 | open_opts.dev_uri = tdev._dev_uri;
95 | open_opts.md_dev_uri = tdev._md_dev_uri;
96 | err = fla_open(&open_opts, &fs);
97 | if(FLA_ERR(err, "fla_open()"))
98 | goto free_write_buffer;
99 |
100 | for (int pn = 0; pn < NUM_POOLS; pn++)
101 | {
102 | buf_len = FLA_CEIL_DIV(write_msg_lens[pn], t_val.blk_size) * t_val.blk_size;
103 | err = fla_pool_open(fs, pool_handle_names[pn], &pool_handle[pn]);
104 | if(FLA_ERR(err, "fla_pool_lookup()"))
105 | goto free_write_buffer;
106 |
107 | err = fla_object_open(fs, pool_handle[pn], &obj[pn]);
108 | if(FLA_ERR(err, "fla_object_open()"))
109 | goto free_write_buffer;
110 |
111 | if (!read_buf)
112 | read_buf = fla_buf_alloc(fs, buf_len);
113 | if((err = FLA_ERR(!read_buf, "fla_buf_alloc()")))
114 | goto free_write_buffer;
115 |
116 | memset(read_buf, 0, buf_len);
117 | err = fla_object_read(fs, pool_handle[pn], &obj[pn], read_buf, 0, buf_len);
118 | if(FLA_ERR(err, "fla_obj_read()"))
119 | goto free_read_buffer;
120 |
121 | // compare that the string (and terminating NULL) was read back
122 | err = memcmp(write_bufs[pn], read_buf, write_msg_lens[pn] + 1);
123 | if(FLA_ERR(err, "memcmp() - failed to read back the written value"))
124 | goto free_read_buffer;
125 | }
126 |
127 | free_read_buffer:
128 | fla_buf_free(fs, read_buf);
129 |
130 | free_write_buffer:
131 | for (int pn = 0; pn < NUM_POOLS; pn++)
132 | {
133 | if (write_bufs[pn])
134 | fla_buf_free(fs, write_bufs[pn]);
135 | }
136 |
137 | release_object:
138 | for (int pn = 0; pn < NUM_POOLS; pn++)
139 | {
140 | if ((obj[pn].slab_id || obj[pn].entry_ndx) && pn == 1)
141 | {
142 | err = fla_object_destroy(fs, pool_handle[pn], &obj[pn]);
143 | FLA_ERR(err, "fla_object_destroy()");
144 | }
145 | }
146 |
147 | release_pool:
148 | for (int pn = 0; pn < NUM_POOLS; pn++)
149 | {
150 | if (pool_handle[pn] && pn == 1)
151 | fla_pool_close(fs, pool_handle[pn]);
152 | }
153 |
154 | teardown_ut_fs:
155 | err = fla_ut_fs_teardown(fs);
156 | FLA_ERR(err, "fla_ut_fs_teardown()");
157 |
158 | teardown_ut_dev:
159 | err = fla_ut_dev_teardown(&tdev);
160 | FLA_ERR(err, "fla_ut_dev_teardown()");
161 |
162 | exit:
163 | return err;
164 | }
165 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_object_allocate.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) Jesper Devantier
2 |
3 | #include <stdlib.h>
4 | #include "tests/flexalloc_tests_common.h"
5 | #include "flexalloc_util.h"
6 | #include "flexalloc_mm.h"
7 | #include "libflexalloc.h"
8 |
9 | struct test_vals
10 | {
11 | uint64_t blk_num;
12 | uint32_t npools;
13 | uint32_t slab_nlb;
14 | uint32_t obj_nlb;
15 | uint32_t dev_nobj;
16 | };
17 |
18 | static int test_objects(struct test_vals * test_vals);
19 | #define FLA_UT_OBJECT_NUMBER_OF_TESTS 2
20 |
21 | int
22 | main(int argc, char** argv)
23 | {
24 |
25 | int err = 0;
26 | struct test_vals test_vals [FLA_UT_OBJECT_NUMBER_OF_TESTS] =
27 | {
28 | {.blk_num = 40000, .npools = 2, .slab_nlb = 4000, .obj_nlb = 1, .dev_nobj = 2}
29 | , {.blk_num = 40000, .npools = 1, .slab_nlb = 4000, .obj_nlb = 5, .dev_nobj = 6}
30 | };
31 |
32 | for(int i = 0 ; i < FLA_UT_OBJECT_NUMBER_OF_TESTS ; ++i)
33 | {
34 | err = test_objects(&test_vals[i]);
35 | if(FLA_ERR(err, "test_slabs()"))
36 | goto exit;
37 | }
38 |
39 | exit:
40 | return err;
41 | }
42 |
43 | static int
44 | test_objects(struct test_vals * test_vals)
45 | {
46 | int err, ret;
47 | struct fla_ut_dev dev;
48 | struct flexalloc *fs;
49 | char * pool_name = "pool1";
50 | struct fla_pool *pool_handle;
51 | struct fla_object *objs;
52 |
53 | objs = malloc(sizeof(struct fla_object) * test_vals->dev_nobj);
54 | if((err = FLA_ERR(!objs, "malloc()")))
55 | goto exit;
56 |
57 | err = fla_ut_dev_init(test_vals->blk_num, &dev);
58 | if (FLA_ERR(err, "fla_ut_dev_init"))
59 | goto free_objs;
60 |
61 | if (dev._is_zns)
62 | {
63 | test_vals->slab_nlb = dev.nsect_zn;
64 | test_vals->obj_nlb = dev.nsect_zn;
65 | test_vals->dev_nobj = 1;
66 | }
67 |
68 | err = fla_ut_fs_create(test_vals->slab_nlb, test_vals->npools, &dev, &fs);
69 | if (FLA_ERR(err, "fla_ut_fs_create()"))
70 | goto teardown_ut_dev;
71 |
72 | struct fla_pool_create_arg pool_arg =
73 | {
74 | .flags = 0,
75 | .name = pool_name,
76 | .name_len = strlen(pool_name),
77 | .obj_nlb = test_vals->obj_nlb,
78 | };
79 |
80 | err = fla_pool_create(fs, &pool_arg, &pool_handle);
81 | if(FLA_ERR(err, "fla_pool_create()"))
82 | goto teardown_ut_fs;
83 |
84 | // Allocate all possible objects
85 | for(size_t objs_offset = 0 ; objs_offset < test_vals->dev_nobj; ++objs_offset)
86 | {
87 | err = fla_object_create(fs, pool_handle, &objs[objs_offset]);
88 | if(FLA_ERR(err, "fla_object_create()"))
89 | goto release_pool;
90 | }
91 |
92 | // Make sure we cannot allocate more
93 | /*err = fla_object_create(fs, pool_handle, &obj);
94 | if(FLA_ASSERTF(err != 0, "Allocated past the max value %d\n", test_vals->dev_nobj))
95 | {
96 | goto release_pool;
97 | }*/
98 |
99 | for(size_t objs_offset = 0 ; objs_offset < test_vals->dev_nobj; ++ objs_offset)
100 | {
101 | err = fla_object_destroy(fs, pool_handle, &objs[objs_offset]);
102 | if(FLA_ERR(err, "fla_object_destroy()"))
103 | goto release_pool;
104 | }
105 |
106 | release_pool:
107 | ret = fla_pool_destroy(fs, pool_handle);
108 | if(FLA_ERR(ret, "fla_pool_destroy()"))
109 | err = ret;
110 |
111 | teardown_ut_fs:
112 | ret = fla_ut_fs_teardown(fs);
113 | if (FLA_ERR(ret, "fla_ut_fs_teardown()"))
114 | err = ret;
115 |
116 | teardown_ut_dev:
117 | ret = fla_ut_dev_teardown(&dev);
118 | if (FLA_ERR(ret, "fla_ut_dev_teardown()"))
119 | err = ret;
120 |
121 | free_objs:
122 | free(objs);
123 |
124 | exit:
125 | return err;
126 | }
127 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_object_overread_overwrite.c:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include "libflexalloc.h"
5 | #include "flexalloc.h"
6 | #include "flexalloc_util.h"
7 | #include "tests/flexalloc_tests_common.h"
8 | #include
9 |
10 | struct test_vals
11 | {
12 | uint64_t blk_num;
13 | uint32_t npools;
14 | uint32_t slab_nlb;
15 | uint32_t obj_nlb;
16 | };
17 |
18 | int
19 | main(int argc, char **argv)
20 | {
21 | int err, ret;
22 | char * pool_handle_name, *buf;
23 | size_t buf_len;
24 | struct fla_ut_dev dev;
25 | struct flexalloc *fs = NULL;
26 | struct fla_pool *pool_handle;
27 | struct fla_object obj;
28 | struct test_vals test_vals
29 | = {.blk_num = 40000, .slab_nlb = 4000, .npools = 1, .obj_nlb = 2};
30 |
31 | pool_handle_name = "mypool";
32 |
33 | err = fla_ut_dev_init(test_vals.blk_num, &dev);
34 | if (FLA_ERR(err, "fla_ut_dev_init()"))
35 | goto exit;
36 |
37 | if (dev._is_zns)
38 | {
39 | test_vals.slab_nlb = dev.nsect_zn;
40 | test_vals.obj_nlb = dev.nsect_zn;
41 | }
42 |
43 | err = fla_ut_fs_create(test_vals.slab_nlb, test_vals.npools, &dev, &fs);
44 | if (FLA_ERR(err, "fla_ut_fs_create()"))
45 | goto teardown_ut_dev;
46 |
47 | buf_len = FLA_CEIL_DIV(dev.lb_nbytes * (test_vals.obj_nlb + 1),
48 | dev.lb_nbytes) * dev.lb_nbytes;
49 |
50 | struct fla_pool_create_arg pool_arg =
51 | {
52 | .flags = 0,
53 | .name = pool_handle_name,
54 | .name_len = strlen(pool_handle_name),
55 | .obj_nlb = test_vals.obj_nlb,
56 | };
57 |
58 | err = fla_pool_create(fs, &pool_arg, &pool_handle);
59 | if(FLA_ERR(err, "fla_pool_create()"))
60 | goto teardown_ut_fs;
61 |
62 | err = fla_object_create(fs, pool_handle, &obj);
63 | if(FLA_ERR(err, "fla_object_create()"))
64 | goto release_pool;
65 |
66 | fprintf(stderr, "dev.lb_nbytes(%"PRIu64"), buf_len(%zu)\n", dev.lb_nbytes, buf_len);
67 | buf = fla_buf_alloc(fs, buf_len);
68 | if((err = FLA_ERR(!buf, "fla_buf_alloc()")))
69 | goto release_object;
70 |
71 | ret = fla_object_write(fs, pool_handle, &obj, buf, 0, buf_len);
72 | err |= FLA_ASSERT(ret != 0, "We need to fail when we write over the object limit");
73 | ret = fla_object_read(fs, pool_handle, &obj, buf, 0, buf_len);
74 | err |= FLA_ASSERT(ret != 0, "We need to fail when we read over the object limit");
75 |
76 | fla_buf_free(fs, buf);
77 |
78 | release_object:
79 | ret = fla_object_destroy(fs, pool_handle, &obj);
80 | if(FLA_ERR(ret, "fla_object_destroy()"))
81 | err = ret;
82 |
83 | release_pool:
84 | ret = fla_pool_destroy(fs, pool_handle);
85 | if(FLA_ERR(ret, "fla_pool_destroy()"))
86 | err = ret;
87 |
88 | teardown_ut_fs:
89 | ret = fla_ut_fs_teardown(fs);
90 | if (FLA_ERR(ret, "fla_ut_fs_teardown()"))
91 | err = ret;
92 |
93 | teardown_ut_dev:
94 | ret = fla_ut_dev_teardown(&dev);
95 | if (FLA_ERR(ret, "fla_ut_dev_teardown()"))
96 | err = ret;
97 |
98 | exit:
99 | return err;
100 | }
101 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_object_read_write.c:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include "libflexalloc.h"
5 | #include "flexalloc.h"
6 | #include "flexalloc_util.h"
7 | #include "tests/flexalloc_tests_common.h"
8 |
9 | struct test_vals
10 | {
11 | uint64_t blk_num;
12 | uint32_t npools;
13 | uint32_t slab_nlb;
14 | uint32_t obj_nlb;
15 | };
16 |
17 | int
18 | main(int argc, char **argv)
19 | {
20 | int err, ret;
21 | char * pool_handle_name, * write_msg, *write_buf, *read_buf;
22 | size_t write_msg_len, buf_len;
23 | struct fla_ut_dev dev;
24 | struct flexalloc *fs = NULL;
25 | struct fla_pool *pool_handle;
26 | struct fla_object obj;
27 | struct fla_open_opts open_opts = {0};
28 | struct test_vals test_vals
29 | = {.blk_num = 40000, .slab_nlb = 4000, .npools = 1, .obj_nlb = 2};
30 |
31 | pool_handle_name = "mypool";
32 | write_msg = "hello, world";
33 | write_msg_len = strlen(write_msg);
34 |
35 | err = fla_ut_dev_init(test_vals.blk_num, &dev);
36 | if (FLA_ERR(err, "fla_ut_dev_init()"))
37 | goto exit;
38 |
39 | if (dev._is_zns)
40 | {
41 | test_vals.slab_nlb = dev.nsect_zn;
42 | test_vals.obj_nlb = dev.nsect_zn;
43 | }
44 |
45 | err = fla_ut_fs_create(test_vals.slab_nlb, test_vals.npools, &dev, &fs);
46 | if (FLA_ERR(err, "fla_ut_fs_create()"))
47 | {
48 | goto teardown_ut_dev;
49 | }
50 |
51 | buf_len = FLA_CEIL_DIV(write_msg_len, dev.lb_nbytes) * dev.lb_nbytes;
52 |
53 | struct fla_pool_create_arg pool_arg =
54 | {
55 | .flags = 0,
56 | .name = pool_handle_name,
57 | .name_len = strlen(pool_handle_name),
58 | .obj_nlb = test_vals.obj_nlb
59 | };
60 |
61 | err = fla_pool_create(fs, &pool_arg, &pool_handle);
62 | if(FLA_ERR(err, "fla_pool_create()"))
63 | goto teardown_ut_fs;
64 |
65 | err = fla_object_create(fs, pool_handle, &obj);
66 | if(FLA_ERR(err, "fla_object_create()"))
67 | goto release_pool;
68 |
69 | write_buf = fla_buf_alloc(fs, buf_len);
70 | if((err = FLA_ERR(!write_buf, "fla_buf_alloc()")))
71 | goto release_object;
72 |
73 | memcpy(write_buf, write_msg, write_msg_len);
74 | write_buf[write_msg_len] = '\0';
75 |
76 | err = fla_object_write(fs, pool_handle, &obj, write_buf, 0, buf_len);
77 | if(FLA_ERR(err, "fla_object_write()"))
78 | goto free_write_buffer;
79 |
80 | err = fla_close(fs);
81 | if(FLA_ERR(err, "fla_close()"))
82 | goto free_write_buffer;
83 |
84 | open_opts.dev_uri = dev._dev_uri;
85 | open_opts.md_dev_uri = dev._md_dev_uri;
86 | err = fla_open(&open_opts, &fs);
87 | if(FLA_ERR(err, "fla_open()"))
88 | goto free_write_buffer;
89 |
90 | fla_pool_close(fs, pool_handle);
91 | err = fla_pool_open(fs, pool_handle_name, &pool_handle);
92 | if(FLA_ERR(err, "fla_pool_open()"))
93 | goto free_write_buffer;
94 |
95 | err = fla_object_open(fs, pool_handle, &obj);
96 | if(FLA_ERR(err, "fla_object_open()"))
97 | goto free_write_buffer;
98 |
99 | read_buf = fla_buf_alloc(fs, buf_len);
100 | if((err = FLA_ERR(!read_buf, "fla_buf_alloc()")))
101 | goto free_write_buffer;
102 |
103 | err = fla_object_read(fs, pool_handle, &obj, read_buf, 0, buf_len);
104 | if(FLA_ERR(err, "fla_obj_read()"))
105 | goto free_read_buffer;
106 |
107 | // compare that the string (and terminating NULL) was read back
108 | err = memcmp(write_buf, read_buf, write_msg_len + 1);
109 | if(FLA_ERR(err, "memcmp() - failed to read back the written value"))
110 | goto free_write_buffer;
111 |
112 | // Free the object, which should reset a zone
113 | ret = fla_object_destroy(fs, pool_handle, &obj);
114 | if(FLA_ERR(ret, "fla_object_destroy()"))
115 | err = ret;
116 |
117 | // Allocate a new object
118 | err = fla_object_create(fs, pool_handle, &obj);
119 | if(FLA_ERR(err, "fla_object_create()"))
120 | goto release_pool;
121 |
122 | // Will fail on zns without object reset
123 | err = fla_object_write(fs, pool_handle, &obj, write_buf, 0, buf_len);
124 | if(FLA_ERR(err, "fla_object_write()"))
125 | goto free_write_buffer;
126 |
127 | free_read_buffer:
128 | fla_buf_free(fs, read_buf);
129 |
130 | free_write_buffer:
131 | fla_buf_free(fs, write_buf);
132 |
133 | release_object:
134 | ret = fla_object_destroy(fs, pool_handle, &obj);
135 | if(FLA_ERR(ret, "fla_object_destroy()"))
136 | err = ret;
137 |
138 | release_pool:
139 | ret = fla_pool_destroy(fs, pool_handle);
140 | if(FLA_ERR(ret, "fla_pool_destroy()"))
141 | err = ret;
142 |
143 | teardown_ut_fs:
144 | ret = fla_ut_fs_teardown(fs);
145 | if (FLA_ERR(ret, "fla_ut_fs_teardown()"))
146 | {
147 | err = ret;
148 | }
149 |
150 | teardown_ut_dev:
151 | ret = fla_ut_dev_teardown(&dev);
152 | if (FLA_ERR(ret, "fla_ut_dev_teardown()"))
153 | {
154 | err = ret;
155 | }
156 |
157 | exit:
158 | return err;
159 | }
160 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_object_unaligned_write.c:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include
7 | #include "libflexalloc.h"
8 | #include "flexalloc.h"
9 | #include "flexalloc_util.h"
10 | #include "tests/flexalloc_tests_common.h"
11 |
12 | struct test_vals
13 | {
14 | uint64_t blk_num;
15 | uint32_t npools;
16 | uint32_t slab_nlb;
17 | uint32_t obj_nlb;
18 | };
19 |
20 | struct fs_test_vals
21 | {
22 | char * pool_handle_name;
23 | struct flexalloc * fs;
24 | struct fla_pool * pool_handle;
25 | struct fla_object object_handle;
26 | };
27 |
28 | struct unaligned_test_vals
29 | {
30 | char * orig_msg;
31 | size_t lhs_msg_len, rhs_msg_len, mid_msg_len, mid_start;
32 | };
33 |
34 | int test_unaligned_write(struct fs_test_vals const * fs_tv,
35 | struct unaligned_test_vals const * u_tv);
36 | int test_unaligned_writes(struct fs_test_vals const * fs_tv);
37 |
38 | int
39 | main(int argc, char **argv)
40 | {
41 | int err, ret;
42 | struct fla_ut_dev dev;
43 | struct test_vals test_vals
44 | = {.blk_num = 40000, .slab_nlb = 4000, .npools = 1, .obj_nlb = 10};
45 | struct fs_test_vals fs_vals;
46 |
47 | fs_vals.pool_handle_name = "mypool";
48 |
49 | srand(getpid());
50 |
51 | err = fla_ut_dev_init(test_vals.blk_num, &dev);
52 | if (FLA_ERR(err, "fla_ut_dev_init()"))
53 | {
54 | goto exit;
55 | }
56 |
57 | // Unaligned writes are not compatible with ZNS
58 | if (dev._is_zns)
59 | {
60 | goto exit;
61 | }
62 |
63 | err = fla_ut_fs_create(test_vals.slab_nlb, test_vals.npools, &dev, &fs_vals.fs);
64 | if (FLA_ERR(err, "fla_ut_fs_create()"))
65 | {
66 | goto teardown_ut_dev;
67 | }
68 |
69 | struct fla_pool_create_arg pool_arg =
70 | {
71 | .flags = 0,
72 | .name = fs_vals.pool_handle_name,
73 | .name_len = strlen(fs_vals.pool_handle_name),
74 | .obj_nlb = test_vals.obj_nlb,
75 | };
76 |
77 | err = fla_pool_create(fs_vals.fs, &pool_arg, &fs_vals.pool_handle);
78 | if(FLA_ERR(err, "fla_pool_create()"))
79 | goto teardown_ut_fs;
80 |
81 | err = fla_object_create(fs_vals.fs, fs_vals.pool_handle, &fs_vals.object_handle);
82 | if(FLA_ERR(err, "fla_object_create()"))
83 | goto release_pool;
84 |
85 | err = test_unaligned_writes(&fs_vals);
86 | if(FLA_ERR(err, "test_unaligned_writes()"))
87 | goto release_object;
88 |
89 | release_object:
90 | ret = fla_object_destroy(fs_vals.fs, fs_vals.pool_handle, &fs_vals.object_handle);
91 | if(FLA_ERR(ret, "fla_object_destroy()"))
92 | err = ret;
93 |
94 | release_pool:
95 | ret = fla_pool_destroy(fs_vals.fs, fs_vals.pool_handle);
96 | if(FLA_ERR(ret, "fla_pool_destroy()"))
97 | err = ret;
98 |
99 | teardown_ut_fs:
100 | ret = fla_ut_fs_teardown(fs_vals.fs);
101 | if (FLA_ERR(ret, "fla_ut_fs_teardown()"))
102 | {
103 | err = ret;
104 | }
105 |
106 | teardown_ut_dev:
107 | ret = fla_ut_dev_teardown(&dev);
108 | if (FLA_ERR(ret, "fla_ut_dev_teardown()"))
109 | {
110 | err = ret;
111 | }
112 |
113 | exit:
114 | return err;
115 | }
116 |
117 | int
118 | test_unaligned_write(struct fs_test_vals const * fs_tv, struct unaligned_test_vals const * u_tv)
119 | {
120 | int err = 0;
121 | char * lhs_msg, * rhs_msg, * mid_msg, * w_buf, * a_buf;
122 | size_t orig_msg_len, aligned_msg_len;
123 |
124 | orig_msg_len = strlen(u_tv->orig_msg);
125 | lhs_msg = u_tv->orig_msg;
126 | rhs_msg = u_tv->orig_msg + u_tv->lhs_msg_len;
127 | mid_msg = u_tv->orig_msg + u_tv->mid_start;
128 |
129 | aligned_msg_len = FLA_CEIL_DIV(orig_msg_len,
130 | fs_tv->fs->dev.lb_nbytes) * fs_tv->fs->dev.lb_nbytes;
131 | a_buf = fla_buf_alloc(fs_tv->fs, aligned_msg_len);
132 | if((err = FLA_ERR(!a_buf, "fla_buf_alloc()")))
133 | {
134 | err = -ENOENT;
135 | goto exit;
136 | }
137 | memset(a_buf, '!', fs_tv->fs->dev.lb_nbytes);
138 | err = fla_object_write(fs_tv->fs, fs_tv->pool_handle, &fs_tv->object_handle, a_buf,
139 | aligned_msg_len - fs_tv->fs->dev.lb_nbytes, fs_tv->fs->dev.lb_nbytes);
140 |
141 | if(FLA_ERR(err, "fla_object_write()"))
142 | goto free_a_buf;
143 |
144 | w_buf = malloc(orig_msg_len);
145 | if((err = FLA_ERR(!w_buf, "malloc()")))
146 | goto free_a_buf;
147 |
148 | memcpy(w_buf, lhs_msg, u_tv->lhs_msg_len);
149 | err = fla_object_unaligned_write(fs_tv->fs, fs_tv->pool_handle, &fs_tv->object_handle, w_buf,
150 | 0, u_tv->lhs_msg_len);
151 | if(FLA_ERR(err, "fla_object_unaligned_write()"))
152 | goto free_w_buf;
153 |
154 | memcpy(w_buf, rhs_msg, u_tv->rhs_msg_len);
155 | err = fla_object_unaligned_write(fs_tv->fs, fs_tv->pool_handle, &fs_tv->object_handle, w_buf,
156 | u_tv->lhs_msg_len, u_tv->rhs_msg_len);
157 | if(FLA_ERR(err, "fla_object_unaligned_write()"))
158 | goto free_w_buf;
159 |
160 | memcpy(w_buf, mid_msg, u_tv->mid_msg_len);
161 | err = fla_object_unaligned_write(fs_tv->fs, fs_tv->pool_handle, &fs_tv->object_handle, w_buf,
162 | u_tv->mid_start, u_tv->mid_msg_len);
163 | if(FLA_ERR(err, "fla_object_unaligned_write()"))
164 | goto free_w_buf;
165 |
166 | err = fla_object_read(fs_tv->fs, fs_tv->pool_handle, &fs_tv->object_handle, a_buf, 0,
167 | aligned_msg_len);
168 | if(FLA_ERR(err, "fla_obj_read()"))
169 | goto free_w_buf;
170 |
171 | err = memcmp(a_buf, u_tv->orig_msg, orig_msg_len);
172 | if(FLA_ERR(err, "memcmp() - failed to read back the written value"))
173 | goto free_w_buf;
174 |
175 | if(aligned_msg_len > orig_msg_len)
176 | {
177 | if((err = FLA_ERR(*(a_buf + aligned_msg_len - 1) != '!', "Failed to maintain existing data")))
178 | goto free_w_buf;
179 | }
180 |
181 |
182 | free_w_buf:
183 | free(w_buf);
184 |
185 | free_a_buf:
186 | free(a_buf);
187 |
188 | exit:
189 | return err;
190 | }
191 |
192 | int
193 | test_unaligned_writes(struct fs_test_vals const * fs_tv)
194 | {
195 | int err = 0;
196 | unsigned int test_lengths[2] = {12, 600}, test_length;
197 | struct unaligned_test_vals u_tv;
198 |
199 | u_tv.orig_msg = malloc(test_lengths[0]);
200 | if((err = FLA_ERR(!u_tv.orig_msg, "malloc()")))
201 | goto exit;
202 |
203 | for(int i = 0; i < 2 ; ++i)
204 | {
205 | test_length = test_lengths[i];
206 |
207 | u_tv.orig_msg = realloc(u_tv.orig_msg, test_length);
208 | if((err = FLA_ERR(!u_tv.orig_msg, "realloc()")))
209 | goto free_orig_msg;
210 |
211 | fla_t_fill_buf_random(u_tv.orig_msg, test_length - 1); u_tv.orig_msg[test_length - 1] = '\0';
212 | u_tv.lhs_msg_len = test_length / 2;
213 | u_tv.rhs_msg_len = test_length - u_tv.lhs_msg_len;
214 | u_tv.mid_msg_len = u_tv.rhs_msg_len;
215 | u_tv.mid_start = u_tv.mid_msg_len / 2;
216 |
217 | err = test_unaligned_write(fs_tv, &u_tv);
218 | if(FLA_ERR(err, "test_unaligned_write()"))
219 | goto free_orig_msg;
220 | }
221 |
222 | free_orig_msg:
223 | free(u_tv.orig_msg);
224 |
225 | exit:
226 | return err;
227 | }
228 |
229 |
230 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_pool.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) Jesper Devantier
2 | #include "flexalloc.h"
3 | #include "tests/flexalloc_tests_common.h"
4 | #include "flexalloc_util.h"
5 | #include "flexalloc_mm.h"
6 | #include "libflexalloc.h"
7 | #include
8 | #include
9 |
10 | struct ut_pool_entry
11 | {
12 | char *name;
13 | struct fla_pool *handle;
14 | };
15 |
16 | struct ut_pool_entry pools[] =
17 | {
18 | {.name = "pool1", .handle = NULL},
19 | {.name = "pool2", .handle = NULL},
20 | {.name = "pool3", .handle = NULL},
21 | {.name = "pool4", .handle = NULL},
22 | };
23 |
24 | int
25 | __pool_handle_eq(struct fla_pool *h1, struct fla_pool *h2)
26 | {
27 | return (h1->h2 == h2->h2 && h1->ndx == h2->ndx);
28 | }
29 |
30 | void
31 | __pp_handle(FILE *stream, struct fla_pool *h)
32 | {
33 | fprintf(stream, "handle{ndx: %"PRIu32", h2: %"PRIx64"}", h->ndx, h->h2);
34 | }
35 |
36 | size_t pools_len = sizeof(pools)/sizeof(struct ut_pool_entry);
37 |
38 |
39 | int
40 | lookup_and_validate_pool_handles(struct flexalloc *fs)
41 | {
42 | /*
43 | * For each entry in the 'pools' array, find/lookup the pool by name.
44 | * Furthermore, compare the returned handle's data to the handle acquired
45 | * at the point of creation (see main function).
46 | */
47 | struct fla_pool * handle;
48 | int err = 0, ret;
49 |
50 | for(unsigned int i = 0; i < pools_len; i++)
51 | {
52 | ret = fla_pool_open(fs, pools[i].name, &handle) == 0;
53 | err |= FLA_ASSERTF(ret, "fla_pool_open(): pool (%s) not found", pools[i].name);
54 |
55 | if (err)
56 | goto exit;
57 |
58 | ret = __pool_handle_eq(handle, pools[i].handle);
59 | err |= FLA_ASSERT(ret,
60 | "__pool_handle_eq() - acquired pool handle from lookup differs from expected");
61 | if (err)
62 | {
63 | FLA_ERR_PRINTF("pool '%s' (index: %u):\n", pools[i].name, i);
64 | FLA_ERR_PRINTF(" * handle(actual): {ndx: %"PRIu32", h2: %"PRIx64"}\n", handle->ndx,
65 | handle->h2);
66 | FLA_ERR_PRINTF(" * handle(expected): {ndx: %"PRIu32", h2: %"PRIx64"}\n",
67 | pools[i].handle->ndx,
68 | pools[i].handle->h2);
69 | goto exit;
70 | }
71 |
72 | fla_pool_close(fs, handle);
73 | }
74 |
75 | exit:
76 | return err;
77 | }
78 |
79 |
80 | int
81 | main(int argc, char **argv)
82 | {
83 | struct fla_ut_dev dev;
84 | struct flexalloc *fs = NULL;
85 | struct fla_pool * handle;
86 | struct fla_open_opts open_opts = {0};
87 | int err = 0, ret;
88 |
89 | err = fla_ut_dev_init(40000, &dev);
90 | if (FLA_ERR(err, "fla_ut_dev_init()"))
91 | {
92 | goto exit;
93 | }
94 |
95 | if (dev._is_zns)
96 | {
97 | err = fla_ut_fs_create(dev.nsect_zn, 4, &dev, &fs);
98 | }
99 | else
100 | {
101 | err = fla_ut_fs_create(4000, 4, &dev, &fs);
102 | }
103 |
104 | if (FLA_ERR(err, "fla_ut_fs_create()"))
105 | {
106 | goto teardown_ut_dev;
107 | }
108 |
109 | // create pools
110 | for (unsigned int i = 0; i < pools_len; i++)
111 | {
112 |
113 | struct fla_pool_create_arg pool_arg =
114 | {
115 | .flags = 0,
116 | .name = pools[i].name,
117 | .name_len = strlen(pools[i].name),
118 | .obj_nlb = 2
119 | };
120 |
121 | if (dev._is_zns)
122 | {
123 | pool_arg.obj_nlb = dev.nsect_zn;
124 | }
125 |
126 | ret = fla_pool_create(fs, &pool_arg, &pools[i].handle) == 0 ;
127 | err |= FLA_ASSERTF(ret,
128 | "fla_pool_create(fs, name: %s, len: %u, obj_nlb: %u, handle) - initial acquire failed",
129 | pools[i].name, (unsigned)strlen(pools[i].name), pool_arg.obj_nlb);
130 |
131 | if (err)
132 | goto teardown_ut_fs;
133 | }
134 |
135 | // lookup pools and validate the returned handles
136 | err = lookup_and_validate_pool_handles(fs);
137 | if (FLA_ERR(err, "lookup_and_validate_pool_handles()"))
138 | goto teardown_ut_fs;
139 |
140 | // close flexalloc system - should flush changes to disk
141 | err = fla_close(fs);
142 | if (FLA_ERR(err, "fla_close()"))
143 | goto teardown_ut_fs;
144 |
145 | open_opts.dev_uri = dev._dev_uri;
146 | open_opts.md_dev_uri = dev._md_dev_uri;
147 | err = fla_open(&open_opts, &fs);
148 | if (FLA_ERR(err, "fla_open() - failed to re-open device"))
149 | goto teardown_ut_fs;
150 |
151 | // lookup pools and validate handles (again) - if still OK, changes persist across flexalloc open/close
152 | err = lookup_and_validate_pool_handles(fs);
153 | if (FLA_ERR(err, "lookup_and_validate_pool_handles()"))
154 | goto teardown_ut_fs;
155 |
156 | // destroy each pool
157 | for (unsigned int i = 0; i < pools_len; i++)
158 | {
159 | err = fla_pool_destroy(fs, pools[i].handle);
160 | if (FLA_ERR(err, "fla_pool_destroy()"))
161 | goto teardown_ut_fs;
162 | }
163 |
164 | // ensure pools cannot be found
165 | for (unsigned int i = 0; i < pools_len; i++)
166 | {
167 | err = fla_pool_open(fs, pools[i].name, &handle) == 0;
168 | if (FLA_ERR(err, "fla_pool_open() - found destroyed pool"))
169 | {
170 | __pp_handle(stderr, handle);
171 | fprintf(stderr, "\n");
172 | err |= 1;
173 | }
174 | }
175 |
176 | // close flexalloc system - changes should be persisted
177 | err = fla_close(fs);
178 | if (FLA_ERR(err, "fla_close()"))
179 | goto teardown_ut_fs;
180 |
181 | // We need to reopen the device to verify pools have been destroyed
182 | err = fla_open(&open_opts, &fs);
183 | if (FLA_ERR(err, "fla_open() - failed to re-open device"))
184 | goto teardown_ut_fs;
185 |
186 | for (unsigned int i = 0; i < pools_len; i++)
187 | {
188 | err = fla_pool_open(fs, pools[i].name, &handle) == 0;
189 | if (FLA_ERR( err, "fla_pool_open() - found pool, should not have"))
190 | {
191 | __pp_handle(stderr, handle);
192 | fprintf(stderr, "\n");
193 | err |= 1;
194 | }
195 | }
196 | if (err)
197 | goto teardown_ut_fs;
198 |
199 | teardown_ut_fs:
200 | ret = fla_ut_fs_teardown(fs);
201 | if (FLA_ERR(ret, "fla_ut_fs_teardown()"))
202 | {
203 | err = ret;
204 | }
205 |
206 | teardown_ut_dev:
207 | ret = fla_ut_dev_teardown(&dev);
208 | if (FLA_ERR(ret, "fla_ut_dev_teardown()"))
209 | {
210 | err = ret;
211 | }
212 |
213 | exit:
214 | return err;
215 | }
216 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_strp_object_read_write.c:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include "src/libflexalloc.h"
7 | #include "src/flexalloc.h"
8 | #include "src/flexalloc_util.h"
9 | #include "tests/flexalloc_tests_common.h"
10 |
11 | struct test_vals
12 | {
13 | uint32_t obj_nstrp;
14 | uint32_t strp_nlbs;
15 | uint32_t npools;
16 | uint32_t strp_nobj;
17 |
18 | uint32_t obj_nlb;
19 | uint32_t slab_nlb;
20 | uint32_t blk_nlbs;
21 |
22 | uint32_t xfer_snlb;
23 | uint32_t xfer_nlbs;
24 | uint32_t xfer_snstrps;
25 | uint32_t xfer_nstrps;
26 | };
27 |
28 | #define NUM_TESTS 1
29 | struct test_vals tests[] =
30 | {
31 | // Simple write all.
32 | {
33 | .obj_nstrp = 3072, .strp_nlbs = 2, .npools = 1, .strp_nobj = 2,
34 | .obj_nlb = 0, .blk_nlbs = 0, .slab_nlb = 0,
35 | .xfer_snlb = 0, .xfer_nlbs = 0,
36 | .xfer_snstrps = 0, .xfer_nstrps = 4,
37 | },
38 |
39 | // start from second chunk and wrap around
40 | {
41 | .obj_nstrp = 3072, .strp_nlbs = 4, .npools = 1, .strp_nobj = 4,
42 | .obj_nlb = 0, .blk_nlbs = 0, .slab_nlb = 0,
43 | .xfer_snlb = 0, .xfer_nlbs = 0,
44 | .xfer_snstrps = 1, .xfer_nstrps = 1
45 | },
46 |
47 | // several transfers in each object
48 | {
49 | .obj_nstrp = 3072, .strp_nlbs = 4, .npools = 1, .strp_nobj = 4,
50 | .obj_nlb = 0, .blk_nlbs = 0, .slab_nlb = 0,
51 | .xfer_snlb = 4, .xfer_nlbs = 48,
52 | .xfer_snstrps = 0, .xfer_nstrps = 3072 * 2
53 | },
54 | };
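/*
 * Worked example for the first entry, following the calculations in
 * test_strp() below (the zeroed fields are derived at runtime):
 *
 *   obj_nlb   = obj_nstrp * strp_nlbs      = 3072 * 2     = 6144
 *   slab_nlb  = obj_nlb * strp_nobj * 4    = 6144 * 2 * 4 = 49152
 *   blk_nlbs  = slab_nlb * 10              = 491520
 *   xfer_snlb = xfer_snstrps * strp_nlbs   = 0 * 2        = 0
 *   xfer_nlbs = xfer_nstrps * strp_nlbs    = 4 * 2        = 8
 *
 * i.e. the transfer covers 8 logical blocks starting at the first stripe.
 */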
55 |
56 | bool
57 | should_write_fail(struct fla_ut_dev * dev, struct test_vals * test_vals)
58 | {
59 | if(dev->_is_zns)
60 | {
61 | if(test_vals->xfer_snstrps > test_vals->obj_nstrp)
62 | return true;
63 | }
64 | return false;
65 | }
66 |
67 |
68 | int
69 | test_strp(struct test_vals test_vals)
70 | {
71 | int err = 0, ret;
72 | char * pool_handle_name, *write_buf, *read_buf;
73 | size_t buf_len;
74 | uint64_t xfer_offset;
75 | struct fla_ut_dev dev;
76 | struct flexalloc *fs = NULL;
77 | struct fla_pool *pool_handle;
78 | struct fla_object obj;
79 | struct fla_open_opts open_opts = {0};
80 |
81 | pool_handle_name = "mypool";
82 |
83 | if(FLA_ERR(test_vals.xfer_nstrps < 1, "Test needs to transfer more than zero lbs"))
84 | goto exit;
85 |
86 | test_vals.obj_nlb = test_vals.obj_nstrp * test_vals.strp_nlbs;
87 | test_vals.slab_nlb = test_vals.obj_nlb * test_vals.strp_nobj * 4;
88 | test_vals.blk_nlbs = test_vals.slab_nlb * 10;
89 |
90 | err = fla_ut_dev_init(test_vals.blk_nlbs, &dev);
91 | if (FLA_ERR(err, "fla_ut_dev_init()"))
92 | goto exit;
93 |
94 | if(test_vals.blk_nlbs != dev.nblocks)
95 | {
96 | // "Real" device
97 | if(!dev._is_zns)
98 | goto teardown_ut_dev; // ignore non ZNS for now.
99 |
100 | test_vals.obj_nlb = dev.nsect_zn;
101 | if (test_vals.obj_nlb % test_vals.obj_nstrp > 0)
102 | goto teardown_ut_dev; // zone size must be a multiple of obj_nstrp
103 | test_vals.strp_nlbs = test_vals.obj_nlb / test_vals.obj_nstrp;
104 |
105 | test_vals.slab_nlb = test_vals.obj_nlb * test_vals.strp_nobj * 4;
106 | test_vals.blk_nlbs = dev.nblocks;
107 | }
108 |
109 | test_vals.xfer_snlb = test_vals.xfer_snstrps * test_vals.strp_nlbs;
110 | test_vals.xfer_nlbs = test_vals.xfer_nstrps * test_vals.strp_nlbs;
111 |
112 | err = fla_ut_fs_create(test_vals.slab_nlb, test_vals.npools, &dev, &fs);
113 | if (FLA_ERR(err, "fla_ut_fs_create()"))
114 | {
115 | goto teardown_ut_dev;
116 | }
117 |
118 | buf_len = test_vals.xfer_nlbs * dev.lb_nbytes;
119 | xfer_offset = test_vals.xfer_snlb * dev.lb_nbytes;
120 |
121 | struct fla_pool_create_arg pool_arg =
122 | {
123 | .flags = 0,
124 | .name = pool_handle_name,
125 | .name_len = strlen(pool_handle_name),
126 | .obj_nlb = test_vals.obj_nlb,
127 | .strp_nobjs = test_vals.strp_nobj,
128 | .strp_nbytes = test_vals.strp_nlbs * dev.lb_nbytes,
129 | };
130 |
131 | err = fla_pool_create(fs, &pool_arg, &pool_handle);
132 | if(FLA_ERR(err, "fla_pool_create()"))
133 | goto teardown_ut_fs;
134 |
135 | err = fla_object_create(fs, pool_handle, &obj);
136 | if(FLA_ERR(err, "fla_object_create()"))
137 | goto release_pool;
138 |
139 | write_buf = fla_buf_alloc(fs, buf_len);
140 | if((err = FLA_ERR(!write_buf, "fla_buf_alloc()")))
141 | goto release_object;
142 |
143 | fla_t_fill_buf_random(write_buf, buf_len);
144 | write_buf[buf_len - 1] = '\0';
145 |
146 | err = fla_object_write(fs, pool_handle, &obj, write_buf, xfer_offset, buf_len);
147 | if(should_write_fail(&dev, &test_vals) && err == 0)
148 | {
149 | FLA_ERR(1, "fla_object_write(): Expected write failure, got success");
150 | goto free_write_buffer;
151 | }
152 |
153 | if(FLA_ERR(err, "fla_object_write()"))
154 | goto free_write_buffer;
155 |
156 | err = fla_close(fs);
157 | if(FLA_ERR(err, "fla_close()"))
158 | goto free_write_buffer;
159 |
160 | open_opts.dev_uri = dev._dev_uri;
161 | open_opts.md_dev_uri = dev._md_dev_uri;
162 | err = fla_open(&open_opts, &fs);
163 | if(FLA_ERR(err, "fla_open()"))
164 | goto free_write_buffer;
165 |
166 | fla_pool_close(fs, pool_handle);
167 | err = fla_pool_open(fs, pool_handle_name, &pool_handle);
168 | if(FLA_ERR(err, "fla_pool_open()"))
169 | goto free_write_buffer;
170 |
171 | err = fla_object_open(fs, pool_handle, &obj);
172 | if(FLA_ERR(err, "fla_object_open()"))
173 | goto free_write_buffer;
174 |
175 | read_buf = fla_buf_alloc(fs, buf_len);
176 | if((err = FLA_ERR(!read_buf, "fla_buf_alloc()")))
177 | goto free_write_buffer;
178 | memset(read_buf, 0, buf_len);
179 | read_buf[buf_len - 1] = '\0';
180 |
181 | err = fla_object_read(fs, pool_handle, &obj, read_buf, xfer_offset, buf_len);
182 | if(FLA_ERR(err, "fla_obj_read()"))
183 | goto free_read_buffer;
184 |
185 | // Compare that the written data was read back intact
186 | err = memcmp(write_buf, read_buf, buf_len);
187 |
188 | if(FLA_ERR(err, "Unexpected value for memcmp"))
189 | goto free_write_buffer;
190 |
191 | // Free the object, which should reset a zone
192 | ret = fla_object_destroy(fs, pool_handle, &obj);
193 | if(FLA_ERR(ret, "fla_object_destroy()"))
194 | err = ret;
195 |
196 | // Allocate a new object
197 | err = fla_object_create(fs, pool_handle, &obj);
198 | if(FLA_ERR(err, "fla_object_create()"))
199 | goto release_pool;
200 |
201 | // Will fail on zns without object reset
202 | err = fla_object_write(fs, pool_handle, &obj, write_buf, 0, buf_len);
203 | if(FLA_ERR(err, "fla_object_write()"))
204 | goto free_write_buffer;
205 |
206 | free_read_buffer:
207 | fla_buf_free(fs, read_buf);
208 |
209 | free_write_buffer:
210 | fla_buf_free(fs, write_buf);
211 |
212 | release_object:
213 | ret = fla_object_destroy(fs, pool_handle, &obj);
214 | if(FLA_ERR(ret, "fla_object_destroy()"))
215 | err = ret;
216 |
217 | release_pool:
218 | ret = fla_pool_destroy(fs, pool_handle);
219 | if(FLA_ERR(ret, "fla_pool_destroy()"))
220 | err = ret;
221 |
222 | teardown_ut_fs:
223 | ret = fla_ut_fs_teardown(fs);
224 | if (FLA_ERR(ret, "fla_ut_fs_teardown()"))
225 | {
226 | err = ret;
227 | }
228 |
229 | teardown_ut_dev:
230 | ret = fla_ut_dev_teardown(&dev);
231 | if (FLA_ERR(ret, "fla_ut_dev_teardown()"))
232 | {
233 | err = ret;
234 | }
235 |
236 | exit:
237 | return err;
238 | }
239 |
240 | int
241 | main(int argc, char **argv)
242 | {
243 | int err = 0;
244 | // Seed rand() with the pid so each run produces different values
245 | srand(getpid());
246 |
247 | for (int i = 0 ; i < NUM_TESTS ; ++i)
248 | err |= test_strp(tests[i]);
249 |
250 | return err;
251 | }
252 |
--------------------------------------------------------------------------------
/tests/flexalloc_rt_xnvme_to_from.c:
--------------------------------------------------------------------------------
1 | //Copyright (C) 2021 Joel Granados
2 | #define FLA_TESTING
3 | #include
4 | #include "flexalloc_tests_common.h"
5 | #include "flexalloc_util.h"
6 | #include "flexalloc_xnvme_env.h"
7 | #include
8 | #include
9 |
10 | int test_to_stg(const int buf_size, const int blk_num, const int blk_size,
11 | struct fla_ut_lpbk * lpbk, struct xnvme_dev * xnvme_dev, char * buf);
12 |
13 | int test_from_stg(const int buf_size, const int blk_num, const int blk_size,
14 | struct fla_ut_lpbk * lpbk, struct xnvme_dev * xnvme_dev, char * buf);
15 |
16 | int
17 | main(int argc, char ** argv)
18 | {
19 | int ret = 0, err = 0, blk_size = 512, blk_num = 3,
20 | buf_size = blk_size*blk_num;
21 | struct fla_ut_lpbk * lpbk;
22 | struct xnvme_dev * xnvme_dev;
23 | char * buf;
24 |
25 | ret = fla_ut_lpbk_dev_alloc(blk_size, blk_num, &lpbk);
26 | if(FLA_ERR(ret, "fla_ut_lpbk_dev_alloc()"))
27 | {
28 | goto exit;
29 | }
30 |
31 | ret = fla_xne_dev_open(lpbk->dev_name, NULL, &xnvme_dev);
32 | if((ret = FLA_ERR(ret, "fla_xne_dev_open()")))
33 | {
34 | xnvmec_perr("xnvme_dev_open()", errno);
35 | ret = -EIO;
36 | goto loop_free;
37 | }
38 |
39 | buf = fla_xne_alloc_buf(xnvme_dev, buf_size);
40 | if((ret = FLA_ERR(!buf, "fla_buf_alloc()")))
41 | {
42 | goto close_dev;
43 | }
44 |
45 | ret = test_to_stg(buf_size, blk_num, blk_size, lpbk, xnvme_dev, buf);
46 | if(FLA_ERR(ret, "test_to_stg()"))
47 | {
48 | goto free_buf;
49 | }
50 |
51 | ret = test_from_stg(buf_size, blk_num, blk_size, lpbk, xnvme_dev, buf);
52 | if(FLA_ERR(ret, "test_from_stg()"))
53 | {
54 | goto free_buf;
55 | }
56 |
57 | free_buf:
58 | fla_xne_free_buf(xnvme_dev, buf);
59 |
60 | close_dev:
61 | xnvme_dev_close(xnvme_dev);
62 |
63 | loop_free:
64 | err = fla_ut_lpbk_dev_free(lpbk);
65 | if(FLA_ERR(err, "fla_ut_lpbk_dev_free()") && !ret)
66 | {
67 | ret = err;
68 | }
69 |
70 | exit:
71 | return ret != 0;
72 | }
73 |
74 | int
75 | test_from_stg(const int buf_size, const int blk_num, const int blk_size,
76 | struct fla_ut_lpbk * lpbk, struct xnvme_dev * xnvme_dev, char * buf)
77 | {
78 | int ret = 0, num_ones;
79 |
80 | ret = fla_ut_lpbk_overwrite('1', lpbk);
81 | if(FLA_ERR(ret, "fla_ut_lpbk_overwrite()"))
82 | {
83 | goto exit;
84 | }
85 |
86 | for(int slba = 0; slba < blk_num; ++slba)
87 | {
88 | for(int elba = slba; elba < blk_num; ++ elba)
89 | {
90 | memset(buf, '0', buf_size);
91 |
92 | struct xnvme_lba_range range = fla_xne_lba_range_from_slba_naddrs(xnvme_dev, slba,
93 | elba - slba + 1);
94 | if((ret = FLA_ERR(range.attr.is_valid != 1, "fla_xne_lba_range_from_slba_naddrs()")))
95 | goto exit;
96 | struct fla_xne_io xne_io =
97 | {
98 | .dev = xnvme_dev,
99 | .buf = buf,
100 | .lba_range = &range,
101 | .fla_dp = NULL
102 | };
103 |
104 | ret = fla_xne_sync_seq_r_xneio(&xne_io);
105 | if(FLA_ERR(ret, "fla_xne_sync_seq_r_xneio()"))
106 | goto exit;
107 |
108 | num_ones = fla_ut_count_char_in_buf('1', buf, buf_size);
109 |
110 | ret = FLA_ASSERTF(num_ones == (elba - slba + 1) * blk_size,
111 | "Read unexpected number of bytes. slba : %d, elba : %d, num_ones : %d",
112 | slba, elba, num_ones);
113 | if(FLA_ERR(ret, "FLA_ASSERT()"))
114 | goto exit;
115 | }
116 | }
117 |
118 | exit:
119 | return ret;
120 | }
121 |
122 | int
123 | test_to_stg(const int buf_size, const int blk_num, const int blk_size,
124 | struct fla_ut_lpbk * lpbk, struct xnvme_dev * xnvme_dev, char * buf)
125 | {
126 | int ret = 0, num_ones;
127 | memset(buf, '1', buf_size);
128 |
129 | for(int slba = 0; slba < blk_num; ++slba)
130 | {
131 | for(int elba = slba; elba < blk_num; ++ elba)
132 | {
133 | ret = fla_ut_lpbk_overwrite('0', lpbk);
134 | if(FLA_ERR(ret, "fla_ut_lpbk_overwrite()"))
135 | goto exit;
136 |
137 | struct xnvme_lba_range range;
138 | range = fla_xne_lba_range_from_slba_naddrs(xnvme_dev, slba, elba - slba + 1);
139 | if ((ret = FLA_ERR(range.attr.is_valid != 1, "fla_xne_lba_range_from_slba_naddrs()")))
140 | goto exit;
141 |
142 | struct fla_xne_io xne_io = {.dev = xnvme_dev, .buf = buf, .lba_range = &range, .fla_dp = NULL};
143 | ret = fla_xne_sync_seq_w_xneio(&xne_io);
144 | if(FLA_ERR(ret, "fla_xne_sync_seq_w_xneio()"))
145 | goto exit;
146 |
147 | ret = fsync(lpbk->bfile_fd);
148 | if((ret = FLA_ERR_ERRNO(ret, "fsync()")))
149 | goto exit;
150 |
151 | ret = fla_ut_count_char_in_file('1', lpbk->bfile_fd, 0, blk_size*blk_num, &num_ones);
152 | if(FLA_ERR(ret, "fla_ut_count_char_in_file()"))
153 | goto exit;
154 |
155 | ret = FLA_ASSERTF(num_ones == (elba - slba + 1) * blk_size,
156 | "Wrote unexpected number of bytes. slba : %d, elba : %d, num_ones : %d",
157 | slba, elba, num_ones);
158 | if(FLA_ERR(ret, "FLA_ASSERT()"))
159 | goto exit;
160 | }
161 | }
162 |
163 | exit:
164 | return ret;
165 | }
166 |
167 |
--------------------------------------------------------------------------------
/tests/flexalloc_tests_common.h:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Joel Granados
2 | // Copyright (C) 2021 Jesper Devantier
3 | // Copyright (C) 2021 Adam Manzanares
4 |
5 | #ifndef __FLEXALLOC_TESTS_COMMON_H
6 | #define __FLEXALLOC_TESTS_COMMON_H
7 |
8 | #include
9 | #include "flexalloc_xnvme_env.h"
10 | #include "flexalloc_mm.h"
11 |
12 | /**
13 | * Loopback elements
14 | */
15 | #define FLA_UT_DEV_NAME_SIZE 1024
16 | #define FLA_UT_BACKING_FILE_NAME_SIZE 255
17 |
18 | #define FLA_TEST_LPBK_REMOVE_BFILE "FLA_TEST_LPBK_REMOVE_BFILE"
19 | /**
20 | * Loop back devices
21 | */
22 | struct fla_ut_lpbk
23 | {
24 | uint64_t block_size; // block size, in bytes
25 | uint64_t size; // total size, in bytes
26 | int bfile_fd; // file descriptor of backing file
27 | char * bfile_name; // name of backing file
28 | int dev_fd; // file descriptor of device file
29 | char * dev_name; // name of device
30 | };
31 |
32 | struct fla_ut_dev
33 | {
34 | /// block size, in bytes (can be read after initialization)
35 | uint64_t lb_nbytes;
36 | /// number of logical blocks on device (can be read after initialization)
37 | uint64_t nblocks;
38 | // ZNS
39 | /// Number of zones on zns device
40 | uint32_t nzones;
41 | /// Number of sectors per zone
42 | uint64_t nsect_zn;
43 |
44 | // internal
45 | /// device node path
46 | const char *_dev_uri;
47 | /// reference to loopback structure (if loopback device)
48 | struct fla_ut_lpbk *_loop;
49 | /// true iff. selected device is a loopback device
50 | uint8_t _is_loop;
51 | // ZNS
52 | /// ZNS requires a separate MD device
53 | const char *_md_dev_uri;
54 | uint8_t _is_zns;
55 | };
56 |
57 | #define FLA_TEST_SKIP_RETCODE 77
58 |
59 | bool
60 | is_globalenv_set(char const * glb);
61 |
62 | int
63 | fla_ut_lpbk_dev_alloc(uint64_t block_size, uint64_t nblocks, struct fla_ut_lpbk **loop);
64 |
65 | int
66 | fla_ut_lpbk_dev_free(struct fla_ut_lpbk *loop);
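/*
 * Minimal sketch of loopback setup/teardown (mirrors
 * tests/flexalloc_rt_xnvme_to_from.c):
 *
 *   struct fla_ut_lpbk *lpbk;
 *   int err = fla_ut_lpbk_dev_alloc(512, 3, &lpbk);   // 3 blocks of 512 bytes
 *   if (FLA_ERR(err, "fla_ut_lpbk_dev_alloc()"))
 *     return err;
 *   // ... lpbk->dev_name can be opened with fla_xne_dev_open() ...
 *   err = fla_ut_lpbk_dev_free(lpbk);
 */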
67 |
68 | int
69 | fla_ut_lpbk_fs_create(uint64_t lb_nbytes, uint64_t nblocks, uint32_t slab_nlb,
70 | uint32_t npools,
71 | struct fla_ut_lpbk **lpbk, struct flexalloc **fs);
72 |
73 | int
74 | fla_ut_lpbk_fs_destroy(struct fla_ut_lpbk *lpbk, struct flexalloc *fs);
75 |
76 | /**
77 | * True iff fla_ut_fs_create would use a device.
78 | *
79 | * Use this to determine if fla_ut_fs_create() would use a backing device.
80 | * Tests requiring a certain type of device can use this to check and return
81 |  * FLA_TEST_SKIP_RETCODE to skip execution if needed.
82 | */
83 | int
84 | fla_ut_dev_use_device(struct fla_ut_dev *dev);
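/*
 * Minimal sketch of the skip pattern described above (assumes the check is
 * made after fla_ut_dev_init() and that "uses a device" means FLA_TEST_DEV
 * is set, i.e. a real device rather than loopback):
 *
 *   if (fla_ut_dev_use_device(&dev))
 *     return FLA_TEST_SKIP_RETCODE;   // loopback-only test, skip on real HW
 */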
85 |
86 | /**
87 | * Get a wrapped device instance for testing.
88 | *
89 | * @param disk_min_512byte_blocks minimum number of 512B blocks required for test
90 |  *        (if using a loopback device, this becomes the disk size)
91 | * @param dev wrapped device
92 | */
93 | int
94 | fla_ut_dev_init(uint64_t disk_min_512byte_blocks, struct fla_ut_dev *dev);
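/*
 * Minimal sketch of the usual init/teardown pairing (mirrors the *_rt_* tests
 * in this directory):
 *
 *   struct fla_ut_dev dev;
 *   int err = fla_ut_dev_init(40000, &dev);
 *   if (FLA_ERR(err, "fla_ut_dev_init()"))
 *     return err;
 *   // dev.lb_nbytes, dev.nblocks (and dev._is_zns) can now be read
 *   err = fla_ut_dev_teardown(&dev);
 */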
95 |
96 | /**
97 | * Release use of test device
98 | *
99 | * @param dev test device
100 | */
101 | int
102 | fla_ut_dev_teardown(struct fla_ut_dev *dev);
103 |
104 | /**
105 | * Create a flexalloc instance using device if FLA_TEST_DEV is set, otherwise using loopback.
106 | *
107 | * @param slab_min_blocks minimum number of blocks
108 | * @param npools number of pools to allocate space for in flexalloc
109 | * @param dev contains information related to which device type was selected,
110 |  *        fields not prepended by underscore may be read.
111 | * @param fs flexalloc handle
112 | * @return 0 on success, error otherwise.
113 | */
114 | int
115 | fla_ut_fs_create(uint32_t slab_min_blocks, uint32_t npools,
116 | struct fla_ut_dev *dev, struct flexalloc **fs);
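/*
 * Minimal sketch of the create/teardown flow (condensed from
 * tests/flexalloc_rt_pool.c):
 *
 *   struct fla_ut_dev dev;
 *   struct flexalloc *fs = NULL;
 *   int err = fla_ut_dev_init(40000, &dev);
 *   if (FLA_ERR(err, "fla_ut_dev_init()"))
 *     return err;
 *   err = fla_ut_fs_create(4000, 4, &dev, &fs);
 *   if (FLA_ERR(err, "fla_ut_fs_create()"))
 *     goto teardown_dev;
 *   // ... exercise pools and objects through fs ...
 *   err = fla_ut_fs_teardown(fs);
 * teardown_dev:
 *   fla_ut_dev_teardown(&dev);
 */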
117 |
118 | /**
119 |  * Release the flexalloc instance and its backing (loopback) device, if any.
120 |  *
121 |  * The test-device wrapper itself is released separately via fla_ut_dev_teardown().
122 |  * @param fs flexalloc handle
123 |  *
124 |  * @return 0 on success, non-zero on error; cleanup still proceeds
125 | */
126 | int
127 | fla_ut_fs_teardown(struct flexalloc *fs);
128 |
129 | int
130 | fla_ut_temp_file_create(const int size, char * created_name);
131 |
132 | int
133 | fla_ut_lpbk_overwrite(const char c, struct fla_ut_lpbk * lpbk);
134 |
135 |
136 | int
137 | fla_ut_lpbk_offs_blk_fill(struct fla_ut_lpbk * lpbk);
138 |
139 | void
140 | fla_t_fill_buf_random(char * buf, const size_t size);
141 |
142 | /**
143 |  * Assert functions for the testing framework
144 | */
145 |
146 | /**
147 |  * Compares the file contents at the given offset with the expected string (length from strlen).
148 | *
149 | * @param file_fd file descriptor (file is already open)
150 | * @param file_offset lseek to this offset
151 | * @param expected_str strcmp with this string. Use the size of string to compare
152 | *
153 | * @return 0 on success, !=0 otherwise.
154 | */
155 | int fla_ut_assert_equal_within_file_char(const int file_fd, const int file_offset,
156 | const char * expected_str);
157 |
158 | int fla_ut_count_char_in_buf(const char c, const char * buf, const int size);
159 | int fla_ut_count_char_in_file(const char c, const int file_fd, const size_t file_offset,
160 | const size_t size, int * i);
161 |
162 | /**
163 | * Numerical comparison. It is implemented as a macro
164 | *
165 | * @param file_fd file descriptor (file is already open)
166 | * @param file_offset lseek to this offset
167 | * @param expected_val expected numerical value
168 | *
169 | * @return 0 on success, !=0 otherwise.
170 | */
171 | int fla_ut_assert_equal_within_file_int32_t(const int file_fd, const int file_offset,
172 | const int32_t expected_val);
173 | int fla_ut_assert_equal_within_file_int64_t(const int file_fd, const int file_offset,
174 | const int64_t expected_val);
175 |
176 | int
177 | fla_expr_assert(char *expr_s, int expr_result, char *err_msg, const char *func,
178 | const int line, ...);
179 | /**
180 | * Assert truthfulness of expression.
181 | *
182 | * @param expr any C expression which evaluates to a boolean value.
183 | * @param err_msg error message to display in case the assertion fails
184 | * @return 0 on success, != 0 otherwise.
185 | */
186 | #define FLA_ASSERT(expr, err_msg) \
187 | fla_expr_assert(#expr, expr, err_msg, __func__, __LINE__)
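/*
 * Usage sketch (same form as tests/flexalloc_ut_bits.c):
 *
 *   err |= FLA_ASSERT(ntz(4) == 2, "expected ntz(4) == 2 (100 in binary)");
 */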
188 |
189 | /**
190 | * Assert truthfulness of expression.
191 | *
192 | * @param expr any C expression - ordinary truthyness rules apply
193 | * @param err_msg error format string
194 | * @param ... arguments to formatted string
195 | * @return 0 on success, != 0 otherwise.
196 | */
197 | #define FLA_ASSERTF(expr, err_msg, ...) \
198 | fla_expr_assert(#expr, expr, err_msg, __func__, __LINE__, __VA_ARGS__)
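/*
 * Usage sketch (same form as tests/flexalloc_ut_slab.c):
 *
 *   err |= FLA_ASSERTF(*fs->slabs.fslab_num >= curr_free_slabs,
 *                      "Unexpected number of free slabs (%d >= %d)",
 *                      *fs->slabs.fslab_num, curr_free_slabs);
 */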
199 |
200 | #endif /* __FLEXALLOC_TESTS_COMMON_H */
201 |
--------------------------------------------------------------------------------
/tests/flexalloc_ut_bits.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Jesper Devantier
2 |
3 | #include
4 | #include "flexalloc_bits.h"
5 | #include "flexalloc_util.h"
6 | #include "flexalloc_tests_common.h"
7 |
8 | int
9 | test_ntz()
10 | {
11 | int err = 0;
12 | err |= FLA_ASSERT(ntz(0) == 32, "expected ntz(0) == 32");
13 |
14 | err |= FLA_ASSERT(ntz(~0) == 0, "expected ntz(~0) == 0");
15 |
16 | err |= FLA_ASSERT(ntz(4) == 2, "expected ntz(4) == 2 (100 in binary)");
17 |
18 | err |= FLA_ASSERT(ntz(256) == 8, "expected ntz(256) == 8");
19 |
20 | return err;
21 | }
22 |
23 | #define ASSERT_SET_BITS(val, num_set) \
24 | FLA_ASSERTF(count_set_bits(val) == num_set, "%u should have %u set bits", val, num_set)
25 |
26 |
27 | int
28 | test_count_set_bits()
29 | {
30 | int err = 0;
31 |
32 | err |= ASSERT_SET_BITS(0, 0);
33 | err |= ASSERT_SET_BITS(~0, 32);
34 | err |= ASSERT_SET_BITS(7, 3);
35 |
36 | return err;
37 | }
38 |
39 | int
40 | run_test(char *test_label, int (*test_fn)())
41 | {
42 |
43 | FLA_VBS_PRINTF("\n--- %s ---\n", test_label);
44 | return test_fn();
45 | }
46 |
47 | #define RUN_TEST(fn) run_test(#fn, fn);
48 |
49 | int
50 | main(int argc, char **argv)
51 | {
52 | int err = 0;
53 |
54 | err |= RUN_TEST(test_ntz);
55 | err |= RUN_TEST(test_count_set_bits);
56 |
57 | return err;
58 | }
59 |
--------------------------------------------------------------------------------
/tests/flexalloc_ut_slab.c:
--------------------------------------------------------------------------------
1 | // Copyright (C) 2021 Joel Granados
2 | #include "tests/flexalloc_tests_common.h"
3 | #include "flexalloc_util.h"
4 | #include "flexalloc_mm.h"
5 | #include "flexalloc_ll.h"
6 | #include
7 |
8 | struct test_vals
9 | {
10 | uint32_t npools;
11 | uint32_t min_disk_lbs; // Will be overridden by real size on "real" HW.
12 | float slab_size_p; // slab size in percent of disk size
13 | float obj_size_p; // object size in percent of slab size
14 | };
15 |
16 | static int test_slabs(struct test_vals * test_vals);
17 | static int test_check_slab_pointers(struct flexalloc * fs, const uint32_t expected_size);
18 |
19 | int
20 | main(int argc, char ** argv)
21 | {
22 | int err = 0;
23 |
24 | struct test_vals test_vals [] =
25 | {
26 | {.npools = 1, .min_disk_lbs = 100, .slab_size_p = 0.8, .obj_size_p = 0.8 }
27 | , {.npools = 1, .min_disk_lbs = 100, .slab_size_p = 0.8, .obj_size_p = 0.2 }
28 | , {.npools = 2, .min_disk_lbs = 100, .slab_size_p = 0.4, .obj_size_p = 0.8 }
29 | , {.npools = 2, .min_disk_lbs = 100, .slab_size_p = 0.4, .obj_size_p = 0.2 }
30 | , {.npools = 4, .min_disk_lbs = 100, .slab_size_p = 0.2, .obj_size_p = 0.8 }
31 | , {.npools = 4, .min_disk_lbs = 100, .slab_size_p = 0.2, .obj_size_p = 0.2 }
32 | , {.npools = 0, .min_disk_lbs = 0, .slab_size_p = 0, .obj_size_p = 0}
33 | };
34 |
35 | for(int i = 0 ; true ; ++i)
36 | {
37 | if (test_vals[i].npools == 0)
38 | goto exit;
39 | err = test_slabs(&test_vals[i]);
40 | if(FLA_ERR(err, "test_slabs()"))
41 | {
42 | goto exit;
43 | };
44 | }
45 |
46 | exit:
47 | return err;
48 | }
49 |
50 | int
51 | test_slabs(struct test_vals * test_vals)
52 | {
53 | int err = 0, ret;
54 | struct fla_ut_dev dev;
55 | struct flexalloc *fs;
56 | struct fla_slab_header *slab_header, *slab_error;
57 | uint32_t slab_nlb, obj_nlb, init_free_slabs;
58 |
59 | err = fla_ut_dev_init(test_vals->min_disk_lbs, &dev);
60 | if (FLA_ERR(err, "fla_ut_dev_init()"))
61 | {
62 | goto exit;
63 | }
64 | test_vals->min_disk_lbs = dev.nblocks;
65 |
66 | /* Skip for ZNS.
67 | * If we are testing ZNS, we will automatically modify slab size
68 | * rendering all our tests useless.
69 | */
70 | if(dev._is_zns)
71 | goto teardown_ut_dev;
72 |
73 | slab_error = malloc(sizeof(struct fla_slab_header));
74 | if (FLA_ERR(!slab_error, "malloc()"))
75 | {
76 | err = -ENOMEM;
77 | goto teardown_ut_dev;
78 | }
79 |
80 | slab_nlb = (uint32_t)(test_vals->min_disk_lbs * test_vals->slab_size_p);
81 | obj_nlb = (uint32_t)(slab_nlb * test_vals->obj_size_p);
82 |
83 | err = fla_ut_fs_create(slab_nlb, test_vals->npools, &dev, &fs);
84 | if (FLA_ERR(err, "fla_ut_fs_create()"))
85 | {
86 | goto free_slab_error;
87 | }
88 |
89 | init_free_slabs = *fs->slabs.fslab_num;
90 |
91 | err |= FLA_ASSERTF(test_vals->min_disk_lbs > fla_geo_slabs_lb_off(&fs->geo),
92 | "Slabs start after disk has ended (%"PRIu64" > %"PRIu64")",
93 | test_vals->min_disk_lbs, fla_geo_slabs_lb_off(&fs->geo));
94 |
95 | /* we need at least one slab */
96 | err |= FLA_ASSERTF(init_free_slabs >= 1, "Unexpected number of free slabs (%d >= 1)",
97 | init_free_slabs);
98 |
99 | err |= FLA_ASSERTF(*fs->slabs.fslab_head == 0,
100 | "Unexpected head ID (%d == %d)", *fs->slabs.fslab_head, 0);
101 |
102 | /* Acquire all the slabs and then release them all */
103 | for(uint32_t slab_offset = 0 ; slab_offset < init_free_slabs ; ++slab_offset)
104 | {
105 | slab_header = (void*)fs->slabs.headers + (slab_offset * sizeof(struct fla_slab_header));
106 |
107 | err = fla_acquire_slab(fs, obj_nlb, &slab_header);
108 | if(FLA_ERR(err, "fla_acquire_slab()"))
109 | {
110 | goto close_fs;
111 | }
112 |
113 | err = FLA_ASSERT(slab_header->next == FLA_LINKED_LIST_NULL,
114 | "Next pointer is not null after slab format");
115 | err |= FLA_ASSERT(slab_header->prev == FLA_LINKED_LIST_NULL,
116 | "Prev pointer is not null after slab format");
117 | if(FLA_ERR(err, "FLA_ASSERT()"))
118 | {
119 | goto close_fs;
120 | }
121 |
122 | const uint32_t curr_free_slabs = init_free_slabs - (slab_offset + 1);
123 | err = FLA_ASSERTF(*fs->slabs.fslab_num >= curr_free_slabs,
124 | "Unexpected number of free slabs (%d >= %d)",
125 | *fs->slabs.fslab_num, curr_free_slabs);
126 | if(FLA_ERR(err, "FLA_ASSERTF()"))
127 | {
128 | goto close_fs;
129 | }
130 |
131 | err = test_check_slab_pointers(fs, curr_free_slabs);
132 | if(FLA_ERR(err, "test_check_slab_pointers()"))
133 | {
134 | goto close_fs;
135 | }
136 | }
137 |
138 | /* If we acquire another slab, we should receive an error */
139 | ret = fla_acquire_slab(fs, obj_nlb, &slab_error);
140 | err = FLA_ASSERT(ret != 0, "Acquire of an empty free list did NOT fail");
141 | if(FLA_ERR(err, "FLA_ASSERT()"))
142 | {
143 | goto close_fs;
144 | }
145 |
146 | for(uint32_t slab_offset = 0 ; slab_offset < init_free_slabs ; ++slab_offset)
147 | {
148 | slab_header = (void*)fs->slabs.headers + (slab_offset * sizeof(struct fla_slab_header));
149 |
150 | err = fla_release_slab(fs, slab_header);
151 | if(FLA_ERR(err, "fla_release_slab()"))
152 | goto close_fs;
153 |
154 | err = FLA_ASSERTF(*fs->slabs.fslab_num >= slab_offset + 1,
155 | "Unexpected number of free slabs (%d >= %d)",
156 | *fs->slabs.fslab_num, slab_offset + 1);
157 | if(FLA_ERR(err, "FLA_ASSERTF()"))
158 | goto close_fs;
159 | }
160 |
161 | close_fs:
162 | ret = fla_ut_fs_teardown(fs);
163 | if (FLA_ERR(ret, "fla_ut_fs_teardown()"))
164 | {
165 | err = ret;
166 | }
167 |
168 | free_slab_error:
169 | free(slab_error);
170 |
171 | teardown_ut_dev:
172 | ret = fla_ut_dev_teardown(&dev);
173 | if (FLA_ERR(ret, "fla_ut_dev_teardown()"))
174 | {
175 | err |= ret;
176 | }
177 |
178 | exit:
179 | return err;
180 | }
181 |
182 | int
183 | test_check_slab_pointers(struct flexalloc * fs, const uint32_t curr_free_slabs)
184 | {
185 | int err = 0;
186 | struct fla_slab_header * curr_slab;
187 | uint32_t curr_slab_id, size_from_head = 0;
188 |
189 | /* check next pointers */
190 | curr_slab_id = *fs->slabs.fslab_head;
191 | for (uint32_t i = 0 ; i <= curr_free_slabs && curr_slab_id != INT32_MAX; ++i)
192 | {
193 | curr_slab = fla_slab_header_ptr(curr_slab_id, fs);
194 | if((err = -FLA_ERR(!curr_slab, "fla_slab_header_ptr()")))
195 | {
196 | goto exit;
197 | }
198 | curr_slab_id = curr_slab->next;
199 | size_from_head++;
200 | }
201 |
202 | err = FLA_ASSERTF(size_from_head >= curr_free_slabs,
203 | "Unexpected size when starting from head (%d == %d)",
204 | size_from_head, curr_free_slabs);
205 |
206 | /* check prev pointers */
207 | curr_slab_id = *fs->slabs.fslab_tail;
208 | for (uint32_t i = 0; i <= curr_free_slabs && curr_slab_id != INT32_MAX; ++i)
209 | {
210 | curr_slab = fla_slab_header_ptr(curr_slab_id, fs);
211 | if((err = -FLA_ERR(!curr_slab, "fla_slab_header_ptr()")))
212 | {
213 | goto exit;
214 | }
215 | curr_slab_id = curr_slab->prev;
216 | size_from_head++;
217 | }
218 |
219 | exit:
220 | return err;
221 | }
222 |
--------------------------------------------------------------------------------