├── .github
├── ISSUE_TEMPLATE
│ └── bug_report.md
├── pull_request_template.md
├── setup
│ └── action.yaml
└── workflows
│ ├── comment_commands.yml
│ ├── optional.yml
│ └── test.yml
├── .gitignore
├── .pre-commit-config.yaml
├── LICENSE
├── README.md
├── conftest.py
├── docs
├── README.md
├── philosophy.md
├── release_procedure.md
├── setup.md
└── tests.md
├── pandas-stubs
├── __init__.pyi
├── _config
│ ├── __init__.pyi
│ └── config.pyi
├── _libs
│ ├── __init__.pyi
│ ├── indexing.pyi
│ ├── interval.pyi
│ ├── json.pyi
│ ├── lib.pyi
│ ├── missing.pyi
│ ├── ops_dispatch.pyi
│ ├── properties.pyi
│ ├── sparse.pyi
│ ├── tslibs
│ │ ├── __init__.pyi
│ │ ├── base.pyi
│ │ ├── conversion.pyi
│ │ ├── nattype.pyi
│ │ ├── np_datetime.pyi
│ │ ├── offsets.pyi
│ │ ├── parsing.pyi
│ │ ├── period.pyi
│ │ ├── strptime.pyi
│ │ ├── timedeltas.pyi
│ │ └── timestamps.pyi
│ └── window
│ │ └── __init__.pyi
├── _testing
│ └── __init__.pyi
├── _typing.pyi
├── _version.pyi
├── api
│ ├── __init__.pyi
│ ├── extensions
│ │ └── __init__.pyi
│ ├── indexers
│ │ └── __init__.pyi
│ ├── interchange
│ │ └── __init__.pyi
│ ├── types
│ │ └── __init__.pyi
│ └── typing
│ │ └── __init__.pyi
├── arrays
│ └── __init__.pyi
├── core
│ ├── __init__.pyi
│ ├── accessor.pyi
│ ├── algorithms.pyi
│ ├── api.pyi
│ ├── arraylike.pyi
│ ├── arrays
│ │ ├── __init__.pyi
│ │ ├── arrow
│ │ │ └── dtype.pyi
│ │ ├── base.pyi
│ │ ├── boolean.pyi
│ │ ├── categorical.pyi
│ │ ├── datetimelike.pyi
│ │ ├── datetimes.pyi
│ │ ├── floating.pyi
│ │ ├── integer.pyi
│ │ ├── interval.pyi
│ │ ├── masked.pyi
│ │ ├── numeric.pyi
│ │ ├── numpy_.pyi
│ │ ├── period.pyi
│ │ ├── sparse
│ │ │ ├── __init__.pyi
│ │ │ ├── accessor.pyi
│ │ │ ├── array.pyi
│ │ │ └── dtype.pyi
│ │ ├── string_.pyi
│ │ └── timedeltas.pyi
│ ├── base.pyi
│ ├── common.pyi
│ ├── computation
│ │ ├── __init__.pyi
│ │ ├── align.pyi
│ │ ├── api.pyi
│ │ ├── common.pyi
│ │ ├── engines.pyi
│ │ ├── eval.pyi
│ │ ├── expr.pyi
│ │ ├── expressions.pyi
│ │ ├── ops.pyi
│ │ ├── parsing.pyi
│ │ ├── pytables.pyi
│ │ └── scope.pyi
│ ├── config_init.pyi
│ ├── construction.pyi
│ ├── dtypes
│ │ ├── __init__.pyi
│ │ ├── api.pyi
│ │ ├── base.pyi
│ │ ├── cast.pyi
│ │ ├── common.pyi
│ │ ├── concat.pyi
│ │ ├── dtypes.pyi
│ │ ├── generic.pyi
│ │ ├── inference.pyi
│ │ └── missing.pyi
│ ├── frame.pyi
│ ├── generic.pyi
│ ├── groupby
│ │ ├── __init__.pyi
│ │ ├── base.pyi
│ │ ├── categorical.pyi
│ │ ├── generic.pyi
│ │ ├── groupby.pyi
│ │ ├── grouper.pyi
│ │ ├── indexing.pyi
│ │ └── ops.pyi
│ ├── indexers.pyi
│ ├── indexes
│ │ ├── __init__.pyi
│ │ ├── accessors.pyi
│ │ ├── api.pyi
│ │ ├── base.pyi
│ │ ├── category.pyi
│ │ ├── datetimelike.pyi
│ │ ├── datetimes.pyi
│ │ ├── extension.pyi
│ │ ├── frozen.pyi
│ │ ├── interval.pyi
│ │ ├── multi.pyi
│ │ ├── period.pyi
│ │ ├── range.pyi
│ │ └── timedeltas.pyi
│ ├── indexing.pyi
│ ├── interchange
│ │ ├── __init__.pyi
│ │ ├── dataframe_protocol.pyi
│ │ └── from_dataframe.pyi
│ ├── missing.pyi
│ ├── ops
│ │ ├── __init__.pyi
│ │ ├── array_ops.pyi
│ │ ├── common.pyi
│ │ ├── dispatch.pyi
│ │ ├── docstrings.pyi
│ │ ├── invalid.pyi
│ │ └── mask_ops.pyi
│ ├── resample.pyi
│ ├── reshape
│ │ ├── __init__.pyi
│ │ ├── api.pyi
│ │ ├── concat.pyi
│ │ ├── encoding.pyi
│ │ ├── melt.pyi
│ │ ├── merge.pyi
│ │ ├── pivot.pyi
│ │ ├── tile.pyi
│ │ └── util.pyi
│ ├── series.pyi
│ ├── sparse
│ │ └── __init__.pyi
│ ├── strings.pyi
│ ├── tools
│ │ ├── __init__.pyi
│ │ ├── datetimes.pyi
│ │ ├── numeric.pyi
│ │ └── timedeltas.pyi
│ ├── util
│ │ ├── __init__.pyi
│ │ └── hashing.pyi
│ └── window
│ │ ├── __init__.pyi
│ │ ├── ewm.pyi
│ │ ├── expanding.pyi
│ │ └── rolling.pyi
├── errors
│ └── __init__.pyi
├── io
│ ├── __init__.pyi
│ ├── api.pyi
│ ├── clipboard
│ │ └── __init__.pyi
│ ├── clipboards.pyi
│ ├── common.pyi
│ ├── excel
│ │ ├── __init__.pyi
│ │ ├── _base.pyi
│ │ └── _util.pyi
│ ├── feather_format.pyi
│ ├── formats
│ │ ├── __init__.pyi
│ │ ├── css.pyi
│ │ ├── format.pyi
│ │ ├── style.pyi
│ │ └── style_render.pyi
│ ├── html.pyi
│ ├── json
│ │ ├── __init__.pyi
│ │ ├── _json.pyi
│ │ ├── _normalize.pyi
│ │ └── _table_schema.pyi
│ ├── orc.pyi
│ ├── parquet.pyi
│ ├── parsers.pyi
│ ├── parsers
│ │ ├── __init__.pyi
│ │ └── readers.pyi
│ ├── pickle.pyi
│ ├── pytables.pyi
│ ├── sas
│ │ ├── __init__.pyi
│ │ ├── sas7bdat.pyi
│ │ ├── sas_xport.pyi
│ │ └── sasreader.pyi
│ ├── spss.pyi
│ ├── sql.pyi
│ ├── stata.pyi
│ └── xml.pyi
├── plotting
│ ├── __init__.pyi
│ ├── _core.pyi
│ └── _misc.pyi
├── py.typed
├── testing.pyi
├── tseries
│ ├── __init__.pyi
│ ├── api.pyi
│ ├── frequencies.pyi
│ ├── holiday.pyi
│ └── offsets.pyi
└── util
│ ├── __init__.pyi
│ ├── _decorators.pyi
│ ├── _doctools.pyi
│ ├── _exceptions.pyi
│ ├── _print_versions.pyi
│ ├── _tester.pyi
│ └── version
│ └── __init__.pyi
├── pyproject.toml
├── pyrightconfig-strict.json
├── scripts
├── __init__.py
├── _job.py
└── test
│ ├── __init__.py
│ ├── _step.py
│ └── run.py
└── tests
├── __init__.py
├── data
├── SSHSV1_A.xpt
├── airline.sas7bdat
├── labelled-num.sav
└── myhtml_table.tpl
├── extension
├── __init__.py
└── decimal
│ ├── __init__.py
│ └── array.py
├── test_api_types.py
├── test_api_typing.py
├── test_config.py
├── test_dtypes.py
├── test_errors.py
├── test_extension.py
├── test_frame.py
├── test_groupby.py
├── test_holidays.py
├── test_indexes.py
├── test_interval.py
├── test_interval_index.py
├── test_io.py
├── test_merge.py
├── test_pandas.py
├── test_plotting.py
├── test_resampler.py
├── test_scalars.py
├── test_series.py
├── test_string_accessors.py
├── test_styler.py
├── test_testing.py
├── test_timefuncs.py
├── test_utility.py
└── test_windowing.py
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | 1. Provide a minimal runnable `pandas` example that is not properly checked by the stubs.
15 | 2. Indicate which type checker you are using (`mypy` or `pyright`).
16 | 3. Show the error message received from that type checker while checking your example.
17 |
18 |
19 | **Please complete the following information:**
20 | - OS: [e.g. Windows, Linux, MacOS]
21 | - OS Version [e.g. 22]
22 | - python version
23 | - version of type checker
24 | - version of installed `pandas-stubs`
25 |
26 |
27 | **Additional context**
28 | Add any other context about the problem here.
29 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | - [ ] Closes #xxxx (Replace xxxx with the Github issue number)
2 | - [ ] Tests added: Please use `assert_type()` to assert the type of any return value
3 |
--------------------------------------------------------------------------------
/.github/setup/action.yaml:
--------------------------------------------------------------------------------
1 | name: Install project dependencies
2 | description: Install project dependencies
3 |
4 | inputs:
5 | python-version:
6 | description: The python version to use
7 | required: true
8 | os:
9 | description: The OS to run on
10 | required: true
11 |
12 | runs:
13 | using: composite
14 | steps:
15 | - name: Set up Python
16 | uses: actions/setup-python@v5
17 | with:
18 | python-version: ${{ inputs.python-version }}
19 |
20 | - name: Install Poetry
21 | shell: bash
22 | run: python -m pip install poetry
23 |
24 | - name: Determine poetry version
25 | shell: bash
26 | run: echo "VERSION=$(poetry --version)" >> "$GITHUB_OUTPUT"
27 | id: poetry_version
28 |
29 | - name: Cache poetry.lock
30 | uses: actions/cache@v4
31 | with:
32 | path: poetry.lock
33 | key: ${{ inputs.os }}-${{ inputs.python-version }}-poetry-${{ steps.poetry_version.outputs.VERSION }}-${{ hashFiles('pyproject.toml') }}
34 |
35 | - name: Install project dependencies
36 | shell: bash
37 | run: poetry install -vvv --no-root
38 |
--------------------------------------------------------------------------------
/.github/workflows/comment_commands.yml:
--------------------------------------------------------------------------------
1 | name: Comment Commands to Trigger CI
2 | on:
3 | issue_comment:
4 | types: created
5 |
6 | permissions:
7 | checks: write
8 |
9 | env:
10 | # store mapping of commands to use with poetry
11 | RUN_COMMAND: '{"/pandas_nightly": "pytest --nightly", "/pyright_strict": "pyright_strict", "/mypy_nightly": "mypy --mypy_nightly"}'
12 | # store mapping of labels to display in the check runs
13 | DISPLAY_COMMAND: '{"/pandas_nightly": "Pandas nightly tests", "/pyright_strict": "Pyright strict tests", "/mypy_nightly": "Mypy nightly tests"}'
14 |
15 | jobs:
16 | optional_tests:
17 | name: "Optional tests run"
18 | runs-on: ubuntu-latest
19 | timeout-minutes: 10
20 | # if more commands are added, they will need to be added here too as we don't have access to env at this stage
21 | if: (github.event.issue.pull_request) && contains(fromJSON('["/pandas_nightly", "/pyright_strict", "/mypy_nightly"]'), github.event.comment.body)
22 |
23 | steps:
24 | - name: Get head sha, branch name and store value
25 | # get the sha of the last commit to attach the results of the tests
26 | if: always()
27 | id: get-branch-info
28 | uses: actions/github-script@v7
29 | with:
30 | github-token: ${{ secrets.GITHUB_TOKEN }}
31 | script: |
32 | const pr = await github.rest.pulls.get({
33 | owner: context.repo.owner,
34 | repo: context.repo.repo,
35 | pull_number: ${{ github.event.issue.number }}
36 | })
37 | core.setOutput('sha', pr.data.head.sha)
38 | core.setOutput('branch', pr.data.head.ref)
39 | core.setOutput('repository', pr.data.head.repo.full_name)
40 |
41 | - name: Checkout code on the correct branch
42 | uses: actions/checkout@v4
43 | with:
44 | # context is not aware which branch to checkout so it would otherwise
45 | # default to main (we also need repo name to source from the right user
46 | # otherwise it will look for the branch in pandas-stubs repo)
47 | ref: ${{ steps.get-branch-info.outputs.branch }}
48 | repository: ${{ steps.get-branch-info.outputs.repository }}
49 |
50 | - name: Install project dependencies
51 | uses: ./.github/setup
52 | with:
53 | os: ubuntu-latest
54 | python-version: "3.12"
55 |
56 | - name: Run ${{ fromJSON(env.DISPLAY_COMMAND)[github.event.comment.body] }}
57 | # run the tests based on the value of the comment
58 | id: tests-step
59 | run: poetry run poe ${{ fromJSON(env.RUN_COMMAND)[github.event.comment.body] }}
60 |
61 | - name: Report results of the tests and publish
62 | # publish the results to a check run no matter the pass or fail
63 | if: always()
64 | uses: actions/github-script@v7
65 | with:
66 | github-token: ${{ secrets.GITHUB_TOKEN }}
67 | script: |
68 | github.rest.checks.create({
69 | name: '${{ fromJSON(env.DISPLAY_COMMAND)[github.event.comment.body] }}',
70 | head_sha: '${{ steps.get-branch-info.outputs.sha }}',
71 | status: 'completed',
72 | conclusion: '${{ steps.tests-step.outcome }}',
73 | output: {
74 | title: 'Run ${{ fromJSON(env.DISPLAY_COMMAND)[github.event.comment.body] }}',
75 | summary: 'Results: ${{ steps.tests-step.outcome }}',
76 | text: 'See the actions run at ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}',
77 | },
78 | owner: context.repo.owner,
79 | repo: context.repo.repo
80 | })
81 |
--------------------------------------------------------------------------------
/.github/workflows/optional.yml:
--------------------------------------------------------------------------------
1 | name: 'Optional'
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | jobs:
10 | nightly:
11 | runs-on: ubuntu-latest
12 | timeout-minutes: 10
13 |
14 | steps:
15 | - uses: actions/checkout@v4
16 |
17 | - name: Install project dependencies
18 | uses: ./.github/setup
19 | with:
20 | os: ubuntu-latest
21 | python-version: '3.11'
22 |
23 | - name: Run pytest (against pandas nightly)
24 | run: poetry run poe pytest --nightly
25 |
26 | mypy_nightly:
27 | runs-on: ubuntu-latest
28 | timeout-minutes: 10
29 |
30 | steps:
31 | - uses: actions/checkout@v4
32 |
33 | - name: Install project dependencies
34 | uses: ./.github/setup
35 | with:
36 | os: ubuntu-latest
37 | python-version: '3.11'
38 |
39 | - name: Run mypy tests with mypy nightly
40 | run: poetry run poe mypy --mypy_nightly
41 |
42 | pyright_strict:
43 | runs-on: ubuntu-latest
44 | timeout-minutes: 10
45 |
46 | steps:
47 | - uses: actions/checkout@v4
48 |
49 | - name: Install project dependencies
50 | uses: ./.github/setup
51 | with:
52 | os: ubuntu-latest
53 | python-version: '3.11'
54 |
55 | - name: Run pyright tests with full strict mode
56 | run: poetry run poe pyright_strict
57 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: "Test"
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 | workflow_dispatch:
9 |
10 | env:
11 | MPLBACKEND: "Agg"
12 |
13 | jobs:
14 | released:
15 | runs-on: ${{ matrix.os }}
16 | timeout-minutes: 20
17 | strategy:
18 | fail-fast: false
19 | matrix:
20 | # macos-latest is arm
21 | os: [ubuntu-latest, windows-latest, macos-latest]
22 | python-version: ["3.10", "3.11", "3.12", "3.13"]
23 |
24 | name: OS ${{ matrix.os }} - Python ${{ matrix.python-version }}
25 |
26 | steps:
27 | - uses: actions/checkout@v4
28 |
29 | - name: Install project dependencies
30 | uses: ./.github/setup
31 | with:
32 | os: ${{ matrix.os }}
33 | python-version: ${{ matrix.python-version }}
34 |
35 | - name: Run mypy on 'tests' (using the local stubs) and on the local stubs
36 | run: poetry run poe mypy
37 |
38 | - name: Run ty on 'pandas-stubs' (using the local stubs) and on the local stubs
39 | run: poetry run poe ty
40 |
41 | - name: Run pyright on 'tests' (using the local stubs) and on the local stubs
42 | run: poetry run poe pyright
43 |
44 | - name: Run pytest
45 | run: poetry run poe pytest
46 |
47 | - name: Install pandas-stubs and run tests on the installed stubs
48 | run: poetry run poe test_dist
49 |
50 | precommit:
51 | runs-on: ubuntu-latest
52 | timeout-minutes: 10
53 |
54 | steps:
55 | - uses: actions/checkout@v4
56 |
57 | - uses: pre-commit/action@v3.0.0
58 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # PyCharm project settings
121 | .idea/
122 |
123 | # VSCode project settings
124 | .vscode/
125 |
126 | # mkdocs documentation
127 | /site
128 |
129 | # mypy
130 | .mypy_cache/
131 | .dmypy.json
132 | dmypy.json
133 |
134 | # Pyre type checker
135 | .pyre/
136 | /poetry.lock
137 | .idea/**/*
138 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | minimum_pre_commit_version: 2.15.0
2 | ci:
3 | autofix_prs: false
4 | repos:
5 | - repo: https://github.com/python/black
6 | rev: 25.1.0
7 | hooks:
8 | - id: black
9 | - repo: https://github.com/PyCQA/isort
10 | rev: 6.0.1
11 | hooks:
12 | - id: isort
13 | - repo: https://github.com/astral-sh/ruff-pre-commit
14 | rev: v0.11.5
15 | hooks:
16 | - id: ruff
17 | args: [
18 | --exit-non-zero-on-fix,
19 | --target-version, py39,
20 | --extend-select, "PYI,UP,RUF100",
21 | --ignore, "E501,E731,F841,PYI042",
22 | --per-file-ignores, "_*.pyi:PYI001",
23 | --fix
24 | ]
25 | - repo: https://github.com/codespell-project/codespell
26 | rev: v2.4.1
27 | hooks:
28 | - id: codespell
29 | additional_dependencies: [ tomli ]
30 | args: [-L, "THIRDPARTY"]
31 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2022, pandas
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/conftest.py:
--------------------------------------------------------------------------------
1 | import gc
2 |
3 | import pytest
4 |
5 |
6 | @pytest.fixture
7 | def mpl_cleanup():
8 | """
9 | Ensure Matplotlib is cleaned up around a test.
10 |
11 | Before a test is run:
12 |
13 | 1) Set the backend to "template" to avoid requiring a GUI.
14 |
15 | After a test is run:
16 |
17 | 1) Reset units registry
18 | 2) Reset rc_context
19 | 3) Close all figures
20 |
21 | See matplotlib/testing/decorators.py#L24.
22 | """
23 | mpl = pytest.importorskip("matplotlib")
24 | mpl_units = pytest.importorskip("matplotlib.units")
25 | plt = pytest.importorskip("matplotlib.pyplot")
26 | orig_units_registry = mpl_units.registry.copy()
27 | try:
28 | with mpl.rc_context():
29 | mpl.use("template")
30 | yield
31 | finally:
32 | mpl_units.registry.clear()
33 | mpl_units.registry.update(orig_units_registry)
34 | plt.close("all")
35 | # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close
36 | gc.collect(1)
37 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Development paradigm with pandas-stubs
2 |
3 | For any update to the stubs, we require an associated test case that should fail without
4 | a proposed change, and works with a proposed change. See the existing tests in the `tests` directory for examples.
5 |
6 | The stubs are developed with a certain [philosophy](philosophy.md) that should be
7 | understood by developers proposing changes to the stubs.
8 |
9 | Instructions for working with the code are found here:
10 |
11 | - [How to set up the environment](setup.md)
12 | - [How to test the project](tests.md)
13 | - [How to make a release](release_procedure.md)
14 |
--------------------------------------------------------------------------------
/docs/release_procedure.md:
--------------------------------------------------------------------------------
1 | # Release procedure
2 |
3 | 1. Determine version based on pandas version tested (a.b.c) and date yymmdd, to make
4 | version string a.b.c.yymmdd
5 | 2. Update `pyproject.toml` and `pandas-stubs/_version.pyi` to have the new version string
6 | 3. Execute the following commands:
7 |
8 | ```shell
9 | rm dist/*
10 | poetry build
11 | twine upload dist/* # Requires having the pypi API token allowing uploads
12 | git commit -a -m "Version a.b.c.yymmdd"
13 | git push upstream main
14 | git tag va.b.c.yymmdd
15 | git push upstream --tags
16 | ```
17 |
18 | The conda bots will recognize that a new version has been uploaded to pypi, and generate a pull request sent to the maintainers to approve it.
19 |
--------------------------------------------------------------------------------
/docs/setup.md:
--------------------------------------------------------------------------------
1 | ## Set Up Environment
2 |
3 | - Make sure you have `python >= 3.10` installed.
4 | - If using macOS, you may need to install `hdf5` (e.g., via `brew install hdf5`).
5 | - Install poetry: `pip install 'poetry>=1.8'`
6 | - Install the project dependencies: `poetry update`
7 | - Enter the virtual environment: `poetry shell`
8 | - Run all tests: `poe test_all`
9 | - Enable pre-commit: `pre-commit install`
10 | - Do you want to add a new dependency? `poetry add foo-pkg --group dev`
11 |
--------------------------------------------------------------------------------
/docs/tests.md:
--------------------------------------------------------------------------------
1 | ## Test
2 |
3 | [Poe](https://github.com/nat-n/poethepoet) is used to run all tests.
4 |
5 | Here are the most important options. For more details, please use `poe --help`.
6 |
7 | - Run all tests (against both source and installed stubs): `poe test_all`
8 | - Run tests against the source code: `poe test`
9 | - Run only mypy: `poe mypy`
10 | - Run only pyright: `poe pyright`
11 | - Run only pytest: `poe pytest`
12 | - Run only pre-commit: `poe style`
13 | - Run tests against the installed stubs (this will install and uninstall the stubs): `poe test_dist`
14 |
15 | These tests originally came from https://github.com/VirtusLab/pandas-stubs.
16 |
17 | The following tests are **optional**. Some of them are run by the CI but it is okay if they fail.
18 |
19 | - Run pytest against pandas nightly: `poe pytest --nightly`
20 | - Use mypy nightly to validate the annotations: `poe mypy --mypy_nightly`
21 | - Use pyright in full strict mode: `poe pyright_strict`
22 | - Run stubtest to compare the installed pandas-stubs against pandas (this will fail): `poe stubtest`. If you have created an allowlist to ignore certain errors: `poe stubtest path_to_the_allow_list`
23 |
24 | Among the tests above, the following can be run directly during a PR by commenting in the discussion.
25 |
26 | - Run pytest against pandas nightly by commenting `/pandas_nightly`
27 | - Use mypy nightly to validate the annotations by commenting `/mypy_nightly`
28 | - Use pyright in full strict mode by commenting `/pyright_strict`
29 |
--------------------------------------------------------------------------------
/pandas-stubs/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas import (
2 | api as api,
3 | arrays as arrays,
4 | errors as errors,
5 | io as io,
6 | plotting as plotting,
7 | testing as testing,
8 | tseries as tseries,
9 | util as util,
10 | )
11 | from pandas.core.api import (
12 | NA as NA,
13 | ArrowDtype as ArrowDtype,
14 | BooleanDtype as BooleanDtype,
15 | Categorical as Categorical,
16 | CategoricalDtype as CategoricalDtype,
17 | CategoricalIndex as CategoricalIndex,
18 | DataFrame as DataFrame,
19 | DateOffset as DateOffset,
20 | DatetimeIndex as DatetimeIndex,
21 | DatetimeTZDtype as DatetimeTZDtype,
22 | Float32Dtype as Float32Dtype,
23 | Float64Dtype as Float64Dtype,
24 | Grouper as Grouper,
25 | Index as Index,
26 | IndexSlice as IndexSlice,
27 | Int8Dtype as Int8Dtype,
28 | Int16Dtype as Int16Dtype,
29 | Int32Dtype as Int32Dtype,
30 | Int64Dtype as Int64Dtype,
31 | Interval as Interval,
32 | IntervalDtype as IntervalDtype,
33 | IntervalIndex as IntervalIndex,
34 | MultiIndex as MultiIndex,
35 | NamedAgg as NamedAgg,
36 | NaT as NaT,
37 | Period as Period,
38 | PeriodDtype as PeriodDtype,
39 | PeriodIndex as PeriodIndex,
40 | RangeIndex as RangeIndex,
41 | Series as Series,
42 | StringDtype as StringDtype,
43 | Timedelta as Timedelta,
44 | TimedeltaIndex as TimedeltaIndex,
45 | Timestamp as Timestamp,
46 | UInt8Dtype as UInt8Dtype,
47 | UInt16Dtype as UInt16Dtype,
48 | UInt32Dtype as UInt32Dtype,
49 | UInt64Dtype as UInt64Dtype,
50 | array as array,
51 | bdate_range as bdate_range,
52 | date_range as date_range,
53 | factorize as factorize,
54 | interval_range as interval_range,
55 | isna as isna,
56 | isnull as isnull,
57 | notna as notna,
58 | notnull as notnull,
59 | period_range as period_range,
60 | set_eng_float_format as set_eng_float_format,
61 | timedelta_range as timedelta_range,
62 | to_datetime as to_datetime,
63 | to_numeric as to_numeric,
64 | to_timedelta as to_timedelta,
65 | unique as unique,
66 | value_counts as value_counts,
67 | )
68 | from pandas.core.arrays.sparse import SparseDtype as SparseDtype
69 | from pandas.core.computation.api import eval as eval
70 | from pandas.core.reshape.api import (
71 | concat as concat,
72 | crosstab as crosstab,
73 | cut as cut,
74 | from_dummies as from_dummies,
75 | get_dummies as get_dummies,
76 | lreshape as lreshape,
77 | melt as melt,
78 | merge as merge,
79 | merge_asof as merge_asof,
80 | merge_ordered as merge_ordered,
81 | pivot as pivot,
82 | pivot_table as pivot_table,
83 | qcut as qcut,
84 | wide_to_long as wide_to_long,
85 | )
86 |
87 | from pandas._config import (
88 | describe_option as describe_option,
89 | get_option as get_option,
90 | option_context as option_context,
91 | options as options,
92 | reset_option as reset_option,
93 | set_option as set_option,
94 | )
95 |
96 | from pandas.util._print_versions import show_versions as show_versions
97 | from pandas.util._tester import test as test
98 |
99 | from pandas.io.api import (
100 | ExcelFile as ExcelFile,
101 | ExcelWriter as ExcelWriter,
102 | HDFStore as HDFStore,
103 | read_clipboard as read_clipboard,
104 | read_csv as read_csv,
105 | read_excel as read_excel,
106 | read_feather as read_feather,
107 | read_fwf as read_fwf,
108 | read_hdf as read_hdf,
109 | read_html as read_html,
110 | read_json as read_json,
111 | read_orc as read_orc,
112 | read_parquet as read_parquet,
113 | read_pickle as read_pickle,
114 | read_sas as read_sas,
115 | read_spss as read_spss,
116 | read_sql as read_sql,
117 | read_sql_query as read_sql_query,
118 | read_sql_table as read_sql_table,
119 | read_stata as read_stata,
120 | read_table as read_table,
121 | read_xml as read_xml,
122 | to_pickle as to_pickle,
123 | )
124 | from pandas.io.json._normalize import json_normalize as json_normalize
125 | from pandas.tseries import offsets as offsets
126 | from pandas.tseries.api import infer_freq as infer_freq
127 |
128 | __version__: str
129 |
--------------------------------------------------------------------------------
/pandas-stubs/_config/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas._config.config import (
2 | describe_option as describe_option,
3 | get_option as get_option,
4 | option_context as option_context,
5 | options as options,
6 | reset_option as reset_option,
7 | set_option as set_option,
8 | )
9 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas._libs.interval import Interval as Interval
2 | from pandas._libs.tslibs import (
3 | NaT as NaT,
4 | NaTType as NaTType,
5 | OutOfBoundsDatetime as OutOfBoundsDatetime,
6 | Period as Period,
7 | Timedelta as Timedelta,
8 | Timestamp as Timestamp,
9 | iNaT as iNaT,
10 | )
11 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/indexing.pyi:
--------------------------------------------------------------------------------
1 | class _NDFrameIndexerBase:
2 | def __init__(self, name: str, obj: object) -> None: ...
3 | @property
4 | def ndim(self) -> int: ...
5 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/json.pyi:
--------------------------------------------------------------------------------
1 | def decode(*args, **kwargs): ...
2 | def dumps(*args, **kwargs): ...
3 | def encode(*args, **kwargs): ...
4 | def loads(*args, **kwargs): ...
5 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/lib.pyi:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from typing import (
3 | Final,
4 | Literal,
5 | )
6 |
7 | import numpy as np
8 | from typing_extensions import (
9 | TypeAlias,
10 | TypeGuard,
11 | )
12 |
13 | class _NoDefault(Enum):
14 | no_default = ...
15 |
16 | no_default: Final = _NoDefault.no_default
17 | NoDefault: TypeAlias = Literal[_NoDefault.no_default]
18 |
19 | def infer_dtype(value: object, skipna: bool = ...) -> str: ...
20 | def is_iterator(obj: object) -> bool: ...
21 | def is_scalar(val: object) -> bool: ...
22 | def is_list_like(obj: object, allow_sets: bool = ...) -> bool: ...
23 | def is_complex(val: object) -> TypeGuard[complex]: ...
24 | def is_bool(val: object) -> TypeGuard[bool | np.bool_]: ...
25 | def is_integer(val: object) -> TypeGuard[int | np.integer]: ...
26 | def is_float(val: object) -> TypeGuard[float | np.floating]: ...
27 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/missing.pyi:
--------------------------------------------------------------------------------
1 | from typing_extensions import Self
2 |
class NAType:
    """Stub for the type of the ``pandas.NA`` singleton (scalar
    missing-value indicator).

    Arithmetic with NA propagates NA.  Comparisons are annotated ``bool``
    here; NOTE(review): at runtime ``NA == x`` actually returns NA — the
    ``bool`` annotations are kept unchanged to preserve this stub's
    public interface.
    """

    def __new__(cls, *args, **kwargs) -> Self: ...
    def __format__(self, format_spec: str) -> str: ...
    # Annotated ``None`` so type checkers flag truth-testing of NA
    # (truth-testing raises at runtime).
    def __bool__(self) -> None: ...
    def __hash__(self) -> int: ...
    def __reduce__(self) -> str: ...
    # Binary arithmetic: NA propagates through every operation.
    def __add__(self, other) -> NAType: ...
    def __radd__(self, other) -> NAType: ...
    def __sub__(self, other) -> NAType: ...
    def __rsub__(self, other) -> NAType: ...
    def __mul__(self, other) -> NAType: ...
    def __rmul__(self, other) -> NAType: ...
    def __matmul__(self, other) -> NAType: ...
    def __rmatmul__(self, other) -> NAType: ...
    def __truediv__(self, other) -> NAType: ...
    def __rtruediv__(self, other) -> NAType: ...
    def __floordiv__(self, other) -> NAType: ...
    def __rfloordiv__(self, other) -> NAType: ...
    def __mod__(self, other) -> NAType: ...
    def __rmod__(self, other) -> NAType: ...
    def __divmod__(self, other) -> NAType: ...
    def __rdivmod__(self, other) -> NAType: ...
    def __eq__(self, other) -> bool: ...
    def __ne__(self, other) -> bool: ...
    def __le__(self, other) -> bool: ...
    def __lt__(self, other) -> bool: ...
    def __gt__(self, other) -> bool: ...
    def __ge__(self, other) -> bool: ...
    # Unary operators take no operand besides ``self``.  The previous stub
    # erroneously declared a required ``other`` parameter on these four,
    # which made ``-NA``, ``+NA``, ``abs(NA)`` and ``~NA`` type errors.
    def __neg__(self) -> NAType: ...
    def __pos__(self) -> NAType: ...
    def __abs__(self) -> NAType: ...
    def __invert__(self) -> NAType: ...
    def __pow__(self, other) -> NAType: ...
    def __rpow__(self, other) -> NAType: ...
    # Logical ops follow Kleene three-valued logic, hence the unions.
    def __and__(self, other) -> NAType | None: ...
    __rand__ = __and__
    def __or__(self, other) -> bool | NAType: ...
    __ror__ = __or__
    def __xor__(self, other) -> NAType: ...
    __rxor__ = __xor__
    __array_priority__: int
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...

# The NA singleton itself.
NA: NAType = ...
47 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/ops_dispatch.pyi:
--------------------------------------------------------------------------------
# Lookup tables consumed by maybe_dispatch_ufunc_to_dunder_op; their
# contents live in the C extension and are left untyped here.
DISPATCHED_UFUNCS = ...
REVERSED_NAMES = ...
UFUNC_ALIASES = ...

# Untyped stub: routes a numpy ufunc call to the matching dunder method.
def maybe_dispatch_ufunc_to_dunder_op(*args, **kwargs): ...
6 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/properties.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Callable
2 |
class CachedProperty:
    # Descriptor backing pandas' ``cache_readonly``; wraps ``func``.
    # NOTE(review): the caching behavior itself is implemented in the C
    # extension and is not visible from this stub.
    def __init__(self, func: Callable) -> None: ...
    def __get__(self, obj, typ): ...
    def __set__(self, obj, value) -> None: ...

# note: this is a lie to make type checkers happy (they special
# case property). cache_readonly uses attribute names similar to
# property (fget) but it does not provide fset and fdel.
cache_readonly = property

class AxisProperty:
    # Descriptor exposing one axis of a pandas object as an attribute;
    # ``axis`` selects which axis, ``doc`` supplies the docstring.
    # NOTE(review): semantics inferred from the signature — confirm upstream.
    def __init__(self, axis: int = ..., doc: str = ...) -> None: ...
    def __get__(self, obj, typ): ...
    def __set__(self, obj, value) -> None: ...
17 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/sparse.pyi:
--------------------------------------------------------------------------------
# Empty stubs for the sparse-index C classes.  NOTE(review): BlockIndex and
# IntIndex presumably encode block-run vs. explicit-position layouts of the
# stored values — confirm in pandas._libs.sparse before relying on this.
class SparseIndex: ...
class BlockIndex(SparseIndex): ...
class IntIndex(SparseIndex): ...
4 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/tslibs/__init__.pyi:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "Period",
3 | "Timestamp",
4 | "Timedelta",
5 | "NaT",
6 | "NaTType",
7 | "iNaT",
8 | "nat_strings",
9 | "BaseOffset",
10 | "Tick",
11 | "OutOfBoundsDatetime",
12 | ]
13 | from pandas._libs.tslibs.nattype import (
14 | NaT,
15 | NaTType,
16 | iNaT,
17 | nat_strings,
18 | )
19 | from pandas._libs.tslibs.np_datetime import (
20 | OutOfBoundsDatetime as OutOfBoundsDatetime,
21 | OutOfBoundsTimedelta as OutOfBoundsTimedelta,
22 | )
23 | from pandas._libs.tslibs.offsets import (
24 | BaseOffset,
25 | Tick,
26 | )
27 | from pandas._libs.tslibs.period import Period
28 | from pandas._libs.tslibs.timedeltas import Timedelta
29 | from pandas._libs.tslibs.timestamps import Timestamp
30 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/tslibs/base.pyi:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
# Base class for Timestamp; deriving from datetime means Timestamp
# instances satisfy isinstance(x, datetime) checks.
class ABCTimestamp(datetime): ...
4 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/tslibs/conversion.pyi:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
# Attach or convert timezone info on a stdlib datetime (C implementation).
def localize_pydatetime(dt: datetime, tz: object) -> datetime: ...

# NOTE(review): a class of the same name is also declared in
# pandas._libs.tslibs.np_datetime within this stub package.
class OutOfBoundsTimedelta(ValueError): ...
6 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/tslibs/np_datetime.pyi:
--------------------------------------------------------------------------------
# Raised when a datetime/timedelta value falls outside the range pandas
# can represent.
class OutOfBoundsDatetime(ValueError): ...
class OutOfBoundsTimedelta(ValueError): ...
3 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/tslibs/parsing.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
# Untyped stub for the C date-string parser.
def parse_time_string(*args, **kwargs) -> Any: ...

# Raised when a date string cannot be parsed.
class DateParseError(ValueError):
    def __init__(self, *args, **kwargs) -> None: ...
7 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/tslibs/strptime.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
# Untyped stub: parses an array of strings against a strptime-style
# format (C implementation).
def array_strptime(*args, **kwargs) -> Any: ...
4 |
--------------------------------------------------------------------------------
/pandas-stubs/_libs/window/__init__.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/_libs/window/__init__.pyi
--------------------------------------------------------------------------------
/pandas-stubs/_version.pyi:
--------------------------------------------------------------------------------
1 | from typing import Literal
2 |
# JSON blob describing the package version, written at build time.
version_json: str = ...

# NOTE(review): presumably returns the parsed version info dict at
# runtime; left unannotated upstream.
def get_versions(): ...

# Version of the pandas-stubs package itself.
_stub_version: Literal["2.2.3.250527"]
8 |
--------------------------------------------------------------------------------
/pandas-stubs/api/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.api import (
2 | extensions as extensions,
3 | indexers as indexers,
4 | interchange as interchange,
5 | types as types,
6 | typing as typing,
7 | )
8 |
--------------------------------------------------------------------------------
/pandas-stubs/api/extensions/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.accessor import (
2 | register_dataframe_accessor as register_dataframe_accessor,
3 | register_index_accessor as register_index_accessor,
4 | register_series_accessor as register_series_accessor,
5 | )
6 | from pandas.core.algorithms import take as take
7 | from pandas.core.arrays import (
8 | ExtensionArray as ExtensionArray,
9 | ExtensionScalarOpsMixin as ExtensionScalarOpsMixin,
10 | )
11 |
12 | from pandas._libs.lib import no_default as no_default
13 |
14 | from pandas.core.dtypes.base import (
15 | ExtensionDtype as ExtensionDtype,
16 | register_extension_dtype as register_extension_dtype,
17 | )
18 |
--------------------------------------------------------------------------------
/pandas-stubs/api/indexers/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.indexers import check_array_indexer as check_array_indexer
2 | from pandas.core.indexers.objects import (
3 | BaseIndexer as BaseIndexer,
4 | FixedForwardWindowIndexer as FixedForwardWindowIndexer,
5 | VariableOffsetWindowIndexer as VariableOffsetWindowIndexer,
6 | )
7 |
--------------------------------------------------------------------------------
/pandas-stubs/api/interchange/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrame
2 | from pandas.core.interchange.from_dataframe import from_dataframe as from_dataframe
3 |
--------------------------------------------------------------------------------
/pandas-stubs/api/types/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas._libs.lib import infer_dtype as infer_dtype
2 |
3 | from pandas.core.dtypes.api import (
4 | is_any_real_numeric_dtype as is_any_real_numeric_dtype,
5 | is_array_like as is_array_like,
6 | is_bool as is_bool,
7 | is_bool_dtype as is_bool_dtype,
8 | is_complex as is_complex,
9 | is_complex_dtype as is_complex_dtype,
10 | is_datetime64_any_dtype as is_datetime64_any_dtype,
11 | is_datetime64_dtype as is_datetime64_dtype,
12 | is_datetime64_ns_dtype as is_datetime64_ns_dtype,
13 | is_dict_like as is_dict_like,
14 | is_dtype_equal as is_dtype_equal,
15 | is_extension_array_dtype as is_extension_array_dtype,
16 | is_file_like as is_file_like,
17 | is_float as is_float,
18 | is_float_dtype as is_float_dtype,
19 | is_hashable as is_hashable,
20 | is_integer as is_integer,
21 | is_integer_dtype as is_integer_dtype,
22 | is_iterator as is_iterator,
23 | is_list_like as is_list_like,
24 | is_named_tuple as is_named_tuple,
25 | is_number as is_number,
26 | is_numeric_dtype as is_numeric_dtype,
27 | is_object_dtype as is_object_dtype,
28 | is_re as is_re,
29 | is_re_compilable as is_re_compilable,
30 | is_scalar as is_scalar,
31 | is_signed_integer_dtype as is_signed_integer_dtype,
32 | is_string_dtype as is_string_dtype,
33 | is_timedelta64_dtype as is_timedelta64_dtype,
34 | is_timedelta64_ns_dtype as is_timedelta64_ns_dtype,
35 | is_unsigned_integer_dtype as is_unsigned_integer_dtype,
36 | pandas_dtype as pandas_dtype,
37 | )
38 | from pandas.core.dtypes.concat import union_categoricals as union_categoricals
39 | from pandas.core.dtypes.dtypes import (
40 | CategoricalDtype as CategoricalDtype,
41 | DatetimeTZDtype as DatetimeTZDtype,
42 | IntervalDtype as IntervalDtype,
43 | PeriodDtype as PeriodDtype,
44 | )
45 |
--------------------------------------------------------------------------------
/pandas-stubs/api/typing/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.groupby import (
2 | DataFrameGroupBy as DataFrameGroupBy,
3 | SeriesGroupBy as SeriesGroupBy,
4 | )
5 | from pandas.core.indexes.frozen import FrozenList as FrozenList
6 | from pandas.core.resample import (
7 | DatetimeIndexResamplerGroupby as DatetimeIndexResamplerGroupby,
8 | PeriodIndexResamplerGroupby as PeriodIndexResamplerGroupby,
9 | Resampler as Resampler,
10 | TimedeltaIndexResamplerGroupby as TimedeltaIndexResamplerGroupby,
11 | TimeGrouper as TimeGrouper,
12 | )
13 | from pandas.core.window import (
14 | Expanding as Expanding,
15 | ExpandingGroupby as ExpandingGroupby,
16 | ExponentialMovingWindow as ExponentialMovingWindow,
17 | ExponentialMovingWindowGroupby as ExponentialMovingWindowGroupby,
18 | Rolling as Rolling,
19 | RollingGroupby as RollingGroupby,
20 | Window as Window,
21 | )
22 |
23 | from pandas._libs import NaTType as NaTType
24 | from pandas._libs.lib import NoDefault as NoDefault
25 | from pandas._libs.missing import NAType as NAType
26 |
27 | from pandas.io.json._json import JsonReader as JsonReader
28 |
29 | # SASReader is not defined so commenting it out for now
30 | # from pandas.io.sas.sasreader import SASReader as SASReader
31 | from pandas.io.stata import StataReader as StataReader
32 |
--------------------------------------------------------------------------------
/pandas-stubs/arrays/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.arrays import (
2 | BooleanArray as BooleanArray,
3 | Categorical as Categorical,
4 | DatetimeArray as DatetimeArray,
5 | IntegerArray as IntegerArray,
6 | IntervalArray as IntervalArray,
7 | PandasArray as PandasArray,
8 | PeriodArray as PeriodArray,
9 | SparseArray as SparseArray,
10 | StringArray as StringArray,
11 | TimedeltaArray as TimedeltaArray,
12 | )
13 |
--------------------------------------------------------------------------------
/pandas-stubs/core/__init__.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/core/__init__.pyi
--------------------------------------------------------------------------------
/pandas-stubs/core/accessor.pyi:
--------------------------------------------------------------------------------
1 | from typing import Callable
2 |
3 | from pandas._typing import TypeT
4 |
# Empty stub; behavior lives in the pandas source.
class PandasDelegate: ...

# Decorator factories that register a custom accessor namespace (e.g.
# ``df.myaccessor``) on DataFrame / Series / Index.  Each returns the
# decorated class unchanged, hence ``Callable[[TypeT], TypeT]``.
def register_dataframe_accessor(name: str) -> Callable[[TypeT], TypeT]: ...
def register_series_accessor(name: str) -> Callable[[TypeT], TypeT]: ...
def register_index_accessor(name: str) -> Callable[[TypeT], TypeT]: ...
10 |
--------------------------------------------------------------------------------
/pandas-stubs/core/algorithms.pyi:
--------------------------------------------------------------------------------
1 | from typing import (
2 | Literal,
3 | overload,
4 | )
5 |
6 | import numpy as np
7 | from pandas import (
8 | Categorical,
9 | CategoricalIndex,
10 | Index,
11 | IntervalIndex,
12 | PeriodIndex,
13 | Series,
14 | )
15 | from pandas.api.extensions import ExtensionArray
16 |
17 | from pandas._typing import (
18 | AnyArrayLike,
19 | IntervalT,
20 | TakeIndexer,
21 | )
22 |
23 | # These are type: ignored because the Index types overlap due to inheritance but indices
24 | # with extension types return the same type while standard type return ndarray
25 |
# unique(): extension-backed indexes/arrays return their own type, while
# plain Index and ndarray inputs collapse to np.ndarray.
@overload
def unique(  # pyright: ignore[reportOverlappingOverload]
    values: PeriodIndex,
) -> PeriodIndex: ...
@overload
def unique(values: CategoricalIndex) -> CategoricalIndex: ...  # type: ignore[overload-overlap]
@overload
def unique(values: IntervalIndex[IntervalT]) -> IntervalIndex[IntervalT]: ...
@overload
def unique(values: Index) -> np.ndarray: ...
@overload
def unique(values: Categorical) -> Categorical: ...
@overload
def unique(values: Series) -> np.ndarray | ExtensionArray: ...
@overload
def unique(values: np.ndarray) -> np.ndarray: ...
@overload
def unique(values: ExtensionArray) -> ExtensionArray: ...

# factorize(): returns (codes, uniques); the uniques container mirrors the
# input family (ndarray -> ndarray, Index/Series -> Index,
# Categorical -> Categorical).
@overload
def factorize(
    values: np.ndarray,
    sort: bool = ...,
    use_na_sentinel: bool = ...,
    size_hint: int | None = ...,
) -> tuple[np.ndarray, np.ndarray]: ...
@overload
def factorize(
    values: Index | Series,
    sort: bool = ...,
    use_na_sentinel: bool = ...,
    size_hint: int | None = ...,
) -> tuple[np.ndarray, Index]: ...
@overload
def factorize(
    values: Categorical,
    sort: bool = ...,
    use_na_sentinel: bool = ...,
    size_hint: int | None = ...,
) -> tuple[np.ndarray, Categorical]: ...

# Series of counts (or frequencies when normalize=True) indexed by the
# unique values of the input.
def value_counts(
    values: AnyArrayLike | list | tuple,
    sort: bool = ...,
    ascending: bool = ...,
    normalize: bool = ...,
    bins: int | None = ...,
    dropna: bool = ...,
) -> Series: ...

# Positional take along an axis; ``allow_fill`` enables fill_value
# semantics.  Left untyped upstream.
def take(
    arr,
    indices: TakeIndexer,
    axis: Literal[0, 1] = 0,
    allow_fill: bool = False,
    fill_value=None,
): ...
80 |
--------------------------------------------------------------------------------
/pandas-stubs/core/api.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.algorithms import (
2 | factorize as factorize,
3 | unique as unique,
4 | value_counts as value_counts,
5 | )
6 | from pandas.core.arrays import Categorical as Categorical
7 | from pandas.core.arrays.arrow.dtype import ArrowDtype as ArrowDtype
8 | from pandas.core.arrays.boolean import BooleanDtype as BooleanDtype
9 | from pandas.core.arrays.floating import (
10 | Float32Dtype as Float32Dtype,
11 | Float64Dtype as Float64Dtype,
12 | )
13 | from pandas.core.arrays.integer import (
14 | Int8Dtype as Int8Dtype,
15 | Int16Dtype as Int16Dtype,
16 | Int32Dtype as Int32Dtype,
17 | Int64Dtype as Int64Dtype,
18 | UInt8Dtype as UInt8Dtype,
19 | UInt16Dtype as UInt16Dtype,
20 | UInt32Dtype as UInt32Dtype,
21 | UInt64Dtype as UInt64Dtype,
22 | )
23 | from pandas.core.arrays.string_ import StringDtype as StringDtype
24 | from pandas.core.construction import array as array
25 | from pandas.core.frame import DataFrame as DataFrame
26 | from pandas.core.groupby import (
27 | Grouper as Grouper,
28 | NamedAgg as NamedAgg,
29 | )
30 | from pandas.core.indexes.api import (
31 | CategoricalIndex as CategoricalIndex,
32 | DatetimeIndex as DatetimeIndex,
33 | Index as Index,
34 | IntervalIndex as IntervalIndex,
35 | MultiIndex as MultiIndex,
36 | PeriodIndex as PeriodIndex,
37 | RangeIndex as RangeIndex,
38 | TimedeltaIndex as TimedeltaIndex,
39 | )
40 | from pandas.core.indexes.datetimes import (
41 | bdate_range as bdate_range,
42 | date_range as date_range,
43 | )
44 | from pandas.core.indexes.interval import (
45 | Interval as Interval,
46 | interval_range as interval_range,
47 | )
48 | from pandas.core.indexes.period import period_range as period_range
49 | from pandas.core.indexes.timedeltas import timedelta_range as timedelta_range
50 | from pandas.core.indexing import IndexSlice as IndexSlice
51 | from pandas.core.series import Series as Series
52 | from pandas.core.tools.datetimes import to_datetime as to_datetime
53 | from pandas.core.tools.numeric import to_numeric as to_numeric
54 | from pandas.core.tools.timedeltas import to_timedelta as to_timedelta
55 |
56 | from pandas._libs import (
57 | NaT as NaT,
58 | Period as Period,
59 | Timedelta as Timedelta,
60 | )
61 | from pandas._libs.missing import NA as NA
62 | from pandas._libs.tslibs import Timestamp as Timestamp
63 |
64 | from pandas.core.dtypes.dtypes import (
65 | CategoricalDtype as CategoricalDtype,
66 | DatetimeTZDtype as DatetimeTZDtype,
67 | IntervalDtype as IntervalDtype,
68 | PeriodDtype as PeriodDtype,
69 | )
70 | from pandas.core.dtypes.missing import (
71 | isna as isna,
72 | isnull as isnull,
73 | notna as notna,
74 | notnull as notnull,
75 | )
76 |
77 | from pandas.io.formats.format import set_eng_float_format as set_eng_float_format
78 | from pandas.tseries.offsets import DateOffset as DateOffset
79 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arraylike.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | from typing_extensions import Self
4 |
5 | from pandas._libs.ops_dispatch import (
6 | maybe_dispatch_ufunc_to_dunder_op as maybe_dispatch_ufunc_to_dunder_op,
7 | )
8 |
class OpsMixin:
    """Mixin declaring the comparison, logical and arithmetic dunder
    operators shared by pandas objects; each returns ``Self`` so that e.g.
    ``series == 0`` type-checks as a Series.  The ignore comments on
    ``__eq__``/``__ne__`` silence the deliberate deviation from
    ``object``'s ``bool`` return type."""

    def __eq__(self, other: object) -> Self: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    def __ne__(self, other: object) -> Self: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    def __lt__(self, other: Any) -> Self: ...
    def __le__(self, other: Any) -> Self: ...
    def __gt__(self, other: Any) -> Self: ...
    def __ge__(self, other: Any) -> Self: ...
    # -------------------------------------------------------------
    # Logical Methods
    def __and__(self, other: Any) -> Self: ...
    def __rand__(self, other: Any) -> Self: ...
    def __or__(self, other: Any) -> Self: ...
    def __ror__(self, other: Any) -> Self: ...
    def __xor__(self, other: Any) -> Self: ...
    def __rxor__(self, other: Any) -> Self: ...
    # -------------------------------------------------------------
    # Arithmetic Methods
    def __add__(self, other: Any) -> Self: ...
    def __radd__(self, other: Any) -> Self: ...
    def __sub__(self, other: Any) -> Self: ...
    def __rsub__(self, other: Any) -> Self: ...
    def __mul__(self, other: Any) -> Self: ...
    def __rmul__(self, other: Any) -> Self: ...
    # Handled by subclasses that specify only the valid values
    # that can be passed
    # def __truediv__(self, other: Any) -> Self: ...
    # def __rtruediv__(self, other: Any) -> Self: ...
    # def __floordiv__(self, other: Any) -> Self: ...
    # def __rfloordiv__(self, other: Any) -> Self: ...
    def __mod__(self, other: Any) -> Self: ...
    def __rmod__(self, other: Any) -> Self: ...
    def __divmod__(self, other: Any) -> tuple[Self, Self]: ...
    def __rdivmod__(self, other: Any) -> tuple[Self, Self]: ...
    def __pow__(self, other: Any) -> Self: ...
    def __rpow__(self, other: Any) -> Self: ...
44 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.arrays.base import (
2 | ExtensionArray as ExtensionArray,
3 | ExtensionOpsMixin as ExtensionOpsMixin,
4 | ExtensionScalarOpsMixin as ExtensionScalarOpsMixin,
5 | )
6 | from pandas.core.arrays.boolean import BooleanArray as BooleanArray
7 | from pandas.core.arrays.categorical import Categorical as Categorical
8 | from pandas.core.arrays.datetimes import DatetimeArray as DatetimeArray
9 | from pandas.core.arrays.integer import IntegerArray as IntegerArray
10 | from pandas.core.arrays.interval import IntervalArray as IntervalArray
11 | from pandas.core.arrays.numpy_ import PandasArray as PandasArray
12 | from pandas.core.arrays.period import PeriodArray as PeriodArray
13 | from pandas.core.arrays.sparse import SparseArray as SparseArray
14 | from pandas.core.arrays.string_ import StringArray as StringArray
15 | from pandas.core.arrays.timedeltas import TimedeltaArray as TimedeltaArray
16 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/arrow/dtype.pyi:
--------------------------------------------------------------------------------
1 | import pyarrow as pa
2 |
3 | from pandas._libs.missing import NAType
4 |
5 | from pandas.core.dtypes.base import StorageExtensionDtype
6 |
class ArrowDtype(StorageExtensionDtype):
    # The wrapped pyarrow type (e.g. ``pa.int64()``).
    pyarrow_dtype: pa.DataType
    def __init__(self, pyarrow_dtype: pa.DataType) -> None: ...
    # Missing values of arrow-backed dtypes are represented by pandas.NA.
    @property
    def na_value(self) -> NAType: ...
12 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/base.pyi:
--------------------------------------------------------------------------------
1 | from typing import (
2 | Any,
3 | overload,
4 | )
5 |
6 | import numpy as np
7 | from typing_extensions import Self
8 |
9 | from pandas._typing import (
10 | ArrayLike,
11 | Scalar,
12 | ScalarIndexer,
13 | SequenceIndexer,
14 | TakeIndexer,
15 | npt,
16 | )
17 |
18 | from pandas.core.dtypes.dtypes import ExtensionDtype as ExtensionDtype
19 |
class ExtensionArray:
    """Stub of the base class for custom 1-D pandas array types.

    Only signatures are declared here; the behavioral contract of each
    method lives in the pandas ``ExtensionArray`` documentation.
    """

    # Scalar position -> single element; slice/sequence/mask -> new array.
    @overload
    def __getitem__(self, item: ScalarIndexer) -> Any: ...
    @overload
    def __getitem__(self, item: SequenceIndexer) -> Self: ...
    def __setitem__(self, key: int | slice | np.ndarray, value) -> None: ...
    def __len__(self) -> int: ...
    def __iter__(self): ...
    def __contains__(self, item: object) -> bool | np.bool_: ...
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = ...,
        copy: bool = ...,
        na_value: Scalar = ...,
    ) -> np.ndarray: ...
    @property
    def dtype(self) -> ExtensionDtype: ...
    @property
    def shape(self) -> tuple[int, ...]: ...
    @property
    def ndim(self) -> int: ...
    @property
    def nbytes(self) -> int: ...
    def astype(self, dtype, copy: bool = ...): ...
    def isna(self) -> ArrayLike: ...
    def argsort(
        self, *, ascending: bool = ..., kind: str = ..., **kwargs
    ) -> np.ndarray: ...
    def fillna(self, value=..., method=..., limit=...): ...
    def dropna(self): ...
    def shift(self, periods: int = ..., fill_value: object = ...) -> Self: ...
    def unique(self): ...
    def searchsorted(self, value, side: str = ..., sorter=...): ...
    # Returns (integer codes, uniques as the same array type).
    def factorize(self, use_na_sentinel: bool = ...) -> tuple[np.ndarray, Self]: ...
    def repeat(self, repeats, axis=...): ...
    def take(
        self,
        indexer: TakeIndexer,
        *,
        allow_fill: bool = ...,
        fill_value=...,
    ) -> Self: ...
    def copy(self) -> Self: ...
    def view(self, dtype=...) -> Self | np.ndarray: ...
    def ravel(self, order=...) -> Self: ...
    def tolist(self) -> list: ...
    # Named reduction entry point (``name`` selects the operation).
    def _reduce(
        self, name: str, *, skipna: bool = ..., keepdims: bool = ..., **kwargs
    ) -> object: ...
    # Named accumulation; returns an array of the same type.
    def _accumulate(self, name: str, *, skipna: bool = ..., **kwargs) -> Self: ...
70 |
class ExtensionOpsMixin:
    # Class-level hooks that install arithmetic / comparison / logical
    # dunder operators onto the subclass when invoked.
    @classmethod
    def _add_arithmetic_ops(cls) -> None: ...
    @classmethod
    def _add_comparison_ops(cls) -> None: ...
    @classmethod
    def _add_logical_ops(cls) -> None: ...
78 |
# Empty stub; NOTE(review): the scalar-wise op generation is implemented
# in the pandas source and not visible here.
class ExtensionScalarOpsMixin(ExtensionOpsMixin): ...
80 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/boolean.pyi:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pandas.core.arrays.masked import BaseMaskedArray as BaseMaskedArray
3 |
4 | from pandas._libs.missing import NAType
5 | from pandas._typing import type_t
6 |
7 | from pandas.core.dtypes.base import ExtensionDtype as ExtensionDtype
8 |
class BooleanDtype(ExtensionDtype):
    # Missing values of the nullable boolean dtype are pandas.NA.
    @property
    def na_value(self) -> NAType: ...
    @classmethod
    def construct_array_type(cls) -> type_t[BooleanArray]: ...

# Untyped stub; left loose upstream.
def coerce_to_array(values, mask=..., copy: bool = ...): ...

class BooleanArray(BaseMaskedArray):
    # Backed by two ndarrays: ``values`` and ``mask``.
    # NOTE(review): per the masked-array convention, True in ``mask``
    # presumably marks a missing entry — confirm in pandas source.
    def __init__(
        self, values: np.ndarray, mask: np.ndarray, copy: bool = ...
    ) -> None: ...
    @property
    def dtype(self): ...
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...
    def __setitem__(self, key, value) -> None: ...
    def astype(self, dtype, copy: bool = ...): ...
    # Kleene-logic reductions; untyped upstream.
    def any(self, *, skipna: bool = ..., **kwargs): ...
    def all(self, *, skipna: bool = ..., **kwargs): ...
28 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/datetimelike.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Sequence
2 | from typing import overload
3 |
4 | import numpy as np
5 | from pandas.core.arrays.base import (
6 | ExtensionArray,
7 | ExtensionOpsMixin,
8 | )
9 | from typing_extensions import (
10 | Self,
11 | TypeAlias,
12 | )
13 |
14 | from pandas._libs import (
15 | NaT as NaT,
16 | NaTType as NaTType,
17 | )
18 | from pandas._typing import (
19 | DatetimeLikeScalar,
20 | PositionalIndexerTuple,
21 | ScalarIndexer,
22 | SequenceIndexer,
23 | TimeAmbiguous,
24 | TimeNonexistent,
25 | TimeUnit,
26 | )
27 |
# A datetime-like scalar (see DatetimeLikeScalar) or NaT.
DTScalarOrNaT: TypeAlias = DatetimeLikeScalar | NaTType

class DatelikeOps:
    # strftime-style string formatting; untyped upstream.
    def strftime(self, date_format): ...

class TimelikeOps:
    # Time unit of the underlying values (see ``TimeUnit``).
    @property
    def unit(self) -> TimeUnit: ...
    def as_unit(self, unit: TimeUnit) -> Self: ...
    # round/floor/ceil to a frequency; ``ambiguous`` and ``nonexistent``
    # control handling of DST-transition wall times.
    def round(
        self, freq, ambiguous: TimeAmbiguous = ..., nonexistent: TimeNonexistent = ...
    ): ...
    def floor(
        self, freq, ambiguous: TimeAmbiguous = ..., nonexistent: TimeNonexistent = ...
    ): ...
    def ceil(
        self, freq, ambiguous: TimeAmbiguous = ..., nonexistent: TimeNonexistent = ...
    ): ...
46 |
class DatetimeLikeArrayMixin(ExtensionOpsMixin, ExtensionArray):
    """Shared stub interface of the datetime-like extension arrays
    (datetimes / timedeltas / periods).  Most members are left untyped,
    matching the upstream stub."""

    @property
    def ndim(self) -> int: ...
    @property
    def shape(self): ...
    def reshape(self, *args, **kwargs): ...
    def ravel(self, *args, **kwargs): ...
    def __iter__(self): ...
    # Integer (i8) view of the underlying values.
    @property
    def asi8(self) -> np.ndarray: ...
    @property
    def nbytes(self): ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    @property
    def size(self) -> int: ...
    def __len__(self) -> int: ...
    # Scalar position -> datetime-like scalar or NaT; sequence/slice/tuple
    # indexers return the same array type.
    @overload
    def __getitem__(self, key: ScalarIndexer) -> DTScalarOrNaT: ...
    @overload
    def __getitem__(
        self,
        key: SequenceIndexer | PositionalIndexerTuple,
    ) -> Self: ...
    def __setitem__(  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
        self, key: int | Sequence[int] | Sequence[bool] | slice, value
    ) -> None: ...
    def astype(self, dtype, copy: bool = ...): ...
    def view(self, dtype=...): ...
    def unique(self): ...
    def copy(self): ...
    def shift(self, periods: int = ..., fill_value=..., axis: int = ...): ...
    def searchsorted(self, value, side: str = ..., sorter=...): ...
    def repeat(self, repeats, *args, **kwargs): ...
    def value_counts(self, dropna: bool = ...): ...
    def map(self, mapper): ...
    def isna(self): ...
    def fillna(self, value=..., method=..., limit=...): ...
    # Frequency metadata; untyped upstream.
    @property
    def freq(self): ...
    @freq.setter
    def freq(self, value) -> None: ...
    @property
    def freqstr(self): ...
    @property
    def inferred_freq(self): ...
    @property
    def resolution(self): ...
    # Assigned dynamically in the pandas source; left untyped here.
    __pow__ = ...
    __rpow__ = ...
    __rmul__ = ...
    def __add__(self, other): ...
    def __radd__(self, other): ...
    def __sub__(self, other): ...
    def __rsub__(self, other): ...
    def __iadd__(self, other): ...
    def __isub__(self, other): ...
    def min(self, *, axis=..., skipna: bool = ..., **kwargs): ...
    def max(self, *, axis=..., skipna: bool = ..., **kwargs): ...
    def mean(self, *, skipna: bool = ...): ...

# Untyped helper stub; behavior defined in the pandas source.
def maybe_infer_freq(freq): ...
108 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/datetimes.pyi:
--------------------------------------------------------------------------------
1 | from datetime import tzinfo as _tzinfo
2 |
3 | import numpy as np
4 | from pandas.core.arrays.datetimelike import (
5 | DatelikeOps,
6 | DatetimeLikeArrayMixin,
7 | TimelikeOps,
8 | )
9 |
10 | from pandas._typing import (
11 | TimeAmbiguous,
12 | TimeNonexistent,
13 | TimeZones,
14 | )
15 |
16 | from pandas.core.dtypes.dtypes import DatetimeTZDtype as DatetimeTZDtype
17 |
18 | def tz_to_dtype(tz): ...
19 |
class DatetimeArray(DatetimeLikeArrayMixin, TimelikeOps, DatelikeOps):
    """Stub for pandas' extension array of datetime64 data (tz-naive or
    tz-aware; a tz-aware array reports a DatetimeTZDtype)."""

    __array_priority__: int = ...
    def __init__(self, values, dtype=..., freq=..., copy: bool = ...) -> None: ...
    # ignore in dtype() is from the pandas source
    @property
    def dtype(self) -> np.dtype | DatetimeTZDtype: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    @property
    def tz(self): ...
    @tz.setter
    def tz(self, value) -> None: ...
    @property
    def tzinfo(self) -> _tzinfo | None: ...
    @property
    def is_normalized(self): ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    def __iter__(self): ...
    def astype(self, dtype, copy: bool = ...): ...
    # Timezone conversion/localization; ``tz`` accepts anything the TimeZones
    # alias covers.
    def tz_convert(self, tz: TimeZones): ...
    def tz_localize(
        self,
        tz: TimeZones,
        ambiguous: TimeAmbiguous = ...,
        nonexistent: TimeNonexistent = ...,
    ): ...
    def to_pydatetime(self): ...
    def normalize(self): ...
    def to_period(self, freq=...): ...
    def to_perioddelta(self, freq): ...
    def month_name(self, locale=...): ...
    def day_name(self, locale=...): ...
    @property
    def time(self): ...
    @property
    def timetz(self): ...
    @property
    def date(self): ...
    # Datetime field accessors, left untyped in this stub.
    # NOTE(review): these are array-valued at runtime; consider ndarray
    # annotations -- confirm dtypes before tightening.
    year = ...
    month = ...
    day = ...
    hour = ...
    minute = ...
    second = ...
    microsecond = ...
    nanosecond = ...
    dayofweek = ...
    weekday = ...
    dayofyear = ...
    quarter = ...
    days_in_month = ...
    daysinmonth = ...
    is_month_start = ...
    is_month_end = ...
    is_quarter_start = ...
    is_quarter_end = ...
    is_year_start = ...
    is_year_end = ...
    is_leap_year = ...
    def to_julian_date(self): ...
78 |
# Conversion/validation helpers mirrored from pandas.core.arrays.datetimes
# (untyped stubs).
def objects_to_datetime64ns(
    data,
    dayfirst,
    yearfirst,
    utc: bool = ...,
    errors: str = ...,
    require_iso8601: bool = ...,
    allow_object: bool = ...,
): ...
def maybe_convert_dtype(data, copy): ...
def validate_tz_from_dtype(dtype, tz): ...
90 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/floating.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.arrays.numeric import NumericDtype
2 |
# Nullable floating-point extension dtypes ("Float32"/"Float64").
class Float32Dtype(NumericDtype): ...
class Float64Dtype(NumericDtype): ...
5 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/integer.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.arrays.masked import BaseMaskedArray
2 |
3 | from pandas._libs.missing import NAType
4 |
5 | from pandas.core.dtypes.base import ExtensionDtype as ExtensionDtype
6 |
class _IntegerDtype(ExtensionDtype):
    """Common base stub for the nullable integer dtypes (Int8..UInt64)."""

    base: None
    # Missing values in the masked integer arrays are represented by pd.NA.
    @property
    def na_value(self) -> NAType: ...
    @property
    def itemsize(self) -> int: ...
    @classmethod
    def construct_array_type(cls) -> type[IntegerArray]: ...
15 |
class IntegerArray(BaseMaskedArray):
    """Stub for the masked (nullable) integer extension array."""

    @property
    def dtype(self) -> _IntegerDtype: ...
    # ``mask`` marks the missing entries of ``values``.
    def __init__(self, values, mask, copy: bool = ...) -> None: ...
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...
    def __setitem__(self, key, value) -> None: ...
    def astype(self, dtype, copy: bool = ...): ...
23 |
# Concrete nullable integer dtypes; behavior lives entirely in _IntegerDtype.
class Int8Dtype(_IntegerDtype): ...
class Int16Dtype(_IntegerDtype): ...
class Int32Dtype(_IntegerDtype): ...
class Int64Dtype(_IntegerDtype): ...
class UInt8Dtype(_IntegerDtype): ...
class UInt16Dtype(_IntegerDtype): ...
class UInt32Dtype(_IntegerDtype): ...
class UInt64Dtype(_IntegerDtype): ...
32 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/interval.pyi:
--------------------------------------------------------------------------------
1 | from typing import overload
2 |
3 | import numpy as np
4 | from pandas import (
5 | Index,
6 | Series,
7 | )
8 | from pandas.core.arrays.base import ExtensionArray as ExtensionArray
9 | from typing_extensions import (
10 | Self,
11 | TypeAlias,
12 | )
13 |
14 | from pandas._libs.interval import (
15 | Interval as Interval,
16 | IntervalMixin as IntervalMixin,
17 | )
18 | from pandas._typing import (
19 | Axis,
20 | Scalar,
21 | ScalarIndexer,
22 | SequenceIndexer,
23 | TakeIndexer,
24 | np_ndarray_bool,
25 | )
26 |
27 | IntervalOrNA: TypeAlias = Interval | float
28 |
class IntervalArray(IntervalMixin, ExtensionArray):
    """Stub for pandas' extension array of Interval scalars, exposed through
    left/right bound Indexes and interval-set predicates."""

    can_hold_na: bool = ...
    def __new__(
        cls, data, closed=..., dtype=..., copy: bool = ..., verify_integrity: bool = ...
    ): ...
    # Alternate constructors.
    @classmethod
    def from_breaks(cls, breaks, closed: str = ..., copy: bool = ..., dtype=...): ...
    @classmethod
    def from_arrays(
        cls, left, right, closed: str = ..., copy: bool = ..., dtype=...
    ): ...
    @classmethod
    def from_tuples(cls, data, closed: str = ..., copy: bool = ..., dtype=...): ...
    def __iter__(self): ...
    def __len__(self) -> int: ...
    # Scalar indexers give a single interval (or NaN); sequence indexers give
    # a new array of the same type.
    @overload
    def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA: ...
    @overload
    def __getitem__(self, key: SequenceIndexer) -> Self: ...
    def __setitem__(self, key, value) -> None: ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def fillna(self, value=..., method=..., limit=...): ...
    @property
    def dtype(self): ...
    def astype(self, dtype, copy: bool = ...): ...
    def copy(self): ...
    def isna(self): ...
    @property
    def nbytes(self) -> int: ...
    @property
    def size(self) -> int: ...
    def shift(self, periods: int = ..., fill_value: object = ...) -> IntervalArray: ...
    def take(  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
        self: Self,
        indices: TakeIndexer,
        *,
        allow_fill: bool = ...,
        fill_value=...,
        axis=...,
        **kwargs,
    ) -> Self: ...
    def value_counts(self, dropna: bool = ...): ...
    @property
    def left(self) -> Index: ...
    @property
    def right(self) -> Index: ...
    # BUG FIX: ``closed`` describes which bounds are inclusive -- one of the
    # strings "left", "right", "both" or "neither" -- not a bool.
    @property
    def closed(self) -> str: ...
    def set_closed(self, closed): ...
    @property
    def length(self) -> Index: ...
    @property
    def mid(self) -> Index: ...
    @property
    def is_non_overlapping_monotonic(self) -> bool: ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    def __arrow_array__(self, type=...): ...
    def to_tuples(self, na_tuple: bool = ...): ...
    def repeat(self, repeats, axis: Axis | None = ...): ...
    @overload
    def contains(self, other: Series) -> Series[bool]: ...
    @overload
    def contains(
        self, other: Scalar | ExtensionArray | Index | np.ndarray
    ) -> np_ndarray_bool: ...
    # BUG FIX: overlaps() is evaluated elementwise over the array and returns
    # a boolean ndarray (one flag per interval), not a single bool.
    def overlaps(self, other: Interval) -> np_ndarray_bool: ...
96 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/masked.pyi:
--------------------------------------------------------------------------------
1 | from typing import (
2 | Any,
3 | overload,
4 | )
5 |
6 | import numpy as np
7 | from pandas.core.arrays import (
8 | ExtensionArray as ExtensionArray,
9 | ExtensionOpsMixin,
10 | )
11 | from typing_extensions import Self
12 |
13 | from pandas._typing import (
14 | Scalar,
15 | ScalarIndexer,
16 | SequenceIndexer,
17 | npt,
18 | )
19 |
class BaseMaskedArray(ExtensionArray, ExtensionOpsMixin):
    """Common stub base for the masked (nullable) extension arrays."""

    # Scalar indexers give a single element; sequence indexers give a new
    # array of the same concrete type.
    @overload
    def __getitem__(self, item: ScalarIndexer) -> Any: ...
    @overload
    def __getitem__(self, item: SequenceIndexer) -> Self: ...
    def __iter__(self): ...
    def __len__(self) -> int: ...
    def __invert__(self): ...
    # ``na_value`` selects what missing entries become in the plain ndarray.
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = ...,
        copy: bool = ...,
        na_value: Scalar = ...,
    ) -> np.ndarray: ...
    __array_priority__: int = ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    def __arrow_array__(self, type=...): ...
    def isna(self): ...
    @property
    def nbytes(self) -> int: ...
    def copy(self): ...
    def value_counts(self, dropna: bool = ...): ...
42 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/numeric.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.dtypes.dtypes import BaseMaskedDtype
2 |
3 | class NumericDtype(BaseMaskedDtype): ...
4 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/numpy_.pyi:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpy.lib.mixins import NDArrayOperatorsMixin
3 | from pandas.core.arrays.base import (
4 | ExtensionArray,
5 | ExtensionOpsMixin,
6 | )
7 |
8 | from pandas.core.dtypes.dtypes import ExtensionDtype
9 |
class PandasDtype(ExtensionDtype):
    # ExtensionDtype wrapper whose underlying storage is a plain NumPy dtype.
    @property
    def numpy_dtype(self) -> np.dtype: ...
    @property
    def itemsize(self) -> int: ...

class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin):
    # ExtensionArray wrapper participating in the numpy ufunc protocol.
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...
18 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/period.pyi:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pandas import PeriodDtype
3 | from pandas.core.arrays.datetimelike import (
4 | DatelikeOps,
5 | DatetimeLikeArrayMixin,
6 | )
7 |
8 | from pandas._libs.tslibs import Timestamp
9 | from pandas._libs.tslibs.period import Period
10 |
class PeriodArray(DatetimeLikeArrayMixin, DatelikeOps):
    """Stub for pandas' extension array of Period values."""

    __array_priority__: int = ...
    def __init__(self, values, freq=..., dtype=..., copy: bool = ...) -> None: ...
    @property
    def dtype(self) -> PeriodDtype: ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    def __arrow_array__(self, type=...): ...
    # Calendar field accessors.
    year: int = ...
    month: int = ...
    day: int = ...
    hour: int = ...
    minute: int = ...
    second: int = ...
    weekofyear: int = ...
    week: int = ...
    dayofweek: int = ...
    weekday: int = ...
    dayofyear: int = ...
    # CONSISTENCY: annotated like its alias ``dayofyear`` above.
    day_of_year: int = ...
    quarter: int = ...
    qyear: int = ...
    days_in_month: int = ...
    daysinmonth: int = ...
    @property
    def is_leap_year(self) -> bool: ...
    # NOTE(review): start_time/end_time/to_timestamp are array-valued at
    # runtime; the scalar Timestamp annotations look suspect but fixing them
    # needs a DatetimeArray import -- confirm before tightening.
    @property
    def start_time(self) -> Timestamp: ...
    @property
    def end_time(self) -> Timestamp: ...
    def to_timestamp(self, freq: str | None = ..., how: str = ...) -> Timestamp: ...
    # BUG FIX: asfreq converts every element and returns a PeriodArray, not a
    # scalar Period.
    def asfreq(self, freq: str | None = ..., how: str = ...) -> PeriodArray: ...
    def astype(self, dtype, copy: bool = ...): ...
43 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/sparse/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.arrays.sparse.accessor import (
2 | SparseAccessor as SparseAccessor,
3 | SparseFrameAccessor as SparseFrameAccessor,
4 | )
5 | from pandas.core.arrays.sparse.array import SparseArray as SparseArray
6 | from pandas.core.arrays.sparse.dtype import SparseDtype as SparseDtype
7 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/sparse/accessor.pyi:
--------------------------------------------------------------------------------
1 | from pandas import Series
2 | from pandas.core.accessor import PandasDelegate
3 |
class BaseAccessor:
    # Shared base holding the accessed object.
    def __init__(self, data=...) -> None: ...

class SparseAccessor(BaseAccessor, PandasDelegate):
    # Series ``.sparse`` accessor.
    @classmethod
    def from_coo(cls, A, dense_index: bool = ...) -> Series: ...
    def to_coo(self, row_levels=..., column_levels=..., sort_labels: bool = ...): ...
    def to_dense(self): ...

class SparseFrameAccessor(BaseAccessor, PandasDelegate):
    # DataFrame ``.sparse`` accessor.
    @classmethod
    def from_spmatrix(cls, data, index=..., columns=...): ...
    def to_dense(self): ...
    def to_coo(self): ...
    @property
    def density(self) -> float: ...
20 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/sparse/array.pyi:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from typing import (
3 | Any,
4 | overload,
5 | )
6 |
7 | import numpy as np
8 | from pandas.core.arrays import (
9 | ExtensionArray,
10 | ExtensionOpsMixin,
11 | )
12 | from typing_extensions import Self
13 |
14 | from pandas._typing import (
15 | ScalarIndexer,
16 | SequenceIndexer,
17 | )
18 |
class ellipsis(Enum):
    # Typing-only stand-in for the builtin ``...`` (Ellipsis) so the
    # __getitem__ overloads below can accept it inside index tuples.
    Ellipsis = "..."
21 |
class SparseArray(ExtensionArray, ExtensionOpsMixin):
    """Stub for pandas' sparse extension array: stored points (``sp_values``
    at ``sp_index``) plus a ``fill_value`` for the remaining entries."""

    def __init__(
        self,
        data,
        sparse_index=...,
        fill_value=...,
        kind: str = ...,
        dtype=...,
        copy: bool = ...,
    ) -> None: ...
    @classmethod
    def from_spmatrix(cls, data): ...
    def __array__(self, dtype=..., copy=...) -> np.ndarray: ...
    def __setitem__(self, key, value) -> None: ...
    @property
    def sp_index(self): ...
    @property
    def sp_values(self): ...
    @property
    def dtype(self): ...
    @property
    def fill_value(self): ...
    @fill_value.setter
    def fill_value(self, value) -> None: ...
    @property
    def kind(self) -> str: ...
    def __len__(self) -> int: ...
    @property
    def nbytes(self) -> int: ...
    @property
    def density(self): ...
    # Number of explicitly stored (non-fill) points.
    @property
    def npoints(self) -> int: ...
    def isna(self): ...
    def fillna(self, value=..., method=..., limit=...): ...
    def shift(self, periods: int = ..., fill_value=...): ...
    def unique(self): ...
    def value_counts(self, dropna: bool = ...): ...
    # Sequence indexers (including tuples containing Ellipsis) give a new
    # SparseArray; scalar indexers give a single element.
    @overload
    def __getitem__(self, key: ScalarIndexer) -> Any: ...
    @overload
    def __getitem__(
        self,
        key: SequenceIndexer | tuple[int | ellipsis, ...],
    ) -> Self: ...
    def copy(self): ...
    def astype(self, dtype=..., copy: bool = ...): ...
    def map(self, mapper): ...
    def to_dense(self): ...
    def nonzero(self): ...
    def all(self, axis=..., *args, **kwargs): ...
    def any(self, axis: int = ..., *args, **kwargs): ...
    def sum(self, axis: int = ..., *args, **kwargs): ...
    def cumsum(self, axis: int = ..., *args, **kwargs): ...
    def mean(self, axis: int = ..., *args, **kwargs): ...
    @property
    def T(self): ...
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ...
    def __abs__(self): ...
81 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/sparse/dtype.pyi:
--------------------------------------------------------------------------------
1 | from pandas._typing import (
2 | Dtype,
3 | Scalar,
4 | npt,
5 | )
6 |
7 | from pandas.core.dtypes.base import ExtensionDtype
8 | from pandas.core.dtypes.dtypes import (
9 | register_extension_dtype as register_extension_dtype,
10 | )
11 |
class SparseDtype(ExtensionDtype):
    """Dtype for SparseArray: an underlying dtype plus the fill value used
    for entries that are not explicitly stored."""

    def __init__(
        self, dtype: Dtype | npt.DTypeLike = ..., fill_value: Scalar | None = ...
    ) -> None: ...
    @property
    def fill_value(self) -> Scalar | None: ...
18 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/string_.pyi:
--------------------------------------------------------------------------------
1 | from typing import Literal
2 |
3 | from pandas.core.arrays import PandasArray
4 |
5 | from pandas._libs.missing import NAType
6 |
7 | from pandas.core.dtypes.base import ExtensionDtype
8 |
class StringDtype(ExtensionDtype):
    # ``storage`` selects the backing implementation; None defers to the
    # pandas default.
    def __init__(self, storage: Literal["python", "pyarrow"] | None = None) -> None: ...
    # Missing values are represented by pd.NA.
    @property
    def na_value(self) -> NAType: ...

class StringArray(PandasArray):
    """Stub for the dedicated string extension array."""

    def __init__(self, values, copy: bool = ...) -> None: ...
    def __arrow_array__(self, type=...): ...
    def __setitem__(self, key, value) -> None: ...
    def fillna(self, value=..., method=..., limit=...): ...
    def astype(self, dtype, copy: bool = ...): ...
    def value_counts(self, dropna: bool = ...): ...
21 |
--------------------------------------------------------------------------------
/pandas-stubs/core/arrays/timedeltas.pyi:
--------------------------------------------------------------------------------
from collections.abc import Sequence
from datetime import timedelta

import numpy as np
from pandas.core.arrays.datetimelike import (
    DatetimeLikeArrayMixin,
    TimelikeOps,
)
8 |
class TimedeltaArray(DatetimeLikeArrayMixin, TimelikeOps):
    """Stub for pandas' extension array of timedelta64 data."""

    __array_priority__: int = ...
    @property
    def dtype(self): ...
    def __init__(self, values, dtype=..., freq=..., copy: bool = ...) -> None: ...
    def astype(self, dtype, copy: bool = ...): ...
    # Reductions mirror the numpy-compatible keyword-only signatures.
    def sum(
        self,
        *,
        axis=...,
        dtype=...,
        out=...,
        keepdims: bool = ...,
        initial=...,
        skipna: bool = ...,
        min_count: int = ...,
    ): ...
    def std(
        self,
        *,
        axis=...,
        dtype=...,
        out=...,
        ddof: int = ...,
        keepdims: bool = ...,
        skipna: bool = ...,
    ): ...
    def median(
        self,
        *,
        axis=...,
        out=...,
        overwrite_input: bool = ...,
        keepdims: bool = ...,
        skipna: bool = ...,
    ): ...
    # Arithmetic dunders, untyped (operand/result types vary).
    def __mul__(self, other): ...
    __rmul__ = ...
    def __truediv__(self, other): ...
    def __rtruediv__(self, other): ...
    def __floordiv__(self, other): ...
    def __rfloordiv__(self, other): ...
    def __mod__(self, other): ...
    def __rmod__(self, other): ...
    def __divmod__(self, other): ...
    def __rdivmod__(self, other): ...
    def __neg__(self): ...
    def __pos__(self): ...
    def __abs__(self): ...
    # BUG FIX: total_seconds() is evaluated elementwise over the array and
    # returns a float ndarray, not a single int.
    def total_seconds(self) -> np.ndarray: ...
    def to_pytimedelta(self) -> Sequence[timedelta]: ...
    # NOTE(review): these field accessors are array-valued at runtime; the
    # scalar ``int`` annotations look suspect -- confirm before tightening.
    days: int = ...
    seconds: int = ...
    microseconds: int = ...
    nanoseconds: int = ...
    # NOTE(review): components is a per-element breakdown (a DataFrame at
    # runtime); ``int`` looks wrong but a fix needs a pandas import -- confirm.
    @property
    def components(self) -> int: ...
66 |
# Conversion helpers to timedelta64[ns] mirrored from pandas (untyped stubs).
def sequence_to_td64ns(data, copy: bool = ..., unit: str = ..., errors: str = ...): ...
def ints_to_td64ns(data, unit: str = ...): ...
def objects_to_td64ns(data, unit: str = ..., errors: str = ...): ...
70 |
--------------------------------------------------------------------------------
/pandas-stubs/core/base.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Hashable,
3 | Iterator,
4 | )
5 | from typing import (
6 | Any,
7 | Generic,
8 | Literal,
9 | final,
10 | overload,
11 | )
12 |
13 | import numpy as np
14 | from pandas import (
15 | Index,
16 | Series,
17 | )
18 | from pandas.core.arraylike import OpsMixin
19 | from pandas.core.arrays import ExtensionArray
20 | from pandas.core.arrays.categorical import Categorical
21 | from typing_extensions import Self
22 |
23 | from pandas._typing import (
24 | S1,
25 | AxisIndex,
26 | DropKeep,
27 | NDFrameT,
28 | Scalar,
29 | npt,
30 | )
31 | from pandas.util._decorators import cache_readonly
32 |
class NoNewAttributesMixin:
    # Mixin whose __setattr__ override lets pandas block creation of new
    # attributes after construction.
    def __setattr__(self, key: str, value: Any) -> None: ...
35 |
class SelectionMixin(Generic[NDFrameT]):
    """Mixin giving selection (``obj[key]``) and ``aggregate`` to objects that
    wrap an NDFrame (e.g. groupby/resample-style objects)."""

    obj: NDFrameT
    exclusions: frozenset[Hashable]
    @final
    @cache_readonly
    def ndim(self) -> int: ...
    def __getitem__(self, key): ...
    def aggregate(self, func, *args, **kwargs): ...
44 |
class IndexOpsMixin(OpsMixin, Generic[S1]):
    """Shared stub interface for Index/Series-like containers: introspection,
    conversion, reductions and uniqueness helpers, parameterized by the
    element type ``S1``."""

    __array_priority__: int = ...
    @property
    def T(self) -> Self: ...
    @property
    def shape(self) -> tuple: ...
    @property
    def ndim(self) -> int: ...
    # Extract the single element of a length-one container.
    def item(self) -> S1: ...
    @property
    def nbytes(self) -> int: ...
    @property
    def size(self) -> int: ...
    @property
    def array(self) -> ExtensionArray: ...
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = ...,
        copy: bool = ...,
        na_value: Scalar = ...,
        **kwargs,
    ) -> np.ndarray: ...
    @property
    def empty(self) -> bool: ...
    def max(self, axis=..., skipna: bool = ..., **kwargs): ...
    def min(self, axis=..., skipna: bool = ..., **kwargs): ...
    def argmax(
        self, axis: AxisIndex | None = ..., skipna: bool = ..., *args, **kwargs
    ) -> np.int64: ...
    def argmin(
        self, axis: AxisIndex | None = ..., skipna: bool = ..., *args, **kwargs
    ) -> np.int64: ...
    def tolist(self) -> list[S1]: ...
    def to_list(self) -> list[S1]: ...
    def __iter__(self) -> Iterator[S1]: ...
    @property
    def hasnans(self) -> bool: ...
    # value_counts: integer counts by default, float proportions when
    # normalize=True -- hence the two overloads.
    @overload
    def value_counts(
        self,
        normalize: Literal[False] = ...,
        sort: bool = ...,
        ascending: bool = ...,
        bins=...,
        dropna: bool = ...,
    ) -> Series[int]: ...
    @overload
    def value_counts(
        self,
        normalize: Literal[True],
        sort: bool = ...,
        ascending: bool = ...,
        bins=...,
        dropna: bool = ...,
    ) -> Series[float]: ...
    def nunique(self, dropna: bool = ...) -> int: ...
    @property
    def is_unique(self) -> bool: ...
    @property
    def is_monotonic_decreasing(self) -> bool: ...
    @property
    def is_monotonic_increasing(self) -> bool: ...
    # Returns (codes, uniques); uniques' container type varies by caller.
    def factorize(
        self, sort: bool = ..., use_na_sentinel: bool = ...
    ) -> tuple[np.ndarray, np.ndarray | Index | Categorical]: ...
    def searchsorted(
        self, value, side: Literal["left", "right"] = ..., sorter=...
    ) -> int | list[int]: ...
    def drop_duplicates(self, *, keep: DropKeep = ...) -> Self: ...
114 |
--------------------------------------------------------------------------------
/pandas-stubs/core/common.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Collection,
3 | Iterable,
4 | )
5 |
6 | from pandas._typing import T
7 |
# Assorted internal utilities mirrored from pandas.core.common (untyped stubs).
# NOTE(review): ``flatten -> None`` looks suspect -- flatten presumably yields
# items; confirm against the pandas source before tightening.
def flatten(line) -> None: ...
def consensus_name_attr(objs): ...
def is_bool_indexer(key) -> bool: ...
def cast_scalar_indexer(val): ...
def not_none(*args): ...
def any_none(*args): ...
def all_none(*args): ...
def any_not_none(*args): ...
def all_not_none(*args): ...
def count_not_none(*args): ...
def asarray_tuplesafe(values, dtype=...): ...
def index_labels_to_array(labels, dtype=...): ...
def maybe_make_list(obj): ...
def maybe_iterable_to_list(obj: Iterable[T] | T) -> Collection[T] | T: ...
def is_null_slice(obj): ...
def is_true_slices(line): ...
def is_full_slice(obj, line): ...
def get_callable_name(obj): ...
def apply_if_callable(maybe_callable, obj, **kwargs): ...
def standardize_mapping(into): ...
def random_state(state=...): ...
def pipe(obj, func, *args, **kwargs): ...
def get_rename_function(mapper): ...
31 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/__init__.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/core/computation/__init__.pyi
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/align.pyi:
--------------------------------------------------------------------------------
# Alignment helpers used by the computation engines (untyped stubs).
def align_terms(terms): ...
def reconstruct_object(typ, obj, axes, dtype): ...
3 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/api.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.computation.eval import eval as eval
2 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/common.pyi:
--------------------------------------------------------------------------------
1 | def result_type_many(*arrays_and_dtypes): ...
2 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/engines.pyi:
--------------------------------------------------------------------------------
1 | import abc
2 |
class AbstractEngine(metaclass=abc.ABCMeta):
    """Abstract base for the expression-evaluation engines."""

    has_neg_frac: bool = ...
    expr = ...
    aligned_axes = ...
    result_type = ...
    def __init__(self, expr) -> None: ...
    # convert() renders the expression as a string; evaluate() runs it.
    def convert(self) -> str: ...
    def evaluate(self) -> object: ...

class NumExprEngine(AbstractEngine):
    # numexpr-backed engine.
    has_neg_frac: bool = ...

class PythonEngine(AbstractEngine):
    # Plain-Python engine.
    has_neg_frac: bool = ...
    def evaluate(self): ...
18 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/eval.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Mapping
2 | from typing import (
3 | Any,
4 | Literal,
5 | )
6 |
7 | from pandas import (
8 | DataFrame,
9 | Series,
10 | )
11 | from pandas.core.computation.ops import BinOp
12 |
13 | from pandas._typing import (
14 | Scalar,
15 | npt,
16 | )
17 |
# Evaluate an expression string (or already-parsed BinOp). The broad return
# union reflects that the result depends on the expression and on
# ``target``/``inplace`` (None when assigning in place).
def eval(
    expr: str | BinOp,
    parser: Literal["pandas", "python"] = ...,
    engine: Literal["python", "numexpr"] | None = ...,
    local_dict: dict[str, Any] | None = ...,
    global_dict: dict[str, Any] | None = ...,
    resolvers: list[Mapping] | None = ...,
    level: int = ...,
    target: object | None = ...,
    inplace: bool = ...,
) -> npt.NDArray | Scalar | DataFrame | Series | None: ...
29 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/expr.pyi:
--------------------------------------------------------------------------------
1 | import ast
2 |
3 | from pandas.core.computation.ops import Term as Term
4 | from pandas.core.computation.scope import Scope as Scope
5 |
# Module-level helpers for assembling expression visitors (untyped stubs).
intersection = ...

def disallow(nodes): ...
def add_ops(op_classes): ...
10 |
class BaseExprVisitor(ast.NodeVisitor):
    """AST visitor that walks a parsed expression and builds computation
    terms/ops; subclasses customize parser behavior."""

    # Class-level op tables and node maps (untyped in this stub).
    const_type = ...
    term_type = ...
    binary_ops = ...
    binary_op_nodes = ...
    binary_op_nodes_map = ...
    unary_ops = ...
    unary_op_nodes = ...
    unary_op_nodes_map = ...
    rewrite_map = ...
    # Per-instance parsing state.
    env = ...
    engine = ...
    parser = ...
    preparser = ...
    assigner = ...
    def __init__(self, env, engine, parser, preparser=...) -> None: ...
    def visit(self, node, **kwargs): ...
    # One visit_* hook per supported AST node type (ast.NodeVisitor protocol).
    def visit_Module(self, node, **kwargs): ...
    def visit_Expr(self, node, **kwargs): ...
    def visit_BinOp(self, node, **kwargs): ...
    def visit_Div(self, node, **kwargs): ...
    def visit_UnaryOp(self, node, **kwargs): ...
    def visit_Name(self, node, **kwargs): ...
    def visit_NameConstant(self, node, **kwargs): ...
    def visit_Num(self, node, **kwargs): ...
    def visit_Constant(self, node, **kwargs): ...
    def visit_Str(self, node, **kwargs): ...
    def visit_List(self, node, **kwargs): ...
    def visit_Index(self, node, **kwargs): ...
    def visit_Subscript(self, node, **kwargs): ...
    def visit_Slice(self, node, **kwargs): ...
    def visit_Assign(self, node, **kwargs): ...
    def visit_Attribute(self, node, **kwargs): ...
    def visit_Call(self, node, side=..., **kwargs): ...
    def translate_In(self, op): ...
    def visit_Compare(self, node, **kwargs): ...
    def visit_BoolOp(self, node, **kwargs): ...
48 |
# Concrete visitors for the "pandas" and "python" parsers.
class PandasExprVisitor(BaseExprVisitor):
    def __init__(self, env, engine, parser, preparser=...) -> None: ...

class PythonExprVisitor(BaseExprVisitor):
    def __init__(self, env, engine, parser, preparser=...) -> None: ...
54 |
class Expr:
    """A parsed expression: source text plus the resolved term tree, bound to
    an evaluation Scope and an engine/parser pair."""

    env: Scope
    engine: str
    parser: str
    expr = ...
    terms = ...
    def __init__(
        self,
        expr,
        engine: str = ...,
        parser: str = ...,
        env: Scope | None = ...,
        level: int = ...,
    ) -> None: ...
    @property
    def assigner(self): ...
    def __call__(self): ...
    def __len__(self) -> int: ...
    def parse(self): ...
    @property
    def names(self): ...
76 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/expressions.pyi:
--------------------------------------------------------------------------------
# numexpr acceleration toggles, the evaluate/where dispatchers, and the
# test-mode hooks (untyped stubs).
def set_use_numexpr(v: bool = ...) -> None: ...
def set_numexpr_threads(n=...) -> None: ...
def evaluate(op, a, b, use_numexpr: bool = ...): ...
def where(cond, a, b, use_numexpr: bool = ...): ...
def set_test_mode(v: bool = ...) -> None: ...
def get_test_result(): ...
7 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/ops.pyi:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
class UndefinedVariableError(NameError):
    # Raised when an expression references a name missing from its scope;
    # ``is_local`` distinguishes ``@``-prefixed local references.
    def __init__(self, name, is_local: bool = ...) -> None: ...

class Term:
    """A leaf of the expression tree: a named value resolved from the
    evaluation environment."""

    def __new__(cls, name, env, side=..., encoding=...): ...
    is_local: bool
    env = ...
    side = ...
    encoding = ...
    def __init__(self, name, env, side=..., encoding=...) -> None: ...
    @property
    def local_name(self) -> str: ...
    def __call__(self, *args, **kwargs): ...
    def evaluate(self, *args, **kwargs): ...
    # Rebind the term's value in its environment.
    def update(self, value) -> None: ...
    @property
    def is_scalar(self) -> bool: ...
    @property
    def type(self): ...
    return_type = ...
    @property
    def raw(self) -> str: ...
    @property
    def is_datetime(self) -> bool: ...
    @property
    def value(self): ...
    @value.setter
    def value(self, new_value) -> None: ...
    @property
    def name(self): ...
    @property
    def ndim(self) -> int: ...

class Constant(Term):
    # Term whose value is a literal constant.
    @property
    def name(self): ...
39 |
class Op:
    """Base node for operators in the expression tree, holding the operator
    symbol and its operand subtrees."""

    op: str
    operands = ...
    encoding = ...
    def __init__(self, op: str, operands, *args, **kwargs) -> None: ...
    def __iter__(self): ...
    @property
    def return_type(self): ...
    @property
    def has_invalid_return_type(self) -> bool: ...
    @property
    def operand_types(self): ...
    @property
    def is_scalar(self) -> bool: ...
    @property
    def is_datetime(self) -> bool: ...

# True when ``obj`` is a Term (untyped predicate stub).
def is_term(obj) -> bool: ...
58 |
class BinOp(Op):
    """Binary operator node with left/right operand subtrees."""

    lhs = ...
    rhs = ...
    func = ...
    def __init__(self, op: str, lhs, rhs, **kwargs) -> None: ...
    def __call__(self, env): ...
    # ``eval_in_python`` selects ops that must bypass the engine.
    def evaluate(self, env, engine: str, parser, term_type, eval_in_python): ...
    def convert_values(self): ...

# True when ``dtype`` is a numeric dtype (untyped predicate stub).
def isnumeric(dtype) -> bool: ...
69 |
class Div(BinOp):
    # Division gets its own node (special-cased by the visitors).
    def __init__(self, lhs, rhs, **kwargs) -> None: ...

class UnaryOp(Op):
    """Unary operator node with a single operand."""

    operand = ...
    func = ...
    def __init__(self, op: str, operand) -> None: ...
    def __call__(self, env): ...
    @property
    def return_type(self) -> np.dtype: ...

class MathCall(Op):
    # A call to a math function within an expression.
    func = ...
    def __init__(self, func, args) -> None: ...
    def __call__(self, env): ...

class FuncNode:
    # Named math function looked up for MathCall.
    name = ...
    func = ...
    def __init__(self, name: str) -> None: ...
    def __call__(self, *args): ...
91 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/parsing.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Iterator
2 | import tokenize
3 |
# Token type code assigned to backtick-quoted column references.
BACKTICK_QUOTED_STRING: int

# Helpers for turning backtick-quoted column names into valid Python
# identifiers during query/eval tokenization.
def create_valid_python_identifier(name: str) -> str: ...
def clean_backtick_quoted_toks(tok: tuple[int, str]) -> tuple[int, str]: ...
def clean_column_name(name: str) -> str: ...
def tokenize_backtick_quoted_string(
    token_generator: Iterator[tokenize.TokenInfo], source: str, string_start: int
) -> tuple[int, str]: ...
# Yields (token type, token string) pairs for ``source``.
def tokenize_string(source: str) -> Iterator[tuple[int, str]]: ...
13 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/pytables.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | from pandas.core.computation import (
4 | expr as expr,
5 | ops as ops,
6 | scope as _scope,
7 | )
8 | from pandas.core.computation.expr import BaseExprVisitor as BaseExprVisitor
9 | from pandas.core.indexes.base import Index
10 |
11 | class PyTablesScope(_scope.Scope):
12 | queryables: dict[str, Any]
13 | def __init__(
14 | self,
15 | level: int,
16 | global_dict=...,
17 | local_dict=...,
18 | queryables: dict[str, Any] | None = ...,
19 | ) -> None: ...
20 |
21 | class Term(ops.Term):  # a name/operand in a PyTables (HDFStore) query expression
22 |     env = ...  # the PyTablesScope this term resolves against
23 |     def __new__(cls, name, env, side=..., encoding=...): ...
24 |     def __init__(self, name, env: PyTablesScope, side=..., encoding=...) -> None: ...
25 |     @property
26 |     def value(self): ...  # resolved value of the term; settable below
27 |     @value.setter
28 |     def value(self, new_value) -> None: ...
29 |
30 | class Constant(Term):  # a literal (non-resolvable) term in a PyTables query expression
31 |     def __init__(self, name, env: PyTablesScope, side=..., encoding=...) -> None: ...
32 |
33 | class BinOp(ops.BinOp):  # binary operation specialized for PyTables where-clause generation
34 |     op: str  # operator symbol, e.g. "==", ">", "&"
35 |     queryables: dict[str, Any]  # columns that can appear in the generated condition
36 |     encoding = ...
37 |     condition = ...
38 |     def __init__(
39 |         self, op: str, lhs, rhs, queryables: dict[str, Any], encoding
40 |     ) -> None: ...
41 |     def prune(self, klass): ...  # rewrite/simplify this node into an instance of klass where possible
42 |     def conform(self, rhs): ...
43 |     @property
44 |     def is_valid(self) -> bool: ...  # True when the op can be expressed as a PyTables condition
45 |     @property
46 |     def is_in_table(self) -> bool: ...  # True when the referenced column is a table column
47 |     @property
48 |     def kind(self): ...
49 |     @property
50 |     def meta(self): ...
51 |     @property
52 |     def metadata(self): ...
53 |     def generate(self, v) -> str: ...  # render one comparison as a PyTables condition string
54 |     def convert_value(self, v) -> TermValue: ...  # coerce a python value for comparison against the column
55 |     def convert_values(self) -> None: ...
56 |
57 | class FilterBinOp(BinOp):  # BinOp evaluated as a post-read filter rather than an in-kernel condition
58 |     filter: tuple[Any, Any, Index] | None = ...  # (column, op, values) filter spec; None when not applicable
59 |     def invert(self): ...  # return the logically negated filter
60 |     def format(self): ...
61 |     def generate_filter_op(self, invert: bool = ...): ...
62 |
63 | class JointFilterBinOp(FilterBinOp):  # combined (and/or) filter; cannot be formatted as a single condition
64 |     def format(self) -> None: ...
65 |
66 | class ConditionBinOp(BinOp):  # BinOp rendered as an in-kernel PyTables condition string
67 |     def invert(self) -> None: ...
68 |     def format(self): ...
69 |     condition = ...  # the rendered condition; concrete type not visible here
70 |
71 | class JointConditionBinOp(ConditionBinOp):  # and/or combination of two rendered conditions
72 |     condition = ...
73 |
74 | class UnaryOp(ops.UnaryOp):  # unary (e.g. ~) operation in a PyTables query expression
75 |     def prune(self, klass): ...  # push the negation down and rewrite as klass where possible
76 |
77 | class PyTablesExprVisitor(BaseExprVisitor):  # AST visitor that builds PyTables terms/ops from a query string
78 |     const_type = ...  # node class used for literals (see Constant above)
79 |     term_type = ...  # node class used for names (see Term above)
80 |     def __init__(self, env, engine, parser, **kwargs) -> None: ...
81 |     def visit_UnaryOp(self, node, **kwargs): ...
82 |     def visit_Index(self, node, **kwargs): ...
83 |     def visit_Assign(self, node, **kwargs): ...
84 |     def visit_Subscript(self, node, **kwargs): ...
85 |     def visit_Attribute(self, node, **kwargs): ...
86 |     def translate_In(self, op): ...  # maps Python "in" to the equivalent comparison op
87 |
88 | class PyTablesExpr(expr.Expr):  # a parsed HDFStore "where" expression
89 |     encoding = ...
90 |     condition = ...  # in-kernel condition produced by evaluate()
91 |     filter = ...  # post-read filter produced by evaluate()
92 |     terms = ...  # root of the parsed term tree
93 |     expr = ...  # the original expression source
94 |     def __init__(
95 |         self,
96 |         where,
97 |         queryables: dict[str, Any] | None = ...,
98 |         encoding=...,
99 |         scope_level: int = ...,
100 |     ) -> None: ...
101 |     def evaluate(self): ...  # produce (condition, filter) from the parsed terms
102 |
103 | class TermValue:  # holds a comparison value in both raw and PyTables-converted form
104 |     value = ...  # original python value
105 |     converted = ...  # value converted for the column's kind
106 |     kind = ...  # column kind string driving the conversion
107 |     def __init__(self, value, converted, kind: str) -> None: ...
108 |     def tostring(self, encoding) -> str: ...  # render the converted value for a condition string
109 |
110 | def maybe_expression(s) -> bool: ...  # heuristic: does string s look like a where-expression?
111 |
--------------------------------------------------------------------------------
/pandas-stubs/core/computation/scope.pyi:
--------------------------------------------------------------------------------
1 | def ensure_scope(  # build a Scope for eval(), capturing frames `level` steps up the stack
2 |     level: int, global_dict=..., local_dict=..., resolvers=..., target=..., **kwargs
3 | ) -> Scope: ...
4 |
5 | class Scope:  # name-resolution context (locals/globals/resolvers/temporaries) for eval()
6 |     level = ...  # stack depth the scope was captured at
7 |     scope = ...  # combined variable mapping; concrete type not visible here
8 |     target = ...  # assignment target for queries like "a = b + c", if any
9 |     resolvers = ...  # extra mappings consulted before locals/globals
10 |     temps = ...  # temporary variables added via add_tmp()
11 |     def __init__(
12 |         self, level, global_dict=..., local_dict=..., resolvers=..., target=...
13 |     ) -> None: ...
14 |     @property
15 |     def has_resolvers(self) -> bool: ...
16 |     def resolve(self, key: str, is_local: bool): ...  # look up a name; is_local controls search order
17 |     def swapkey(self, old_key: str, new_key: str, new_value=...): ...  # rename a variable in the scope
18 |     def add_tmp(self, value) -> str: ...  # stash a value, returning the generated temp name
19 |     @property
20 |     def ntemps(self) -> int: ...  # number of temporaries currently held
21 |     @property
22 |     def full_scope(self): ...  # merged view of temps + resolvers + locals/globals
23 |
--------------------------------------------------------------------------------
/pandas-stubs/core/config_init.pyi:
--------------------------------------------------------------------------------
1 | from typing import Literal
2 |
3 | use_bottleneck_doc: str = ...  # docstrings for the registered pandas options; values not visible in this stub
4 |
5 | def use_bottleneck_cb(key) -> None: ...  # option-change callback: toggles bottleneck usage
6 |
7 | use_numexpr_doc: str = ...
8 |
9 | def use_numexpr_cb(key) -> None: ...  # option-change callback: toggles numexpr usage
10 |
11 | pc_precision_doc: str = ...
12 | pc_colspace_doc: str = ...
13 | pc_max_rows_doc: str = ...
14 | pc_min_rows_doc: str = ...
15 | pc_max_cols_doc: str = ...
16 | pc_max_categories_doc: str = ...
17 | pc_max_info_cols_doc: str = ...
18 | pc_nb_repr_h_doc: str = ...
19 | pc_pprint_nest_depth: str = ...
20 | pc_multi_sparse_doc: str = ...
21 | float_format_doc: str = ...
22 | max_colwidth_doc: str = ...
23 | colheader_justify_doc: str = ...
24 | pc_expand_repr_doc: str = ...
25 | pc_show_dimensions_doc: str = ...
26 | pc_east_asian_width_doc: str = ...
27 | pc_ambiguous_as_wide_doc: str = ...
28 | pc_latex_repr_doc: str = ...
29 | pc_table_schema_doc: str = ...
30 | pc_html_border_doc: str = ...
31 | pc_html_use_mathjax_doc: str = ...
32 | pc_width_doc: str = ...
33 | pc_chop_threshold_doc: str = ...
34 | pc_max_seq_items: str = ...
35 | pc_max_info_rows_doc: str = ...
36 | pc_large_repr_doc: str = ...
37 | pc_memory_usage_doc: str = ...
38 | pc_latex_escape: str = ...
39 | pc_latex_longtable: str = ...
40 | pc_latex_multicolumn: str = ...
41 | pc_latex_multicolumn_format: str = ...
42 | pc_latex_multirow: str = ...
43 |
44 | def table_schema_cb(key) -> None: ...
45 | def is_terminal() -> bool: ...  # True when running in a real terminal (not a notebook frontend)
46 |
47 | max_cols: int = ...
48 | tc_sim_interactive_doc: str = ...
49 | use_inf_as_null_doc: str = ...  # NOTE: use_inf_as_null/use_inf_as_na are deprecated options in pandas 2.x
50 | use_inf_as_na_doc: str = ...
51 |
52 | def use_inf_as_na_cb(key) -> None: ...
53 |
54 | chained_assignment: Literal["warn", "raise"] | None  # current mode.chained_assignment setting
55 | reader_engine_doc: str = ...
56 | writer_engine_doc: str = ...
57 | parquet_engine_doc: str = ...
58 | plotting_backend_doc: str = ...
59 |
60 | def register_plotting_backend_cb(key) -> None: ...
61 |
62 | register_converter_doc: str = ...
63 |
64 | def register_converter_cb(key) -> None: ...
65 |
--------------------------------------------------------------------------------
/pandas-stubs/core/construction.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Sequence
2 |
3 | import numpy as np
4 | from pandas.core.arrays.base import ExtensionArray
5 |
6 | from pandas.core.dtypes.dtypes import ExtensionDtype
7 |
8 | def array(  # public pd.array(): build an ExtensionArray for the data, inferring dtype when None
9 |     data: Sequence[object],
10 |     dtype: str | np.dtype | ExtensionDtype | None = ...,
11 |     copy: bool = ...,
12 | ) -> ExtensionArray: ...
13 | def extract_array(obj, extract_numpy: bool = ...): ...  # unwrap Series/Index to the underlying array
14 | def sanitize_array(
15 |     data, index, dtype=..., copy: bool = ..., raise_cast_failure: bool = ...
16 | ): ...  # internal: coerce arbitrary data into an array suitable for Series construction
17 | def is_empty_data(data) -> bool: ...
18 |
--------------------------------------------------------------------------------
/pandas-stubs/core/dtypes/__init__.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/core/dtypes/__init__.pyi
--------------------------------------------------------------------------------
/pandas-stubs/core/dtypes/api.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.dtypes.common import (
2 | is_any_real_numeric_dtype as is_any_real_numeric_dtype,
3 | is_array_like as is_array_like,
4 | is_bool as is_bool,
5 | is_bool_dtype as is_bool_dtype,
6 | is_complex as is_complex,
7 | is_complex_dtype as is_complex_dtype,
8 | is_datetime64_any_dtype as is_datetime64_any_dtype,
9 | is_datetime64_dtype as is_datetime64_dtype,
10 | is_datetime64_ns_dtype as is_datetime64_ns_dtype,
11 | is_dict_like as is_dict_like,
12 | is_dtype_equal as is_dtype_equal,
13 | is_extension_array_dtype as is_extension_array_dtype,
14 | is_file_like as is_file_like,
15 | is_float as is_float,
16 | is_float_dtype as is_float_dtype,
17 | is_hashable as is_hashable,
18 | is_integer as is_integer,
19 | is_integer_dtype as is_integer_dtype,
20 | is_iterator as is_iterator,
21 | is_list_like as is_list_like,
22 | is_named_tuple as is_named_tuple,
23 | is_number as is_number,
24 | is_numeric_dtype as is_numeric_dtype,
25 | is_object_dtype as is_object_dtype,
26 | is_re as is_re,
27 | is_re_compilable as is_re_compilable,
28 | is_scalar as is_scalar,
29 | is_signed_integer_dtype as is_signed_integer_dtype,
30 | is_string_dtype as is_string_dtype,
31 | is_timedelta64_dtype as is_timedelta64_dtype,
32 | is_timedelta64_ns_dtype as is_timedelta64_ns_dtype,
33 | is_unsigned_integer_dtype as is_unsigned_integer_dtype,
34 | pandas_dtype as pandas_dtype,
35 | )
36 |
--------------------------------------------------------------------------------
/pandas-stubs/core/dtypes/base.pyi:
--------------------------------------------------------------------------------
1 | from typing import (
2 | ClassVar,
3 | Literal,
4 | TypeVar,
5 | )
6 |
7 | from pandas.core.arrays import ExtensionArray
8 |
9 | from pandas._typing import type_t
10 |
11 | class ExtensionDtype:
12 | type: ClassVar[type_t]
13 | name: ClassVar[str]
14 |
15 | @property
16 | def na_value(self) -> object: ...
17 | @property
18 | def kind(
19 | self,
20 | ) -> Literal["b", "i", "u", "f", "c", "m", "M", "O", "S", "U", "V", "T"]: ...
21 | @property
22 | def names(self) -> list[str] | None: ...
23 | def empty(self, size: int | tuple[int, ...]) -> type_t[ExtensionArray]: ...
24 | @classmethod
25 | def construct_array_type(cls) -> type_t[ExtensionArray]: ...
26 | @classmethod
27 | def construct_from_string(cls, string: str) -> ExtensionDtype: ...
28 | @classmethod
29 | def is_dtype(cls, dtype: object) -> bool: ...
30 |
31 | class StorageExtensionDtype(ExtensionDtype): ...  # ExtensionDtype with a storage backend (e.g. string[pyarrow])
32 |
33 | _ExtensionDtypeT = TypeVar("_ExtensionDtypeT", bound=ExtensionDtype)
34 |
35 | def register_extension_dtype(cls: type[_ExtensionDtypeT]) -> type[_ExtensionDtypeT]: ...  # class decorator; returns cls unchanged
36 |
--------------------------------------------------------------------------------
/pandas-stubs/core/dtypes/cast.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/core/dtypes/cast.pyi
--------------------------------------------------------------------------------
/pandas-stubs/core/dtypes/common.pyi:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from pandas.api.extensions import ExtensionDtype
3 | from typing_extensions import TypeAlias
4 |
5 | from pandas._typing import (
6 | ArrayLike,
7 | Dtype,
8 | DtypeObj,
9 | npt,
10 | )
11 |
12 | from pandas.core.dtypes.inference import (
13 | is_array_like as is_array_like,
14 | is_bool as is_bool,
15 | is_complex as is_complex,
16 | is_dict_like as is_dict_like,
17 | is_file_like as is_file_like,
18 | is_float as is_float,
19 | is_hashable as is_hashable,
20 | is_integer as is_integer,
21 | is_iterator as is_iterator,
22 | is_list_like as is_list_like,
23 | is_named_tuple as is_named_tuple,
24 | is_number as is_number,
25 | is_re as is_re,
26 | is_re_compilable as is_re_compilable,
27 | is_scalar as is_scalar,
28 | )
29 |
30 | _ArrayOrDtype: TypeAlias = (  # anything the is_*_dtype predicates accept: array-likes, dtype-likes, pandas containers
31 |     ArrayLike | npt.DTypeLike | pd.Series | pd.DataFrame | pd.Index | ExtensionDtype
32 | )
33 |
34 | def is_object_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
35 | def is_datetime64_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
36 | def is_timedelta64_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
37 | def is_dtype_equal(source: Dtype, target: Dtype) -> bool: ...
38 | def is_string_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
39 | def is_integer_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
40 | def is_signed_integer_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
41 | def is_unsigned_integer_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
42 | def is_datetime64_any_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
43 | def is_datetime64_ns_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
44 | def is_timedelta64_ns_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
45 | def is_numeric_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
46 | def is_float_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
47 | def is_bool_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
48 | def is_extension_array_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
49 | def is_complex_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
50 | def is_any_real_numeric_dtype(arr_or_dtype: _ArrayOrDtype) -> bool: ...
51 | def pandas_dtype(dtype: object) -> DtypeObj: ...  # canonicalize any dtype-like into a np.dtype or ExtensionDtype
52 |
--------------------------------------------------------------------------------
/pandas-stubs/core/dtypes/concat.pyi:
--------------------------------------------------------------------------------
1 | from typing import TypeVar
2 |
3 | from pandas import (
4 | Categorical,
5 | CategoricalIndex,
6 | Series,
7 | )
8 |
9 | _CatT = TypeVar("_CatT", bound=Categorical | CategoricalIndex | Series)  # all inputs must share one categorical container type
10 |
11 | def union_categoricals(
12 |     to_union: list[_CatT], sort_categories: bool = ..., ignore_order: bool = ...
13 | ) -> Categorical: ...  # combine categoricals, unioning their categories; always returns a Categorical
14 |
--------------------------------------------------------------------------------
/pandas-stubs/core/dtypes/dtypes.pyi:
--------------------------------------------------------------------------------
1 | import datetime as dt
2 | from typing import (
3 | Any,
4 | Literal,
5 | )
6 |
7 | import numpy as np
8 | from pandas.core.indexes.base import Index
9 | from pandas.core.series import Series
10 |
11 | from pandas._libs import NaTType
12 | from pandas._libs.tslibs import BaseOffset
13 | from pandas._libs.tslibs.offsets import (
14 | RelativeDeltaOffset,
15 | SingleConstructorOffset,
16 | )
17 | from pandas._typing import (
18 | Ordered,
19 | TimeZones,
20 | npt,
21 | )
22 |
23 | from pandas.core.dtypes.base import (
24 | ExtensionDtype as ExtensionDtype,
25 | register_extension_dtype as register_extension_dtype,
26 | )
27 |
28 | class BaseMaskedDtype(ExtensionDtype): ...  # base for nullable (masked) dtypes such as Int64/boolean
29 | class PandasExtensionDtype(ExtensionDtype): ...  # base for pandas' own internal extension dtypes
30 |
31 | class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):  # dtype for Categorical data
32 |     def __init__(
33 |         self,
34 |         categories: Series | Index | list[Any] | None = ...,  # None means categories are inferred from data
35 |         ordered: Ordered = ...,
36 |     ) -> None: ...
37 |     @property
38 |     def categories(self) -> Index: ...
39 |     @property
40 |     def ordered(self) -> Ordered: ...
41 |
42 | class DatetimeTZDtype(PandasExtensionDtype):  # dtype for timezone-aware datetimes
43 |     def __init__(self, unit: Literal["ns"] = ..., tz: TimeZones = ...) -> None: ...
44 |     @property
45 |     def unit(self) -> Literal["ns"]: ...  # per this stub only nanosecond resolution is declared
46 |     @property
47 |     def tz(self) -> dt.tzinfo: ...
48 |     @property
49 |     def na_value(self) -> NaTType: ...  # missing-value sentinel is NaT
50 |
51 | class PeriodDtype(PandasExtensionDtype):  # dtype for Period data with a fixed frequency
52 |     def __init__(
53 |         self, freq: str | SingleConstructorOffset | RelativeDeltaOffset = ...
54 |     ) -> None: ...
55 |     @property
56 |     def freq(self) -> BaseOffset: ...
57 |     @property
58 |     def na_value(self) -> NaTType: ...  # missing-value sentinel is NaT
59 |
60 | class IntervalDtype(PandasExtensionDtype):  # dtype for Interval data
61 |     def __init__(self, subtype: str | npt.DTypeLike | None = ...) -> None: ...
62 |     @property
63 |     def subtype(self) -> np.dtype | None: ...  # dtype of the interval bounds; None when unspecified
64 |
--------------------------------------------------------------------------------
/pandas-stubs/core/dtypes/generic.pyi:
--------------------------------------------------------------------------------
1 | from pandas import Series
2 | from pandas.core.arrays import ExtensionArray
3 | from typing_extensions import TypeAlias
4 |
5 | ABCSeries: TypeAlias = type[Series]  # stubbed as plain type aliases; at runtime these are abc-registered classes
6 | ABCExtensionArray: TypeAlias = type[ExtensionArray]
7 |
--------------------------------------------------------------------------------
/pandas-stubs/core/dtypes/inference.pyi:
--------------------------------------------------------------------------------
1 | from pandas._libs import lib
2 |
3 | is_bool = lib.is_bool  # these predicates are re-exported directly from the C-implemented pandas._libs.lib
4 | is_integer = lib.is_integer
5 | is_float = lib.is_float
6 | is_complex = lib.is_complex
7 | is_scalar = lib.is_scalar
8 | is_list_like = lib.is_list_like
9 | is_iterator = lib.is_iterator
10 |
11 | def is_number(obj: object) -> bool: ...
12 | def is_file_like(obj: object) -> bool: ...  # has read/write and is iterable, per pandas docs
13 | def is_re(obj: object) -> bool: ...  # compiled regex pattern?
14 | def is_array_like(obj: object) -> bool: ...
15 | def is_re_compilable(obj: object) -> bool: ...  # can re.compile(obj) succeed?
16 | def is_dict_like(obj: object) -> bool: ...
17 | def is_named_tuple(obj: object) -> bool: ...
18 | def is_hashable(obj: object) -> bool: ...
19 |
--------------------------------------------------------------------------------
/pandas-stubs/core/dtypes/missing.pyi:
--------------------------------------------------------------------------------
1 | from typing import (
2 | Any,
3 | overload,
4 | )
5 |
6 | import numpy as np
7 | from numpy import typing as npt
8 | from pandas import (
9 | DataFrame,
10 | Index,
11 | Series,
12 | )
13 | from typing_extensions import TypeIs
14 |
15 | from pandas._libs.missing import NAType
16 | from pandas._libs.tslibs import NaTType
17 | from pandas._typing import (
18 | ArrayLike,
19 | Scalar,
20 | ScalarT,
21 | )
22 |
23 | isposinf_scalar = ...  # presumably bound to np.isposinf/np.isneginf at runtime — not visible in this stub
24 | isneginf_scalar = ...
25 |
26 | @overload
27 | def isna(obj: DataFrame) -> DataFrame: ...  # element-wise missing-value test; return shape follows input
28 | @overload
29 | def isna(obj: Series[Any]) -> Series[bool]: ...
30 | @overload
31 | def isna(obj: Index[Any] | list[Any] | ArrayLike) -> npt.NDArray[np.bool_]: ...
32 | @overload
33 | def isna(
34 |     obj: Scalar | NaTType | NAType | None,
35 | ) -> TypeIs[NaTType | NAType | None]: ...  # scalar case narrows the type for checkers
36 |
37 | isnull = isna  # alias
38 |
39 | @overload
40 | def notna(obj: DataFrame) -> DataFrame: ...  # inverse of isna, same overload structure
41 | @overload
42 | def notna(obj: Series[Any]) -> Series[bool]: ...
43 | @overload
44 | def notna(obj: Index[Any] | list[Any] | ArrayLike) -> npt.NDArray[np.bool_]: ...
45 | @overload
46 | def notna(obj: ScalarT | NaTType | NAType | None) -> TypeIs[ScalarT]: ...
47 |
48 | notnull = notna  # alias
49 |
--------------------------------------------------------------------------------
/pandas-stubs/core/groupby/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.groupby.generic import (
2 | DataFrameGroupBy as DataFrameGroupBy,
3 | NamedAgg as NamedAgg,
4 | SeriesGroupBy as SeriesGroupBy,
5 | )
6 | from pandas.core.groupby.groupby import GroupBy as GroupBy
7 | from pandas.core.groupby.grouper import Grouper as Grouper
8 |
9 | __all__ = [
10 | "DataFrameGroupBy",
11 | "NamedAgg",
12 | "SeriesGroupBy",
13 | "GroupBy",
14 | "Grouper",
15 | ]
16 |
--------------------------------------------------------------------------------
/pandas-stubs/core/groupby/base.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Hashable
2 | import dataclasses
3 |
4 | @dataclasses.dataclass(order=True, frozen=True)
5 | class OutputKey:  # immutable, orderable key identifying one column of groupby output
6 |     label: Hashable
7 |     position: int
8 |
--------------------------------------------------------------------------------
/pandas-stubs/core/groupby/categorical.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/core/groupby/categorical.pyi
--------------------------------------------------------------------------------
/pandas-stubs/core/groupby/grouper.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Hashable,
3 | Iterator,
4 | )
5 | from typing import (
6 | final,
7 | overload,
8 | )
9 |
10 | import numpy as np
11 | from pandas import (
12 | DataFrame,
13 | Index,
14 | Series,
15 | )
16 | from pandas.core.resample import TimeGrouper
17 | from typing_extensions import Self
18 |
19 | from pandas._libs.lib import NoDefault
20 | from pandas._typing import (
21 | ArrayLike,
22 | Axis,
23 | Frequency,
24 | Incomplete,
25 | KeysArgType,
26 | Level,
27 | ListLikeHashable,
28 | npt,
29 | )
30 | from pandas.util._decorators import cache_readonly
31 |
32 | class Grouper:  # user-facing grouping specification (pd.Grouper)
33 |     key: KeysArgType | None
34 |     level: Level | ListLikeHashable[Level] | None
35 |     freq: Frequency | None
36 |     axis: Axis
37 |     sort: bool
38 |     dropna: bool
39 |     binner: Incomplete
40 |     @overload
41 |     def __new__(
42 |         cls,
43 |         key: KeysArgType | None = ...,
44 |         level: Level | ListLikeHashable[Level] | None = ...,
45 |         axis: Axis | NoDefault = ...,
46 |         sort: bool = ...,
47 |         dropna: bool = ...,
48 |     ) -> Self: ...
49 |     @overload
50 |     def __new__(cls, *args, freq: Frequency, **kwargs) -> TimeGrouper: ...  # passing freq produces a TimeGrouper instead
51 |     @final
52 |     def __repr__(self) -> str: ...  # noqa: PYI029 __repr__ here is final
53 |
54 | @final
55 | class Grouping:  # internal: one resolved grouping dimension within a groupby
56 |     level: Level | None
57 |     obj: DataFrame | Series | None
58 |     in_axis: bool  # whether the grouping came from a column of the grouped object
59 |     grouping_vector: Incomplete
60 |     def __iter__(self) -> Iterator[Hashable]: ...  # iterates over group labels
61 |     @cache_readonly
62 |     def name(self) -> Hashable: ...
63 |     @cache_readonly
64 |     def ngroups(self) -> int: ...
65 |     @cache_readonly
66 |     def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: ...  # group label -> positional indices
67 |     @property
68 |     def codes(self) -> npt.NDArray[np.signedinteger]: ...  # per-row group code (-1 marks dropped NA rows in pandas)
69 |     @cache_readonly
70 |     def group_arraylike(self) -> ArrayLike: ...
71 |     @cache_readonly
72 |     def result_index(self) -> Index: ...
73 |     @cache_readonly
74 |     def group_index(self) -> Index: ...
75 |
--------------------------------------------------------------------------------
/pandas-stubs/core/groupby/indexing.pyi:
--------------------------------------------------------------------------------
1 | from typing import (
2 | Any,
3 | Generic,
4 | Literal,
5 | TypeVar,
6 | )
7 |
8 | from pandas import (
9 | DataFrame,
10 | Series,
11 | )
12 | from pandas.core.groupby import groupby
13 |
14 | from pandas._typing import PositionalIndexer
15 |
16 | _GroupByT = TypeVar("_GroupByT", bound=groupby.GroupBy[Any])
17 |
18 | class GroupByIndexingMixin: ...  # marker mixin; positional-indexing behavior provided elsewhere
19 |
20 | class GroupByPositionalSelector:  # backs GroupBy._positional_selector: per-group positional row selection
21 |     groupby_object: groupby.GroupBy
22 |     def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series: ...
23 |
24 | class GroupByNthSelector(Generic[_GroupByT]):  # backs GroupBy.nth; callable and subscriptable
25 |     groupby_object: _GroupByT
26 |
27 |     def __call__(
28 |         self,
29 |         n: PositionalIndexer | tuple,
30 |         dropna: Literal["any", "all", None] = ...,  # dropna only available via the call form, not __getitem__
31 |     ) -> DataFrame | Series: ...
32 |     def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series: ...
33 |
--------------------------------------------------------------------------------
/pandas-stubs/core/groupby/ops.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Callable,
3 | Hashable,
4 | Iterator,
5 | )
6 | from typing import (
7 | Generic,
8 | final,
9 | )
10 |
11 | import numpy as np
12 | from pandas import (
13 | Index,
14 | Series,
15 | )
16 | from pandas.core.groupby import grouper
17 |
18 | from pandas._typing import (
19 | ArrayLike,
20 | AxisInt,
21 | Incomplete,
22 | NDFrameT,
23 | Shape,
24 | T,
25 | npt,
26 | )
27 | from pandas.util._decorators import cache_readonly
28 |
29 | class BaseGrouper:  # internal engine mapping rows to groups and driving per-group computation
30 |     axis: Index  # the axis (index) being grouped
31 |     dropna: bool
32 |     @property
33 |     def groupings(self) -> list[grouper.Grouping]: ...  # one Grouping per grouping key
34 |     @property
35 |     def shape(self) -> Shape: ...
36 |     def __iter__(self) -> Iterator: ...
37 |     @property
38 |     def nkeys(self) -> int: ...
39 |     def get_iterator(
40 |         self, data: NDFrameT, axis: AxisInt = ...
41 |     ) -> Iterator[tuple[Hashable, NDFrameT]]: ...  # yields (group key, group slice) pairs
42 |     @final
43 |     @cache_readonly
44 |     def group_keys_seq(self): ...
45 |     @cache_readonly
46 |     def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: ...  # group label -> positional indices
47 |     @final
48 |     def result_ilocs(self) -> npt.NDArray[np.intp]: ...
49 |     @final
50 |     @property
51 |     def codes(self) -> list[npt.NDArray[np.signedinteger]]: ...  # per-grouping code arrays
52 |     @property
53 |     def levels(self) -> list[Index]: ...
54 |     @property
55 |     def names(self) -> list: ...
56 |     @final
57 |     def size(self) -> Series: ...  # group sizes, indexed by group key
58 |     @cache_readonly
59 |     def groups(self) -> dict[Hashable, np.ndarray]: ...
60 |     @final
61 |     @cache_readonly
62 |     def is_monotonic(self) -> bool: ...
63 |     @final
64 |     @cache_readonly
65 |     def has_dropped_na(self) -> bool: ...
66 |     @cache_readonly
67 |     def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: ...  # (codes, obs group ids, ngroups)
68 |     @cache_readonly
69 |     def codes_info(self) -> npt.NDArray[np.intp]: ...
70 |     @final
71 |     @cache_readonly
72 |     def ngroups(self) -> int: ...
73 |     @property
74 |     def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]: ...
75 |     @cache_readonly
76 |     def result_index(self) -> Index: ...
77 |     @final
78 |     def get_group_levels(self) -> list[ArrayLike]: ...
79 |     @final
80 |     def agg_series(
81 |         self,
82 |         obj: Series,
83 |         func: Callable[[Series], object],
84 |         preserve_dtype: bool = ...,
85 |     ) -> ArrayLike: ...  # apply func to each group of obj and collect results
86 |     @final
87 |     def apply_groupwise(
88 |         self, f: Callable[[NDFrameT], T], data: NDFrameT, axis: AxisInt = ...
89 |     ) -> tuple[list[T], bool]: ...  # second element flags whether the groups were mutated
90 |
91 | class BinGrouper(BaseGrouper):  # grouper over pre-computed bin edges (used by resample)
92 |     bins: npt.NDArray[np.int64]  # right-edge positions of each bin
93 |     binlabels: Index
94 |     indexer: npt.NDArray[np.intp]
95 |     @cache_readonly
96 |     def indices(self) -> dict[Incomplete, list[int]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
97 |
98 | class DataSplitter(Generic[NDFrameT]):  # iterates a frame/series in per-group chunks
99 |     data: NDFrameT
100 |     labels: npt.NDArray[np.intp]  # per-row group labels
101 |     ngroups: int
102 |     axis: AxisInt
103 |     def __iter__(self) -> Iterator[NDFrameT]: ...  # yields one object of the same type per group
104 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexers.pyi:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def is_list_like_indexer(key) -> bool: ...
4 | def is_scalar_indexer(indexer, arr_value) -> bool: ...
5 | def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool: ...
6 | def check_setitem_lengths(indexer, value, values) -> None: ...
7 | def validate_indices(indices: np.ndarray, n: int) -> None: ...
8 | def maybe_convert_indices(indices, n: int): ...
9 | def length_of_indexer(indexer, target=...) -> int: ...
10 | def deprecate_ndim_indexing(result) -> None: ...
11 | def check_array_indexer(arrayArrayLike, indexer): ...
12 |
13 | class BaseIndexer:  # public base class for custom rolling-window bounds (pandas.api.indexers)
14 |     def __init__(
15 |         self,
16 |         index_array: np.ndarray | None = ...,
17 |         window_size: int = ...,
18 |         **kwargs,
19 |     ) -> None: ...
20 |     def get_window_bounds(
21 |         self,
22 |         num_values: int = ...,
23 |         min_periods: int | None = ...,
24 |         center: bool | None = ...,
25 |         closed: str | None = ...,
26 |     ) -> tuple[np.ndarray, np.ndarray]: ...  # returns (start, end) index arrays, one entry per value
27 |
28 | class VariableOffsetWindowIndexer(BaseIndexer):  # window bounds defined by a date offset over a non-fixed index
29 |     def __init__(
30 |         self,
31 |         index_array: np.ndarray | None = ...,
32 |         window_size: int = ...,
33 |         index=...,  # presumably a DatetimeIndex — untyped in this stub
34 |         offset=...,  # presumably a DateOffset — untyped in this stub
35 |         **kwargs,
36 |     ) -> None: ...
37 |     def get_window_bounds(
38 |         self,
39 |         num_values: int = ...,
40 |         min_periods: int | None = ...,
41 |         center: bool | None = ...,
42 |         closed: str | None = ...,
43 |     ) -> tuple[np.ndarray, np.ndarray]: ...
44 |
45 | class FixedForwardWindowIndexer(BaseIndexer):  # fixed-length windows that look forward from each row
46 |     def get_window_bounds(
47 |         self,
48 |         num_values: int = ...,
49 |         min_periods: int | None = ...,
50 |         center: bool | None = ...,
51 |         closed: str | None = ...,
52 |     ) -> tuple[np.ndarray, np.ndarray]: ...
53 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexes/__init__.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/core/indexes/__init__.pyi
--------------------------------------------------------------------------------
/pandas-stubs/core/indexes/api.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.indexes.base import Index as Index
2 | from pandas.core.indexes.category import CategoricalIndex as CategoricalIndex
3 | from pandas.core.indexes.datetimes import DatetimeIndex as DatetimeIndex
4 | from pandas.core.indexes.interval import IntervalIndex as IntervalIndex
5 | from pandas.core.indexes.multi import MultiIndex as MultiIndex
6 | from pandas.core.indexes.period import PeriodIndex as PeriodIndex
7 | from pandas.core.indexes.range import RangeIndex as RangeIndex
8 | from pandas.core.indexes.timedeltas import TimedeltaIndex as TimedeltaIndex
9 |
10 | def get_objs_combined_axis(  # combined (union or intersection) axis across the given objects
11 |     objs, intersect: bool = ..., axis=..., sort: bool = ...
12 | ) -> Index: ...
13 | def union_indexes(indexes, sort=...) -> Index: ...
14 | def all_indexes_same(indexes): ...  # truthiness indicates all indexes are equal; exact return untyped here
15 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexes/category.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Hashable,
3 | Iterable,
4 | )
5 | from typing import Literal
6 |
7 | import numpy as np
8 | from pandas.core import accessor
9 | from pandas.core.indexes.base import Index
10 | from pandas.core.indexes.extension import ExtensionIndex
11 | from typing_extensions import Self
12 |
13 | from pandas._typing import (
14 | S1,
15 | DtypeArg,
16 | )
17 |
18 | class CategoricalIndex(ExtensionIndex[S1], accessor.PandasDelegate):  # Index backed by Categorical data
19 |     codes: np.ndarray = ...  # integer code per element, indexing into categories
20 |     categories: Index = ...
21 |     def __new__(
22 |         cls,
23 |         data: Iterable[S1] = ...,
24 |         categories=...,
25 |         ordered=...,
26 |         dtype=...,
27 |         copy: bool = ...,
28 |         name: Hashable = ...,
29 |     ) -> Self: ...
30 |     def equals(self, other): ...
31 |     @property
32 |     def inferred_type(self) -> str: ...
33 |     @property
34 |     def values(self): ...
35 |     def __contains__(self, key) -> bool: ...
36 |     def __array__(self, dtype=...) -> np.ndarray: ...
37 |     def astype(self, dtype: DtypeArg, copy: bool = ...) -> Index: ...
38 |     def fillna(self, value=...): ...
39 |     @property
40 |     def is_unique(self) -> bool: ...
41 |     @property
42 |     def is_monotonic_increasing(self) -> bool: ...
43 |     @property
44 |     def is_monotonic_decreasing(self) -> bool: ...
45 |     def unique(self, level=...): ...
46 |     def duplicated(self, keep: Literal["first", "last", False] = ...): ...  # keep=False marks all duplicates
47 |     def where(self, cond, other=...): ...
48 |     def reindex(self, target, method=..., level=..., limit=..., tolerance=...): ...
49 |     def get_indexer(self, target, method=..., limit=..., tolerance=...): ...
50 |     def get_indexer_non_unique(self, target): ...
51 |     def delete(self, loc): ...
52 |     def insert(self, loc, item): ...
53 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexes/datetimelike.pyi:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pandas.core.indexes.extension import ExtensionIndex
3 | from pandas.core.indexes.timedeltas import TimedeltaIndex
4 | from typing_extensions import Self
5 |
6 | from pandas._libs.tslibs import BaseOffset
7 | from pandas._typing import (
8 | S1,
9 | AxisIndex,
10 | TimeUnit,
11 | )
12 |
13 | class DatetimeIndexOpsMixin(ExtensionIndex[S1]):  # shared API for Datetime/Timedelta/Period indexes
14 |     @property
15 |     def freq(self) -> BaseOffset | None: ...  # None when no frequency is set/inferred
16 |     @property
17 |     def freqstr(self) -> str | None: ...
18 |     @property
19 |     def is_all_dates(self) -> bool: ...
20 |     def min(
21 |         self, axis: AxisIndex | None = ..., skipna: bool = ..., *args, **kwargs
22 |     ) -> S1: ...  # returns the element type (e.g. Timestamp), not an index
23 |     def argmin(
24 |         self, axis: AxisIndex | None = ..., skipna: bool = ..., *args, **kwargs
25 |     ) -> np.int64: ...
26 |     def max(
27 |         self, axis: AxisIndex | None = ..., skipna: bool = ..., *args, **kwargs
28 |     ) -> S1: ...
29 |     def argmax(
30 |         self, axis: AxisIndex | None = ..., skipna: bool = ..., *args, **kwargs
31 |     ) -> np.int64: ...
32 |     def __rsub__(  # type: ignore[override]
33 |         self, other: DatetimeIndexOpsMixin
34 |     ) -> TimedeltaIndex: ...  # datetime-like minus datetime-like yields timedeltas
35 |
36 | class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin[S1]):  # adds time-unit handling shared by datetime/timedelta indexes
37 |     @property
38 |     def unit(self) -> TimeUnit: ...  # stored resolution, e.g. "ns"
39 |     def as_unit(self, unit: TimeUnit) -> Self: ...  # returns a new index converted to the given resolution
40 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexes/datetimes.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Hashable,
3 | Sequence,
4 | )
5 | from datetime import (
6 | datetime,
7 | timedelta,
8 | tzinfo as _tzinfo,
9 | )
10 | from typing import overload
11 |
12 | import numpy as np
13 | from pandas import (
14 | DataFrame,
15 | Index,
16 | Timedelta,
17 | TimedeltaIndex,
18 | Timestamp,
19 | )
20 | from pandas.core.indexes.accessors import DatetimeIndexProperties
21 | from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
22 | from pandas.core.series import (
23 | TimedeltaSeries,
24 | TimestampSeries,
25 | )
26 | from typing_extensions import Self
27 |
28 | from pandas._typing import (
29 | AxesData,
30 | DateAndDatetimeLike,
31 | Dtype,
32 | Frequency,
33 | IntervalClosedType,
34 | TimeUnit,
35 | TimeZones,
36 | )
37 |
38 | from pandas.core.dtypes.dtypes import DatetimeTZDtype
39 |
40 | from pandas.tseries.offsets import BaseOffset
41 |
class DatetimeIndex(DatetimeTimedeltaMixin[Timestamp], DatetimeIndexProperties):
    """Stub for an index of ``Timestamp`` values, optionally timezone-aware."""

    def __init__(
        self,
        data: AxesData,
        freq: Frequency = ...,
        tz: TimeZones = ...,
        ambiguous: str = ...,
        dayfirst: bool = ...,
        yearfirst: bool = ...,
        dtype: Dtype = ...,
        copy: bool = ...,
        name: Hashable = ...,
    ) -> None: ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    def __reduce__(self): ...
    # various ignores needed for mypy, as we do want to restrict what can be used in
    # arithmetic for these types
    @overload
    def __add__(self, other: TimedeltaSeries) -> TimestampSeries: ...
    @overload
    def __add__(
        self, other: timedelta | Timedelta | TimedeltaIndex | BaseOffset
    ) -> DatetimeIndex: ...
    @overload
    def __sub__(self, other: TimedeltaSeries) -> TimestampSeries: ...
    @overload
    def __sub__(
        self, other: timedelta | Timedelta | TimedeltaIndex | BaseOffset
    ) -> DatetimeIndex: ...
    # datetime - datetime yields timedeltas
    @overload
    def __sub__(
        self, other: datetime | Timestamp | DatetimeIndex
    ) -> TimedeltaIndex: ...
    def to_series(self, index=..., name: Hashable = ...) -> TimestampSeries: ...
    def snap(self, freq: str = ...): ...
    def slice_indexer(self, start=..., end=..., step=...): ...
    def searchsorted(self, value, side: str = ..., sorter=...): ...
    @property
    def inferred_type(self) -> str: ...
    def indexer_at_time(self, time, asof: bool = ...): ...
    def indexer_between_time(
        self, start_time, end_time, include_start: bool = ..., include_end: bool = ...
    ): ...
    def to_julian_date(self) -> Index[float]: ...
    def isocalendar(self) -> DataFrame: ...
    @property
    def tzinfo(self) -> _tzinfo | None: ...  # None when timezone-naive
    @property
    def dtype(self) -> np.dtype | DatetimeTZDtype: ...  # DatetimeTZDtype when tz-aware
    def shift(self, periods: int = ..., freq=...) -> Self: ...
92 |
# Return a fixed-frequency DatetimeIndex between ``start`` and ``end`` (or with
# ``periods`` steps); exactly the usual pandas ``date_range`` signature.
def date_range(
    start: str | DateAndDatetimeLike | None = ...,
    end: str | DateAndDatetimeLike | None = ...,
    periods: int | None = ...,
    freq: str | timedelta | Timedelta | BaseOffset = ...,
    tz: TimeZones = ...,
    normalize: bool = ...,
    name: Hashable | None = ...,
    inclusive: IntervalClosedType = ...,
    unit: TimeUnit | None = ...,
) -> DatetimeIndex: ...
# Business-day variant of ``date_range``.  The second overload makes ``freq``
# required (keyword-only) whenever custom ``holidays`` are supplied.
@overload
def bdate_range(
    start: str | DateAndDatetimeLike | None = ...,
    end: str | DateAndDatetimeLike | None = ...,
    periods: int | None = ...,
    freq: str | timedelta | Timedelta | BaseOffset = ...,
    tz: TimeZones = ...,
    normalize: bool = ...,
    name: Hashable | None = ...,
    weekmask: str | None = ...,
    holidays: None = ...,
    inclusive: IntervalClosedType = ...,
) -> DatetimeIndex: ...
@overload
def bdate_range(
    start: str | DateAndDatetimeLike | None = ...,
    end: str | DateAndDatetimeLike | None = ...,
    periods: int | None = ...,
    *,
    freq: str | timedelta | Timedelta | BaseOffset,
    tz: TimeZones = ...,
    normalize: bool = ...,
    name: Hashable | None = ...,
    weekmask: str | None = ...,
    holidays: Sequence[str | DateAndDatetimeLike],
    inclusive: IntervalClosedType = ...,
) -> DatetimeIndex: ...
131 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexes/extension.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.indexes.base import Index
2 |
3 | from pandas._typing import S1
4 |
5 | class ExtensionIndex(Index[S1]): ...
6 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexes/frozen.pyi:
--------------------------------------------------------------------------------
class FrozenList(list):
    """List subclass with set-like ``union``/``difference`` helpers that is
    hashable (``__hash__`` is defined despite also defining ``__eq__``)."""

    def union(self, other) -> FrozenList: ...
    def difference(self, other) -> FrozenList: ...
    def __getitem__(self, n): ...
    def __radd__(self, other): ...
    def __eq__(self, other) -> bool: ...
    def __mul__(self, other): ...
    def __reduce__(self): ...
    def __hash__(self) -> int: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride]
10 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexes/period.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Hashable
2 | import datetime
3 | from typing import overload
4 |
5 | import numpy as np
6 | import pandas as pd
7 | from pandas import Index
8 | from pandas.core.indexes.accessors import PeriodIndexFieldOps
9 | from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
10 | from pandas.core.indexes.timedeltas import TimedeltaIndex
11 | from typing_extensions import Self
12 |
13 | from pandas._libs.tslibs import (
14 | BaseOffset,
15 | NaTType,
16 | Period,
17 | )
18 | from pandas._libs.tslibs.period import _PeriodAddSub
19 |
class PeriodIndex(DatetimeIndexOpsMixin[pd.Period], PeriodIndexFieldOps):
    """Stub for an index of ``Period`` values (regular time spans)."""

    def __new__(
        cls,
        data=...,
        ordinal=...,
        freq=...,
        tz=...,
        dtype=...,
        copy: bool = ...,
        name: Hashable = ...,
        **fields,
    ): ...
    @property
    def values(self): ...
    def __contains__(self, key) -> bool: ...
    # Period-like minus Period/PeriodIndex -> offset Index; minus an
    # offset/timedelta-like shifts the index; NaT propagates.
    @overload
    def __sub__(self, other: Period) -> Index: ...
    @overload
    def __sub__(self, other: Self) -> Index: ...
    @overload
    def __sub__(self, other: _PeriodAddSub) -> Self: ...
    @overload
    def __sub__(self, other: NaTType) -> NaTType: ...
    @overload
    def __sub__(self, other: TimedeltaIndex | pd.Timedelta) -> Self: ...
    @overload # type: ignore[override]
    def __rsub__(self, other: Period) -> Index: ...
    @overload
    def __rsub__(self, other: Self) -> Index: ...
    @overload
    def __rsub__( # pyright: ignore[reportIncompatibleMethodOverride]
        self, other: NaTType
    ) -> NaTType: ...
    def __array__(self, dtype=...) -> np.ndarray: ...
    def __array_wrap__(self, result, context=...): ...
    def asof_locs(self, where, mask): ...
    def astype(self, dtype, copy: bool = ...): ...
    def searchsorted(self, value, side: str = ..., sorter=...): ...
    @property
    def is_full(self) -> bool: ...
    @property
    def inferred_type(self) -> str: ...
    def get_indexer(self, target, method=..., limit=..., tolerance=...): ...
    def get_indexer_non_unique(self, target): ...
    def insert(self, loc, item): ...
    def join(
        self,
        other,
        *,
        how: str = ...,
        level=...,
        return_indexers: bool = ...,
        sort: bool = ...,
    ): ...
    def difference(self, other, sort=...): ...
    def memory_usage(self, deep: bool = ...): ...
    @property
    def freqstr(self) -> str: ...  # narrower than the mixin's ``str | None``
    def shift(self, periods: int = ..., freq=...) -> Self: ...
79 |
# Return a fixed-frequency PeriodIndex between ``start`` and ``end`` (or with
# ``periods`` steps).
def period_range(
    start: (
        str | datetime.datetime | datetime.date | pd.Timestamp | pd.Period | None
    ) = ...,
    end: (
        str | datetime.datetime | datetime.date | pd.Timestamp | pd.Period | None
    ) = ...,
    periods: int | None = ...,
    freq: str | BaseOffset | None = ...,
    name: Hashable | None = ...,
) -> PeriodIndex: ...
91 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexes/range.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Hashable,
3 | Sequence,
4 | )
5 | from typing import overload
6 |
7 | import numpy as np
8 | from pandas.core.indexes.base import Index
9 |
10 | from pandas._typing import (
11 | HashableT,
12 | MaskType,
13 | np_ndarray_anyint,
14 | npt,
15 | )
16 |
class RangeIndex(Index[int]):
    """Stub for the memory-saving integer index defined by start/stop/step."""

    def __new__(
        cls,
        start: int | RangeIndex | range = ...,
        stop: int = ...,
        step: int = ...,
        dtype=...,
        copy: bool = ...,
        name: Hashable = ...,
    ): ...
    @classmethod
    def from_range(cls, data, name: Hashable = ..., dtype=...): ...
    def __reduce__(self): ...
    @property
    def start(self) -> int: ...
    @property
    def stop(self) -> int: ...
    @property
    def step(self) -> int: ...
    @property
    def nbytes(self) -> int: ...
    def memory_usage(self, deep: bool = ...) -> int: ...
    @property
    def dtype(self) -> np.dtype: ...
    @property
    def is_unique(self) -> bool: ...
    @property
    def is_monotonic_increasing(self) -> bool: ...
    @property
    def is_monotonic_decreasing(self) -> bool: ...
    @property
    def has_duplicates(self) -> bool: ...
    def __contains__(self, key: int | np.integer) -> bool: ...
    def get_indexer(self, target, method=..., limit=..., tolerance=...): ...
    def tolist(self): ...
    def copy(self, name: Hashable = ..., deep: bool = ..., dtype=..., **kwargs): ...
    def min(self, axis=..., skipna: bool = ..., *args, **kwargs): ...
    def max(self, axis=..., skipna: bool = ..., *args, **kwargs): ...
    def argsort(self, *args, **kwargs): ...
    def factorize(
        self, sort: bool = ..., use_na_sentinel: bool = ...
    ) -> tuple[npt.NDArray[np.intp], RangeIndex]: ...
    def equals(self, other): ...
    def join(
        self,
        other,
        *,
        how: str = ...,
        level=...,
        return_indexers: bool = ...,
        sort: bool = ...,
    ): ...
    def __len__(self) -> int: ...
    @property
    def size(self) -> int: ...
    def __floordiv__(self, other): ...
    def all(self) -> bool: ...
    def any(self) -> bool: ...
    def union(
        self, other: list[HashableT] | Index, sort=...
    ) -> Index | Index[int] | RangeIndex: ...
    # Scalar position -> int element; any slice/array/mask selection -> Index.
    @overload # type: ignore[override]
    def __getitem__(
        self,
        idx: slice | np_ndarray_anyint | Sequence[int] | Index | MaskType,
    ) -> Index: ...
    @overload
    def __getitem__( # pyright: ignore[reportIncompatibleMethodOverride]
        self, idx: int
    ) -> int: ...
87 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexes/timedeltas.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Hashable,
3 | Sequence,
4 | )
5 | import datetime as dt
6 | from typing import (
7 | Literal,
8 | overload,
9 | )
10 |
11 | import numpy as np
12 | from pandas import (
13 | DateOffset,
14 | Index,
15 | Period,
16 | )
17 | from pandas.core.indexes.accessors import TimedeltaIndexProperties
18 | from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
19 | from pandas.core.indexes.datetimes import DatetimeIndex
20 | from pandas.core.indexes.period import PeriodIndex
21 | from pandas.core.series import TimedeltaSeries
22 | from typing_extensions import Self
23 |
24 | from pandas._libs import (
25 | Timedelta,
26 | Timestamp,
27 | )
28 | from pandas._libs.tslibs import BaseOffset
29 | from pandas._typing import (
30 | AxesData,
31 | TimedeltaConvertibleTypes,
32 | num,
33 | )
34 |
class TimedeltaIndex(DatetimeTimedeltaMixin[Timedelta], TimedeltaIndexProperties):
    """Stub for an index of ``Timedelta`` values backed by ``timedelta64[ns]`` data."""

    def __new__(
        cls,
        data: (
            Sequence[dt.timedelta | Timedelta | np.timedelta64 | float] | AxesData
        ) = ...,
        freq: str | BaseOffset = ...,
        closed: object = ...,
        # only the little-endian timedelta64[ns] numpy dtype string is accepted
        dtype: Literal["<m8[ns]"] = ...,
        copy: bool = ...,
        name: Hashable = ...,
    ) -> Self: ...
    # various ignores needed for mypy, as we do want to restrict what can be used in
    # arithmetic for these types
    @overload
    def __add__(self, other: Period) -> PeriodIndex: ...
    @overload
    def __add__(self, other: DatetimeIndex) -> DatetimeIndex: ...
    @overload
    def __add__(self, other: dt.timedelta | Timedelta | Self) -> Self: ...
    def __radd__(self, other: dt.datetime | Timestamp | DatetimeIndex) -> DatetimeIndex: ...  # type: ignore[override]
    def __sub__(self, other: dt.timedelta | Timedelta | Self) -> Self: ...
    def __mul__(self, other: num) -> Self: ...
    # timedelta / number scales; timedelta / timedelta yields a float ratio
    @overload  # type: ignore[override]
    def __truediv__(self, other: num | Sequence[float]) -> Self: ...
    @overload
    def __truediv__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, other: dt.timedelta | Sequence[dt.timedelta]
    ) -> Index[float]: ...
    def __rtruediv__(self, other: dt.timedelta | Sequence[dt.timedelta]) -> Index[float]: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    @overload  # type: ignore[override]
    def __floordiv__(self, other: num | Sequence[float]) -> Self: ...
    @overload
    def __floordiv__(  # pyright: ignore[reportIncompatibleMethodOverride]
        self, other: dt.timedelta | Sequence[dt.timedelta]
    ) -> Index[int]: ...
    def __rfloordiv__(self, other: dt.timedelta | Sequence[dt.timedelta]) -> Index[int]: ...  # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]
    def astype(self, dtype, copy: bool = ...): ...
    def searchsorted(self, value, side: str = ..., sorter=...): ...
    @property
    def inferred_type(self) -> str: ...
    def to_series(self, index=..., name: Hashable = ...) -> TimedeltaSeries: ...
    def shift(self, periods: int = ..., freq=...) -> Self: ...
78 |
# Return a fixed-frequency TimedeltaIndex between ``start`` and ``end`` (or with
# ``periods`` steps).
def timedelta_range(
    start: TimedeltaConvertibleTypes = ...,
    end: TimedeltaConvertibleTypes = ...,
    periods: int | None = ...,
    freq: str | DateOffset | Timedelta | dt.timedelta | None = ...,
    name: Hashable | None = ...,
    closed: Literal["left", "right"] | None = ...,
) -> TimedeltaIndex: ...
87 |
--------------------------------------------------------------------------------
/pandas-stubs/core/indexing.pyi:
--------------------------------------------------------------------------------
1 | from typing import TypeVar
2 |
3 | import numpy as np
4 | from pandas.core.indexes.api import Index
5 | from typing_extensions import TypeAlias
6 |
7 | from pandas._libs.indexing import _NDFrameIndexerBase
8 | from pandas._typing import (
9 | MaskType,
10 | Scalar,
11 | ScalarT,
12 | )
13 |
# Tuple of per-level selectors accepted inside ``pd.IndexSlice[...]``.
_IndexSliceTuple: TypeAlias = tuple[
    Index | MaskType | Scalar | list[ScalarT] | slice | tuple[Scalar, ...], ...
]

_IndexSliceUnion: TypeAlias = slice | _IndexSliceTuple

_IndexSliceUnionT = TypeVar("_IndexSliceUnionT", bound=_IndexSliceUnion)

class _IndexSlice:
    # ``__getitem__`` echoes its argument's static type back unchanged.
    def __getitem__(self, arg: _IndexSliceUnionT) -> _IndexSliceUnionT: ...

# Module-level singleton exposed as ``pd.IndexSlice``.
IndexSlice: _IndexSlice
26 |
class IndexingMixin:
    """Provides the four indexer accessors (``iloc``/``loc``/``at``/``iat``)."""

    @property
    def iloc(self) -> _iLocIndexer: ...  # positional indexing
    @property
    def loc(self) -> _LocIndexer: ...  # label-based indexing
    @property
    def at(self) -> _AtIndexer: ...  # label-based scalar access
    @property
    def iat(self) -> _iAtIndexer: ...  # positional scalar access
36 |
class _NDFrameIndexer(_NDFrameIndexerBase):
    """Base for all indexer objects; supports get/set and per-axis calls."""

    axis = ...
    def __call__(self, axis=...): ...
    def __getitem__(self, key): ...
    def __setitem__(self, key, value) -> None: ...

class _LocationIndexer(_NDFrameIndexer):
    def __getitem__(self, key): ...

# Concrete indexer classes returned by the IndexingMixin accessors.
class _LocIndexer(_LocationIndexer): ...
class _iLocIndexer(_LocationIndexer): ...

class _ScalarAccessIndexer(_NDFrameIndexerBase):
    # Scalar-only access (single cell get/set).
    def __getitem__(self, key): ...
    def __setitem__(self, key, value) -> None: ...

class _AtIndexer(_ScalarAccessIndexer): ...
class _iAtIndexer(_ScalarAccessIndexer): ...
55 |
# Internal indexing helper stubs used by the loc/iloc machinery.
def convert_to_index_sliceable(obj, key): ...
def check_bool_indexer(index: Index, key) -> np.ndarray: ...
def convert_missing_indexer(indexer): ...
def convert_from_missing_indexer_tuple(indexer, axes): ...
def maybe_convert_ix(*args): ...
def is_nested_tuple(tup, labels) -> bool: ...
def is_label_like(key) -> bool: ...
def need_slice(obj) -> bool: ...
64 |
--------------------------------------------------------------------------------
/pandas-stubs/core/interchange/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrame
2 | from pandas.core.interchange.from_dataframe import from_dataframe as from_dataframe
3 |
--------------------------------------------------------------------------------
/pandas-stubs/core/interchange/dataframe_protocol.pyi:
--------------------------------------------------------------------------------
1 | import abc
2 | from abc import (
3 | ABC,
4 | abstractmethod,
5 | )
6 | from collections.abc import (
7 | Iterable,
8 | Sequence,
9 | )
10 | import enum
11 | from typing import (
12 | Any,
13 | TypedDict,
14 | cast,
15 | )
16 |
class DlpackDeviceType(enum.IntEnum):
    """DLPack device-type codes (numeric values elided in this stub)."""

    CPU = cast(int, ...)
    CUDA = cast(int, ...)
    CPU_PINNED = cast(int, ...)
    OPENCL = cast(int, ...)
    VULKAN = cast(int, ...)
    METAL = cast(int, ...)
    VPI = cast(int, ...)
    ROCM = cast(int, ...)

class DtypeKind(enum.IntEnum):
    """Kind codes used in the interchange protocol's dtype tuples."""

    INT = cast(int, ...)
    UINT = cast(int, ...)
    FLOAT = cast(int, ...)
    BOOL = cast(int, ...)
    STRING = cast(int, ...)
    DATETIME = cast(int, ...)
    CATEGORICAL = cast(int, ...)

class ColumnNullType(enum.IntEnum):
    """How a column encodes missing values (sentinel, NaN, bit/byte mask, ...)."""

    NON_NULLABLE = cast(int, ...)
    USE_NAN = cast(int, ...)
    USE_SENTINEL = cast(int, ...)
    USE_BITMASK = cast(int, ...)
    USE_BYTEMASK = cast(int, ...)
42 |
class ColumnBuffers(TypedDict):
    """Buffers backing one interchange column: data plus optional validity/offsets."""

    data: tuple[Buffer, Any]
    validity: tuple[Buffer, Any] | None
    offsets: tuple[Buffer, Any] | None

class CategoricalDescription(TypedDict):
    """Ordering/dictionary flags and categories of a categorical column."""

    is_ordered: bool
    is_dictionary: bool
    categories: Column | None
52 |
class Buffer(ABC, metaclass=abc.ABCMeta):
    """Abstract contiguous memory block in the interchange protocol,
    exchangeable via the DLPack dunder methods."""

    @property
    @abstractmethod
    def bufsize(self) -> int: ...  # size in bytes
    @property
    @abstractmethod
    def ptr(self) -> int: ...  # raw pointer address as an integer
    @abstractmethod
    def __dlpack__(self): ...
    @abstractmethod
    def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: ...
64 |
class Column(ABC, metaclass=abc.ABCMeta):
    """Abstract column in the dataframe interchange protocol, consumable in
    chunks and exposing its raw buffers."""

    @property
    @abstractmethod
    def size(self) -> int: ...
    @property
    @abstractmethod
    def offset(self) -> int: ...  # offset of the first element into the buffers
    @property
    @abstractmethod
    def dtype(self) -> tuple[DtypeKind, int, str, str]: ...  # (kind, bit-width, format str, endianness)
    @property
    @abstractmethod
    def describe_categorical(self) -> CategoricalDescription: ...
    @property
    @abstractmethod
    def describe_null(self) -> tuple[ColumnNullType, Any]: ...  # (null kind, sentinel/mask info)
    @property
    @abstractmethod
    def null_count(self) -> int | None: ...  # None when unknown
    @property
    @abstractmethod
    def metadata(self) -> dict[str, Any]: ...
    @abstractmethod
    def num_chunks(self) -> int: ...
    @abstractmethod
    def get_chunks(self, n_chunks: int | None = ...) -> Iterable[Column]: ...
    @abstractmethod
    def get_buffers(self) -> ColumnBuffers: ...
93 |
class DataFrame(ABC, metaclass=abc.ABCMeta):
    """Abstract dataframe in the interchange protocol: a collection of named
    columns, addressable by index or name and consumable in chunks."""

    version: int  # protocol version implemented
    @abstractmethod
    def __dataframe__(self, nan_as_null: bool = ..., allow_copy: bool = ...): ...
    @property
    @abstractmethod
    def metadata(self) -> dict[str, Any]: ...
    @abstractmethod
    def num_columns(self) -> int: ...
    @abstractmethod
    def num_rows(self) -> int | None: ...  # None when unknown
    @abstractmethod
    def num_chunks(self) -> int: ...
    @abstractmethod
    def column_names(self) -> Iterable[str]: ...
    @abstractmethod
    def get_column(self, i: int) -> Column: ...
    @abstractmethod
    def get_column_by_name(self, name: str) -> Column: ...
    @abstractmethod
    def get_columns(self) -> Iterable[Column]: ...
    @abstractmethod
    def select_columns(self, indices: Sequence[int]) -> DataFrame: ...
    @abstractmethod
    def select_columns_by_name(self, names: Sequence[str]) -> DataFrame: ...
    @abstractmethod
    def get_chunks(self, n_chunks: int | None = ...) -> Iterable[DataFrame]: ...
121 |
--------------------------------------------------------------------------------
/pandas-stubs/core/interchange/from_dataframe.pyi:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | def from_dataframe(df, allow_copy: bool = ...) -> pd.DataFrame: ...
4 |
--------------------------------------------------------------------------------
/pandas-stubs/core/missing.pyi:
--------------------------------------------------------------------------------
# Stubs for pandas' internal missing-value / interpolation helpers.
def mask_missing(arr, values_to_mask): ...
def clean_fill_method(method, allow_nearest: bool = ...): ...
def clean_interp_method(method, **kwargs): ...
def interpolate_2d(
    values, method: str = ..., axis: int = ..., limit=..., fill_value=..., dtype=...
): ...
def get_fill_func(method): ...
def clean_reindex_fill_method(method): ...
9 |
--------------------------------------------------------------------------------
/pandas-stubs/core/ops/__init__.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
# Names of the arithmetic and comparison binary ops pandas wires onto
# Series/DataFrame (contents elided in this stub).
ARITHMETIC_BINOPS: set[str] = ...
COMPARISON_BINOPS: set[str] = ...

def get_op_result_name(left: Any, right: Any): ...
def fill_binop(left: Any, right: Any, fill_value: Any): ...
8 |
--------------------------------------------------------------------------------
/pandas-stubs/core/ops/array_ops.pyi:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from pandas.core.dtypes.generic import ABCExtensionArray as ABCExtensionArray
4 |
# Low-level dispatchers for array-level arithmetic/comparison/logical ops.
def arithmetic_op(left: np.ndarray | ABCExtensionArray, right, op, str_rep: str): ...
def comparison_op(
    left: np.ndarray | ABCExtensionArray, right, op
) -> np.ndarray | ABCExtensionArray: ...
def na_logical_op(x: np.ndarray, y, op): ...
def logical_op(
    left: np.ndarray | ABCExtensionArray, right, op
) -> np.ndarray | ABCExtensionArray: ...
def get_array_op(op, str_rep: str | None = ...): ...
14 |
--------------------------------------------------------------------------------
/pandas-stubs/core/ops/common.pyi:
--------------------------------------------------------------------------------
1 | def unpack_zerodim_and_defer(name: str): ...
2 |
--------------------------------------------------------------------------------
/pandas-stubs/core/ops/dispatch.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.dtypes.generic import ABCSeries
2 |
3 | def should_extension_dispatch(left: ABCSeries, right) -> bool: ...
4 |
--------------------------------------------------------------------------------
/pandas-stubs/core/ops/docstrings.pyi:
--------------------------------------------------------------------------------
1 | reverse_op = ...
2 |
--------------------------------------------------------------------------------
/pandas-stubs/core/ops/invalid.pyi:
--------------------------------------------------------------------------------
# Helpers for ops that are defined to be invalid between certain types.
def invalid_comparison(left, right, op): ...
def make_invalid_op(name: str): ...
3 |
--------------------------------------------------------------------------------
/pandas-stubs/core/ops/mask_ops.pyi:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from pandas._libs import missing as libmissing
4 |
# Kleene (three-valued) logic for masked boolean arrays: each op takes the
# values and their NA masks; kleene_and additionally accepts pd.NA scalars.
def kleene_or(
    left: bool | np.ndarray,
    right: bool | np.ndarray,
    left_mask: np.ndarray | None,
    right_mask: np.ndarray | None,
): ...
def kleene_xor(
    left: bool | np.ndarray,
    right: bool | np.ndarray,
    left_mask: np.ndarray | None,
    right_mask: np.ndarray | None,
): ...
def kleene_and(
    left: bool | libmissing.NAType | np.ndarray,
    right: bool | libmissing.NAType | np.ndarray,
    left_mask: np.ndarray | None,
    right_mask: np.ndarray | None,
): ...
def raise_for_nan(value, method) -> None: ...
24 |
--------------------------------------------------------------------------------
/pandas-stubs/core/reshape/__init__.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/core/reshape/__init__.pyi
--------------------------------------------------------------------------------
/pandas-stubs/core/reshape/api.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.reshape.concat import concat as concat
2 | from pandas.core.reshape.encoding import (
3 | from_dummies as from_dummies,
4 | get_dummies as get_dummies,
5 | )
6 | from pandas.core.reshape.melt import (
7 | lreshape as lreshape,
8 | melt as melt,
9 | wide_to_long as wide_to_long,
10 | )
11 | from pandas.core.reshape.merge import (
12 | merge as merge,
13 | merge_asof as merge_asof,
14 | merge_ordered as merge_ordered,
15 | )
16 | from pandas.core.reshape.pivot import (
17 | crosstab as crosstab,
18 | pivot as pivot,
19 | pivot_table as pivot_table,
20 | )
21 | from pandas.core.reshape.tile import (
22 | cut as cut,
23 | qcut as qcut,
24 | )
25 |
--------------------------------------------------------------------------------
/pandas-stubs/core/reshape/encoding.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Hashable,
3 | Iterable,
4 | )
5 |
6 | from pandas import DataFrame
7 |
8 | from pandas._typing import (
9 | AnyArrayLike,
10 | Dtype,
11 | HashableT1,
12 | HashableT2,
13 | )
14 |
# One-hot encode categorical data into indicator columns.
def get_dummies(
    data: AnyArrayLike | DataFrame,
    prefix: str | Iterable[str] | dict[HashableT1, str] | None = ...,
    prefix_sep: str = ...,
    dummy_na: bool = ...,
    columns: list[HashableT2] | None = ...,
    sparse: bool = ...,
    drop_first: bool = ...,
    dtype: Dtype | None = ...,
) -> DataFrame: ...

# Inverse of ``get_dummies``: collapse indicator columns back to categories.
def from_dummies(
    data: DataFrame,
    sep: str | None = ...,
    default_category: Hashable | dict[str, Hashable] | None = ...,
) -> DataFrame: ...
30 |
--------------------------------------------------------------------------------
/pandas-stubs/core/reshape/melt.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Hashable
2 |
3 | import numpy as np
4 | from pandas.core.frame import DataFrame
5 |
6 | from pandas._typing import HashableT
7 |
# Unpivot a DataFrame from wide to long format.
def melt(
    frame: DataFrame,
    id_vars: tuple | list | np.ndarray | None = ...,
    value_vars: tuple | list | np.ndarray | None = ...,
    var_name: str | None = ...,
    value_name: Hashable = ...,
    col_level: int | str | None = ...,
    ignore_index: bool = ...,
) -> DataFrame: ...

# Reshape wide data given an explicit {target column -> source columns} mapping.
def lreshape(
    data: DataFrame, groups: dict[HashableT, list[HashableT]], dropna: bool = ...
) -> DataFrame: ...

# Unpivot stub-named wide columns (e.g. A1970, A1980) into long format.
def wide_to_long(
    df: DataFrame,
    stubnames: str | list[str],
    i: str | list[str],
    j: str,
    sep: str = ...,
    suffix: str = ...,
) -> DataFrame: ...
28 |
--------------------------------------------------------------------------------
/pandas-stubs/core/reshape/merge.pyi:
--------------------------------------------------------------------------------
1 | from datetime import timedelta
2 | from typing import (
3 | Literal,
4 | overload,
5 | )
6 |
7 | from pandas import (
8 | DataFrame,
9 | Series,
10 | Timedelta,
11 | )
12 |
13 | from pandas._typing import (
14 | AnyArrayLike,
15 | HashableT,
16 | JoinHow,
17 | Label,
18 | MergeHow,
19 | Suffixes,
20 | ValidationOptions,
21 | )
22 |
# Database-style join of two frames/series on columns or indexes.
def merge(
    left: DataFrame | Series,
    right: DataFrame | Series,
    how: MergeHow = ...,
    on: Label | list[HashableT] | AnyArrayLike | None = ...,
    left_on: Label | list[HashableT] | AnyArrayLike | None = ...,
    right_on: Label | list[HashableT] | AnyArrayLike | None = ...,
    left_index: bool = ...,
    right_index: bool = ...,
    sort: bool = ...,
    suffixes: Suffixes = ...,
    indicator: bool | str = ...,
    validate: ValidationOptions = ...,
) -> DataFrame: ...

# Ordered merge with optional group-wise fill.  The overloads restrict
# ``left_by``/``right_by`` to None whenever either operand is a Series.
@overload
def merge_ordered(
    left: DataFrame,
    right: DataFrame,
    on: Label | list[HashableT] | None = ...,
    left_on: Label | list[HashableT] | None = ...,
    right_on: Label | list[HashableT] | None = ...,
    left_by: Label | list[HashableT] | None = ...,
    right_by: Label | list[HashableT] | None = ...,
    fill_method: Literal["ffill"] | None = ...,
    suffixes: Suffixes = ...,
    how: JoinHow = ...,
) -> DataFrame: ...
@overload
def merge_ordered(
    left: Series,
    right: DataFrame | Series,
    on: Label | list[HashableT] | None = ...,
    left_on: Label | list[HashableT] | None = ...,
    right_on: Label | list[HashableT] | None = ...,
    left_by: None = ...,
    right_by: None = ...,
    fill_method: Literal["ffill"] | None = ...,
    suffixes: (
        list[str | None] | tuple[str, str] | tuple[None, str] | tuple[str, None]
    ) = ...,
    how: JoinHow = ...,
) -> DataFrame: ...
@overload
def merge_ordered(
    left: DataFrame | Series,
    right: Series,
    on: Label | list[HashableT] | None = ...,
    left_on: Label | list[HashableT] | None = ...,
    right_on: Label | list[HashableT] | None = ...,
    left_by: None = ...,
    right_by: None = ...,
    fill_method: Literal["ffill"] | None = ...,
    suffixes: Suffixes = ...,
    how: JoinHow = ...,
) -> DataFrame: ...

# As-of merge: match each left row to the nearest right key within ``tolerance``.
def merge_asof(
    left: DataFrame | Series,
    right: DataFrame | Series,
    on: Label | None = ...,
    left_on: Label | None = ...,
    right_on: Label | None = ...,
    left_index: bool = ...,
    right_index: bool = ...,
    by: Label | list[HashableT] | None = ...,
    left_by: Label | list[HashableT] | None = ...,
    right_by: Label | list[HashableT] | None = ...,
    suffixes: Suffixes = ...,
    tolerance: int | timedelta | Timedelta | None = ...,
    allow_exact_matches: bool = ...,
    direction: Literal["backward", "forward", "nearest"] = ...,
) -> DataFrame: ...
94 |
--------------------------------------------------------------------------------
/pandas-stubs/core/reshape/util.pyi:
--------------------------------------------------------------------------------
1 | def cartesian_product(X): ...
2 |
--------------------------------------------------------------------------------
/pandas-stubs/core/sparse/__init__.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/core/sparse/__init__.pyi
--------------------------------------------------------------------------------
/pandas-stubs/core/tools/__init__.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/core/tools/__init__.pyi
--------------------------------------------------------------------------------
/pandas-stubs/core/tools/datetimes.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Sequence
2 | from datetime import (
3 | date,
4 | datetime,
5 | )
6 | from typing import (
7 | Literal,
8 | TypedDict,
9 | overload,
10 | )
11 |
12 | import numpy as np
13 | from pandas import (
14 | Index,
15 | Timestamp,
16 | )
17 | from pandas.core.arrays import ExtensionArray
18 | from pandas.core.indexes.datetimes import DatetimeIndex
19 | from pandas.core.series import (
20 | Series,
21 | TimestampSeries,
22 | )
23 | from typing_extensions import TypeAlias
24 |
25 | from pandas._libs.tslibs import NaTType
26 | from pandas._typing import (
27 | AnyArrayLike,
28 | DictConvertible,
29 | IgnoreRaise,
30 | RaiseCoerce,
31 | TimestampConvertibleTypes,
32 | npt,
33 | )
34 |
35 | ArrayConvertible: TypeAlias = list | tuple | AnyArrayLike
36 | Scalar: TypeAlias = float | str
37 | DatetimeScalar: TypeAlias = Scalar | datetime | np.datetime64 | date
38 |
39 | DatetimeScalarOrArrayConvertible: TypeAlias = DatetimeScalar | ArrayConvertible
40 |
41 | DatetimeDictArg: TypeAlias = list[Scalar] | tuple[Scalar, ...] | AnyArrayLike
42 |
43 | class YearMonthDayDict(TypedDict, total=True):
44 | year: DatetimeDictArg
45 | month: DatetimeDictArg
46 | day: DatetimeDictArg
47 |
48 | class FulldatetimeDict(YearMonthDayDict, total=False):
49 | hour: DatetimeDictArg
50 | hours: DatetimeDictArg
51 | minute: DatetimeDictArg
52 | minutes: DatetimeDictArg
53 | second: DatetimeDictArg
54 | seconds: DatetimeDictArg
55 | ms: DatetimeDictArg
56 | us: DatetimeDictArg
57 | ns: DatetimeDictArg
58 |
59 | @overload
60 | def to_datetime(
61 | arg: DatetimeScalar,
62 | errors: IgnoreRaise = ...,
63 | dayfirst: bool = ...,
64 | yearfirst: bool = ...,
65 | utc: bool = ...,
66 | format: str | None = ...,
67 | exact: bool = ...,
68 | unit: str | None = ...,
69 | origin: Literal["julian", "unix"] | TimestampConvertibleTypes = ...,
70 | cache: bool = ...,
71 | ) -> Timestamp: ...
72 | @overload
73 | def to_datetime(
74 | arg: DatetimeScalar,
75 | errors: Literal["coerce"],
76 | dayfirst: bool = ...,
77 | yearfirst: bool = ...,
78 | utc: bool = ...,
79 | format: str | None = ...,
80 | exact: bool = ...,
81 | unit: str | None = ...,
82 | origin: Literal["julian", "unix"] | TimestampConvertibleTypes = ...,
83 | cache: bool = ...,
84 | ) -> Timestamp | NaTType: ...
85 | @overload
86 | def to_datetime(
87 | arg: Series | DictConvertible,
88 | errors: RaiseCoerce = ...,
89 | dayfirst: bool = ...,
90 | yearfirst: bool = ...,
91 | utc: bool = ...,
92 | format: str | None = ...,
93 | exact: bool = ...,
94 | unit: str | None = ...,
95 | origin: Literal["julian", "unix"] | TimestampConvertibleTypes = ...,
96 | cache: bool = ...,
97 | ) -> TimestampSeries: ...
98 | @overload
99 | def to_datetime(
100 | arg: (
101 | Sequence[float | date]
102 | | list[str]
103 | | tuple[float | str | date, ...]
104 | | npt.NDArray[np.datetime64]
105 | | npt.NDArray[np.str_]
106 | | npt.NDArray[np.int_]
107 | | Index
108 | | ExtensionArray
109 | ),
110 | errors: RaiseCoerce = ...,
111 | dayfirst: bool = ...,
112 | yearfirst: bool = ...,
113 | utc: bool = ...,
114 | format: str | None = ...,
115 | exact: bool = ...,
116 | unit: str | None = ...,
117 | origin: Literal["julian", "unix"] | TimestampConvertibleTypes = ...,
118 | cache: bool = ...,
119 | ) -> DatetimeIndex: ...
120 |
--------------------------------------------------------------------------------
/pandas-stubs/core/tools/numeric.pyi:
--------------------------------------------------------------------------------
1 | from typing import (
2 | Literal,
3 | overload,
4 | )
5 |
6 | import numpy as np
7 | import pandas as pd
8 | from typing_extensions import TypeAlias
9 |
10 | from pandas._libs.lib import NoDefault
11 | from pandas._typing import (
12 | DtypeBackend,
13 | RaiseCoerce,
14 | Scalar,
15 | npt,
16 | )
17 |
18 | _Downcast: TypeAlias = Literal["integer", "signed", "unsigned", "float"] | None
19 |
20 | @overload
21 | def to_numeric(
22 | arg: Scalar,
23 | errors: Literal["raise", "coerce"] = ...,
24 | downcast: _Downcast = ...,
25 | dtype_backend: DtypeBackend | NoDefault = ...,
26 | ) -> float: ...
27 | @overload
28 | def to_numeric(
29 | arg: list | tuple | np.ndarray,
30 | errors: RaiseCoerce = ...,
31 | downcast: _Downcast = ...,
32 | dtype_backend: DtypeBackend | NoDefault = ...,
33 | ) -> npt.NDArray: ...
34 | @overload
35 | def to_numeric(
36 | arg: pd.Series,
37 | errors: RaiseCoerce = ...,
38 | downcast: _Downcast = ...,
39 | dtype_backend: DtypeBackend | NoDefault = ...,
40 | ) -> pd.Series: ...
41 |
--------------------------------------------------------------------------------
/pandas-stubs/core/tools/timedeltas.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Sequence
2 | from datetime import timedelta
3 | from typing import overload
4 |
5 | from pandas import Index
6 | from pandas.core.indexes.timedeltas import TimedeltaIndex
7 | from pandas.core.series import (
8 | Series,
9 | TimedeltaSeries,
10 | )
11 |
12 | from pandas._libs.tslibs import Timedelta
13 | from pandas._libs.tslibs.timedeltas import TimeDeltaUnitChoices
14 | from pandas._typing import (
15 | ArrayLike,
16 | RaiseCoerce,
17 | SequenceNotStr,
18 | )
19 |
20 | @overload
21 | def to_timedelta(
22 | arg: str | float | timedelta,
23 | unit: TimeDeltaUnitChoices | None = ...,
24 | errors: RaiseCoerce = ...,
25 | ) -> Timedelta: ...
26 | @overload
27 | def to_timedelta(
28 | arg: Series,
29 | unit: TimeDeltaUnitChoices | None = ...,
30 | errors: RaiseCoerce = ...,
31 | ) -> TimedeltaSeries: ...
32 | @overload
33 | def to_timedelta(
34 | arg: (
35 | SequenceNotStr
36 | | Sequence[float | timedelta]
37 | | tuple[str | float | timedelta, ...]
38 | | range
39 | | ArrayLike
40 | | Index
41 | ),
42 | unit: TimeDeltaUnitChoices | None = ...,
43 | errors: RaiseCoerce = ...,
44 | ) -> TimedeltaIndex: ...
45 |
--------------------------------------------------------------------------------
/pandas-stubs/core/util/__init__.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/core/util/__init__.pyi
--------------------------------------------------------------------------------
/pandas-stubs/core/util/hashing.pyi:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from pandas import (
3 | DataFrame,
4 | Index,
5 | Series,
6 | )
7 |
8 | from pandas._typing import (
9 | ArrayLike,
10 | npt,
11 | )
12 |
13 | def hash_pandas_object(
14 | obj: Index | Series | DataFrame,
15 | index: bool = ...,
16 | encoding: str = ...,
17 | hash_key: str | None = ...,
18 | categorize: bool = ...,
19 | ) -> Series: ...
20 | def hash_array(
21 | vals: ArrayLike, encoding: str = ..., hash_key: str = ..., categorize: bool = ...
22 | ) -> npt.NDArray[np.uint64]: ...
23 |
--------------------------------------------------------------------------------
/pandas-stubs/core/window/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.window.ewm import (
2 | ExponentialMovingWindow as ExponentialMovingWindow,
3 | ExponentialMovingWindowGroupby as ExponentialMovingWindowGroupby,
4 | )
5 | from pandas.core.window.expanding import (
6 | Expanding as Expanding,
7 | ExpandingGroupby as ExpandingGroupby,
8 | )
9 | from pandas.core.window.rolling import (
10 | Rolling as Rolling,
11 | RollingGroupby as RollingGroupby,
12 | Window as Window,
13 | )
14 |
--------------------------------------------------------------------------------
/pandas-stubs/core/window/ewm.pyi:
--------------------------------------------------------------------------------
1 | from pandas import (
2 | DataFrame,
3 | Series,
4 | )
5 | from pandas.core.window.rolling import (
6 | BaseWindow,
7 | BaseWindowGroupby,
8 | )
9 |
10 | from pandas._typing import (
11 | NDFrameT,
12 | WindowingEngine,
13 | WindowingEngineKwargs,
14 | )
15 |
16 | class ExponentialMovingWindow(BaseWindow[NDFrameT]):
17 | def online(
18 | self,
19 | engine: WindowingEngine = ...,
20 | engine_kwargs: WindowingEngineKwargs = ...,
21 | ) -> OnlineExponentialMovingWindow[NDFrameT]: ...
22 | def mean(
23 | self,
24 | numeric_only: bool = ...,
25 | engine: WindowingEngine = ...,
26 | engine_kwargs: WindowingEngineKwargs = ...,
27 | ) -> NDFrameT: ...
28 | def sum(
29 | self,
30 | numeric_only: bool = ...,
31 | engine: WindowingEngine = ...,
32 | engine_kwargs: WindowingEngineKwargs = ...,
33 | ) -> NDFrameT: ...
34 | def std(self, bias: bool = ..., numeric_only: bool = ...) -> NDFrameT: ...
35 | def var(self, bias: bool = ..., numeric_only: bool = ...) -> NDFrameT: ...
36 | def cov(
37 | self,
38 | other: DataFrame | Series | None = ...,
39 | pairwise: bool | None = ...,
40 | bias: bool = ...,
41 | numeric_only: bool = ...,
42 | ) -> NDFrameT: ...
43 | def corr(
44 | self,
45 | other: DataFrame | Series | None = ...,
46 | pairwise: bool | None = ...,
47 | numeric_only: bool = ...,
48 | ) -> NDFrameT: ...
49 |
50 | class ExponentialMovingWindowGroupby(
51 | BaseWindowGroupby[NDFrameT], ExponentialMovingWindow[NDFrameT]
52 | ): ...
53 |
54 | class OnlineExponentialMovingWindow(ExponentialMovingWindow[NDFrameT]):
55 | def reset(self) -> None: ...
56 | def aggregate(self, func, *args, **kwargs): ...
57 | def std(self, bias: bool = ..., *args, **kwargs): ...
58 | def corr(
59 | self,
60 | other: DataFrame | Series | None = ...,
61 | pairwise: bool | None = ...,
62 | numeric_only: bool = ...,
63 | ): ...
64 | def cov(
65 | self,
66 | other: DataFrame | Series | None = ...,
67 | pairwise: bool | None = ...,
68 | bias: bool = ...,
69 | numeric_only: bool = ...,
70 | ): ...
71 | def var(self, bias: bool = ..., numeric_only: bool = ...): ...
72 | def mean(
73 | self, *args, update: NDFrameT | None = ..., update_times: None = ..., **kwargs
74 | ) -> NDFrameT: ...
75 |
--------------------------------------------------------------------------------
/pandas-stubs/core/window/expanding.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.window.rolling import (
2 | BaseWindowGroupby,
3 | RollingAndExpandingMixin,
4 | )
5 |
6 | from pandas._typing import NDFrameT
7 |
8 | class Expanding(RollingAndExpandingMixin[NDFrameT]): ...
9 | class ExpandingGroupby(BaseWindowGroupby[NDFrameT], Expanding[NDFrameT]): ...
10 |
--------------------------------------------------------------------------------
/pandas-stubs/errors/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.computation.ops import UndefinedVariableError as UndefinedVariableError
2 |
3 | from pandas._config.config import OptionError as OptionError
4 |
5 | from pandas._libs.tslibs import (
6 | OutOfBoundsDatetime as OutOfBoundsDatetime,
7 | OutOfBoundsTimedelta as OutOfBoundsTimedelta,
8 | )
9 |
10 | class IntCastingNaNError(ValueError): ...
11 | class NullFrequencyError(ValueError): ...
12 | class PerformanceWarning(Warning): ...
13 | class UnsupportedFunctionCall(ValueError): ...
14 | class UnsortedIndexError(KeyError): ...
15 | class ParserError(ValueError): ...
16 | class DtypeWarning(Warning): ...
17 | class EmptyDataError(ValueError): ...
18 | class ParserWarning(Warning): ...
19 | class MergeError(ValueError): ...
20 | class AccessorRegistrationWarning(Warning): ...
21 |
22 | class AbstractMethodError(NotImplementedError):
23 | def __init__(self, class_instance, methodtype: str = ...) -> None: ...
24 |
25 | class NumbaUtilError(Exception): ...
26 | class DuplicateLabelError(ValueError): ...
27 | class InvalidIndexError(Exception): ...
28 | class DataError(Exception): ...
29 | class SpecificationError(Exception): ...
30 | class SettingWithCopyError(ValueError): ...
31 | class SettingWithCopyWarning(Warning): ...
32 | class NumExprClobberingError(NameError): ...
33 | class IndexingError(Exception): ...
34 | class PyperclipException(RuntimeError): ...
35 |
36 | class PyperclipWindowsException(PyperclipException):
37 | def __init__(self, message) -> None: ...
38 |
39 | class CSSWarning(UserWarning): ...
40 | class PossibleDataLossError(Exception): ...
41 | class ClosedFileError(Exception): ...
42 | class IncompatibilityWarning(Warning): ...
43 | class AttributeConflictWarning(Warning): ...
44 | class DatabaseError(OSError): ...
45 | class PossiblePrecisionLoss(Warning): ...
46 | class ValueLabelTypeMismatch(Warning): ...
47 | class InvalidColumnName(Warning): ...
48 | class CategoricalConversionWarning(Warning): ...
49 |
--------------------------------------------------------------------------------
/pandas-stubs/io/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.io import (
2 | formats as formats,
3 | json as json,
4 | stata as stata,
5 | )
6 |
--------------------------------------------------------------------------------
/pandas-stubs/io/api.pyi:
--------------------------------------------------------------------------------
1 | from pandas.io.clipboards import read_clipboard as read_clipboard
2 | from pandas.io.excel import (
3 | ExcelFile as ExcelFile,
4 | ExcelWriter as ExcelWriter,
5 | read_excel as read_excel,
6 | )
7 | from pandas.io.feather_format import read_feather as read_feather
8 | from pandas.io.gbq import read_gbq as read_gbq
9 | from pandas.io.html import read_html as read_html
10 | from pandas.io.json import read_json as read_json
11 | from pandas.io.orc import read_orc as read_orc
12 | from pandas.io.parquet import read_parquet as read_parquet
13 | from pandas.io.parsers import (
14 | read_csv as read_csv,
15 | read_fwf as read_fwf,
16 | read_table as read_table,
17 | )
18 | from pandas.io.pickle import (
19 | read_pickle as read_pickle,
20 | to_pickle as to_pickle,
21 | )
22 | from pandas.io.pytables import (
23 | HDFStore as HDFStore,
24 | read_hdf as read_hdf,
25 | )
26 | from pandas.io.sas import read_sas as read_sas
27 | from pandas.io.spss import read_spss as read_spss
28 | from pandas.io.sql import (
29 | read_sql as read_sql,
30 | read_sql_query as read_sql_query,
31 | read_sql_table as read_sql_table,
32 | )
33 | from pandas.io.stata import read_stata as read_stata
34 | from pandas.io.xml import read_xml as read_xml
35 |
--------------------------------------------------------------------------------
/pandas-stubs/io/clipboard/__init__.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/io/clipboard/__init__.pyi
--------------------------------------------------------------------------------
/pandas-stubs/io/common.pyi:
--------------------------------------------------------------------------------
1 | from typing import (
2 | IO,
3 | AnyStr,
4 | Generic,
5 | )
6 |
7 | from pandas._typing import CompressionDict
8 |
9 | class IOHandles(Generic[AnyStr]):
10 | handle: IO[AnyStr]
11 | compression: CompressionDict
12 | created_handles: list[IO[AnyStr]]
13 | is_wrapped: bool
14 | def close(self) -> None: ...
15 | def __enter__(self) -> IOHandles[AnyStr]: ...
16 | def __exit__(self, *args: object) -> None: ...
17 | def __init__(self, handle, compression, created_handles, is_wrapped) -> None: ...
18 |
--------------------------------------------------------------------------------
/pandas-stubs/io/excel/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.io.excel._base import (
2 | ExcelFile as ExcelFile,
3 | ExcelWriter as ExcelWriter,
4 | read_excel as read_excel,
5 | )
6 |
--------------------------------------------------------------------------------
/pandas-stubs/io/excel/_util.pyi:
--------------------------------------------------------------------------------
1 | def register_writer(klass) -> None: ...
2 | def get_writer(engine_name): ...
3 |
--------------------------------------------------------------------------------
/pandas-stubs/io/feather_format.pyi:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 |
3 | from pandas._libs.lib import NoDefault
4 | from pandas._typing import (
5 | DtypeBackend,
6 | FilePath,
7 | HashableT,
8 | ReadBuffer,
9 | StorageOptions,
10 | )
11 |
12 | def read_feather(
13 | path: FilePath | ReadBuffer[bytes],
14 | columns: list[HashableT] | None = ...,
15 | use_threads: bool = ...,
16 | storage_options: StorageOptions = ...,
17 | dtype_backend: DtypeBackend | NoDefault = ...,
18 | ) -> DataFrame: ...
19 |
--------------------------------------------------------------------------------
/pandas-stubs/io/formats/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.io.formats import style as style
2 |
--------------------------------------------------------------------------------
/pandas-stubs/io/formats/css.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/io/formats/css.pyi
--------------------------------------------------------------------------------
/pandas-stubs/io/formats/format.pyi:
--------------------------------------------------------------------------------
1 | class EngFormatter:
2 | ENG_PREFIXES = ...
3 | accuracy = ...
4 | use_eng_prefix = ...
5 | def __init__(
6 | self, accuracy: int | None = ..., use_eng_prefix: bool = ...
7 | ) -> None: ...
8 | def __call__(self, num: float) -> str: ...
9 |
10 | def set_eng_float_format(accuracy: int = ..., use_eng_prefix: bool = ...) -> None: ...
11 |
--------------------------------------------------------------------------------
/pandas-stubs/io/formats/style_render.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Callable,
3 | Sequence,
4 | )
5 | from typing import (
6 | Any,
7 | Literal,
8 | TypedDict,
9 | )
10 |
11 | from jinja2.environment import (
12 | Environment,
13 | Template,
14 | )
15 | from jinja2.loaders import PackageLoader
16 | from pandas import Index
17 | from pandas.core.indexing import _IndexSlice
18 | from typing_extensions import (
19 | Self,
20 | TypeAlias,
21 | )
22 |
23 | from pandas._typing import (
24 | Axis,
25 | HashableT,
26 | Level,
27 | )
28 |
29 | BaseFormatter: TypeAlias = str | Callable[[object], str]
30 | ExtFormatter: TypeAlias = BaseFormatter | dict[Any, BaseFormatter | None]
31 | CSSPair: TypeAlias = tuple[str, str | float]
32 | CSSList: TypeAlias = list[CSSPair]
33 | CSSProperties: TypeAlias = str | CSSList
34 |
35 | class CSSDict(TypedDict):
36 | selector: str
37 | props: CSSProperties
38 |
39 | class StyleExportDict(TypedDict, total=False):
40 | apply: Any
41 | table_attributes: Any
42 | table_styles: Any
43 | hide_index: bool
44 | hide_columns: bool
45 | hide_index_names: bool
46 | hide_column_names: bool
47 | css: dict[str, str | int]
48 |
49 | CSSStyles: TypeAlias = list[CSSDict]
50 | Subset: TypeAlias = _IndexSlice | slice | tuple[slice, ...] | list[HashableT] | Index
51 |
52 | class StylerRenderer:
53 | loader: PackageLoader
54 | env: Environment
55 | template_html: Template
56 | template_html_table: Template
57 | template_html_style: Template
58 | template_latex: Template
59 | def format(
60 | self,
61 | formatter: ExtFormatter | None = ...,
62 | subset: Subset | None = ...,
63 | na_rep: str | None = ...,
64 | precision: int | None = ...,
65 | decimal: str = ...,
66 | thousands: str | None = ...,
67 | escape: str | None = ...,
68 | hyperlinks: Literal["html", "latex"] | None = ...,
69 | ) -> Self: ...
70 | def format_index(
71 | self,
72 | formatter: ExtFormatter | None = ...,
73 | axis: Axis = ...,
74 | level: Level | list[Level] | None = ...,
75 | na_rep: str | None = ...,
76 | precision: int | None = ...,
77 | decimal: str = ...,
78 | thousands: str | None = ...,
79 | escape: str | None = ...,
80 | hyperlinks: Literal["html", "latex"] | None = ...,
81 | ) -> Self: ...
82 | def relabel_index(
83 | self,
84 | labels: Sequence[str] | Index,
85 | axis: Axis = ...,
86 | level: Level | list[Level] | None = ...,
87 | ) -> Self: ...
88 | @property
89 | def columns(self) -> Index[Any]: ...
90 | @property
91 | def index(self) -> Index[Any]: ...
92 |
--------------------------------------------------------------------------------
/pandas-stubs/io/html.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Callable,
3 | Hashable,
4 | Mapping,
5 | Sequence,
6 | )
7 | from re import Pattern
8 | from typing import (
9 | Any,
10 | Literal,
11 | )
12 |
13 | from pandas.core.frame import DataFrame
14 |
15 | from pandas._libs.lib import NoDefault
16 | from pandas._typing import (
17 | DtypeBackend,
18 | FilePath,
19 | HashableT1,
20 | HashableT2,
21 | HashableT3,
22 | HashableT4,
23 | HashableT5,
24 | HTMLFlavors,
25 | ReadBuffer,
26 | StorageOptions,
27 | )
28 |
29 | def read_html(
30 | io: FilePath | ReadBuffer[str],
31 | *,
32 | match: str | Pattern = ...,
33 | flavor: HTMLFlavors | Sequence[HTMLFlavors] | None = ...,
34 | header: int | Sequence[int] | None = ...,
35 | index_col: int | Sequence[int] | list[HashableT1] | None = ...,
36 | skiprows: int | Sequence[int] | slice | None = ...,
37 | attrs: dict[str, str] | None = ...,
38 | parse_dates: (
39 | bool
40 | | Sequence[int]
41 |         | list[HashableT2]  # Cannot be Sequence[Hashable], which would also accept a plain str
42 | | Sequence[Sequence[Hashable]]
43 | | dict[str, Sequence[int]]
44 | | dict[str, list[HashableT3]]
45 | ) = ...,
46 | thousands: str = ...,
47 | encoding: str | None = ...,
48 | decimal: str = ...,
49 | converters: Mapping[int | HashableT4, Callable[[str], Any]] | None = ...,
50 | na_values: (
51 | str | list[str] | dict[HashableT5, str] | dict[HashableT5, list[str]] | None
52 | ) = ...,
53 | keep_default_na: bool = ...,
54 | displayed_only: bool = ...,
55 | extract_links: Literal["header", "footer", "body", "all"] | None = ...,
56 | dtype_backend: DtypeBackend | NoDefault = ...,
57 | storage_options: StorageOptions = ...,
58 | ) -> list[DataFrame]: ...
59 |
--------------------------------------------------------------------------------
/pandas-stubs/io/json/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.io.json._json import (
2 | read_json as read_json,
3 | )
4 |
5 | # The imports below are untyped upstream, so they are commented out:
6 | # to_json as to_json,; ujson_dumps as ujson_dumps,; ujson_loads as ujson_loads,
7 | from pandas.io.json._table_schema import build_table_schema as build_table_schema
8 |
--------------------------------------------------------------------------------
/pandas-stubs/io/json/_normalize.pyi:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 |
3 | from pandas._typing import IgnoreRaise
4 |
5 | def json_normalize(
6 | data: dict | list[dict],
7 | record_path: str | list | None = ...,
8 | meta: str | list[str | list[str]] | None = ...,
9 | meta_prefix: str | None = ...,
10 | record_prefix: str | None = ...,
11 | errors: IgnoreRaise = ...,
12 | sep: str = ...,
13 | max_level: int | None = ...,
14 | ) -> DataFrame: ...
15 |
--------------------------------------------------------------------------------
/pandas-stubs/io/json/_table_schema.pyi:
--------------------------------------------------------------------------------
1 | from pandas import (
2 | DataFrame,
3 | Series,
4 | )
5 |
6 | from pandas._typing import JSONSerializable
7 |
8 | def build_table_schema(
9 | data: DataFrame | Series,
10 | index: bool = ...,
11 | primary_key: bool | None = ...,
12 | version: bool = ...,
13 | ) -> dict[str, JSONSerializable]: ...
14 |
--------------------------------------------------------------------------------
/pandas-stubs/io/orc.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | from pandas import DataFrame
4 |
5 | from pandas._libs.lib import NoDefault
6 | from pandas._typing import (
7 | DtypeBackend,
8 | FilePath,
9 | HashableT,
10 | ReadBuffer,
11 | )
12 |
13 | def read_orc(
14 | path: FilePath | ReadBuffer[bytes],
15 | columns: list[HashableT] | None = ...,
16 | dtype_backend: DtypeBackend | NoDefault = ...,
17 |     # TODO: annotate with the correct pyarrow types
18 | # filesystem: pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem
19 | filesystem: Any | None = ...,
20 | **kwargs: Any,
21 | ) -> DataFrame: ...
22 |
--------------------------------------------------------------------------------
/pandas-stubs/io/parquet.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | from pandas import DataFrame
4 |
5 | from pandas._typing import (
6 | FilePath,
7 | ParquetEngine,
8 | ReadBuffer,
9 | StorageOptions,
10 | )
11 |
12 | def read_parquet(
13 | path: FilePath | ReadBuffer[bytes],
14 | engine: ParquetEngine = ...,
15 | columns: list[str] | None = ...,
16 | storage_options: StorageOptions = ...,
17 | **kwargs: Any,
18 | ) -> DataFrame: ...
19 |
--------------------------------------------------------------------------------
/pandas-stubs/io/parsers.pyi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/io/parsers.pyi
--------------------------------------------------------------------------------
/pandas-stubs/io/parsers/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.io.parsers.readers import (
2 | TextFileReader as TextFileReader,
3 | read_csv as read_csv,
4 | read_fwf as read_fwf,
5 | read_table as read_table,
6 | )
7 |
--------------------------------------------------------------------------------
/pandas-stubs/io/pickle.pyi:
--------------------------------------------------------------------------------
1 | from typing import Any
2 |
3 | from pandas._typing import (
4 | CompressionOptions,
5 | FilePath,
6 | ReadPickleBuffer,
7 | StorageOptions,
8 | WriteBuffer,
9 | )
10 |
11 | def to_pickle(
12 | obj: object,
13 | filepath_or_buffer: FilePath | WriteBuffer[bytes],
14 | compression: CompressionOptions = ...,
15 | protocol: int = ...,
16 | storage_options: StorageOptions = ...,
17 | ) -> None: ...
18 | def read_pickle(
19 | filepath_or_buffer: FilePath | ReadPickleBuffer,
20 | compression: CompressionOptions = ...,
21 | storage_options: StorageOptions = ...,
22 | ) -> Any: ...
23 |
--------------------------------------------------------------------------------
/pandas-stubs/io/sas/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.io.sas.sasreader import read_sas as read_sas
2 |
--------------------------------------------------------------------------------
/pandas-stubs/io/sas/sas7bdat.pyi:
--------------------------------------------------------------------------------
1 | from pandas import DataFrame
2 |
3 | from pandas.io.sas.sasreader import ReaderBase
4 |
5 | class SAS7BDATReader(ReaderBase):
6 | def close(self) -> None: ...
7 | def __next__(self) -> DataFrame: ...
8 | def read(self, nrows: int | None = ...) -> DataFrame: ...
9 |
--------------------------------------------------------------------------------
/pandas-stubs/io/sas/sas_xport.pyi:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | from pandas.io.sas.sasreader import ReaderBase
4 |
5 | class XportReader(ReaderBase):
6 | def close(self) -> None: ...
7 | def __next__(self) -> pd.DataFrame: ...
8 | def read(self, nrows: int | None = ...) -> pd.DataFrame: ...
9 |
--------------------------------------------------------------------------------
/pandas-stubs/io/sas/sasreader.pyi:
--------------------------------------------------------------------------------
1 | from abc import (
2 | ABCMeta,
3 | abstractmethod,
4 | )
5 | from collections.abc import Hashable
6 | from typing import (
7 | Literal,
8 | overload,
9 | )
10 |
11 | from pandas import DataFrame
12 | from typing_extensions import Self
13 |
14 | from pandas._typing import (
15 | CompressionOptions as CompressionOptions,
16 | FilePath as FilePath,
17 | ReadBuffer,
18 | )
19 |
20 | from pandas.io.sas.sas7bdat import SAS7BDATReader
21 | from pandas.io.sas.sas_xport import XportReader
22 |
23 | class ReaderBase(metaclass=ABCMeta):
24 | @abstractmethod
25 | def read(self, nrows: int | None = ...) -> DataFrame: ...
26 | @abstractmethod
27 | def close(self) -> None: ...
28 | def __enter__(self) -> Self: ...
29 | def __exit__(self, exc_type, exc_value, traceback) -> None: ...
30 |
31 | @overload
32 | def read_sas(
33 | filepath_or_buffer: FilePath | ReadBuffer[bytes],
34 | *,
35 | format: Literal["sas7bdat"],
36 | index: Hashable | None = ...,
37 | encoding: str | None = ...,
38 | chunksize: int,
39 | iterator: bool = ...,
40 | compression: CompressionOptions = ...,
41 | ) -> SAS7BDATReader: ...
42 | @overload
43 | def read_sas(
44 | filepath_or_buffer: FilePath | ReadBuffer[bytes],
45 | *,
46 | format: Literal["xport"],
47 | index: Hashable | None = ...,
48 | encoding: str | None = ...,
49 | chunksize: int,
50 | iterator: bool = ...,
51 | compression: CompressionOptions = ...,
52 | ) -> XportReader: ...
53 | @overload
54 | def read_sas(
55 | filepath_or_buffer: FilePath | ReadBuffer[bytes],
56 | *,
57 | format: None = ...,
58 | index: Hashable | None = ...,
59 | encoding: str | None = ...,
60 | chunksize: int,
61 | iterator: bool = ...,
62 | compression: CompressionOptions = ...,
63 | ) -> XportReader | SAS7BDATReader: ...
64 | @overload
65 | def read_sas(
66 | filepath_or_buffer: FilePath | ReadBuffer[bytes],
67 | *,
68 | format: Literal["sas7bdat"],
69 | index: Hashable | None = ...,
70 | encoding: str | None = ...,
71 | chunksize: int | None = ...,
72 | iterator: Literal[True],
73 | compression: CompressionOptions = ...,
74 | ) -> SAS7BDATReader: ...
75 | @overload
76 | def read_sas(
77 | filepath_or_buffer: FilePath | ReadBuffer[bytes],
78 | *,
79 | format: Literal["xport"],
80 | index: Hashable | None = ...,
81 | encoding: str | None = ...,
82 | chunksize: int | None = ...,
83 | iterator: Literal[True],
84 | compression: CompressionOptions = ...,
85 | ) -> XportReader: ...
86 | @overload
87 | def read_sas(
88 | filepath_or_buffer: FilePath | ReadBuffer[bytes],
89 | *,
90 | format: None = ...,
91 | index: Hashable | None = ...,
92 | encoding: str | None = ...,
93 | chunksize: int | None = ...,
94 | iterator: Literal[True],
95 | compression: CompressionOptions = ...,
96 | ) -> XportReader | SAS7BDATReader: ...
97 | @overload
98 | def read_sas(
99 | filepath_or_buffer: FilePath | ReadBuffer[bytes],
100 | *,
101 | format: Literal["xport", "sas7bdat"] | None = ...,
102 | index: Hashable | None = ...,
103 | encoding: str | None = ...,
104 | chunksize: None = ...,
105 | iterator: Literal[False] = ...,
106 | compression: CompressionOptions = ...,
107 | ) -> DataFrame: ...
108 |
--------------------------------------------------------------------------------
/pandas-stubs/io/spss.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.frame import DataFrame
2 |
3 | from pandas._libs.lib import NoDefault
4 | from pandas._typing import (
5 | DtypeBackend,
6 | FilePath,
7 | HashableT,
8 | )
9 |
10 | def read_spss(
11 | path: FilePath,
12 | usecols: list[HashableT] | None = ...,
13 | convert_categoricals: bool = ...,
14 | dtype_backend: DtypeBackend | NoDefault = ...,
15 | ) -> DataFrame: ...
16 |
--------------------------------------------------------------------------------
/pandas-stubs/io/xml.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Sequence
2 |
3 | from pandas.core.frame import DataFrame
4 |
5 | from pandas._libs.lib import NoDefault
6 | from pandas._typing import (
7 | CompressionOptions,
8 | ConvertersArg,
9 | DtypeArg,
10 | DtypeBackend,
11 | FilePath,
12 | ParseDatesArg,
13 | ReadBuffer,
14 | StorageOptions,
15 | XMLParsers,
16 | )
17 |
# Read an XML document into a DataFrame, selecting nodes via ``xpath``.
def read_xml(
    path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],
    *,
    xpath: str = ...,
    namespaces: dict[str, str] | None = ...,
    elems_only: bool = ...,
    attrs_only: bool = ...,
    names: Sequence[str] | None = ...,
    dtype: DtypeArg | None = ...,
    converters: ConvertersArg | None = ...,
    parse_dates: ParseDatesArg | None = ...,
    # encoding can not be None for lxml and StringIO input
    encoding: str | None = ...,
    parser: XMLParsers = ...,
    stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = ...,
    # iterparse enables incremental parsing of large documents (see pandas docs).
    iterparse: dict[str, list[str]] | None = ...,
    compression: CompressionOptions = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | NoDefault = ...,
) -> DataFrame: ...
38 |
--------------------------------------------------------------------------------
/pandas-stubs/plotting/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.plotting._core import (
2 | PlotAccessor as PlotAccessor,
3 | boxplot as boxplot,
4 | )
5 | from pandas.plotting._misc import (
6 | andrews_curves as andrews_curves,
7 | autocorrelation_plot as autocorrelation_plot,
8 | bootstrap_plot as bootstrap_plot,
9 | deregister,
10 | lag_plot as lag_plot,
11 | parallel_coordinates as parallel_coordinates,
12 | plot_params as plot_params,
13 | radviz as radviz,
14 | register,
15 | scatter_matrix as scatter_matrix,
16 | table as table,
17 | )
18 |
# Public long-form aliases for the matplotlib converter hooks imported above.
deregister_matplotlib_converters = deregister
register_matplotlib_converters = register
21 |
--------------------------------------------------------------------------------
/pandas-stubs/plotting/_misc.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import (
2 | Hashable,
3 | Sequence,
4 | )
5 | from typing import (
6 | Any,
7 | Literal,
8 | )
9 |
10 | from matplotlib.axes import Axes
11 | from matplotlib.colors import Colormap
12 | from matplotlib.figure import Figure
13 | from matplotlib.table import Table
14 | import numpy as np
15 | from pandas.core.frame import DataFrame
16 | from pandas.core.series import Series
17 | from typing_extensions import TypeAlias
18 |
19 | from pandas._typing import (
20 | HashableT,
21 | npt,
22 | )
23 |
# A color: a matplotlib color string (name/hex) or an RGB(A) float sequence.
_Color: TypeAlias = str | Sequence[float]

# Draw ``data`` as a matplotlib Table attached to ``ax``.
def table(
    ax: Axes,
    data: DataFrame | Series,
    **kwargs,
) -> Table: ...

# Register / remove pandas' unit converters with matplotlib.
def register() -> None: ...
def deregister() -> None: ...

# Matrix of pairwise scatter plots; returns a 2-D object ndarray of Axes.
def scatter_matrix(
    frame: DataFrame,
    alpha: float = ...,
    figsize: tuple[float, float] | None = ...,
    ax: Axes | None = ...,
    grid: bool = ...,
    diagonal: Literal["hist", "kde"] = ...,
    marker: str = ...,
    density_kwds: dict[str, Any] | None = ...,
    hist_kwds: dict[str, Any] | None = ...,
    range_padding: float = ...,
    **kwargs,
) -> npt.NDArray[np.object_]: ...
def radviz(
    frame: DataFrame,
    class_column: Hashable,
    ax: Axes | None = ...,
    color: _Color | Sequence[_Color] | None = ...,
    colormap: str | Colormap | None = ...,
    **kwds,
) -> Axes: ...
def andrews_curves(
    frame: DataFrame,
    class_column: Hashable,
    ax: Axes | None = ...,
    samples: int = ...,
    color: _Color | Sequence[_Color] | None = ...,
    colormap: str | Colormap | None = ...,
    **kwargs,
) -> Axes: ...
def bootstrap_plot(
    series: Series,
    fig: Figure | None = ...,
    size: int = ...,
    samples: int = ...,
    **kwds,
) -> Figure: ...
def parallel_coordinates(
    frame: DataFrame,
    class_column: Hashable,
    cols: list[HashableT] | None = ...,
    ax: Axes | None = ...,
    color: _Color | Sequence[_Color] | None = ...,
    use_columns: bool = ...,
    xticks: Sequence[float] | None = ...,
    colormap: str | Colormap | None = ...,
    axvlines: bool = ...,
    axvlines_kwds: dict[str, Any] | None = ...,
    sort_labels: bool = ...,
    **kwargs,
) -> Axes: ...
def lag_plot(series: Series, lag: int = ..., ax: Axes | None = ..., **kwds) -> Axes: ...
def autocorrelation_plot(series: Series, ax: Axes | None = ..., **kwargs) -> Axes: ...

# Module-level plotting option store (pandas.plotting.plot_params).
plot_params: dict[str, Any]
88 |
--------------------------------------------------------------------------------
/pandas-stubs/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/pandas-stubs/py.typed
--------------------------------------------------------------------------------
/pandas-stubs/testing.pyi:
--------------------------------------------------------------------------------
1 | from pandas._testing import (
2 | assert_extension_array_equal as assert_extension_array_equal,
3 | assert_frame_equal as assert_frame_equal,
4 | assert_index_equal as assert_index_equal,
5 | assert_series_equal as assert_series_equal,
6 | )
7 |
# Public surface of pandas.testing: the four assertion helpers re-exported above.
__all__ = [
    "assert_extension_array_equal",
    "assert_frame_equal",
    "assert_series_equal",
    "assert_index_equal",
]
14 |
--------------------------------------------------------------------------------
/pandas-stubs/tseries/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.tseries import (
2 | frequencies as frequencies,
3 | offsets as offsets,
4 | )
5 |
--------------------------------------------------------------------------------
/pandas-stubs/tseries/api.pyi:
--------------------------------------------------------------------------------
1 | from pandas.tseries.frequencies import infer_freq as infer_freq
2 |
--------------------------------------------------------------------------------
/pandas-stubs/tseries/frequencies.pyi:
--------------------------------------------------------------------------------
1 | from typing import overload
2 |
3 | from pandas import (
4 | DatetimeIndex,
5 | Series,
6 | TimedeltaIndex,
7 | )
8 |
9 | from pandas._typing import Frequency
10 |
11 | from pandas.tseries.offsets import DateOffset
12 |
# Map an offset alias to its period alias, or None when there is none.
def get_period_alias(offset_str: str) -> str | None: ...
# to_offset propagates None; any other Frequency-like input yields a DateOffset.
@overload
def to_offset(freq: None, is_period: bool = ...) -> None: ...
@overload
def to_offset(freq: Frequency, is_period: bool = ...) -> DateOffset: ...
# Infer the frequency string of a regular index, or None if it has none.
def infer_freq(index: Series | DatetimeIndex | TimedeltaIndex) -> str | None: ...
19 |
--------------------------------------------------------------------------------
/pandas-stubs/tseries/holiday.pyi:
--------------------------------------------------------------------------------
1 | from collections.abc import Callable
2 | from datetime import (
3 | date as _date,
4 | datetime,
5 | )
6 | from typing import (
7 | Literal,
8 | overload,
9 | )
10 |
11 | import numpy as np
12 | from pandas import (
13 | DatetimeIndex,
14 | Series,
15 | )
16 |
17 | from pandas._libs.tslibs.offsets import BaseOffset
18 | from pandas._libs.tslibs.timestamps import Timestamp
19 |
# Observance rules: each maps a holiday's nominal date to the date on which it
# is observed (typically shifting dates off weekends).
def next_monday(dt: datetime) -> datetime: ...
def next_monday_or_tuesday(dt: datetime) -> datetime: ...
def previous_friday(dt: datetime) -> datetime: ...
def sunday_to_monday(dt: datetime) -> datetime: ...
def weekend_to_monday(dt: datetime) -> datetime: ...
def nearest_workday(dt: datetime) -> datetime: ...
def next_workday(dt: datetime) -> datetime: ...
def previous_workday(dt: datetime) -> datetime: ...
def before_nearest_workday(dt: datetime) -> datetime: ...
def after_nearest_workday(dt: datetime) -> datetime: ...
30 |
class Holiday:
    # A holiday rule: either a fixed (month, day) possibly shifted by ``offset``,
    # or a date transformed by an ``observance`` callable.
    def __init__(
        self,
        name: str,
        year: int | None = ...,
        month: int | None = ...,
        day: int | None = ...,
        offset: BaseOffset | list[BaseOffset] | None = ...,
        observance: Callable[[datetime], datetime] | None = ...,
        # Values accepted by Timestamp(), or None:
        start_date: (
            np.integer | float | str | _date | datetime | np.datetime64 | None
        ) = ...,
        end_date: (
            np.integer | float | str | _date | datetime | np.datetime64 | None
        ) = ...,
        days_of_week: tuple[int, ...] | None = ...,
    ) -> None: ...
    # return_name=False yields only the dates; return_name=True (the default)
    # yields a Series (presumably date -> holiday name; see pandas docs).
    @overload
    def dates(
        self,
        start_date: np.integer | float | str | _date | datetime | np.datetime64 | None,
        end_date: np.integer | float | str | _date | datetime | np.datetime64 | None,
        return_name: Literal[False],
    ) -> DatetimeIndex: ...
    @overload
    def dates(
        self,
        start_date: np.integer | float | str | _date | datetime | np.datetime64 | None,
        end_date: np.integer | float | str | _date | datetime | np.datetime64 | None,
        return_name: Literal[True] = ...,
    ) -> Series: ...
63 |
# Registry of calendar classes by name; see register()/get_calendar().
holiday_calendars: dict[str, type[AbstractHolidayCalendar]]

def register(cls: type[AbstractHolidayCalendar]) -> None: ...
def get_calendar(name: str) -> AbstractHolidayCalendar: ...
68 |
class AbstractHolidayCalendar:
    # Holiday rules this calendar evaluates; subclasses provide their own list.
    rules: list[Holiday]
    # Class-level date bounds — NOTE(review): confirm how these interact with
    # the start/end arguments of holidays().
    start_date: Timestamp
    end_date: Timestamp

    def __init__(self, name: str = "", rules: list[Holiday] | None = None) -> None: ...
    # Look up a rule by its Holiday name; None when no rule matches.
    def rule_from_name(self, name: str) -> Holiday | None: ...
    # return_name=True yields a Series; the default yields a DatetimeIndex of
    # the holiday dates between start and end.
    @overload
    def holidays(
        self,
        start: datetime | None = ...,
        end: datetime | None = ...,
        *,
        return_name: Literal[True],
    ) -> Series: ...
    @overload
    def holidays(
        self,
        start: datetime | None = ...,
        end: datetime | None = ...,
        return_name: Literal[False] = ...,
    ) -> DatetimeIndex: ...
    # Combine the rule lists of two calendars (classes, instances or raw lists).
    @staticmethod
    def merge_class(
        base: AbstractHolidayCalendar | type[AbstractHolidayCalendar] | list[Holiday],
        other: AbstractHolidayCalendar | type[AbstractHolidayCalendar] | list[Holiday],
    ) -> list[Holiday]: ...
    # inplace=True mutates self and returns None; otherwise the merged rule
    # list is returned.
    @overload
    def merge(
        self,
        other: AbstractHolidayCalendar | type[AbstractHolidayCalendar],
        inplace: Literal[True],
    ) -> None: ...
    @overload
    def merge(
        self,
        other: AbstractHolidayCalendar | type[AbstractHolidayCalendar],
        inplace: Literal[False] = ...,
    ) -> list[Holiday]: ...
108 |
# Predefined holiday rules shipped with pandas.
USMemorialDay: Holiday
USLaborDay: Holiday
USColumbusDay: Holiday
USThanksgivingDay: Holiday
USMartinLutherKingJr: Holiday
USPresidentsDay: Holiday
GoodFriday: Holiday
EasterMonday: Holiday

class USFederalHolidayCalendar(AbstractHolidayCalendar): ...

# Build a new calendar class named ``name`` from ``base`` and ``other``
# (presumably combining their rules via merge_class; confirm in pandas source).
def HolidayCalendarFactory(
    name: str,
    base: type[AbstractHolidayCalendar],
    other: type[AbstractHolidayCalendar],
    base_class: type[AbstractHolidayCalendar] = ...,
) -> type[AbstractHolidayCalendar]: ...
126 |
--------------------------------------------------------------------------------
/pandas-stubs/tseries/offsets.pyi:
--------------------------------------------------------------------------------
1 | from pandas._libs.tslibs.offsets import (
2 | FY5253 as FY5253,
3 | BaseOffset as BaseOffset,
4 | BDay as BDay,
5 | BMonthBegin as BMonthBegin,
6 | BMonthEnd as BMonthEnd,
7 | BQuarterBegin as BQuarterBegin,
8 | BQuarterEnd as BQuarterEnd,
9 | BusinessDay as BusinessDay,
10 | BusinessHour as BusinessHour,
11 | BusinessMonthBegin as BusinessMonthBegin,
12 | BusinessMonthEnd as BusinessMonthEnd,
13 | BYearBegin as BYearBegin,
14 | BYearEnd as BYearEnd,
15 | CBMonthBegin as CBMonthBegin,
16 | CBMonthEnd as CBMonthEnd,
17 | CDay as CDay,
18 | CustomBusinessDay as CustomBusinessDay,
19 | CustomBusinessHour as CustomBusinessHour,
20 | CustomBusinessMonthBegin as CustomBusinessMonthBegin,
21 | CustomBusinessMonthEnd as CustomBusinessMonthEnd,
22 | DateOffset as DateOffset,
23 | Day as Day,
24 | Easter as Easter,
25 | FY5253Quarter as FY5253Quarter,
26 | Hour as Hour,
27 | LastWeekOfMonth as LastWeekOfMonth,
28 | Micro as Micro,
29 | Milli as Milli,
30 | Minute as Minute,
31 | MonthBegin as MonthBegin,
32 | MonthEnd as MonthEnd,
33 | Nano as Nano,
34 | QuarterBegin as QuarterBegin,
35 | QuarterEnd as QuarterEnd,
36 | Second as Second,
37 | SemiMonthBegin as SemiMonthBegin,
38 | SemiMonthEnd as SemiMonthEnd,
39 | Tick as Tick,
40 | Week as Week,
41 | WeekOfMonth as WeekOfMonth,
42 | YearBegin as YearBegin,
43 | YearEnd as YearEnd,
44 | )
45 |
--------------------------------------------------------------------------------
/pandas-stubs/util/__init__.pyi:
--------------------------------------------------------------------------------
1 | from pandas.core.util.hashing import (
2 | hash_array as hash_array,
3 | hash_pandas_object as hash_pandas_object,
4 | )
5 |
--------------------------------------------------------------------------------
/pandas-stubs/util/_decorators.pyi:
--------------------------------------------------------------------------------
1 | from pandas._libs.properties import cache_readonly as cache_readonly
2 |
--------------------------------------------------------------------------------
/pandas-stubs/util/_doctools.pyi:
--------------------------------------------------------------------------------
class TablePlotter:
    # Layout attributes mirroring the __init__ parameters; untyped upstream.
    cell_width = ...
    cell_height = ...
    font_size = ...
    def __init__(
        self, cell_width: float = ..., cell_height: float = ..., font_size: float = ...
    ) -> None: ...
    # Plot ``left``/``right`` with ``labels``; parameters are untyped upstream.
    def plot(self, left, right, labels=..., vertical: bool = ...): ...
9 |
--------------------------------------------------------------------------------
/pandas-stubs/util/_exceptions.pyi:
--------------------------------------------------------------------------------
# NOTE(review): presumably a context manager that rewrites ``old_name`` to
# ``new_name`` in exception messages — return type is untyped upstream; confirm.
def rewrite_exception(old_name: str, new_name: str): ...
2 |
--------------------------------------------------------------------------------
/pandas-stubs/util/_print_versions.pyi:
--------------------------------------------------------------------------------
# Print version information for pandas and its dependencies; as_json switches
# the output format (see pandas docs).
def show_versions(as_json: bool = ...) -> None: ...
2 |
--------------------------------------------------------------------------------
/pandas-stubs/util/_tester.pyi:
--------------------------------------------------------------------------------
# Run the pandas test suite; extra_args are presumably forwarded to pytest —
# confirm against pandas.util._tester.
def test(extra_args: list[str] | None = ..., run_doctests: bool = ...) -> None: ...
2 |
--------------------------------------------------------------------------------
/pandas-stubs/util/version/__init__.pyi:
--------------------------------------------------------------------------------
# Parse a version string into a Version (PEP 440) or LegacyVersion otherwise.
# This module mirrors a subset of packaging.version vendored by pandas.
def parse(version: str) -> LegacyVersion | Version: ...

# Base class providing the comparison operators shared by both version kinds.
class _BaseVersion:
    def __lt__(self, other: _BaseVersion) -> bool: ...
    def __le__(self, other: _BaseVersion) -> bool: ...
    def __eq__(self, other: object) -> bool: ...
    def __ge__(self, other: _BaseVersion) -> bool: ...
    def __gt__(self, other: _BaseVersion) -> bool: ...
    def __ne__(self, other: object) -> bool: ...

# A version string that does not follow PEP 440; the structured components
# (release, pre, post, dev, local) are always None for such versions.
class LegacyVersion(_BaseVersion):
    def __init__(self, version: str) -> None: ...
    @property
    def public(self) -> str: ...
    @property
    def base_version(self) -> str: ...
    @property
    def epoch(self) -> int: ...
    @property
    def release(self) -> None: ...
    @property
    def pre(self) -> None: ...
    @property
    def post(self) -> None: ...
    @property
    def dev(self) -> None: ...
    @property
    def local(self) -> None: ...
    @property
    def is_prerelease(self) -> bool: ...
    @property
    def is_postrelease(self) -> bool: ...
    @property
    def is_devrelease(self) -> bool: ...

# A PEP 440-compliant version with structured accessors.
class Version(_BaseVersion):
    def __init__(self, version: str) -> None: ...
    @property
    def epoch(self) -> int: ...
    @property
    def release(self) -> tuple[int, ...]: ...
    @property
    def pre(self) -> tuple[str, int] | None: ...
    @property
    def post(self) -> int | None: ...
    @property
    def dev(self) -> int | None: ...
    @property
    def local(self) -> str | None: ...
    @property
    def public(self) -> str: ...
    @property
    def base_version(self) -> str: ...
    @property
    def is_prerelease(self) -> bool: ...
    @property
    def is_postrelease(self) -> bool: ...
    @property
    def is_devrelease(self) -> bool: ...
    @property
    def major(self) -> int: ...
    @property
    def minor(self) -> int: ...
    @property
    def micro(self) -> int: ...
66 |
--------------------------------------------------------------------------------
/pyrightconfig-strict.json:
--------------------------------------------------------------------------------
1 | {
2 | "typeCheckingMode": "strict",
3 | "stubPath": ".",
4 | "include": ["tests", "pandas-stubs"],
5 | "enableTypeIgnoreComments": false,
6 | "reportUnnecessaryTypeIgnoreComment": true,
7 | "reportMissingModuleSource": true,
8 | "useLibraryCodeForTypes": false
9 | }
10 |
--------------------------------------------------------------------------------
/scripts/__init__.py:
--------------------------------------------------------------------------------
import sys
from typing import Any

from loguru import logger

# Configure loguru so each message is framed by separator lines, making step
# boundaries easy to spot in CI output.
config: dict[str, Any] = {
    "handlers": [
        {
            "sink": sys.stderr,
            "format": (
                "\n"
                "===========================================\n"
                "{message}\n"
                "===========================================\n"
                ""
            ),
        }
    ]
}

logger.configure(**config)
23 |
--------------------------------------------------------------------------------
/scripts/_job.py:
--------------------------------------------------------------------------------
1 | from collections import deque
2 | from dataclasses import dataclass
3 | import sys
4 | import time
5 | from typing import (
6 | Callable,
7 | Optional,
8 | )
9 |
10 | from loguru import logger
11 |
12 |
@dataclass
class Step:
    # Human-readable label used in log messages.
    name: str
    # Action executed by run_job.
    run: Callable[[], None]
    # Optional undo action, executed in reverse order by __rollback_job.
    rollback: Optional[Callable[[], None]] = None
18 |
19 |
def __rollback_job(steps: deque[Step]):
    """Undo already-executed steps in reverse (LIFO) order.

    Consumes ``steps`` as it goes. Steps without a rollback handler are
    skipped; a handler that raises is logged but does not stop the
    remaining rollbacks.
    """
    while True:
        try:
            step = steps.pop()
        except IndexError:
            return
        if step.rollback is None:
            continue
        logger.warning(f"Undoing: {step.name}")
        try:
            step.rollback()
        except Exception:
            logger.error(
                f"Rollback of Step: '{step.name}' failed! The project could be in a unstable mode."
            )
35 |
36 |
def run_job(steps: list[Step]) -> None:
    """Execute ``steps`` in order with timing logs.

    On the first failure, the rollback handlers of every step started so far
    are run and the process exits with status 1. When all steps succeed, the
    rollbacks still run at the end (undoing temporary setup steps).
    """
    executed = deque[Step]()
    for step in steps:
        start = time.perf_counter()
        logger.info(f"Beginning: '{step.name}'")
        # Record the step before running it so its own rollback fires on failure.
        executed.append(step)
        try:
            step.run()
        except Exception:
            logger.error(f"Step: '{step.name}' failed!")
            __rollback_job(executed)
            sys.exit(1)
        end = time.perf_counter()
        logger.success(f"End: '{step.name}', runtime: {end - start:.3f} seconds.")
    __rollback_job(executed)
68 |
--------------------------------------------------------------------------------
/scripts/test/__init__.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | from functools import partial
3 | from typing import Literal
4 |
5 | from scripts._job import run_job
6 | from scripts.test import _step
7 |
# Steps that validate the checked-out sources ("src" mode).
_SRC_STEPS = [
    _step.mypy_src,
    _step.ty_src,
    _step.pyright_src,
    _step.pytest,
    _step.style,
]
# Steps that build, install, and validate the distributable package ("dist" mode).
_DIST_STEPS = [
    _step.build_dist,
    _step.install_dist,
    _step.rename_src,
    _step.mypy_dist,
    _step.pyright_dist,
]
22 |
23 |
def test(
    src: bool = False,
    dist: bool = False,
    type_checker: Literal["", "mypy", "pyright"] = "",
):
    """Run the src and/or dist step suites, optionally keeping only one type checker.

    When ``type_checker`` is given, steps belonging to the other checker are
    filtered out by their names.
    """
    selected: list = []
    if src:
        selected += _SRC_STEPS
    if dist:
        selected += _DIST_STEPS

    if type_checker:
        # Drop the steps of whichever checker was NOT requested.
        excluded = "mypy" if type_checker == "pyright" else "pyright"
        selected = [s for s in selected if excluded not in s.name]

    run_job(selected)
42 |
43 |
def stubtest(allowlist: str, check_missing: bool, nightly: bool) -> None:
    """Build and install the stubs, then run stubtest against installed pandas.

    ``allowlist`` and ``check_missing`` are bound onto the stubtest step's run
    callable; ``nightly`` inserts a pandas-nightly install beforehand.
    """
    configured = dataclasses.replace(
        _step.stubtest,
        run=partial(
            _step.stubtest.run, allowlist=allowlist, check_missing=check_missing
        ),
    )
    job = list(_DIST_STEPS[:2])
    if nightly:
        job.append(_step.nightly)
    job.append(configured)
    run_job(job)
55 |
56 |
def pytest(nightly: bool) -> None:
    """Run the pytest step, preceded by a pandas-nightly install when requested."""
    prelude: list = [_step.nightly] if nightly else []
    run_job(prelude + [_step.pytest])
63 |
64 |
def mypy_src(mypy_nightly: bool) -> None:
    """Run mypy on the sources, optionally installing the nightly mypy first."""
    prelude: list = [_step.mypy_nightly] if mypy_nightly else []
    run_job(prelude + [_step.mypy_src])
68 |
--------------------------------------------------------------------------------
/scripts/test/_step.py:
--------------------------------------------------------------------------------
1 | from scripts._job import Step
2 | from scripts.test import run
3 |
# Catalogue of reusable job steps. scripts.test composes these into jobs that
# scripts._job.run_job executes; rollback handlers undo setup steps.
mypy_src = Step(
    name="Run mypy on 'tests' (using the local stubs) and on the local stubs",
    run=run.mypy_src,
)
ty_src = Step(
    # NOTE(review): sibling steps say "on 'tests'" — confirm "'pandas-stubs'"
    # is the intended wording here.
    name="Run ty on 'pandas-stubs' (using the local stubs) and on the local stubs",
    run=run.ty,
)
pyright_src = Step(
    name="Run pyright on 'tests' (using the local stubs) and on the local stubs",
    run=run.pyright_src,
)
pyright_src_strict = Step(
    name="Run pyright on 'tests' (using the local stubs) and on the local stubs in full strict mode",
    run=run.pyright_src_strict,
)
pytest = Step(name="Run pytest", run=run.pytest)
style = Step(name="Run pre-commit", run=run.style)
build_dist = Step(name="Build pandas-stubs", run=run.build_dist)
install_dist = Step(
    name="Install pandas-stubs", run=run.install_dist, rollback=run.uninstall_dist
)
rename_src = Step(
    name="Rename local stubs",
    run=run.rename_src,
    rollback=run.restore_src,
)
mypy_dist = Step(
    name="Run mypy on 'tests' using the installed stubs", run=run.mypy_dist
)
pyright_dist = Step(
    name="Run pyright on 'tests' using the installed stubs", run=run.pyright_dist
)
stubtest = Step(
    name="Run stubtest to compare the installed stubs against pandas", run=run.stubtest
)
nightly = Step(
    name="Install pandas nightly", run=run.nightly_pandas, rollback=run.released_pandas
)
mypy_nightly = Step(
    name="Install mypy nightly", run=run.nightly_mypy, rollback=run.released_mypy
)
46 |
--------------------------------------------------------------------------------
/tests/data/SSHSV1_A.xpt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/tests/data/SSHSV1_A.xpt
--------------------------------------------------------------------------------
/tests/data/airline.sas7bdat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/tests/data/airline.sas7bdat
--------------------------------------------------------------------------------
/tests/data/labelled-num.sav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/tests/data/labelled-num.sav
--------------------------------------------------------------------------------
/tests/data/myhtml_table.tpl:
--------------------------------------------------------------------------------
1 | {% extends "html_style.tpl" %}
2 | {% block style %}
3 |
4 | {{ super() }}
5 | {% endblock style %}
--------------------------------------------------------------------------------
/tests/extension/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/tests/extension/__init__.py
--------------------------------------------------------------------------------
/tests/extension/decimal/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pandas-dev/pandas-stubs/30bbb3da979a3164474bfe9ef8c018cfc66bf793/tests/extension/decimal/__init__.py
--------------------------------------------------------------------------------
/tests/test_extension.py:
--------------------------------------------------------------------------------
1 | import decimal
2 |
3 | import numpy as np
4 | import pandas as pd
5 | from pandas.arrays import IntegerArray
6 | from typing_extensions import assert_type
7 |
8 | from tests import check
9 | from tests.extension.decimal.array import (
10 | DecimalArray,
11 | DecimalDtype,
12 | )
13 |
14 |
def test_constructor() -> None:
    """DecimalArray construction preserves the element type and dtype."""
    arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")])

    check(assert_type(arr, DecimalArray), DecimalArray, decimal.Decimal)
    check(assert_type(arr.dtype, DecimalDtype), DecimalDtype)


def test_tolist() -> None:
    """ExtensionArray.tolist() is typed to return a plain ``list``."""
    data = {"State": "Texas", "Population": 2000000, "GDP": "2T"}
    s = pd.Series(data)
    data1 = [1, 2, 3]
    s1 = pd.Series(data1)
    check(assert_type(s.array.tolist(), list), list)
    check(assert_type(s1.array.tolist(), list), list)
    check(assert_type(pd.array([1, 2, 3]).tolist(), list), list)


def test_ExtensionArray_reduce_accumulate() -> None:
    """_reduce is typed as object; _accumulate keeps the concrete array type."""
    _data = IntegerArray(
        values=np.array([1, 2, 3], dtype=int),
        mask=np.array([True, False, False], dtype=bool),
    )
    check(assert_type(_data._reduce("max"), object), np.integer)
    check(assert_type(_data._accumulate("cumsum"), IntegerArray), IntegerArray)
39 |
--------------------------------------------------------------------------------
/tests/test_holidays.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
3 | import pandas as pd
4 | from typing_extensions import assert_type
5 |
6 | from tests import check
7 |
8 | from pandas.tseries.holiday import (
9 | AbstractHolidayCalendar,
10 | Holiday,
11 | USMemorialDay,
12 | nearest_workday,
13 | )
14 |
15 |
def test_custom_calendar() -> None:
    """A user-defined AbstractHolidayCalendar subclass type-checks and
    ``holidays`` returns a DatetimeIndex by default."""

    class ExampleCalendar(AbstractHolidayCalendar):
        rules = [
            USMemorialDay,
            Holiday("July 4th", month=7, day=4, observance=nearest_workday),
            Holiday(
                "Columbus Day",
                month=10,
                day=1,
                offset=pd.DateOffset(weekday=1),
            ),
        ]

    cal = ExampleCalendar()

    result = cal.holidays(datetime(2012, 1, 1), datetime(2012, 12, 31))
    check(assert_type(result, pd.DatetimeIndex), pd.DatetimeIndex)
33 |
--------------------------------------------------------------------------------
/tests/test_interval_index.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import pandas as pd
4 | from typing_extensions import assert_type
5 |
6 | from pandas._typing import IntervalClosedType
7 |
8 | from tests import check
9 |
10 |
def test_from_breaks() -> None:
    """IntervalIndex.from_breaks infers Interval[int] from int breaks."""
    check(
        assert_type(
            pd.IntervalIndex.from_breaks([0, 1, 2, 3], name="test"),
            "pd.IntervalIndex[pd.Interval[int]]",
        ),
        pd.IntervalIndex,
        pd.Interval,
    )
    check(
        assert_type(
            pd.IntervalIndex.from_breaks([0, 1, 2, 3], closed="right", name=123),
            "pd.IntervalIndex[pd.Interval[int]]",
        ),
        pd.IntervalIndex,
        pd.Interval,
    )


def test_from_arrays() -> None:
    """IntervalIndex.from_arrays infers Interval[int] from int edge arrays."""
    check(
        assert_type(
            pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3], name="test"),
            "pd.IntervalIndex[pd.Interval[int]]",
        ),
        pd.IntervalIndex,
        pd.Interval,
    )
    check(
        assert_type(
            pd.IntervalIndex.from_arrays(
                [0, 1, 2], [1, 2, 3], closed="right", name=123
            ),
            "pd.IntervalIndex[pd.Interval[int]]",
        ),
        pd.IntervalIndex,
        pd.Interval,
    )


def test_from_tuples() -> None:
    """IntervalIndex.from_tuples infers Interval[int] from int tuples."""
    check(
        assert_type(
            pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)], name="test"),
            "pd.IntervalIndex[pd.Interval[int]]",
        ),
        pd.IntervalIndex,
        pd.Interval,
    )
    check(
        assert_type(
            pd.IntervalIndex.from_tuples(
                [(0, 1), (1, 2), (2, 3)], closed="right", name=123
            ),
            "pd.IntervalIndex[pd.Interval[int]]",
        ),
        pd.IntervalIndex,
        pd.Interval,
    )


def test_to_tuples() -> None:
    """to_tuples is typed as a plain Index (of tuples at runtime)."""
    ind = pd.IntervalIndex.from_tuples([(0, 1), (1, 2)]).to_tuples()
    check(assert_type(ind, pd.Index), pd.Index, tuple)


def test_subclass() -> None:
    """IntervalIndex is accepted wherever a pd.Index is expected."""
    assert issubclass(pd.IntervalIndex, pd.Index)

    def index(test: pd.Index) -> None: ...

    interval_index = pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
    index(interval_index)
    pd.DataFrame({"a": [1, 2]}, index=interval_index)


def test_is_overlapping() -> None:
    """is_overlapping is bool; closed is typed as IntervalClosedType."""
    ind = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
    check(assert_type(ind.is_overlapping, bool), bool)

    check(assert_type(ind.closed, IntervalClosedType), str)
92 |
--------------------------------------------------------------------------------
/tests/test_merge.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import numpy as np
4 | import pandas as pd
5 | from typing_extensions import assert_type
6 |
7 | from tests import check
8 |
9 |
def test_types_merge() -> None:
    """DataFrame.merge and pd.merge accept Series, Index and ndarray for
    ``on``/``left_on``/``right_on`` and are typed to return a DataFrame."""
    df = pd.DataFrame(data={"col1": [1, 1, 2], "col2": [3, 4, 5]})
    df2 = pd.DataFrame(data={"col1": [1, 1, 2], "col2": [0, 1, 0]})
    columns = ["col1", "col2"]
    df.merge(df2, on=columns)

    # DataFrame.merge with ``on`` as Series / Index / ndarray.
    check(
        assert_type(df.merge(df2, on=pd.Series([1, 2, 3])), pd.DataFrame), pd.DataFrame
    )
    check(
        assert_type(df.merge(df2, on=pd.Index([1, 2, 3])), pd.DataFrame), pd.DataFrame
    )
    check(
        assert_type(df.merge(df2, on=np.array([1, 2, 3])), pd.DataFrame), pd.DataFrame
    )

    # DataFrame.merge with mixed left_on/right_on array-like combinations.
    check(
        assert_type(
            df.merge(df2, left_on=pd.Series([1, 2, 3]), right_on=pd.Series([1, 2, 3])),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
    check(
        assert_type(
            df.merge(df2, left_on=pd.Index([1, 2, 3]), right_on=pd.Series([1, 2, 3])),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
    check(
        assert_type(
            df.merge(df2, left_on=pd.Index([1, 2, 3]), right_on=pd.Index([1, 2, 3])),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )

    check(
        assert_type(
            df.merge(df2, left_on=np.array([1, 2, 3]), right_on=pd.Series([1, 2, 3])),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
    check(
        assert_type(
            df.merge(df2, left_on=np.array([1, 2, 3]), right_on=pd.Index([1, 2, 3])),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
    check(
        assert_type(
            df.merge(df2, left_on=np.array([1, 2, 3]), right_on=np.array([1, 2, 3])),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )

    # Same combinations through the top-level pd.merge function.
    check(
        assert_type(pd.merge(df, df2, on=pd.Series([1, 2, 3])), pd.DataFrame),
        pd.DataFrame,
    )
    check(
        assert_type(pd.merge(df, df2, on=pd.Index([1, 2, 3])), pd.DataFrame),
        pd.DataFrame,
    )
    check(
        assert_type(pd.merge(df, df2, on=np.array([1, 2, 3])), pd.DataFrame),
        pd.DataFrame,
    )

    check(
        assert_type(
            pd.merge(
                df, df2, left_on=pd.Series([1, 2, 3]), right_on=pd.Series([1, 2, 3])
            ),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
    check(
        assert_type(
            pd.merge(
                df, df2, left_on=pd.Index([1, 2, 3]), right_on=pd.Series([1, 2, 3])
            ),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
    check(
        assert_type(
            pd.merge(
                df, df2, left_on=pd.Index([1, 2, 3]), right_on=pd.Index([1, 2, 3])
            ),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )

    check(
        assert_type(
            pd.merge(
                df, df2, left_on=np.array([1, 2, 3]), right_on=pd.Series([1, 2, 3])
            ),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
    check(
        assert_type(
            pd.merge(
                df, df2, left_on=np.array([1, 2, 3]), right_on=pd.Index([1, 2, 3])
            ),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
    check(
        assert_type(
            pd.merge(
                df, df2, left_on=np.array([1, 2, 3]), right_on=np.array([1, 2, 3])
            ),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
138 |
--------------------------------------------------------------------------------
/tests/test_testing.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os.path
4 |
5 | import pandas as pd
6 | from pandas.testing import (
7 | assert_frame_equal,
8 | assert_series_equal,
9 | )
10 | from typing_extensions import assert_type
11 |
12 | from tests import (
13 | TYPE_CHECKING_INVALID_USAGE,
14 | check,
15 | ensure_clean,
16 | )
17 |
18 |
def test_types_assert_series_equal() -> None:
    """Exercise assert_series_equal keywords, both valid and expected-invalid."""
    left = pd.Series([0, 1, 1, 0])
    right = pd.Series([0, 1, 1, 0])
    assert_series_equal(left=left, right=right)
    assert_series_equal(
        left,
        right,
        check_freq=False,
        check_categorical=True,
        check_flags=True,
        check_datetimelike_compat=True,
    )
    if TYPE_CHECKING_INVALID_USAGE:
        # check_less_precise is not an accepted keyword, so type checkers
        # must reject this call
        assert_series_equal(  # type: ignore[call-overload] # pyright: ignore[reportCallIssue]
            left,
            right,
            check_dtype=True,
            check_less_precise=True,
            check_names=True,
        )
    assert_series_equal(left, right, check_like=True)
    # GH 417
    assert_series_equal(left, right, check_index=False)
42 |
43 |
def test_assert_frame_equal():
    """assert_frame_equal should accept the check_index_type keyword."""
    frame_a = pd.DataFrame({"x": [1, 2, 3]})
    frame_b = pd.DataFrame({"x": [1, 2, 3]})
    # GH 56
    assert_frame_equal(frame_a, frame_b, check_index_type=False)
49 |
50 |
def test_ensure_clean():
    """ensure_clean should yield a str path that no longer exists on exit."""
    with ensure_clean() as tmp_path:
        check(assert_type(tmp_path, str), str)
        pd.DataFrame({"x": [1, 2, 3]}).to_csv(tmp_path)
    # the context manager is responsible for removing the file
    assert not os.path.exists(tmp_path)
56 |
--------------------------------------------------------------------------------
/tests/test_utility.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from typing_extensions import assert_type
3 |
4 | from tests import check
5 |
6 |
def test_show_version():
    """Test show_versions method types with split case for pandas and python versions."""
    check(assert_type(pd.show_versions(False), None), type(None))
    check(assert_type(pd.show_versions(True), None), type(None))
11 |
12 |
def test_dummies():
    """Round-trip categorical data through get_dummies / from_dummies."""
    cat_frame = pd.DataFrame(
        pd.Series(["a", "b", "a", "b", "c", "a", "a"], dtype="category"), columns=["A"]
    )
    encoded = pd.get_dummies(cat_frame)
    check(assert_type(encoded, pd.DataFrame), pd.DataFrame)
    check(assert_type(pd.from_dummies(encoded), pd.DataFrame), pd.DataFrame)

    # tuple column labels combined with a prefix mapping
    tuple_frame = pd.DataFrame(
        pd.Series(["a", "b", "a", "b", "c", "a", "a"], dtype="category"),
        columns=[("A",)],
    )
    check(
        assert_type(pd.get_dummies(tuple_frame, prefix={("A",): "bar"}), pd.DataFrame),
        pd.DataFrame,
    )
29 |
30 |
def test_get_dummies_args():
    """Exercise the keyword arguments accepted by pd.get_dummies."""
    frame = pd.DataFrame(
        {
            "A": pd.Series(["a", "b", "a", "b", "c", "a", "a"], dtype="category"),
            "B": pd.Series([1, 2, 1, 2, 3, 1, 1]),
        }
    )
    # scalar prefix, custom separator, sparse output
    check(
        assert_type(
            pd.get_dummies(frame, prefix="foo", prefix_sep="-", sparse=True),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
    # list prefix plus NaN column, dropped first level and explicit dtype
    check(
        assert_type(
            pd.get_dummies(
                frame, prefix=["foo"], dummy_na=True, drop_first=True, dtype="bool"
            ),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
    # per-column prefix mapping with an explicit column selection
    check(
        assert_type(
            pd.get_dummies(frame, prefix={"A": "foo", "B": "baz"}, columns=["A", "B"]),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
60 |
61 |
def test_from_dummies_args():
    """Exercise the sep and default_category arguments of pd.from_dummies."""
    source = pd.DataFrame(
        {
            "A": pd.Series(["a", "b", "a", "b", "c", "a", "a"], dtype="category"),
        }
    )
    # drop_first=True means the dropped level must be restored via default_category
    encoded = pd.get_dummies(source, drop_first=True)

    check(
        assert_type(
            pd.from_dummies(encoded, sep="_", default_category="a"),
            pd.DataFrame,
        ),
        pd.DataFrame,
    )
77 |
--------------------------------------------------------------------------------