├── MANIFEST.in
├── dfsql
│   ├── data_sources
│   │   ├── __init__.py
│   │   └── base_data_source.py
│   ├── engine.py
│   ├── exceptions.py
│   ├── __about__.py
│   ├── config.py
│   ├── cache.py
│   ├── __init__.py
│   ├── table.py
│   ├── extensions.py
│   ├── commands.py
│   ├── utils.py
│   └── functions.py
├── requirements.txt
├── .github
│   └── workflows
│       ├── test.yml
│       └── pypi.yml
├── .gitignore
├── setup.py
├── README.md
├── tests
│   ├── test_cache.py
│   ├── test_functions.py
│   ├── conftest.py
│   ├── test_extensions.py
│   ├── test_interface.py
│   └── test_data_sources
│       └── test_file_data_source.py
└── LICENSE

/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include requirements.txt
2 | include LICENSE
3 | 
4 | 
--------------------------------------------------------------------------------
/dfsql/data_sources/__init__.py:
--------------------------------------------------------------------------------
1 | from dfsql.data_sources.base_data_source import DataSource
2 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pandas
2 | numpy >= 1.18.5
3 | confi >= 0.0.4.1
4 | mindsdb_sql >= 0.0.17
5 | 
--------------------------------------------------------------------------------
/dfsql/engine.py:
--------------------------------------------------------------------------------
1 | from dfsql.config import Configuration
2 | 
3 | pd = None
4 | if Configuration.USE_MODIN:
5 |     import modin.pandas as pd
6 | else:
7 |     import pandas as pd
8 | 
--------------------------------------------------------------------------------
/dfsql/exceptions.py:
--------------------------------------------------------------------------------
1 | class DfsqlException(Exception):
2 |     pass
3 | 
4 | 
5 | class SQLParsingException(DfsqlException):
6 |     pass
7 | 
8 | 
9 | class CommandException(DfsqlException):
10 |     pass
11 | 
12 | 
13 | class QueryExecutionException(DfsqlException):
14 |     pass
15 | 
--------------------------------------------------------------------------------
/dfsql/__about__.py:
--------------------------------------------------------------------------------
1 | __title__ = 'dfsql'
2 | __package_name__ = 'dfsql'
3 | __version__ = '0.6.8'
4 | __description__ = "SQL interface to Pandas"
5 | __email__ = "jorge@mindsdb.com"
6 | __author__ = 'MindsDB Inc'
7 | __github__ = 'https://github.com/mindsdb/dfsql'
8 | __pypi__ = 'https://pypi.org/project/dfsql'
9 | __license__ = 'GPL-3.0'
10 | __copyright__ = 'Copyright 2020- mindsdb'
--------------------------------------------------------------------------------
/dfsql/config.py:
--------------------------------------------------------------------------------
1 | from confi import BaseEnvironConfig, ConfigField, ConfigError, BooleanConfig
2 | from distutils.util import strtobool
3 | import logging
4 | 
5 | 
6 | def true_if_modin_installed():
7 |     try:
8 |         import modin
9 |         logging.info(
10 |             "Detected Modin and an explicit USE_MODIN value was not provided. 
Modin will be used for dfsql operations.") 11 | return True 12 | except ImportError: 13 | return False 14 | 15 | 16 | class Configuration(BaseEnvironConfig): 17 | USE_MODIN = BooleanConfig(default=true_if_modin_installed) 18 | -------------------------------------------------------------------------------- /dfsql/cache.py: -------------------------------------------------------------------------------- 1 | from functools import lru_cache 2 | 3 | 4 | class BaseCache: 5 | def get(self, table): 6 | pass 7 | 8 | def clear(self): 9 | pass 10 | 11 | 12 | class DoNothingCache(BaseCache): 13 | pass 14 | 15 | 16 | class MemoryCache(BaseCache): 17 | def __init__(self, maxsize=None): 18 | decorated_get = lru_cache(maxsize=maxsize)(self.get) 19 | setattr(self, 'get', decorated_get) 20 | 21 | def clear(self): 22 | self.get.cache_clear() 23 | 24 | def get(self, table): 25 | df = table.fetch_and_preprocess() 26 | return df 27 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: run unit tests 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | test: 8 | runs-on: ${{ matrix.os }} 9 | strategy: 10 | matrix: 11 | os: [ubuntu-latest, windows-latest, macos-latest] 12 | python-version: [3.7.1,3.8] 13 | steps: 14 | - uses: actions/checkout@v2 15 | - name: Set up Python ${{ matrix.python-version }} 16 | uses: actions/setup-python@v2 17 | with: 18 | python-version: ${{ matrix.python-version }} 19 | - name: Install dependencies 20 | run: | 21 | python -m pip install --upgrade pip 22 | pip install --no-cache-dir -e .[test] 23 | - name: Run unit tests 24 | run: | 25 | pytest --capture=tee-sys --timeout=600 26 | shell: bash 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | *.ipy* 3 | *.test.* 4 | .cache* 5 | storage/* 6 | mindsdb_storage/* 7 | config/personal_config.py 8 | *.jar 9 | data/* 10 | mindsdb.egg-info 11 | clean_data 12 | .pypirc 13 | 14 | # Byte-compiled / optimized / DLL files 15 | __pycache__/ 16 | *.py[cod] 17 | *$py.class 18 | 19 | # Distribution / packaging 20 | .Python 21 | env/ 22 | build/ 23 | develop-eggs/ 24 | dist/ 25 | downloads/ 26 | eggs/ 27 | .eggs/ 28 | lib/ 29 | lib64/ 30 | parts/ 31 | sdist/ 32 | var/ 33 | wheels/ 34 | *.egg-info/ 35 | .installed.cfg 36 | *.egg 37 | 38 | # visual studio code 39 | .DStore 40 | .DS_Store 41 | .idea 42 | 43 | # virtualenv 44 | .venv 45 | venv/ 46 | ENV/ 47 | 48 | # pyenv 49 | .python-version 50 | 51 | # Installer logs 52 | pip-log.txt 53 | pip-delete-this-directory.txt 54 | 55 | dask-worker-space/ 56 | *.csv 57 | 58 | testdrive_* -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | about = {} 4 | with open("dfsql/__about__.py") as fp: 5 | exec(fp.read(), about) 6 | 7 | 8 | with open('requirements.txt') as req_file: 9 | requirements = req_file.read().splitlines() 10 | 11 | modin_requirement = 'modin[all]==0.11.2' 12 | setuptools.setup( 13 | name=about['__title__'], 14 | version=about['__version__'], 15 | url=about['__github__'], 16 | download_url=about['__pypi__'], 17 | license=about['__license__'], 18 | author=about['__author__'], 19 | author_email=about['__email__'], 20 | description=about['__description__'], 21 | 
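    # The extras below are installable as `pip install dfsql[test]` (test-only
    # dependencies) and `pip install dfsql[modin]` (pins the Modin build dfsql
    # is tested against).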
    packages=setuptools.find_packages(),
22 |     install_requires=requirements,
23 |     extras_require=dict(
24 |         test=['pytest>=5.4.3', 'requests >= 2.22.0', 'pytest-timeout>=1.4.2'],
25 |         modin=[modin_requirement]),
26 |     classifiers=[
27 |         "Programming Language :: Python :: 3.6",
28 |         "Programming Language :: Python :: 3.7",
29 |         "Programming Language :: Python :: 3.8",
30 |         "Programming Language :: Python :: 3.9",
31 |         "Operating System :: OS Independent",
32 |     ],
33 |     python_requires=">=3.6"
34 | )
35 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # dfsql - SQL interface to Pandas
2 | 
3 | # Installation
4 | ```pip install dfsql```
5 | 
6 | # Example
7 | ```
8 | >>> import pandas as pd
9 | >>> from dfsql import sql_query
10 | 
11 | >>> df = pd.DataFrame({
12 | ...     "animal": ["cat", "dog", "cat", "dog"],
13 | ...     "height": [23, 100, 25, 71]
14 | ... })
15 | >>> df.head()
16 |   animal  height
17 | 0    cat      23
18 | 1    dog     100
19 | 2    cat      25
20 | 3    dog      71
21 | >>> sql_query("SELECT animal, height FROM animals_df WHERE height > 50", animals_df=df)
22 |   animal  height
23 | 0    dog     100
24 | 1    dog      71
25 | ```
26 | 
27 | # Quickstart/Tutorial
28 | 
29 | Head over to the [testdrive notebook](https://github.com/mindsdb/dfsql/blob/stable/testdrive.ipynb) to see all available features.
30 | 
31 | # Configuring Modin usage
32 | 
33 | dfsql can execute queries with Modin for enhanced performance.
34 | 
35 | By default, Modin is used if it is installed.
36 | 
37 | To override this behavior and use Pandas, set the `USE_MODIN` environment variable to `False` or `0` before importing dfsql:
38 | ```
39 | (venv) user:~/mindsdb/dfsql$ export USE_MODIN=0
40 | (venv) user:~/mindsdb/dfsql$ python
41 | Python 3.8.5 (default, Jan 27 2021, 15:41:15) 
42 | [GCC 9.3.0] on linux
43 | Type "help", "copyright", "credits" or "license" for more information.
44 | >>> import dfsql
45 | >>> dfsql.config.Configuration.as_dict()
46 | {'USE_MODIN': 0}
47 | ```
48 | 
49 | 
50 | 
51 | 
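# Setting USE_MODIN from Python

The same override works from inside Python, provided the variable is set before the first `import dfsql` (a minimal sketch of the shell session above; the flag is read once, at import time):
```
>>> import os
>>> os.environ['USE_MODIN'] = '0'  # must run before importing dfsql
>>> import dfsql
>>> dfsql.config.Configuration.as_dict()
{'USE_MODIN': 0}
```

# Custom functions

`sql_query` also accepts a `custom_functions` dict that maps a name usable in SQL to a Python callable; for aggregations the callable receives the grouped column. A sketch mirroring the project's own test suite (`df` here is assumed to be any dataframe with `sex` and `survived` columns):
```
>>> mode = lambda x: x.value_counts(dropna=False).index[0]
>>> sql_query("SELECT sex, mode(survived) AS mode_survived FROM titanic GROUP BY sex",
...           titanic=df, custom_functions={"mode": mode})
```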
--------------------------------------------------------------------------------
/dfsql/__init__.py:
--------------------------------------------------------------------------------
1 | import tempfile
2 | import os
3 | import shutil
4 | import time
5 | from dfsql.__about__ import __version__
6 | from dfsql.config import Configuration
7 | from dfsql.exceptions import DfsqlException
8 | from dfsql.data_sources import DataSource
9 | from pandas import DataFrame as PandasDataFrame
10 | 
11 | 
12 | def sql_query(sql, *args, ds_kwargs=None, custom_functions=None, reduce_output=True, **kwargs):
13 |     ds_args = ds_kwargs or {}
14 |     custom_functions = custom_functions or {}
15 |     from_tables = kwargs
16 |     if not from_tables or not isinstance(from_tables, dict):
17 |         raise DfsqlException(f"Wrong from_tables value. Expected to be a dict of table names and dataframes, got: {str(from_tables)}")
18 |     ds = None
19 |     tmpdir = None
20 |     try:
21 |         tmpdir = os.path.join(tempfile.gettempdir(), 'dfsql_temp_' + str(round(time.time() * 1000000)))
22 |         ds = DataSource(*args, metadata_dir=tmpdir, custom_functions=custom_functions, **ds_args)
23 |         for table_name, dataframe in from_tables.items():
24 |             if table_name not in sql:
25 |                 raise DfsqlException(f"Table {table_name} found in from_tables, but not in the SQL query.")
26 |             tmp_fpath = os.path.join(tmpdir, f'{table_name}.csv')
27 |             PandasDataFrame(dataframe.values, columns=dataframe.columns, index=dataframe.index).to_csv(tmp_fpath, index=False)
28 |             ds.add_table_from_file(tmp_fpath)
29 | 
30 |         result = ds.query(sql, reduce_output=reduce_output)
31 |         return result
32 |     finally:
33 |         if ds:
34 |             ds.clear_metadata(ds.metadata_dir)
35 |         if tmpdir:
36 |             shutil.rmtree(tmpdir)
37 | 
--------------------------------------------------------------------------------
/dfsql/table.py:
--------------------------------------------------------------------------------
1 | import re
2 | from dfsql.engine import pd
3 | import numpy as np
4 | import os
5 | 
6 | 
7 | def preprocess_dataframe(df):
8 |     df.index = range(len(df))
9 |     df = df.convert_dtypes()
10 |     return df
11 | 
12 | 
13 | class Table:
14 |     def __init__(self, name, *args, cache=None, **kwargs):
15 |         self.name = name
16 |         self.cache = cache
17 | 
18 |     def __hash__(self):
19 |         return hash(self.name)
20 | 
21 |     def fetch_dataframe(self):
22 |         pass
23 | 
24 |     def fetch_and_preprocess(self):
25 |         df = self.fetch_dataframe()
26 |         df = preprocess_dataframe(df)
27 |         return df
28 | 
29 |     @property
30 |     def dataframe(self):
31 |         if self.cache:
32 |             return self.cache.get(self)
33 | 
34 |         return self.fetch_and_preprocess()
35 | 
36 |     def to_json(self):
37 |         return dict(
38 |             type=self.__class__.__name__,
39 |             name=self.name,
40 |         )
41 | 
42 |     @staticmethod
43 |     def from_json(json):
44 |         cls = {
45 |             'Table': Table,
46 |             'FileTable': FileTable
47 |         }[json['type']]
48 |         return cls(**json)
49 | 
50 | 
51 | class FileTable(Table):
52 |     def __init__(self, *args, fpath, **kwargs):
53 |         super().__init__(*args, **kwargs)
54 |         self.fpath = str(fpath)
55 | 
56 |     def fetch_dataframe(self):
57 |         return pd.read_csv(self.fpath)
58 | 
59 |     @classmethod
60 |     def from_file(cls, path):
61 |         fpath = os.path.join(path)
62 |         fname = '.'.join(os.path.basename(fpath).split('.')[:-1])
63 | 
64 |         table = cls(name=fname, fpath=fpath)
65 |         df = table.fetch_dataframe()  # read once so an unreadable file fails here, at registration time
66 | 
67 |         return table
68 | 
69 |     def to_json(self):
70 |         json = super().to_json()
71 |         json['fpath'] = self.fpath
72 |         return json
73 | 
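`Table.dataframe` resolves through the cache when one is attached: `MemoryCache.get` is wrapped in `functools.lru_cache`, and `Table.__hash__` keys entries by table name. A minimal sketch of the two classes working together (the `people.csv` path is hypothetical):
```
from dfsql.table import FileTable
from dfsql.cache import MemoryCache

cache = MemoryCache(maxsize=8)             # LRU over preprocessed dataframes
table = FileTable.from_file('people.csv')  # hypothetical file; the table is named 'people'
table.cache = cache

df1 = table.dataframe   # miss: read the CSV, preprocess, store in the LRU
df2 = table.dataframe   # hit: the same object comes back from the cache
assert df1 is df2
assert cache.get.cache_info().hits == 1
```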
--------------------------------------------------------------------------------
/tests/test_cache.py:
--------------------------------------------------------------------------------
1 | from dfsql.cache import MemoryCache
2 | 
3 | 
4 | class TestCache:
5 |     def test_cache(self, data_source):
6 |         cache = MemoryCache()
7 |         data_source.set_cache(cache)
8 |         assert data_source.cache is cache
9 | 
10 |         cache_info = cache.get.cache_info()
11 |         assert cache_info.hits == 0
12 |         assert cache_info.misses == 0
13 |         assert cache_info.currsize == 0
14 | 
15 |         sql = "SELECT * FROM titanic"
16 |         data_source.query(sql)
17 | 
18 |         cache_info = cache.get.cache_info()
19 |         assert cache_info.currsize > 0
20 |         assert cache_info.hits == 0
21 |         assert cache_info.misses == 1
22 | 
23 |         data_source.query(sql)
24 |         cache_info = cache.get.cache_info()
25 |         assert cache_info.currsize > 0
26 |         assert cache_info.hits == 1
27 |         assert cache_info.misses == 1
28 | 
29 |         data_source.query(sql)
30 |         cache_info = cache.get.cache_info()
31 |         assert cache_info.currsize > 0
32 |         assert cache_info.hits == 2
33 |         assert cache_info.misses == 1
34 | 
35 |     def test_maxsize(self, data_source):
36 |         cache = MemoryCache(maxsize=1)
37 |         data_source.set_cache(cache)
38 |         assert data_source.cache is cache
39 | 
40 |         cache_info = cache.get.cache_info()
41 |         assert cache_info.hits == 0
42 |         assert cache_info.misses == 0
43 |         assert cache_info.currsize == 0
44 | 
45 |         sql = "SELECT * FROM titanic"
46 |         data_source.query(sql)
47 | 
48 |         cache_info = cache.get.cache_info()  # cache_info is an immutable snapshot; refresh it after the query
49 |         assert cache_info.misses == 1
50 |         assert cache_info.currsize == 1  # bounded by maxsize=1
51 | 
52 |         cache = MemoryCache(maxsize=None)
53 |         data_source.set_cache(cache)
54 |         assert data_source.cache is cache
55 | 
56 |         sql = "SELECT * FROM titanic"
57 |         data_source.query(sql)
58 |         cache_info = cache.get.cache_info()
59 |         assert cache_info.hits == 0
60 |         assert cache_info.misses == 1
61 |         assert cache_info.currsize > 0
62 | 
--------------------------------------------------------------------------------
/tests/test_functions.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from dfsql.engine import pd
3 | 
4 | from dfsql.exceptions import QueryExecutionException
5 | from dfsql.functions import And
6 | 
7 | 
8 | class TestFunctionBase:
9 | 
10 |     def test_and_modin_series(self):
11 |         args = [
12 |             pd.Series([True, False]),
13 |             pd.Series([True, False]),
14 |         ]
15 | 
16 |         assert (And()(*args) == pd.Series([True, False])).all()
17 | 
18 |         args = [
19 |             pd.Series([False, False]),
20 |             pd.Series([True, False]),
21 |         ]
22 | 
23 |         assert (And()(*args) == pd.Series([False, False])).all()
24 | 
25 |     def test_and_modin_dataframe(self):
26 |         args = [
27 |             pd.DataFrame([[True, False]]),
28 |             pd.DataFrame([[True, False]]),
29 |         ]
30 | 
31 |         assert (And()(*args) == pd.DataFrame([[True, False]])).all().all()
32 | 
33 |         args = [
34 |             pd.DataFrame([[False, False]]),
35 |             pd.DataFrame([[True, False]]),
36 |         ]
37 | 
38 |         assert (And()(*args) == pd.DataFrame([[False, False]])).all().all()
39 | 
40 |     def test_and_modin_bools(self):
41 |         args = [
42 |             True,
43 |             False
44 |         ]
45 | 
46 |         assert And()(*args) == False
47 | 
48 |         args = [
49 |             True,
50 |             True
51 |         ]
52 | 
53 |         assert And()(*args) == True
54 | 
55 |     def test_and_modin_ints(self):
56 |         args = [
57 |             0,
58 |             1
59 |         ]
60 | 
61 |         assert And()(*args) == False
62 | 
63 |         args = [
64 |             1,
65 |             1
66 |         ]
67 | 
68 |         assert And()(*args) == True
69 | 
70 |     def test_and_three_args(self):
71 |         with pytest.raises(QueryExecutionException):
72 |             And()([1, 1, 1])
73 | 
74 |     def test_and_invalid_ints(self):
75 |         with pytest.raises(QueryExecutionException):
76 |             And()([1, 2])
77 | 
78 |     def test_and_mixed_args(self):
79 |         with pytest.raises(QueryExecutionException):
80 |             And()([False, 1])
81 | 
82 |         with pytest.raises(QueryExecutionException):
83 |             And()([pd.Series([False]), 1])
84 | 
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import pandas as pd
3 | import requests
4 | import os
5 | 
6 | 
7 | @pytest.fixture()
8 | def csv_file(tmpdir):
9 |     # Titanic dataset first 10 lines of train
10 |     p = tmpdir.join('titanic.csv')
11 |     content = """passenger_id,survived,p_class,name,sex,age,sib_sp,parch,ticket,fare,cabin,embarked
12 | 1,0,3,"Braund, Mr. Owen Harris",male,22,1,0,A/5 21171,7.25,,S
13 | 2,1,1,"Cumings, Mrs. 
John Bradley (Florence Briggs Thayer)",female,38,1,0,PC 17599,71.2833,C85,C 14 | 3,1,3,"Heikkinen, Miss. Laina",female,26,0,0,STON/O2. 3101282,7.925,,S 15 | 4,1,1,"Futrelle, Mrs. Jacques Heath (Lily May Peel)",female,35,1,0,113803,53.1,C123,S 16 | 5,0,3,"Allen, Mr. William Henry",male,35,0,0,373450,8.05,,S 17 | 6,0,3,"Moran, Mr. James",male,,0,0,330877,8.4583,,Q 18 | 7,0,1,"McCarthy, Mr. Timothy J",male,54,0,0,17463,51.8625,E46,S 19 | 8,0,3,"Palsson, Master. Gosta Leonard",male,2,3,1,349909,21.075,,S 20 | 9,1,3,"Johnson, Mrs. Oscar W (Elisabeth Vilhelmina Berg)",female,27,0,2,347742,11.1333,,S 21 | """ 22 | p.write_text(content, encoding='utf-8') 23 | return p 24 | 25 | 26 | @pytest.fixture() 27 | def config(monkeypatch): 28 | from dfsql.config import Configuration 29 | 30 | class TestConfig(Configuration): 31 | pass 32 | 33 | TestConfig.USE_MODIN = True 34 | 35 | monkeypatch.setattr('dfsql.config.Configuration', TestConfig) 36 | return TestConfig 37 | 38 | 39 | @pytest.fixture() 40 | def data_source(config, csv_file, tmpdir): 41 | from dfsql import DataSource 42 | dir_path = str(csv_file.dirpath()) 43 | ds = DataSource.from_dir(metadata_dir=str(tmpdir), files_dir_path=dir_path) 44 | return ds 45 | 46 | 47 | @pytest.fixture(scope='session', autouse=True) 48 | def root_directory(request): 49 | """ 50 | :return: 51 | """ 52 | return str(request.config.rootdir) 53 | 54 | 55 | @pytest.fixture(scope='module') 56 | def googleplay_csv(root_directory): 57 | path = os.path.join(root_directory, 'tests', 'googleplaystore.csv') 58 | url = 'https://raw.githubusercontent.com/jasonchang0/kaggle-google-apps/master/google-play-store-apps/googleplaystore.csv' 59 | 60 | if not os.path.exists(path): 61 | req = requests.get(url) 62 | url_content = req.content 63 | csv_file = open(path, 'wb') 64 | 65 | csv_file.write(url_content) 66 | csv_file.close() 67 | 68 | return path 69 | -------------------------------------------------------------------------------- /.github/workflows/pypi.yml: -------------------------------------------------------------------------------- 1 | name: Build and publish to pypi 2 | 3 | on: 4 | push: 5 | branches: 6 | - stable 7 | 8 | jobs: 9 | test: 10 | runs-on: ${{ matrix.os }} 11 | strategy: 12 | matrix: 13 | os: [ubuntu-latest, windows-latest, macos-latest] 14 | python-version: [3.7.1, 3.8] 15 | steps: 16 | - uses: actions/checkout@v2 17 | - name: Set up Python ${{ matrix.python-version }} 18 | uses: actions/setup-python@v2 19 | with: 20 | python-version: ${{ matrix.python-version }} 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install --no-cache-dir -e .[test] 25 | - name: Run unit tests 26 | run: | 27 | pytest 28 | shell: bash 29 | 30 | deploy_pypi: 31 | runs-on: ubuntu-latest 32 | needs: test 33 | steps: 34 | - uses: actions/checkout@v2 35 | - name: Set up Python 36 | uses: actions/setup-python@v2 37 | with: 38 | python-version: 3.6 39 | - name: Install dependencies 40 | run: | 41 | python -m pip install --upgrade pip 42 | pip install setuptools wheel twine 43 | - name: Build manylinux wheels 44 | run: | 45 | SETUP_PLAT_NAME=linux python3 setup.py sdist bdist_wheel --plat-name manylinux1_x86_64 46 | SETUP_PLAT_NAME=linux python3 setup.py sdist bdist_wheel --plat-name manylinux1_i686 47 | - name: Publish manylinux to PyPI 48 | env: 49 | TWINE_USERNAME: __token__ 50 | TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} 51 | run: | 52 | twine upload dist/*manylinux*.whl 53 | - name: Build and upload regular wheels 54 | env: 55 | TWINE_USERNAME: 
__token__ 56 | TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} 57 | run: | 58 | python setup.py sdist 59 | twine upload dist/* 60 | test_installation: 61 | needs: deploy_pypi 62 | runs-on: ${{ matrix.os }} 63 | strategy: 64 | matrix: 65 | os: [ ubuntu-latest, windows-latest, macos-latest ] 66 | python-version: [ 3.6, 3.7.1, 3.8 ] 67 | steps: 68 | - name: Set up Python ${{ matrix.python-version }} 69 | uses: actions/setup-python@v2 70 | with: 71 | python-version: ${{ matrix.python-version }} 72 | - name: Install dfsql 73 | run: pip install dfsql 74 | - name: Import dfsql 75 | run: python -c "import dfsql;print(dfsql.__version__)" 76 | -------------------------------------------------------------------------------- /dfsql/extensions.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | from dfsql import sql_query 4 | from dfsql.engine import pd as pd_engine 5 | import warnings 6 | import pandas 7 | from pandas.core.accessor import CachedAccessor 8 | 9 | 10 | @pandas.api.extensions.register_dataframe_accessor("sql") 11 | class SQLAccessor: 12 | def __init__(self, pandas_obj): 13 | self._obj = pd_engine.DataFrame(pandas_obj) 14 | 15 | def maybe_add_from_to_query(self, sql_query, table_name): 16 | """Inserts "FROM temp" into every SELECT clause in query that does not have a FROM clause.""" 17 | sql_query = sql_query.replace("(", " ( ").replace(")", " ) ").replace('\n', ' ').replace(',', ' , ') 18 | 19 | _RE_COMBINE_WHITESPACE = re.compile(r"\s+") 20 | sql_query = _RE_COMBINE_WHITESPACE.sub(" ", sql_query).strip() 21 | 22 | insert_positions = [] 23 | for m in re.finditer('select', sql_query.lower()): 24 | select_pos = m.start() 25 | 26 | str_after_select = sql_query[select_pos:].lower() 27 | words_after_select = str_after_select.split(' ') 28 | 29 | keywords = ['where', 'group', 'having', 'order', 'limit', 'offset'] 30 | need_to_insert_from = True 31 | insert_pos = len(str_after_select) 32 | 33 | parentheses_count = 0 34 | for word in words_after_select: 35 | if word == 'from': 36 | need_to_insert_from = False 37 | break 38 | 39 | if word == '(': 40 | parentheses_count += 1 41 | elif word == ')': 42 | if parentheses_count == 0: 43 | insert_pos = str_after_select.find(word) 44 | break 45 | else: 46 | parentheses_count -= 1 47 | if word in keywords: 48 | insert_pos = str_after_select.find(word) 49 | break 50 | if not need_to_insert_from: 51 | continue 52 | insert_pos = select_pos + insert_pos 53 | 54 | insert_positions.append(insert_pos) 55 | insert_text = f' from {table_name} ' 56 | new_query = '' 57 | last_pos = None 58 | for pos in insert_positions: 59 | new_query += sql_query[last_pos:pos] + insert_text 60 | last_pos = pos 61 | new_query += sql_query[last_pos:] 62 | return new_query 63 | 64 | def __call__(self, sql, *args, **kwargs): 65 | table_name = 'temp' 66 | sql = self.maybe_add_from_to_query(sql, table_name=table_name) 67 | kwargs.update({table_name: self._obj}) 68 | return sql_query(sql, *args, **kwargs) 69 | 70 | try: 71 | import modin.pandas as mpd 72 | 73 | def register_modin_accessor(name, cls): 74 | def decorator(accessor): 75 | if hasattr(cls, name): 76 | warnings.warn( 77 | f"registration of accessor {repr(accessor)} under name " 78 | f"{repr(name)} for type {repr(cls)} is overriding a preexisting " 79 | f"attribute with the same name.", 80 | UserWarning, 81 | stacklevel=2, 82 | ) 83 | 84 | setattr(cls, name, CachedAccessor(name, accessor)) 85 | return accessor 86 | 87 | return decorator 88 | 89 | 90 | def 
register_modin_dataframe_accessor(name): 91 | from modin.pandas import DataFrame 92 | return register_modin_accessor(name, DataFrame) 93 | 94 | register_modin_dataframe_accessor("sql")(SQLAccessor) 95 | except ImportError: 96 | warnings.warn('Modin not found, dfsql Modin dataframe extensions not loaded', UserWarning) 97 | -------------------------------------------------------------------------------- /tests/test_extensions.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import pytest 3 | import numpy as np 4 | from dfsql.exceptions import QueryExecutionException, DfsqlException 5 | 6 | engines = [ 7 | pytest.param(pd, id="pandas"), 8 | ] 9 | try: 10 | import modin.pandas as mpd 11 | engines.append(pytest.param(mpd, id="modin")) 12 | except ImportError: 13 | pass 14 | 15 | @pytest.mark.parametrize( 16 | "engine", 17 | engines 18 | ) 19 | class TestExtensions: 20 | def test_df_sql_simple_select(self, config, engine, csv_file): 21 | import dfsql.extensions 22 | 23 | df = engine.read_csv(csv_file) 24 | 25 | 26 | sql_queries = [ 27 | "SELECT passenger_id FROM temp", 28 | "SELECT passenger_id", 29 | ] 30 | for sql in sql_queries: 31 | query_result = df.sql(sql) 32 | assert query_result.name == 'passenger_id' 33 | 34 | values_left = df['passenger_id'].values 35 | values_right = query_result.values 36 | assert (values_left == values_right).all() 37 | 38 | def test_df_sql_reduce_output(self, config, engine, csv_file): 39 | import dfsql.extensions 40 | df = engine.read_csv(csv_file) 41 | sql = 'SELECT passenger_id LIMIT 1' 42 | 43 | query_result = df.sql(sql) 44 | assert isinstance(query_result, np.int64) 45 | 46 | query_result = df.sql(sql, reduce_output=False) 47 | assert isinstance(query_result, pd.DataFrame) 48 | 49 | def test_df_sql_nested_select_in(self, config, engine, csv_file): 50 | import dfsql.extensions 51 | 52 | df = pd.read_csv(csv_file) 53 | 54 | sql_queries = [ 55 | "SELECT survived, p_class, passenger_id WHERE passenger_id IN (SELECT passenger_id WHERE survived = 1)", 56 | "SELECT survived, p_class, passenger_id FROM temp WHERE passenger_id IN (SELECT passenger_id WHERE survived = 1)", 57 | "SELECT survived, p_class, passenger_id WHERE passenger_id IN (SELECT passenger_id FROM temp WHERE survived = 1)", 58 | "SELECT survived, p_class, passenger_id FROM temp WHERE passenger_id IN (SELECT passenger_id FROM temp WHERE survived = 1)" 59 | ] 60 | 61 | for sql in sql_queries: 62 | query_result = df.sql(sql) 63 | 64 | expected_df = df[df.survived == 1][['survived', 'p_class', 'passenger_id']] 65 | 66 | assert query_result.shape == expected_df.shape 67 | values_left = expected_df.dropna().values 68 | values_right = query_result.dropna().values 69 | assert (values_left == values_right).all() 70 | 71 | def test_df_sql_nested_select_from(self, config, engine, csv_file): 72 | import dfsql.extensions 73 | 74 | df = pd.read_csv(csv_file)[['passenger_id', 'fare']] 75 | sql_queries = [ 76 | "SELECT * FROM (SELECT passenger_id, fare FROM temp) as t1", 77 | "SELECT * FROM (SELECT passenger_id, fare) as t1", 78 | ] 79 | 80 | for sql in sql_queries: 81 | query_result = df.sql(sql) 82 | 83 | assert query_result.shape == df.shape 84 | values_left = df.dropna().values 85 | values_right = query_result.dropna().values 86 | 87 | assert (values_left == values_right).all() 88 | 89 | def test_df_sql_groupby(self, config, engine, csv_file): 90 | import dfsql.extensions 91 | 92 | df = pd.read_csv(csv_file) 93 | expected_out = 
df['survived'].nunique() 94 | sql = "SELECT COUNT(DISTINCT survived) as uniq_survived" 95 | 96 | query_result = df.sql(sql) 97 | 98 | assert query_result == expected_out 99 | 100 | 101 | -------------------------------------------------------------------------------- /dfsql/commands.py: -------------------------------------------------------------------------------- 1 | from dfsql.engine import pd 2 | import re 3 | from dfsql.exceptions import CommandException 4 | 5 | 6 | class Command: 7 | name = None 8 | default_args = {} 9 | 10 | def __init__(self, args): 11 | self.args = self.substitute_defaults(args) 12 | self.validate_args(self.args) 13 | 14 | def validate_args(self, args): 15 | pass 16 | 17 | def substitute_defaults(self, args): 18 | if args: 19 | for i, arg in enumerate(args): 20 | if arg is None and self.default_args.get(i): 21 | args[i] = self.default_args[i] 22 | return args 23 | 24 | @classmethod 25 | def from_string(cls, text): 26 | return None 27 | 28 | def execute(self, data_source): 29 | pass 30 | 31 | 32 | class CreateTableCommand(Command): 33 | name = 'CREATE TABLE' 34 | default_args = {} 35 | 36 | def validate_args(self, args): 37 | if len(args) > 2: 38 | raise CommandException(f"Too many arguments for command {self.name}") 39 | 40 | if not isinstance(args[0], str): 41 | raise CommandException(f"First argument must be a file path, got instead: {args[0]}.") 42 | 43 | if len(args) > 1 and not isinstance(args[1], bool): 44 | raise CommandException(f"Second argument (clean_data) must be a boolean, got instead: {args[1]}") 45 | 46 | @classmethod 47 | def from_string(cls, text): 48 | if not text.startswith(cls.name): 49 | return None 50 | 51 | pattern = r'^CREATE TABLE \((\S+)?\);?$' 52 | 53 | matches = re.match(pattern, text) 54 | if not matches: 55 | return None 56 | args = [(arg.strip(' \'\"') if arg is not None else None) for arg in matches.groups()] 57 | args[0] = str(args[0]) 58 | return cls(args) 59 | 60 | def execute(self, data_source): 61 | fpath = self.args[0] 62 | data_source.add_table_from_file(fpath) 63 | return 'OK' 64 | 65 | 66 | class DropTableCommand(Command): 67 | name = 'DROP TABLE' 68 | 69 | def validate_args(self, args): 70 | if not isinstance(args[0], str): 71 | raise CommandException(f"Expected only argument for {self.name} to be a string table name, got instead: {args[0]}.") 72 | 73 | @classmethod 74 | def from_string(cls, text): 75 | if not text.startswith(cls.name): 76 | return None 77 | 78 | pattern = r'^DROP TABLE (\S+);?$' 79 | 80 | matches = re.match(pattern, text) 81 | if not matches: 82 | return None 83 | args = [(arg.strip(' \'\"') if arg is not None else None) for arg in matches.groups()] 84 | args[0] = str(args[0]) 85 | return cls(args) 86 | 87 | def execute(self, data_source): 88 | name = self.args[0] 89 | data_source.drop_table(name) 90 | return 'OK' 91 | 92 | 93 | class ShowTablesCommand(Command): 94 | name = 'SHOW TABLES' 95 | 96 | def validate_args(self, args): 97 | if args: 98 | raise CommandException(f"No arguments expected for command {self.name}") 99 | 100 | @classmethod 101 | def from_string(cls, text): 102 | if not text.startswith(cls.name): 103 | return None 104 | 105 | pattern = r'^SHOW TABLES\s*;?$' 106 | 107 | matches = re.match(pattern, text) 108 | if not matches: 109 | return None 110 | args = None 111 | return cls(args) 112 | 113 | def execute(self, data_source): 114 | rows = [] 115 | for tname, table in data_source.tables.items(): 116 | rows.append((table.name, table.fpath)) 117 | return pd.DataFrame(rows, columns=['name', 
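                                           # one row per registered table: (name, backing file path)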
'fpath']) 118 | 119 | 120 | command_types = [CreateTableCommand, DropTableCommand, ShowTablesCommand] 121 | 122 | 123 | def try_parse_command(sql_query): 124 | for command_type in command_types: 125 | command = command_type.from_string(sql_query) 126 | 127 | if command: 128 | return command 129 | -------------------------------------------------------------------------------- /dfsql/utils.py: -------------------------------------------------------------------------------- 1 | from dfsql.engine import pd 2 | from dfsql.exceptions import QueryExecutionException 3 | 4 | 5 | def raise_bad_inputs(func): 6 | raise QueryExecutionException(f'Invalid inputs for function {func.name}') 7 | 8 | 9 | def raise_bad_outputs(func): 10 | raise QueryExecutionException(f'Invalid outputs produced by function {func.name}') 11 | 12 | 13 | def is_modin(thing): 14 | return (isinstance(thing, pd.Series) or isinstance(thing, pd.DataFrame)) 15 | 16 | 17 | def is_booly(thing): 18 | if ((is_modin(thing)) 19 | or isinstance(thing, bool) 20 | or (int(thing) in (0, 1))): 21 | return True 22 | return False 23 | 24 | 25 | def is_numeric(thing): 26 | if ((is_modin(thing) and thing.dtype.name != 'object') 27 | or isinstance(thing, int) or isinstance(thing, float)): 28 | return True 29 | return False 30 | 31 | 32 | def is_stringy(thing): 33 | if ((is_modin(thing) and thing.dtype.name in ('string', 'object')) 34 | or isinstance(thing, str)): 35 | return True 36 | return False 37 | 38 | 39 | class TwoArgsMixin: 40 | def assert_args(self, args): 41 | if len(args) != 2: 42 | raise_bad_inputs(self) 43 | 44 | 45 | class OneArgMixin: 46 | def assert_args(self, args): 47 | if len(args) != 1: 48 | raise_bad_inputs(self) 49 | 50 | 51 | class BoolInputMixin: 52 | def assert_args(self, args): 53 | if not all([is_booly(arg) for arg in args]): 54 | raise_bad_inputs(self) 55 | 56 | 57 | class BoolOutputMixin: 58 | def assert_output(self, output): 59 | if not is_booly(output): 60 | raise_bad_outputs(self) 61 | 62 | 63 | class NumericInputMixin: 64 | def assert_args(self, args): 65 | if not all([is_numeric(arg) for arg in args]): 66 | raise_bad_inputs(self) 67 | 68 | 69 | class NumericOutputMixin: 70 | def assert_output(self, output): 71 | if not is_numeric(output): 72 | raise_bad_outputs(self) 73 | 74 | 75 | class StringInputMixin: 76 | def assert_args(self, args): 77 | if not all([is_stringy(arg) for arg in args]): 78 | raise_bad_inputs(self) 79 | 80 | 81 | class StringOutputMixin: 82 | def assert_output(self, output): 83 | if not is_stringy(output): 84 | raise_bad_outputs(self) 85 | 86 | 87 | class CaseInsensitiveKey(str): 88 | def __init__(self, key): 89 | self.key = key 90 | 91 | def __hash__(self): 92 | return hash(self.key.lower()) 93 | 94 | def __eq__(self, other): 95 | if isinstance(other, CaseInsensitiveKey): 96 | return self.key.lower() == other.key.lower() 97 | elif isinstance(other, str): 98 | return self.key.lower() == other.lower() 99 | 100 | def __str__(self): 101 | return self.key 102 | 103 | def __repr__(self): 104 | return self.key.__repr__() 105 | 106 | 107 | """https://stackoverflow.com/questions/2082152/case-insensitive-dictionary""" 108 | 109 | 110 | class CaseInsensitiveDict(dict): 111 | @classmethod 112 | def _k(cls, key): 113 | return key.lower() if isinstance(key, str) else key 114 | 115 | def __init__(self, *args, **kwargs): 116 | super(CaseInsensitiveDict, self).__init__(*args, **kwargs) 117 | self._convert_keys() 118 | 119 | def __getitem__(self, key): 120 | return super(CaseInsensitiveDict, 
self).__getitem__(self.__class__._k(key)) 121 | 122 | def __setitem__(self, key, value): 123 | super(CaseInsensitiveDict, self).__setitem__(self.__class__._k(key), value) 124 | 125 | def __delitem__(self, key): 126 | return super(CaseInsensitiveDict, self).__delitem__(self.__class__._k(key)) 127 | 128 | def __contains__(self, key): 129 | return super(CaseInsensitiveDict, self).__contains__(self.__class__._k(key)) 130 | 131 | def has_key(self, key): 132 | return super(CaseInsensitiveDict, self).has_key(self.__class__._k(key)) 133 | 134 | def pop(self, key, *args, **kwargs): 135 | return super(CaseInsensitiveDict, self).pop(self.__class__._k(key), *args, **kwargs) 136 | 137 | def get(self, key, *args, **kwargs): 138 | return super(CaseInsensitiveDict, self).get(self.__class__._k(key), *args, **kwargs) 139 | 140 | def setdefault(self, key, *args, **kwargs): 141 | return super(CaseInsensitiveDict, self).setdefault(self.__class__._k(key), *args, **kwargs) 142 | 143 | def update(self, E={}, **F): 144 | super(CaseInsensitiveDict, self).update(self.__class__(E)) 145 | super(CaseInsensitiveDict, self).update(self.__class__(**F)) 146 | 147 | def _convert_keys(self): 148 | for k in list(self.keys()): 149 | v = super(CaseInsensitiveDict, self).pop(k) 150 | self.__setitem__(k, v) 151 | 152 | 153 | def pd_get_column_case_insensitive(df, column): 154 | column_names = df.columns 155 | series = [df[c] for c in column_names] 156 | cols_dict = CaseInsensitiveDict(dict(zip(column_names, series))) 157 | return cols_dict.get(column) 158 | 159 | 160 | def get_df_column(df, column, case_sensitive): 161 | if case_sensitive: 162 | if column in df.columns: 163 | return df[column] 164 | else: 165 | column = pd_get_column_case_insensitive(df, column) 166 | if column is not None: 167 | return column 168 | -------------------------------------------------------------------------------- /tests/test_interface.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import pytest 3 | 4 | from dfsql.exceptions import QueryExecutionException, DfsqlException 5 | 6 | 7 | class TestQuickInterface: 8 | def test_simple_select(self, csv_file): 9 | from dfsql.extensions import sql_query 10 | 11 | df = pd.read_csv(csv_file) 12 | 13 | sql = "SELECT passenger_id FROM whatever_table AS new_table" 14 | 15 | query_result = sql_query(sql, whatever_table=df) 16 | assert query_result.name == 'passenger_id' 17 | values_left = df['passenger_id'].values 18 | values_right = query_result.values 19 | assert (values_left == values_right).all() 20 | 21 | # Run query again to ensure that everything was cleaned up properly 22 | query_result = sql_query(sql, whatever_table=df) 23 | assert query_result.name == 'passenger_id' 24 | values_left = df['passenger_id'].values 25 | values_right = query_result.values 26 | assert (values_left == values_right).all() 27 | 28 | def test_select_join(self, csv_file): 29 | from dfsql.extensions import sql_query 30 | df = pd.read_csv(csv_file) 31 | merge_df = pd.merge(df, df, how='inner', left_on=['passenger_id'], right_on=['p_class'])[ 32 | ['passenger_id_x', 'p_class_y']] 33 | merge_df.columns = ['passenger_id', 'p_class'] 34 | 35 | # Use one table for self join 36 | sql = "SELECT passenger_id, p_class FROM titanic AS t1 INNER JOIN titanic AS t2 ON t1.passenger_id = t2.p_class" 37 | query_result = sql_query(sql, titanic=df) 38 | 39 | assert list(query_result.columns) == ['passenger_id', 'p_class'] 40 | values_left = merge_df[['passenger_id', 'p_class']].values 
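        # t1/t2 are aliases of the single registered 'titanic' frame, so this is a self-join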
41 | values_right = query_result.values 42 | assert (values_left == values_right).all().all() 43 | 44 | # Use two separate tables 45 | sql = "SELECT passenger_id, p_class FROM t1 INNER JOIN t2 ON t1.passenger_id = t2.p_class" 46 | query_result = sql_query(sql, t1=df, t2=df) 47 | 48 | assert list(query_result.columns) == ['passenger_id', 'p_class'] 49 | values_left = merge_df[['passenger_id', 'p_class']].values 50 | values_right = query_result.values 51 | assert (values_left == values_right).all().all() 52 | 53 | def test_error_table_not_found(self, csv_file): 54 | from dfsql.extensions import sql_query 55 | df = pd.read_csv(csv_file) 56 | 57 | sql = "SELECT passenger_id FROM whatever_table INNER JOIN missing_table ON id" 58 | with pytest.raises(QueryExecutionException): 59 | sql_query(sql, whatever_table=df) 60 | 61 | def test_error_wrong_table_name(self, csv_file): 62 | from dfsql.extensions import sql_query 63 | 64 | df = pd.read_csv(csv_file) 65 | 66 | sql = "SELECT passenger_id FROM whatever_table" 67 | 68 | with pytest.raises(DfsqlException): 69 | sql_query(sql, wrong_table=df) 70 | 71 | # Run again to make sure it works after a failure 72 | query_result = sql_query(sql, whatever_table=df) 73 | assert query_result.name == 'passenger_id' 74 | values_left = df['passenger_id'].values 75 | values_right = query_result.values 76 | assert (values_left == values_right).all() 77 | 78 | def test_error_no_tables(self): 79 | from dfsql.extensions import sql_query 80 | sql = "SELECT passenger_id FROM whatever_table" 81 | 82 | with pytest.raises(DfsqlException): 83 | sql_query(sql, None) 84 | 85 | with pytest.raises(DfsqlException): 86 | sql_query(sql, something={}) 87 | 88 | with pytest.raises(DfsqlException): 89 | sql_query(sql, something=[]) 90 | 91 | def test_error_extra_tables(self, csv_file): 92 | from dfsql.extensions import sql_query 93 | df = pd.read_csv(csv_file) 94 | sql = "SELECT passenger_id FROM whatever_table" 95 | 96 | with pytest.raises(DfsqlException): 97 | sql_query(sql, whatever_table=df, extra_table=df) 98 | 99 | def test_custom_functions(self, csv_file): 100 | from dfsql.extensions import sql_query 101 | df = pd.read_csv(csv_file) 102 | sql = "SELECT sex, mode(survived) AS mode_survived FROM titanic GROUP BY sex" 103 | 104 | func = lambda x: x.value_counts(dropna=False).index[0] 105 | 106 | query_result = sql_query(sql, titanic=df, custom_functions={'mode': func}) 107 | 108 | df = df.groupby(['sex']).agg({'survived': func}).reset_index() 109 | df.columns = ['sex', 'mode_survived'] 110 | 111 | assert (query_result.columns == df.columns).all() 112 | assert query_result.shape == df.shape 113 | 114 | values_left = df.values 115 | values_right = query_result.values 116 | assert (values_left == values_right).all().all() 117 | 118 | def test_caps_column_names_dataframe(self, tmpdir): 119 | from dfsql.extensions import sql_query 120 | 121 | csv = """ 122 | ROUTE,DATE,RIDES 123 | 2,2021-02-27,3626 124 | 2,2021-02-28,5012 125 | """ 126 | 127 | p = tmpdir.join('caps_df.csv') 128 | p.write_text(csv, encoding='utf-8') 129 | 130 | df = pd.read_csv(p) 131 | sql = """ 132 | SELECT `DATE` AS __timestamp, 133 | AVG(`RIDES`) AS `AVG(RIDES)` 134 | FROM tab 135 | GROUP BY `DATE` 136 | ORDER BY `AVG(RIDES)` DESC 137 | """ 138 | 139 | expected_output = df.groupby(['DATE']).agg({'RIDES': 'mean'}).reset_index() 140 | expected_output = expected_output.sort_values(by='RIDES', ascending=False) 141 | expected_output.columns = ['__timestamp', '`AVG(RIDES)`'] 142 | 143 | query_result = sql_query(sql, 
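                                   # exercises backtick-quoted, upper-case identifiers end to end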
            tab=df)
144 |         assert query_result.shape == expected_output.shape
145 |         values_left = expected_output.dropna().values
146 |         values_right = query_result.dropna().values
147 | 
148 |         assert (values_left == values_right).all()
149 | 
--------------------------------------------------------------------------------
/dfsql/functions.py:
--------------------------------------------------------------------------------
1 | import re
2 | from dfsql.engine import pd
3 | from collections.abc import Iterable  # 'from collections import Iterable' was removed in Python 3.10
4 | 
5 | from dfsql.utils import (is_modin, is_numeric, is_booly, is_stringy, raise_bad_inputs, raise_bad_outputs,
6 |                          TwoArgsMixin, OneArgMixin, StringInputMixin, NumericInputMixin, BoolInputMixin,
7 |                          BoolOutputMixin, StringOutputMixin, NumericOutputMixin)
8 | 
9 | 
10 | class BaseFunction:
11 |     name = None
12 | 
13 |     # Fixes an issue with Modin internals trying to get the __name__ of aggregation functions
14 |     @property
15 |     def __name__(self):
16 |         return self.name
17 | 
18 |     def assert_args(self, args):
19 |         super().assert_args(args)
20 | 
21 |     def assert_output(self, out):
22 |         super().assert_output(out)
23 | 
24 |     def get_output(self, args):
25 |         return None
26 | 
27 |     def __call__(self, *args):
28 |         self.assert_args(args)
29 |         output = self.get_output(args)
30 |         self.assert_output(output)
31 |         return output
32 | 
33 | # Explanation on how this function definition works:
34 | # https://stackoverflow.com/a/40187463/1571481
35 | 
36 | # Boolean functions
37 | 
38 | 
39 | class And(BaseFunction, TwoArgsMixin, BoolInputMixin, BoolOutputMixin):
40 |     name = 'and'
41 | 
42 |     def get_output(self, args):
43 |         if is_modin(args[0]) and is_modin(args[1]):
44 |             return (args[0] * args[1]).astype(bool)
45 |         return args[0] and args[1]
46 | 
47 | 
48 | class Or(BaseFunction, TwoArgsMixin, BoolInputMixin, BoolOutputMixin):
49 |     name = 'or'
50 | 
51 |     def get_output(self, args):
52 |         if is_modin(args[0]) and is_modin(args[1]):
53 |             return (args[0] + args[1]).astype(bool)
54 |         return args[0] or args[1]
55 | 
56 | 
57 | class Not(BaseFunction, BoolOutputMixin):
58 |     name = 'not'
59 | 
60 |     def assert_args(self, args):
61 |         if len(args) != 1:
62 |             raise_bad_inputs(self)
63 | 
64 |         if not (is_modin(args[0])
65 |                 or isinstance(args[0], bool)
66 |                 or (args[0] in (0, 1))):
67 |             raise_bad_inputs(self)
68 | 
69 |     def get_output(self, args):
70 |         if is_modin(args[0]):
71 |             return ~args[0]
72 |         return not args[0]
73 | 
74 | 
75 | class Is(BaseFunction, TwoArgsMixin, BoolOutputMixin):
76 |     name = 'is'
77 | 
78 |     def get_output(self, args):
79 |         if is_modin(args[0]) or is_modin(args[1]):
80 |             if args[0] is None or args[1] is None:
81 |                 # IS NULL
82 |                 target = args[0]
83 |                 if args[0] is None:
84 |                     target = args[1]
85 | 
86 |                 return pd.isnull(target)
87 |             elif args[0] is True or args[0] is False or args[1] is True or args[1] is False and is_modin(args[0]):
88 |                 # IS [TRUE|FALSE]
89 |                 return args[0] == args[1]
90 | 
91 |         return args[0] is args[1]
92 | 
93 | 
94 | class IsNot(Is):
95 |     name = 'is not'
96 | 
97 |     def get_output(self, args):
98 |         out = super().get_output(args)
99 |         if is_modin(out):
100 |             return ~out
101 |         else:
102 |             return not out
103 | 
104 | 
105 | class Equals(BaseFunction, TwoArgsMixin, BoolOutputMixin):
106 |     name = '='
107 | 
108 |     def get_output(self, args):
109 |         return args[0] == args[1]
110 | 
111 | 
112 | class NotEquals(BaseFunction, TwoArgsMixin, BoolOutputMixin):
113 |     name = '!='
114 | 
115 |     def get_output(self, args):
116 |         return args[0] != args[1]
117 | 
118 | 
119 | class Greater(BaseFunction, TwoArgsMixin, BoolOutputMixin):
120 |     
name = '>' 121 | 122 | def get_output(self, args): 123 | return args[0] > args[1] 124 | 125 | 126 | class GreaterEqual(BaseFunction, TwoArgsMixin, BoolOutputMixin): 127 | name = '>=' 128 | 129 | def get_output(self, args): 130 | return args[0] >= args[1] 131 | 132 | 133 | class Less(BaseFunction, TwoArgsMixin, BoolOutputMixin): 134 | name = '<' 135 | 136 | def get_output(self, args): 137 | return args[0] < args[1] 138 | 139 | 140 | class LessEqual(BaseFunction, TwoArgsMixin, BoolOutputMixin): 141 | name = '<=' 142 | 143 | def get_output(self, args): 144 | return args[0] <= args[1] 145 | 146 | 147 | class In(BaseFunction, BoolOutputMixin): 148 | name = 'in' 149 | 150 | def assert_args(self, args): 151 | if not isinstance(args[1], Iterable): 152 | raise_bad_inputs(self) 153 | 154 | def get_output(self, args): 155 | if is_modin(args[0]): 156 | return args[0].isin(args[1].values) 157 | return args[0] in args[1] 158 | 159 | 160 | # class IsNull(BaseFunction, OneArgMixin, BoolOutputMixin): 161 | # name = 'is null' 162 | # 163 | # def get_output(self, args): 164 | # return pd.isnull(args[0]) 165 | # 166 | # 167 | # class IsNotNull(BaseFunction, OneArgMixin, BoolOutputMixin): 168 | # name = 'is not null' 169 | # 170 | # def get_output(self, args): 171 | # return ~pd.isnull(args[0]) 172 | # 173 | # 174 | # class IsTrue(BaseFunction, OneArgMixin, BoolOutputMixin): 175 | # name = 'is true' 176 | # 177 | # def get_output(self, args): 178 | # if is_modin(args[0]): 179 | # return args[0] == True 180 | # return args[0] is True 181 | # 182 | # 183 | # class IsFalse(BaseFunction, OneArgMixin, BoolOutputMixin): 184 | # name = 'is false' 185 | # 186 | # def get_output(self, args): 187 | # if is_modin(args[0]): 188 | # return args[0] == False 189 | # return args[0] is False 190 | 191 | # Arithmetic functions 192 | 193 | 194 | class Plus(BaseFunction, TwoArgsMixin, NumericInputMixin, NumericOutputMixin): 195 | name = '+' 196 | 197 | def get_output(self, args): 198 | return pd.to_numeric(args[0] + args[1]) 199 | 200 | 201 | class Minus(BaseFunction, NumericOutputMixin): 202 | name = '-' 203 | 204 | def assert_args(self, args): 205 | if not (len(args) == 1 or len(args) == 2): 206 | raise_bad_inputs(self) 207 | 208 | if len(args) == 2: 209 | if not ((is_modin(args[0]) and is_modin(args[1])) 210 | or (is_numeric(args[0]) and is_numeric(args[1]))): 211 | raise_bad_inputs(self) 212 | 213 | if len(args) == 1: 214 | if not (is_modin(args[0]) or (is_numeric(args[0]))): 215 | raise_bad_inputs(self) 216 | 217 | def get_output(self, args): 218 | if len(args) == 1: 219 | return pd.to_numeric(-args[0]) 220 | return pd.to_numeric(args[0] - args[1]) 221 | 222 | 223 | class Multiply(BaseFunction, TwoArgsMixin, NumericInputMixin, NumericOutputMixin): 224 | name = '*' 225 | 226 | def get_output(self, args): 227 | return pd.to_numeric(args[0] * args[1]) 228 | 229 | 230 | class Divide(BaseFunction, TwoArgsMixin, NumericInputMixin, NumericOutputMixin): 231 | name = '/' 232 | 233 | def get_output(self, args): 234 | return pd.to_numeric(args[0] / args[1]) 235 | 236 | 237 | class Modulo(BaseFunction, TwoArgsMixin, NumericInputMixin, NumericOutputMixin): 238 | name = '%' 239 | 240 | def get_output(self, args): 241 | return pd.to_numeric(args[0] % args[1]) 242 | 243 | 244 | class Power(BaseFunction, TwoArgsMixin, NumericInputMixin, NumericOutputMixin): 245 | name = '^' 246 | 247 | def get_output(self, args): 248 | return pd.to_numeric(args[0] ** args[1]) 249 | 250 | # String functions 251 | 252 | 253 | class StringConcat(BaseFunction, 
TwoArgsMixin, StringInputMixin, StringOutputMixin): 254 | name = "||" 255 | 256 | def get_output(self, args): 257 | return args[0] + args[1] 258 | 259 | 260 | class StringLower(BaseFunction, OneArgMixin, StringInputMixin, StringOutputMixin): 261 | name = "lower" 262 | 263 | def get_output(self, args): 264 | if isinstance(args[0], str): 265 | return args[0].lower() 266 | return args[0].apply(lambda x: x.lower()) 267 | 268 | 269 | class StringUpper(BaseFunction, OneArgMixin, StringInputMixin, StringOutputMixin): 270 | name = "upper" 271 | 272 | def get_output(self, args): 273 | if isinstance(args[0], str): 274 | return args[0].upper() 275 | return args[0].apply(lambda x: x.upper()) 276 | 277 | 278 | class Like(BaseFunction, TwoArgsMixin, StringInputMixin, BoolOutputMixin): 279 | name = "like" 280 | 281 | def get_output(self, args): 282 | def matcher(inp, pattern): 283 | match = re.match(pattern, inp) 284 | return True if match else False 285 | 286 | if is_modin(args[0]): 287 | return args[0].apply(matcher, args=(args[1],)) 288 | return matcher(args[0], args[1]) 289 | 290 | # Aggregate functions 291 | 292 | 293 | class AggregateFunction(BaseFunction, OneArgMixin): 294 | string_repr = None # for pandas group by 295 | 296 | def assert_output(self, output): 297 | pass 298 | 299 | @classmethod 300 | def string_or_callable(cls): 301 | if cls.string_repr: 302 | return cls.string_repr 303 | return cls() 304 | 305 | 306 | class Mean(AggregateFunction): 307 | name = 'avg' 308 | string_repr = 'mean' 309 | 310 | 311 | class Sum(AggregateFunction): 312 | name = 'sum' 313 | string_repr = 'sum' 314 | 315 | 316 | class Count(AggregateFunction): 317 | name = 'count' 318 | string_repr = 'count' 319 | 320 | 321 | class CountDistinct(AggregateFunction): 322 | name = 'count_distinct' 323 | string_repr = 'nunique' 324 | 325 | 326 | class Max(AggregateFunction): 327 | name = 'max' 328 | string_repr = 'max' 329 | 330 | 331 | class Min(AggregateFunction): 332 | name = 'min' 333 | string_repr = 'min' 334 | 335 | OPERATIONS = ( 336 | And, Or, Not, 337 | 338 | Equals, NotEquals, Greater, GreaterEqual, Less, LessEqual, Is, IsNot, 339 | 340 | Plus, Minus, Multiply, Divide, Modulo, Power, 341 | 342 | StringConcat, StringLower, StringUpper, Like, 343 | 344 | In, 345 | 346 | # IsNull, IsNotNull, IsTrue, IsFalse 347 | ) 348 | 349 | OPERATION_MAPPING = { 350 | op.name: op for op in OPERATIONS 351 | } 352 | OPERATION_MAPPING['<>'] = NotEquals 353 | 354 | AGGREGATE_FUNCTIONS = ( 355 | Sum, Mean, Count, CountDistinct, Max, Min, 356 | ) 357 | 358 | AGGREGATE_MAPPING = { 359 | op.name: op for op in AGGREGATE_FUNCTIONS 360 | } 361 | 362 | 363 | def is_supported(op_name): 364 | return op_name.lower() in OPERATION_MAPPING or op_name.lower() in AGGREGATE_MAPPING 365 | -------------------------------------------------------------------------------- /dfsql/data_sources/base_data_source.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dfsql.engine import pd 3 | import json 4 | 5 | from dfsql.cache import MemoryCache 6 | from dfsql.exceptions import QueryExecutionException 7 | from dfsql.functions import OPERATION_MAPPING, AGGREGATE_MAPPING 8 | from dfsql.commands import try_parse_command 9 | from mindsdb_sql import parse_sql 10 | from mindsdb_sql.parser.ast import (Select, Identifier, Constant, Operation, Function, Join, BinaryOperation, TypeCast, 11 | Tuple, NullConstant, Star) 12 | from dfsql.table import Table, FileTable 13 | from dfsql.utils import CaseInsensitiveDict, 
pd_get_column_case_insensitive, get_df_column, CaseInsensitiveKey 14 | 15 | 16 | def get_modin_operation(sql_op): 17 | op = OPERATION_MAPPING.get(sql_op.lower()) 18 | if not op: 19 | raise(QueryExecutionException(f'Unsupported operation: {sql_op}')) 20 | return op() 21 | 22 | 23 | def get_aggregation_operation(sql_op): 24 | op = AGGREGATE_MAPPING.get(sql_op.lower()) 25 | if not op: 26 | raise(QueryExecutionException(f'Unsupported operation: {sql_op}')) 27 | return op.string_or_callable() 28 | 29 | 30 | def cast_type(obj, type_name): 31 | if not hasattr(obj, 'astype'): 32 | obj = pd.Series(obj) 33 | return obj.astype(type_name) 34 | 35 | 36 | class DataSource: 37 | def __init__(self, 38 | metadata_dir, 39 | tables=None, 40 | cache=None, 41 | custom_functions=None, 42 | case_sensitive=True): 43 | self.metadata_dir = metadata_dir 44 | 45 | if not os.path.exists(self.metadata_dir): 46 | os.makedirs(self.metadata_dir, exist_ok=True) 47 | 48 | self.case_sensitive = case_sensitive 49 | 50 | 51 | tables = {t.name: t for t in tables} if tables else {} 52 | 53 | if not self.case_sensitive: 54 | tables = CaseInsensitiveDict(tables) 55 | 56 | self.tables = None 57 | self.load_metadata() 58 | if self.tables and not self.case_sensitive: 59 | self.tables = CaseInsensitiveDict(self.tables) 60 | 61 | if self.tables and tables: 62 | raise QueryExecutionException(f'Table metadata already exists in directory {metadata_dir}, but tables also passed to DataSource constructor. ' 63 | f'\nEither load the previous metadata by omitting the tables argument, or explicitly overwrite old metadata by using DataSource.create_new(metadata_dir, tables).') 64 | elif not self.tables: 65 | self.tables = tables 66 | 67 | self.save_metadata() 68 | 69 | self.set_cache(cache or MemoryCache()) 70 | 71 | self._query_scope = set() 72 | 73 | self.custom_functions = custom_functions or {} 74 | 75 | 76 | def set_cache(self, cache): 77 | self.cache = cache 78 | for tname, table in self.tables.items(): 79 | table.cache = self.cache 80 | 81 | @property 82 | def query_scope(self): 83 | """Stores aliases and tables available to a select during execution""" 84 | return self._query_scope 85 | 86 | def clear_query_scope(self): 87 | self._query_scope = set() 88 | 89 | @classmethod 90 | def create_new(cls, metadata_dir, tables=None): 91 | cls.clear_metadata(metadata_dir) 92 | return cls(metadata_dir, tables=tables) 93 | 94 | @classmethod 95 | def clear_metadata(cls, metadata_dir): 96 | if os.path.exists(os.path.join(metadata_dir, 'datasource_tables.json')): 97 | os.remove(os.path.join(metadata_dir, 'datasource_tables.json')) 98 | 99 | def add_table_from_file(self, path): 100 | table = FileTable.from_file(path) 101 | self.add_table(table) 102 | 103 | @staticmethod 104 | def from_dir(metadata_dir, files_dir_path, *args, **kwargs): 105 | metadata_dir = str(metadata_dir) 106 | files_dir_path = str(files_dir_path) 107 | files = os.listdir(files_dir_path) 108 | ds = DataSource(*args, metadata_dir=metadata_dir, **kwargs) 109 | for f in files: 110 | if f.endswith('.csv'): 111 | fpath = os.path.join(files_dir_path, f) 112 | ds.add_table_from_file(fpath) 113 | 114 | if not ds.tables: 115 | raise(QueryExecutionException(f'Directory {files_dir_path} does not contain any spreadsheet files')) 116 | return ds 117 | 118 | def load_metadata(self): 119 | if not os.path.exists(os.path.join(self.metadata_dir, 'datasource_tables.json')): 120 | return 121 | 122 | new_tables = {} 123 | with open(os.path.join(self.metadata_dir, 'datasource_tables.json'), 'r') as f: 124 
| table_data = json.load(f) 125 | 126 | for tname, table_json in table_data.items(): 127 | new_tables[tname] = Table.from_json(table_json) 128 | 129 | self.tables = new_tables 130 | 131 | def save_metadata(self, overwrite=True): 132 | if not os.path.exists(self.metadata_dir): 133 | os.makedirs(self.metadata_dir) 134 | 135 | if not os.access(self.metadata_dir, os.W_OK): 136 | raise QueryExecutionException(f'Directory {self.metadata_dir} not writable') 137 | 138 | tables_dump = { 139 | tname: table.to_json() for tname, table in self.tables.items() 140 | } 141 | 142 | if not overwrite and os.path.exists(os.path.join(self.metadata_dir, 'datasource_tables.json')): 143 | raise QueryExecutionException('Table metadata already exists, but overwrite is False.') 144 | 145 | with open(os.path.join(self.metadata_dir, 'datasource_tables.json'), 'w') as f: 146 | f.write(json.dumps(tables_dump)) 147 | 148 | def __contains__(self, table_name): 149 | return table_name in self.tables 150 | 151 | def register_function(self, name, func): 152 | self.custom_functions[name] = func 153 | 154 | def add_table(self, table): 155 | if self.tables.get(table.name): 156 | raise QueryExecutionException(f'Table {table.name} already exists in data source, use DROP TABLE to remove it if you want to recreate it.') 157 | self.tables[table.name] = table 158 | self.save_metadata() 159 | 160 | def drop_table(self, name): 161 | del self.tables[name] 162 | self.save_metadata() 163 | 164 | def execute_command(self, command): 165 | return command.execute(self) 166 | 167 | def query(self, sql, reduce_output=True): 168 | command = try_parse_command(sql) 169 | if command: 170 | return self.execute_command(command) 171 | query = parse_sql(sql) 172 | return self.execute_query(query, reduce_output=reduce_output) 173 | 174 | def execute_table_identifier(self, query): 175 | table_name = query.to_string(alias=False) 176 | if table_name not in self: 177 | raise QueryExecutionException(f'Unknown table {table_name}') 178 | else: 179 | df = self.tables[table_name].dataframe 180 | 181 | if query.alias: 182 | self.query_scope.add(query.alias.to_string(alias=False)) 183 | self.query_scope.add(table_name) 184 | return df 185 | 186 | def execute_constant(self, query): 187 | if isinstance(query, NullConstant): 188 | return None 189 | value = query.value 190 | return value 191 | 192 | def execute_operation(self, query, df): 193 | args = [self.execute_select_target(arg, df) for arg in query.args] 194 | op_func = self.custom_functions.get(query.op.lower()) 195 | if not op_func: 196 | op_func = get_modin_operation(query.op.lower()) 197 | result = op_func(*args) 198 | return result 199 | 200 | def execute_column_identifier(self, query, df): 201 | name_components = query.parts 202 | 203 | if len(name_components) == 1: 204 | full_column_name = name_components[0] 205 | column = get_df_column(df, full_column_name, case_sensitive=self.case_sensitive) 206 | if column is not None: 207 | return column 208 | elif len(name_components) == 2: 209 | table_name, column_name = name_components 210 | 211 | # If it's a join or a subquery 212 | join_column_name = f'{table_name}.{column_name}' 213 | column = get_df_column(df, join_column_name, case_sensitive=self.case_sensitive) 214 | if column is not None: 215 | return column 216 | 217 | if table_name and not table_name in self.query_scope: 218 | raise QueryExecutionException(f"Table name {table_name} not in scope.") 219 | 220 | column = get_df_column(df, column_name, case_sensitive=self.case_sensitive) 221 | if column is not 
200 |     def execute_column_identifier(self, query, df):
201 |         name_components = query.parts
202 |
203 |         if len(name_components) == 1:
204 |             full_column_name = name_components[0]
205 |             column = get_df_column(df, full_column_name, case_sensitive=self.case_sensitive)
206 |             if column is not None:
207 |                 return column
208 |         elif len(name_components) == 2:
209 |             table_name, column_name = name_components
210 |
211 |             # If it's a join or a subquery
212 |             join_column_name = f'{table_name}.{column_name}'
213 |             column = get_df_column(df, join_column_name, case_sensitive=self.case_sensitive)
214 |             if column is not None:
215 |                 return column
216 |
217 |             if table_name and table_name not in self.query_scope:
218 |                 raise QueryExecutionException(f"Table name {table_name} not in scope.")
219 |
220 |             column = get_df_column(df, column_name, case_sensitive=self.case_sensitive)
221 |             if column is not None:
222 |                 column.name = query.parts_to_str()
223 |                 return column
224 |         else:
225 |             raise QueryExecutionException(f"Too many name components: {query.parts}")
226 |         raise QueryExecutionException(f"Column {query.parts_to_str()} not found.")
227 |
228 |     def execute_type_cast(self, query, df):
229 |         type_name = query.type_name
230 |         arg = self.execute_select_target(query.arg, df)
231 |         return cast_type(arg, type_name)
232 |
233 |     def execute_select_target(self, query, df):
234 |         if isinstance(query, Identifier):
235 |             return self.execute_column_identifier(query, df)
236 |         elif isinstance(query, Operation):
237 |             return self.execute_operation(query, df)
238 |         elif isinstance(query, TypeCast):
239 |             return self.execute_type_cast(query, df)
240 |
241 |         return self.execute_query(query, reduce_output=True)
242 |
243 |     def resolve_select_target_col_name(self, target):
244 |         if not target.alias:
245 |             col_name = target.to_string(alias=False)
246 |         else:
247 |             col_name = target.alias.to_string(alias=False)
248 |         return col_name
249 |
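    # A worked sketch of the identifier resolution above (column and table
    # names are the ones used in this repo's tests):
    #
    #     SELECT passenger_id FROM titanic
    #         -> parts == ['passenger_id']; looked up directly in the frame.
    #     SELECT titanic.survived FROM titanic
    #         -> parts == ['titanic', 'survived']; first tried literally as a
    #            'titanic.survived' column (joins and subqueries produce such
    #            names), then, if 'titanic' is in query_scope, as the bare
    #            'survived' column renamed to 'titanic.survived'.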
250 |     def execute_select_targets(self, targets, source_df):
251 |         out_names = []
252 |
253 |         iterable_names = []
254 |         iterable_columns = []
255 |
256 |         scalar_names = []
257 |         scalar_values = []
258 |         # Expand star
259 |         for i, target in enumerate(targets):
260 |             if isinstance(target, Star):
261 |                 targets = targets[:i] + [Identifier(colname) for colname in
262 |                                          source_df.columns] + targets[i + 1:]
263 |                 break
264 |
265 |         for target in targets:
266 |             col_name = self.resolve_select_target_col_name(target)
267 |             out_names.append(col_name)
268 |
269 |             select_target_result = self.execute_select_target(target, source_df)
270 |             if isinstance(select_target_result, pd.Series):
271 |                 iterable_names.append(col_name)
272 |                 iterable_columns.append(select_target_result)
273 |             else:
274 |                 scalar_names.append(col_name)
275 |                 scalar_values.append(select_target_result)
276 |
277 |         # Add columns first, then scalars, so the dataframe has a proper index in the end
278 |         out_columns = {}
279 |         for i, col_name in enumerate(iterable_names):
280 |             out_columns[col_name] = list(iterable_columns[i])
281 |         out_df = pd.DataFrame.from_dict(out_columns)
282 |         for i, col_name in enumerate(scalar_names):
283 |             if out_df.empty:
284 |                 out_df[col_name] = [scalar_values[i]]
285 |             else:
286 |                 out_df[col_name] = scalar_values[i]
287 |         out_df = out_df[out_names]
288 |         return out_df
289 |
290 |     def execute_select_groupby_targets(self, targets, source_df, group_by, original_df_columns):
291 |         target_column_names = []  # Original names of columns to be returned by group by
292 |         agg = {}  # Agg dict for pandas aggregation
293 |
294 |         column_renames = {}  # Aliases for columns to be returned
295 |
296 |         df_columns = original_df_columns
297 |         df_original_column_names_lookup = dict(zip(df_columns, df_columns))
298 |         if not self.case_sensitive:
299 |             column_renames = CaseInsensitiveDict(column_renames)
300 |             df_original_column_names_lookup = CaseInsensitiveDict(df_original_column_names_lookup)
301 |
302 |         # Obtain columns that aggregation happens by
303 |         group_by_cols = []  # Columns that aggregation happens over. Only these can be among targets and not under an agg func
304 |         for g in group_by:
305 |             if isinstance(g, (Identifier, Operation)):
306 |                 string_repr = g.to_string(alias=False)
307 |                 if not self.case_sensitive:
308 |                     string_repr = CaseInsensitiveKey(string_repr)
309 |                 group_by_cols.append(string_repr)
310 |             elif g == Constant(True):  # Special case of implicit aggregation
311 |                 continue
312 |             else:
313 |                 raise QueryExecutionException(f"Don't know how to handle group by column: {str(g)}")
314 |
315 |         # Obtain column names, column aliases and aggregations to perform
316 |         for target in targets:
317 |             col_name = target.to_string(alias=False)
318 |             if not self.case_sensitive:
319 |                 col_name = CaseInsensitiveKey(col_name)
320 |
321 |             if target.alias:
322 |                 column_renames[col_name] = target.alias.to_string(alias=False)
323 |
324 |             target_column_names.append(col_name)
325 |
326 |             if col_name in agg:
327 |                 raise QueryExecutionException(f'Duplicate column name {col_name}. Provide an alias to resolve ambiguity.')
328 |
329 |             if isinstance(target, Function):
330 |                 if col_name in group_by_cols:
331 |                     # It's not a function to be executed, it's a transformed column from the GROUP BY clause, leave it be
332 |                     continue
333 |
334 |                 if len(target.args) > 1:
335 |                     raise QueryExecutionException(f'Only one argument functions supported for aggregations, found: {str(target)}')
336 |
337 |                 arg = target.args[0]
338 |                 if not isinstance(arg, Identifier):
339 |                     raise QueryExecutionException(f'The argument of an aggregate function must be a column, found: {str(arg)}')
340 |                 arg_col = arg.parts_to_str()
341 |
342 |                 func_name = target.op.lower()
343 |                 if target.distinct:
344 |                     func_name = f'{func_name}_distinct'
345 |                 modin_op = self.custom_functions.get(func_name)
346 |                 if not modin_op:
347 |                     modin_op = get_aggregation_operation(func_name)
348 |
349 |                 arg_col_name = df_original_column_names_lookup[arg_col]
350 |                 agg[str(col_name)] = (arg_col_name, modin_op)
351 |             elif col_name not in group_by_cols:
352 |                 # Neither an aggregate function nor a GROUP BY column, so the target is invalid
353 |                 raise QueryExecutionException(f'Column {col_name} not found in GROUP BY clause')
354 |
355 |         if isinstance(source_df, pd.Series):
356 |             source_df = pd.DataFrame(source_df)
357 |
358 |         if isinstance(source_df, pd.DataFrame):
359 |             # Implicit aggregation: nothing was grouped yet, so group the whole frame into a single bucket
360 |             temp_df = pd.DataFrame(source_df)
361 |             temp_df['__dummy__'] = 0
362 |             source_df = temp_df.groupby('__dummy__')
363 |
364 |         # Perform aggregation
365 |         aggregate_result = source_df.agg(**agg).reset_index()
366 |
367 |         out_df_column_names = []
368 |         out_df_column_values = []
369 |         for col_index in aggregate_result.columns:
370 |             if col_index not in target_column_names:
371 |                 continue
372 |             column_name = column_renames.get(col_index, col_index)
373 |             out_df_column_names.append(column_name)
374 |             out_df_column_values.append(aggregate_result[col_index].values)
375 |
376 |         out_dict = {col: values for col, values in zip(out_df_column_names, out_df_column_values)}
377 |         out_df = pd.DataFrame.from_dict(out_dict)
378 |         return out_df
379 |
380 |     def execute_order_by(self, order_by, df):
381 |         fields = [s.field.parts_to_str() for s in order_by]
382 |         sort_orders = [s.direction != 'DESC' for s in order_by]
383 |         df = df.sort_values(by=fields, ascending=sort_orders)
384 |         return df
385 |
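    # A sketch of the pandas call that execute_select_groupby_targets
    # assembles for, e.g.,
    #     SELECT survived, count(passenger_id) AS cnt FROM titanic GROUP BY survived
    # (data names assumed from this repo's tests; the named-aggregation form
    # is standard pandas):
    #
    #     agg = {'count(passenger_id)': ('passenger_id', 'count')}
    #     out = df.groupby(['survived']).agg(**agg).reset_index()
    #     out = out.rename({'count(passenger_id)': 'cnt'}, axis=1)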
386 |     def execute_select(self, query, reduce_output=False):
387 |         from_table = []
388 |         if query.from_table:
389 |             from_table = self.execute_from_query(query.from_table)
390 |
391 |         source_df = from_table
392 |
393 |         if query.where:
394 |             index = self.execute_operation(query.where, source_df)
395 |             source_df = source_df[index.values]
396 |
397 |         if query.group_by is None:
398 |             # Check for implicit group by
399 |             non_agg_targets = []
400 |             agg_targets = []
401 |             for target in query.targets:
402 |                 if isinstance(target, Function) and target.op.lower() in AGGREGATE_MAPPING:
403 |                     agg_targets.append(target)
404 |                 else:
405 |                     non_agg_targets.append(target)
406 |
407 |             if not non_agg_targets and agg_targets:
408 |                 query.group_by = [Constant(True)]
409 |             elif non_agg_targets and agg_targets:
410 |                 raise QueryExecutionException("Can't process a mix of aggregation functions and non-aggregation functions with no GROUP BY clause.")
411 |
412 |         if query.group_by is not None:
413 |             original_df_columns = source_df.columns
414 |             group_by_df = self.execute_groupby_queries(query.group_by, source_df)
415 |             out_df = self.execute_select_groupby_targets(query.targets, group_by_df, query.group_by, original_df_columns)
416 |         else:
417 |             out_df = self.execute_select_targets(query.targets, source_df)
418 |
419 |         if query.having:
420 |             if query.group_by is None:
421 |                 raise QueryExecutionException("Can't execute HAVING clause with no GROUP BY clause.")
422 |             index = self.execute_operation(query.having, out_df)
423 |             out_df = out_df[index]
424 |
425 |         if query.distinct:
426 |             out_df = out_df.drop_duplicates()
427 |
428 |         if query.offset:
429 |             offset = self.execute_query(query.offset)
430 |             out_df = out_df.iloc[offset:, :]
431 |
432 |         if query.order_by:
433 |             out_df = self.execute_order_by(query.order_by, out_df)
434 |
435 |         if query.limit:
436 |             limit = self.execute_query(query.limit)
437 |             out_df = out_df.iloc[:limit, :]
438 |
439 |         self.clear_query_scope()
440 |
441 |         # Postprocess column names
442 |         new_cols = []
443 |         for col in out_df.columns:
444 |             if col.startswith('`') and col.endswith('`') and '.' not in col:
445 |                 new_cols.append(col.strip('`'))
446 |             else:
447 |                 new_cols.append(col)
448 |         out_df.columns = new_cols
449 |
450 |         # Turn tables into Series or constants if needed, for final returning
451 |         if reduce_output:
452 |             if out_df.shape == (1, 1):  # Just one value returned
453 |                 return out_df.values[0][0]
454 |             elif out_df.shape[1] == 1:
455 |                 return out_df[out_df.columns[0]]
456 |         return out_df
457 |
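    # How reduce_output shapes what DataSource.query hands back (sketch,
    # mirroring the branches right above and the expectations in the tests):
    #
    #     ds.query('SELECT 1')                           # 1x1 frame  -> scalar 1
    #     ds.query('SELECT passenger_id FROM titanic')   # one column -> pd.Series
    #     ds.query('SELECT * FROM titanic')              # otherwise  -> pd.DataFrame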
458 |     def execute_join(self, query):
459 |         join_type = query.join_type
460 |         join_type = {'INNER JOIN': 'inner', 'LEFT JOIN': 'left', 'RIGHT JOIN': 'right', 'FULL JOIN': 'outer'}[join_type]
461 |
462 |         left = query.left
463 |         if isinstance(left, Identifier):
464 |             left = self.execute_table_identifier(left)
465 |         else:
466 |             left = self.execute_query(left)
467 |
468 |         right = query.right
469 |         if isinstance(right, Identifier):
470 |             right = self.execute_table_identifier(right)
471 |         else:
472 |             right = self.execute_query(right)
473 |
474 |         condition = query.condition
475 |         if isinstance(condition, BinaryOperation):
476 |             left_on = condition.args[0]
477 |             right_on = condition.args[1]
478 |         else:
479 |             raise QueryExecutionException(f'Invalid join condition {str(condition)}')
480 |         left_name = query.left.alias.to_string(alias=False) if query.left.alias else query.left.to_string(alias=False)
481 |         right_name = query.right.alias.to_string(alias=False) if query.right.alias else query.right.to_string(alias=False)
482 |         # Make sure left_on refers to the left table and right_on to the right table, swapping them if needed
483 |         left_on, right_on = left_on if left_on.parts[0] in left_name else right_on, \
484 |                             right_on if right_on.parts[0] in right_name else left_on
485 |
486 |         left_on = left_on.parts[-1]
487 |         right_on = right_on.parts[-1]
488 |         out_df = pd.merge(left, right, how=join_type, left_on=[left_on], right_on=[right_on], suffixes=('_x', '_y'))
489 |         renaming = {f'{left_on}_x': left_on, f'{right_on}_y': right_on}
490 |
491 |         for col in out_df.columns:
492 |             if col in renaming:
493 |                 continue
494 |
495 |             if col.endswith('_x'):
496 |                 pure_col_name = col[:-2]  # Strip the merge suffix added by pandas
497 |                 renaming[col] = f'{left_name}.{pure_col_name}'
498 |             elif col.endswith('_y'):
499 |                 pure_col_name = col[:-2]
500 |                 renaming[col] = f'{right_name}.{pure_col_name}'
501 |
502 |         out_df = out_df.rename(renaming, axis=1)
503 |         return out_df
504 |
505 |     def execute_from_query(self, query):
506 |         if isinstance(query, Identifier):
507 |             df = self.execute_table_identifier(query)
508 |         elif isinstance(query, Join):
509 |             df = self.execute_join(query)
510 |         else:
511 |             df = self.execute_query(query)
512 |
513 |         if query.alias:
514 |             self.query_scope.add(query.alias.to_string(alias=False))
515 |
516 |         return df
517 |
518 |     def execute_groupby_queries(self, queries, df):
519 |         col_names = []
520 |
521 |         if len(queries) == 1 and queries[0] == Constant(True):
522 |             return df
523 |
524 |         for query in queries:
525 |             if isinstance(query, Identifier):
526 |                 column = self.execute_column_identifier(query, df)
527 |                 col_names.append(column.name)
528 |             elif isinstance(query, Operation):
529 |                 expr_result = self.execute_operation(query, df)
530 |                 temp_col_name = query.alias if hasattr(query, 'alias') and query.alias else str(query)
531 |                 df[temp_col_name] = expr_result
532 |                 col_names.append(temp_col_name)
533 |             else:
534 |                 raise QueryExecutionException(f"Don't know how to aggregate by {str(query)}")
535 |         return df.groupby(col_names)
536 |
537 |     def execute_query(self, query, reduce_output=False):
538 |         if isinstance(query, Select):
539 |             return self.execute_select(query, reduce_output=reduce_output)
540 |         elif isinstance(query, Constant):
541 |             return self.execute_constant(query)
542 |         elif isinstance(query, Tuple):
543 |             return pd.Series([self.execute_query(item) for item in query.items])
544 |         else:
545 |             raise QueryExecutionException(f"Don't know how to execute query statement {type(query)}")
546 |
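# An end-to-end usage sketch (illustration only, not part of the original
# module; the CSV contents and the 'people' table name it implies are made up,
# while DataSource, os and pd come from this module's scope):
if __name__ == '__main__':
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        csv_path = os.path.join(tmp, 'people.csv')
        pd.DataFrame({'name': ['ann', 'bob'], 'age': [30, 40]}).to_csv(csv_path, index=False)

        ds = DataSource(metadata_dir=tmp)  # table metadata is persisted under tmp
        ds.add_table_from_file(csv_path)   # registers a table named after the file
        print(ds.query('SELECT name, age + 1 AS next_age FROM people WHERE age > 30'))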
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products.
If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 
122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. 
A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/philosophy/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/tests/test_data_sources/test_file_data_source.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from dfsql.data_sources import DataSource
3 | from dfsql.engine import pd
4 | import numpy as np
5 | import os
6 | import json
7 |
8 | from dfsql.exceptions import QueryExecutionException
9 | from dfsql.functions import AggregateFunction
10 | from dfsql.table import Table
11 |
12 |
13 | @pytest.fixture()
14 | def data_source_googleplay(googleplay_csv, tmpdir):
15 |     ds = DataSource(metadata_dir=str(tmpdir))
16 |     ds.add_table_from_file(googleplay_csv)
17 |     return ds
18 |
19 |
20 | class TestDataSource:
21 |     def test_created_from_dir(self, csv_file):
22 |         dir_path = csv_file.dirpath()
23 |         ds = DataSource.from_dir(metadata_dir=dir_path, files_dir_path=dir_path)
24 |         assert ds.tables and len(ds.tables) == 1
25 |         table = ds.tables['titanic']
26 |         assert table.name == csv_file.purebasename
27 |         assert pd.read_csv(csv_file).shape == table.dataframe.shape
28 |
29 |     def test_add_from_file(self, csv_file):
30 |         ds = DataSource(metadata_dir=csv_file.dirpath())
31 |         assert not ds.tables and len(ds.tables) == 0
32 |         ds.add_table_from_file(str(csv_file))
33 |         table = ds.tables['titanic']
34 |         assert table.name == csv_file.purebasename
35 |         assert pd.read_csv(csv_file).shape == table.dataframe.shape
36 |
37 |     def test_save_metadata(self, csv_file):
38 |         assert not [f for f in os.listdir(csv_file.dirpath()) if f.endswith('.json')]
39 |         ds = DataSource(metadata_dir=csv_file.dirpath())
40 |         assert 'datasource_tables.json' in [f for f in os.listdir(csv_file.dirpath()) if f.endswith('.json')]
41 |         json_data = json.load(open(os.path.join(csv_file.dirpath(), 'datasource_tables.json')))
42 |         assert json_data == {}
43 |
44 |         ds.add_table_from_file(csv_file)
45 |         assert ds.tables['titanic']
46 |         json_data = json.load(open(os.path.join(csv_file.dirpath(), 'datasource_tables.json')))
47 |         assert json_data.get('titanic') and list(json_data.keys()) == ['titanic']
48 |         assert json_data['titanic']['type'] == 'FileTable'
49 |         assert json_data['titanic']['name'] == 'titanic'
50 |         assert json_data['titanic']['fpath'] == str(csv_file)
51 |
52 | with pytest.raises(QueryExecutionException): 53 | # Can't implicitly overwrite table metadata 54 | DataSource(metadata_dir=csv_file.dirpath(), tables=[Table(name='titanic')]) 55 | 56 | # Metadata is loaded if a data source is created from the same dir 57 | ds2 = DataSource(metadata_dir=csv_file.dirpath()) 58 | assert ds2.tables['titanic'] 59 | 60 | # Metadata is cleared when requested explicitly 61 | ds3 = DataSource.create_new(metadata_dir=csv_file.dirpath()) 62 | assert not ds3.tables 63 | 64 | def test_simple_select(self, data_source): 65 | sql = "SELECT 1 AS result" 66 | assert data_source.query(sql) == 1 67 | 68 | sql = "SELECT 1" 69 | assert data_source.query(sql) == 1 70 | 71 | def test_create_table(self, csv_file): 72 | ds = DataSource(metadata_dir=csv_file.dirpath()) 73 | assert not ds.tables and len(ds.tables) == 0 74 | sql = f"CREATE TABLE ('{str(csv_file)}')" 75 | query_result = ds.query(sql) 76 | assert query_result == 'OK' 77 | assert ds.tables and len(ds.tables) == 1 78 | table = ds.tables['titanic'] 79 | assert table.name == csv_file.purebasename 80 | assert pd.read_csv(csv_file).shape == table.dataframe.shape 81 | 82 | def test_create_table_error_on_recreate(self, csv_file, data_source): 83 | assert data_source.tables['titanic'] 84 | 85 | sql = f"CREATE TABLE ('{str(csv_file)}')" 86 | with pytest.raises(QueryExecutionException): 87 | query_result = data_source.query(sql) 88 | 89 | def test_drop_table(self, data_source): 90 | assert data_source.tables['titanic'] 91 | sql = f"DROP TABLE titanic" 92 | query_result = data_source.query(sql) 93 | assert query_result == 'OK' 94 | assert not data_source.tables and len(data_source.tables) == 0 95 | 96 | def test_select_column(self, csv_file, data_source): 97 | df = pd.read_csv(csv_file) 98 | 99 | sql = "SELECT passenger_id FROM titanic" 100 | 101 | query_result = data_source.query(sql) 102 | 103 | assert query_result.name == 'passenger_id' 104 | 105 | values_left = df['passenger_id'].values 106 | values_right = query_result.values 107 | assert (values_left == values_right).all() 108 | 109 | def test_select_all(self, csv_file, data_source): 110 | df = pd.read_csv(csv_file) 111 | sql = "SELECT * FROM titanic" 112 | query_result = data_source.query(sql) 113 | assert (query_result.columns == df.columns).all() 114 | values_left = df.values 115 | values_right = query_result.values 116 | assert values_left.shape == values_right.shape 117 | 118 | def test_select_table_case_insensitive(self, csv_file, tmpdir): 119 | from dfsql import DataSource 120 | dir_path = csv_file.dirpath() 121 | data_source = DataSource.from_dir(metadata_dir=str(tmpdir), 122 | files_dir_path=dir_path, 123 | case_sensitive=False) 124 | 125 | df = pd.read_csv(csv_file) 126 | sql = "SELECT * FROM TiTaNiC" 127 | query_result = data_source.query(sql) 128 | assert (query_result.columns == df.columns).all() 129 | values_left = df.values 130 | values_right = query_result.values 131 | assert values_left.shape == values_right.shape 132 | 133 | def test_select_column_case_insensitive(self, csv_file, tmpdir): 134 | from dfsql import DataSource 135 | dir_path = csv_file.dirpath() 136 | data_source = DataSource.from_dir(metadata_dir=str(tmpdir), 137 | files_dir_path=dir_path, 138 | case_sensitive=False) 139 | 140 | df = pd.read_csv(csv_file) 141 | df = df[['passenger_id', 'embarked']] 142 | sql = "SELECT PassEnGer_ID, EmBarKED FROM TiTaNiC" 143 | query_result = data_source.query(sql) 144 | values_left = df.values 145 | values_right = query_result.values 146 | assert 
values_left.shape == values_right.shape 147 | 148 | def test_select_column_alias(self, csv_file, data_source): 149 | df = pd.read_csv(csv_file) 150 | 151 | sql = "SELECT passenger_id AS p1 FROM titanic" 152 | 153 | query_result = data_source.query(sql) 154 | 155 | assert query_result.name == 'p1' 156 | 157 | values_left = df['passenger_id'].values 158 | values_right = query_result.values 159 | assert (values_left == values_right).all() 160 | 161 | def test_select_distinct(self, csv_file, data_source): 162 | sql = "SELECT DISTINCT survived FROM titanic" 163 | query_result = data_source.query(sql) 164 | assert query_result.name == 'survived' 165 | assert list(query_result.values) == [0, 1] 166 | 167 | def test_select_limit_offset(self, csv_file, data_source): 168 | sql = "SELECT passenger_id FROM titanic LIMIT 2 OFFSET 2" 169 | query_result = data_source.query(sql) 170 | 171 | df = pd.read_csv(csv_file)['passenger_id'] 172 | df = df.iloc[2:4] 173 | assert query_result.shape == df.shape 174 | assert (df.values == query_result.values).all().all() 175 | 176 | def test_select_multiple_columns(self, csv_file, data_source): 177 | df = pd.read_csv(csv_file) 178 | 179 | sql = "SELECT passenger_id, survived FROM titanic" 180 | 181 | query_result = data_source.query(sql) 182 | 183 | assert list(query_result.columns) == ['passenger_id', 'survived'] 184 | 185 | values_left = df[['passenger_id', 'survived']].values 186 | values_right = query_result.values 187 | assert (values_left == values_right).all().all() 188 | 189 | def test_select_const(self, csv_file, data_source): 190 | df = pd.read_csv(csv_file) 191 | df['const'] = 1 192 | 193 | sql = "SELECT passenger_id, 1 AS const FROM titanic" 194 | 195 | query_result = data_source.query(sql) 196 | 197 | assert list(query_result.columns) == ['passenger_id', 'const'] 198 | 199 | values_left = df[['passenger_id', 'const']].values 200 | values_right = query_result.values 201 | assert (values_left == values_right).all().all() 202 | 203 | def test_select_operation(self, csv_file, data_source): 204 | df = pd.read_csv(csv_file) 205 | df['col_sum'] = df['passenger_id'] + df['survived'] 206 | df['col_diff'] = df['passenger_id'] - df['survived'] 207 | df = df[['col_sum', 'col_diff']] 208 | sql = "SELECT passenger_id + survived AS col_sum, passenger_id - survived AS col_diff FROM titanic" 209 | query_result = data_source.query(sql) 210 | assert list(query_result.columns) == ['col_sum', 'col_diff'] 211 | values_left = df.values 212 | values_right = query_result.values 213 | assert (values_left == values_right).all().all() 214 | 215 | def test_select_where(self, csv_file, data_source): 216 | df = pd.read_csv(csv_file) 217 | out_df = df[df['survived'] == 1][['passenger_id', 'survived']] 218 | sql = "SELECT passenger_id, survived FROM titanic WHERE survived = 1" 219 | query_result = data_source.query(sql) 220 | assert list(query_result.columns) == ['passenger_id', 'survived'] 221 | values_left = out_df[['passenger_id', 'survived']].values 222 | values_right = query_result.values 223 | assert values_left.shape == values_right.shape 224 | assert (values_left == values_right).all() 225 | 226 | sql = "SELECT passenger_id, survived FROM titanic WHERE titanic.survived = 1" 227 | query_result = data_source.query(sql) 228 | assert list(query_result.columns) == ['passenger_id', 'survived'] 229 | values_left = out_df[['passenger_id', 'survived']].values 230 | values_right = query_result.values 231 | assert values_left.shape == values_right.shape 232 | assert (values_left == 
215 |     def test_select_where(self, csv_file, data_source):
216 |         df = pd.read_csv(csv_file)
217 |         out_df = df[df['survived'] == 1][['passenger_id', 'survived']]
218 |         sql = "SELECT passenger_id, survived FROM titanic WHERE survived = 1"
219 |         query_result = data_source.query(sql)
220 |         assert list(query_result.columns) == ['passenger_id', 'survived']
221 |         values_left = out_df[['passenger_id', 'survived']].values
222 |         values_right = query_result.values
223 |         assert values_left.shape == values_right.shape
224 |         assert (values_left == values_right).all()
225 |
226 |         sql = "SELECT passenger_id, survived FROM titanic WHERE titanic.survived = 1"
227 |         query_result = data_source.query(sql)
228 |         assert list(query_result.columns) == ['passenger_id', 'survived']
229 |         values_left = out_df[['passenger_id', 'survived']].values
230 |         values_right = query_result.values
231 |         assert values_left.shape == values_right.shape
232 |         assert (values_left == values_right).all()
233 |
234 |         out_df = df[df.survived == 1]
235 |         out_df = out_df[out_df.sex != "male"]
236 |         out_df = out_df[out_df.p_class > 0]
237 |         out_df = out_df[['passenger_id', 'survived']]
238 |         sql = "SELECT passenger_id, survived FROM titanic WHERE survived = 1 AND sex != \"male\" AND p_class > 0"
239 |         query_result = data_source.query(sql)
240 |         assert list(query_result.columns) == ['passenger_id', 'survived']
241 |         values_left = out_df[['passenger_id', 'survived']].values
242 |         values_right = query_result.values
243 |         assert values_left.shape == values_right.shape
244 |         assert (values_left == values_right).all()
245 |
246 |     def test_select_where_alias(self, csv_file, data_source):
247 |         sql = "SELECT passenger_id, titanic.survived as ts FROM titanic WHERE titanic.survived = 1"
248 |         df = pd.read_csv(csv_file)
249 |         out_df = df[df['survived'] == 1][['passenger_id', 'survived']]
250 |         out_df.columns = ['passenger_id', 'ts']
251 |
252 |         query_result = data_source.query(sql)
253 |
254 |         values_left = out_df.values
255 |         values_right = query_result.values
256 |         assert values_left.shape == values_right.shape
257 |         assert (values_left == values_right).all()
258 |
259 |     def test_select_where_empty_result(self, csv_file, data_source):
260 |         sql = "SELECT passenger_id, survived FROM titanic WHERE survived = 3"
261 |         query_result = data_source.query(sql)
262 |         assert query_result.empty
263 |         assert list(query_result.columns) == ['passenger_id', 'survived']
264 |
265 |     def test_where_operator_order(self, csv_file, data_source):
266 |         df = pd.read_csv(csv_file)
267 |         # Surviving females, or anyone with p_class < 1: AND binds tighter than OR
268 |         out_df = df[((df.survived == 1) & (df.sex == "female")) | (df.p_class < 1)][['passenger_id', 'survived', 'sex', 'age']]
269 |         sql = "SELECT passenger_id, survived, sex, age FROM titanic WHERE survived = 1 AND sex = \"female\" OR p_class < 1"
270 |         query_result = data_source.query(sql)
271 |         assert list(query_result.columns) == ['passenger_id', 'survived', 'sex', 'age']
272 |         values_left = out_df.values
273 |         values_right = query_result.values
274 |         assert values_left.shape == values_right.shape
275 |         assert (values_left == values_right).all()
276 |
277 |         out_df = df[(df.survived == 1) & ((df.sex == "female") | (df.p_class < 1))][
278 |             ['passenger_id', 'survived', 'sex', 'age']]
279 |         sql = "SELECT passenger_id, survived, sex, age FROM titanic WHERE survived = 1 AND (sex = \"female\" OR p_class < 1)"
280 |         query_result = data_source.query(sql)
281 |         assert list(query_result.columns) == ['passenger_id', 'survived', 'sex', 'age']
282 |         values_left = out_df.values
283 |         values_right = query_result.values
284 |         assert values_left.shape == values_right.shape
285 |         assert (values_left == values_right).all()
286 |
287 |     def test_select_where_string(self, csv_file, data_source):
288 |         df = pd.read_csv(csv_file)
289 |         out_df = df[df['sex'] == "male"]['passenger_id']
290 |         sql = "SELECT passenger_id FROM titanic WHERE sex = \"male\""
291 |         query_result = data_source.query(sql)
292 |         assert query_result.name == 'passenger_id'
293 |         values_left = out_df.values
294 |         values_right = query_result.values
295 |         assert values_left.shape == values_right.shape
296 |         assert (values_left == values_right).all()
297 |
298 |     def test_select_groupby_wrong_column(self, csv_file, data_source):
299 |         sql = "SELECT survived, p_class, count(passenger_id) AS count_passenger_id FROM titanic GROUP BY survived"
300 |         with pytest.raises(QueryExecutionException):
301 |             data_source.query(sql)
302 |
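test_where_operator_order above hinges on AND binding tighter than OR. The pandas masks it checks against make that precedence explicit; a toy, self-contained version:

    import pandas as pd

    df = pd.DataFrame({'survived': [1, 1, 0],
                       'sex': ['female', 'male', 'male'],
                       'p_class': [3, 1, 0]})

    # WHERE survived = 1 AND sex = "female" OR p_class < 1    ->  (a AND b) OR c
    mask_default = ((df.survived == 1) & (df.sex == 'female')) | (df.p_class < 1)
    # WHERE survived = 1 AND (sex = "female" OR p_class < 1)  ->  a AND (b OR c)
    mask_grouped = (df.survived == 1) & ((df.sex == 'female') | (df.p_class < 1))
    print(df[mask_default], df[mask_grouped], sep='\n')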
303 |     def test_select_aggregation_function_no_groupby(self, csv_file, data_source):
304 |         df = pd.read_csv(csv_file)
305 |
306 |         tdf = pd.DataFrame({'col_sum': [df['passenger_id'].sum()], 'col_avg': [df['passenger_id'].mean()]})
307 |         sql = "SELECT sum(passenger_id) AS col_sum, avg(passenger_id) AS col_avg FROM titanic"
308 |         query_result = data_source.query(sql)
309 |         assert list(query_result.columns) == ['col_sum', 'col_avg']
310 |         values_left = tdf.values
311 |         values_right = query_result.values
312 |         assert (values_left == values_right).all().all()
313 |
314 |         sql = "SELECT count(passenger_id) AS count1 FROM titanic"
315 |         query_result = data_source.query(sql)
316 |         assert (query_result == df['passenger_id'].count())
317 |
318 |     def test_groupby(self, csv_file, data_source):
319 |         sql = "SELECT survived, p_class, count(passenger_id) AS count_passenger_id FROM titanic GROUP BY survived, p_class HAVING survived = 1"
320 |         query_result = data_source.query(sql)
321 |
322 |         df = pd.read_csv(csv_file)
323 |         df = df.groupby(['survived', 'p_class']).agg({'passenger_id': 'count'}).reset_index()
324 |         df.columns = ['survived', 'p_class', 'count_passenger_id']
325 |         df = df[df['survived'] == 1]
326 |
327 |         assert (query_result.columns == df.columns).all()
328 |         assert query_result.shape == df.shape
329 |
330 |         assert (query_result.survived == 1).all()
331 |         values_left = df.values
332 |         values_right = query_result.values
333 |         assert (values_left == values_right).all().all()
334 |
335 |         # Same, but no alias
336 |         sql = "SELECT survived, p_class, count(passenger_id) FROM titanic GROUP BY survived, p_class HAVING survived = 1"
337 |         query_result = data_source.query(sql)
338 |         df.columns = ['survived', 'p_class', 'count(passenger_id)']
339 |         assert (query_result.columns == df.columns).all()
340 |         assert query_result.shape == df.shape
341 |
342 |         assert (query_result.survived == 1).all()
343 |         values_left = df.values
344 |         values_right = query_result.values
345 |         assert (values_left == values_right).all().all()
346 |
347 |     def test_group_by_alias(self, csv_file, data_source):
348 |         sql = "SELECT survived as col1, count(passenger_id) AS count_passenger_id FROM titanic GROUP BY survived"
349 |         query_result = data_source.query(sql)
350 |
351 |         df = pd.read_csv(csv_file)
352 |         df = df.groupby(['survived']).agg({'passenger_id': 'count'}).reset_index()
353 |         df.columns = ['col1', 'count_passenger_id']
354 |
355 |         assert (query_result.columns == df.columns).all()
356 |         assert query_result.shape == df.shape
357 |         values_left = df.values
358 |         values_right = query_result.values
359 |         assert (values_left == values_right).all().all()
360 |
361 |     def test_group_by_case_insensitive(self, csv_file, tmpdir):
362 |         from dfsql import DataSource
363 |         dir_path = csv_file.dirpath()
364 |         data_source = DataSource.from_dir(metadata_dir=str(tmpdir),
365 |                                           files_dir_path=dir_path,
366 |                                           case_sensitive=False)
367 |
368 |         sql = "SELECT SuRViveD as COL1, count(PASSENGER_ID) AS count_passenger_id FROM titanic GROUP BY SURVIVED"
369 |         query_result = data_source.query(sql)
370 |
371 |         df = pd.read_csv(csv_file)
372 |         df = df.groupby(['survived']).agg({'passenger_id': 'count'}).reset_index()
373 |         df.columns = ['COL1', 'count_passenger_id']
374 |
375 |         assert (query_result.columns == df.columns).all()
376 |         assert query_result.shape == df.shape
377 |         values_left = df.values
378 |         values_right = query_result.values
379 |         assert (values_left == values_right).all().all()
380 |
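The GROUP BY ... HAVING tests all reduce to the same pandas pipeline: group, aggregate, rename, then filter the aggregated rows. A hedged sketch with a toy frame:

    import pandas as pd

    df = pd.DataFrame({'survived': [1, 1, 0, 0],
                       'p_class': [1, 1, 2, 3],
                       'passenger_id': [1, 2, 3, 4]})

    # SELECT survived, p_class, count(passenger_id) AS n
    # FROM t GROUP BY survived, p_class HAVING survived = 1
    out = df.groupby(['survived', 'p_class']).agg({'passenger_id': 'count'}).reset_index()
    out = out.rename(columns={'passenger_id': 'n'})
    out = out[out['survived'] == 1]    # HAVING is applied after aggregation
    print(out)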
381 |     def test_groupby_function(self, data_source, csv_file):
382 |         df = pd.read_csv(csv_file)
383 |         df['lower(name)'] = df.name.str.lower()
384 |         df = df.groupby(['lower(name)']).agg({'passenger_id': 'count'}).reset_index()
385 |         df = df.rename(columns={'passenger_id': 'count'})
386 |
387 |         sql = "SELECT lower(name), COUNT(passenger_id) as count FROM titanic GROUP BY lower(name)"
388 |
389 |         query_result = data_source.query(sql)
390 |         assert (query_result.columns == df.columns).all()
391 |         assert query_result.shape == df.shape
392 |
393 |         values_left = df.values
394 |         values_right = query_result.values
395 |         assert (values_left == values_right).all().all()
396 |
397 |     def test_groupby_function_with_alias(self, data_source, csv_file):
398 |         df = pd.read_csv(csv_file)
399 |         df['somealias'] = df.name.str.lower()
400 |         df = df.groupby(['somealias']).agg({'passenger_id': 'count'}).reset_index()
401 |         df = df.rename(columns={'passenger_id': 'count'})
402 |
403 |         sql = "SELECT lower(name) as somealias, COUNT(passenger_id) as count FROM titanic GROUP BY lower(name)"
404 |
405 |         query_result = data_source.query(sql)
406 |         assert (query_result.columns == df.columns).all()
407 |         assert query_result.shape == df.shape
408 |
409 |         values_left = df.values
410 |         values_right = query_result.values
411 |         assert (values_left == values_right).all().all()
412 |
413 |     # TODO
414 |     # def test_groupby_function_nested(self, data_source, csv_file):
415 |     #     df = pd.read_csv(csv_file)
416 |     #     df['somealias'] = df.name.str.lower()
417 |     #     df = df.groupby(['somealias']).agg({'passenger_id': 'count'}).reset_index()
418 |     #     df = df.rename(columns={'passenger_id': 'count'})
419 |     #
420 |     #     sql = "SELECT name as somealias, COUNT(passenger_id) as count FROM titanic GROUP BY upper(lower(name))"
421 |     #
422 |     #     query_result = data_source.query(sql)
423 |     #     assert (query_result.columns == df.columns).all()
424 |     #     assert query_result.shape == df.shape
425 |     #
426 |     #     values_left = df.values
427 |     #     values_right = query_result.values
428 |     #     assert (values_left == values_right).all().all()
429 |
430 |     def test_groupby_custom_aggregate_func(self, csv_file, data_source):
431 |         sql = "SELECT sex, mode(survived) AS mode_survived FROM titanic GROUP BY sex"
432 |
433 |         class ModeFunc(AggregateFunction):
434 |             def get_output(self, args):
435 |                 return args[0].value_counts(dropna=False).index[0]
436 |
437 |         data_source.custom_functions['mode'] = ModeFunc()
438 |
439 |         query_result = data_source.query(sql)
440 |         df = pd.read_csv(csv_file)
441 |         df = df.groupby(['sex']).agg({'survived': lambda x: x.value_counts(dropna=False).index[0]}).reset_index()
442 |         df.columns = ['sex', 'mode_survived']
443 |
444 |         assert (query_result.columns == df.columns).all()
445 |         assert query_result.shape == df.shape
446 |
447 |         values_left = df.values
448 |         values_right = query_result.values
449 |         assert (values_left == values_right).all().all()
450 |
451 |     def test_groupby_register_aggregate_func(self, csv_file, data_source):
452 |         sql = "SELECT sex, mode(survived) AS mode_survived FROM titanic GROUP BY sex"
453 |
454 |         func = lambda x: x.value_counts(dropna=False).index[0]
455 |         data_source.register_function('mode', func)
456 |
457 |         query_result = data_source.query(sql)
458 |         df = pd.read_csv(csv_file)
459 |         df = df.groupby(['sex']).agg({'survived': func}).reset_index()
460 |         df.columns = ['sex', 'mode_survived']
461 |
462 |         assert (query_result.columns == df.columns).all()
463 |         assert query_result.shape == df.shape
464 |
465 |         values_left = df.values
466 |         values_right = query_result.values
467 |         assert (values_left == values_right).all().all()
468 |
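Both custom-aggregate tests go through register_function, which appears to accept any callable that reduces a Series to a scalar. A sketch of the same registration outside the fixtures (paths hypothetical, CSV assumed to exist):

    from dfsql import DataSource

    ds = DataSource(metadata_dir='/tmp/dfsql_meta')           # hypothetical metadata dir
    ds.query("CREATE TABLE ('/tmp/data/titanic.csv')")        # hypothetical CSV path

    mode = lambda s: s.value_counts(dropna=False).index[0]    # most frequent value wins
    ds.register_function('mode', mode)
    print(ds.query("SELECT sex, mode(survived) AS mode_survived FROM titanic GROUP BY sex"))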
469 |     def test_groupby_register_two_aggregate_funcs(self, csv_file, data_source):
470 |         sql = "SELECT sex, mode1(survived) AS mode1_survived, mode2(survived) AS mode2_survived FROM titanic GROUP BY sex"
471 |
472 |         func = lambda x: x.value_counts(dropna=False).index[0]
473 |         data_source.register_function('mode1', func)
474 |         data_source.register_function('mode2', func)
475 |
476 |         query_result = data_source.query(sql)
477 |         df = pd.read_csv(csv_file)
478 |         df = df.groupby(['sex']).agg({'survived': func}).reset_index()
479 |         df.columns = ['sex', 'mode1_survived']
480 |         df['mode2_survived'] = df['mode1_survived']
481 |
482 |         assert (query_result.columns == df.columns).all()
483 |         assert query_result.shape == df.shape
484 |
485 |         values_left = df.values
486 |         values_right = query_result.values
487 |         assert (values_left == values_right).all().all()
488 |
489 |     def test_group_by_columns_select(self, csv_file, data_source):
490 |         df = pd.read_csv(csv_file)
491 |         df = df.groupby(['survived', 'p_class']).agg({'passenger_id': 'count'}).reset_index()
492 |         df.columns = ['survived', 'p_class', 'count_passenger_id']
493 |
494 |         sql = "SELECT survived, p_class, count(passenger_id) AS count_passenger_id FROM titanic GROUP BY survived, p_class"
495 |         query_result = data_source.query(sql)
496 |         assert (query_result.columns == df.columns).all()
497 |         assert query_result.shape == df.shape
498 |         values_left = df.values
499 |         values_right = query_result.values
500 |         assert (values_left == values_right).all().all()
501 |
502 |         sql = "SELECT p_class, count(passenger_id) FROM titanic GROUP BY survived, p_class"
503 |         query_result = data_source.query(sql)
504 |         values_left = df.drop(columns=['survived']).values
505 |         values_right = query_result.values
506 |         assert (values_left == values_right).all().all()
507 |
508 |         sql = "SELECT count(passenger_id) FROM titanic GROUP BY survived, p_class"
509 |         query_result = data_source.query(sql)
510 |         values_left = df.drop(columns=['survived', 'p_class']).values.flatten()
511 |         values_right = query_result.values
512 |         assert (values_left == values_right).all().all()
513 |
514 |     def test_inner_join(self, csv_file, data_source):
515 |         df = pd.read_csv(csv_file)
516 |         merge_df = pd.merge(df, df, how='inner', left_on=['passenger_id'], right_on=['p_class'])[['passenger_id_x', 'p_class_y']]
517 |         merge_df.columns = ['passenger_id', 'p_class']
518 |         sqls = ["SELECT passenger_id, p_class FROM titanic AS t1 INNER JOIN titanic AS t2 ON t1.passenger_id = t2.p_class",
519 |                 "SELECT passenger_id, p_class FROM titanic AS t1 INNER JOIN titanic AS t2 ON t2.p_class = t1.passenger_id"]
520 |         for sql in sqls:
521 |             query_result = data_source.query(sql)
522 |             assert list(query_result.columns) == ['passenger_id', 'p_class']
523 |             values_left = merge_df[['passenger_id', 'p_class']].values
524 |             values_right = query_result.values
525 |             assert (values_left == values_right).all().all()
526 |
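The join tests validate dfsql against pd.merge directly; the correspondence in miniature, with invented frames:

    import pandas as pd

    t1 = pd.DataFrame({'passenger_id': [1, 2, 3]})
    t2 = pd.DataFrame({'p_class': [2, 3, 4]})

    # SELECT passenger_id, p_class FROM t1 INNER JOIN t2 ON t1.passenger_id = t2.p_class
    out = pd.merge(t1, t2, how='inner', left_on=['passenger_id'], right_on=['p_class'])
    print(out)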
William Henry",male,35,0,0,373450,8.05,,S 535 | 6,0,3,"Moran, Mr. James",male,,0,0,330877,8.4583,,Q 536 | 7,0,1,"McCarthy, Mr. Timothy J",male,54,0,0,17463,51.8625,E46,S 537 | 8,0,3,"Palsson, Master. Gosta Leonard",male,2,3,1,349909,21.075,,S 538 | 9,1,3,"Johnson, Mrs. Oscar W (Elisabeth Vilhelmina Berg)",female,27,0,2,347742,11.1333,,S 539 | """ 540 | p.write_text(content, encoding='utf-8') 541 | 542 | dir_path = csv_file.dirpath() 543 | data_source = DataSource.from_dir(metadata_dir=dir_path, files_dir_path=dir_path) 544 | assert len(data_source.tables) == 2 545 | 546 | df = pd.read_csv(csv_file) 547 | merge_df = pd.merge(df, df, how='inner', left_on=['passenger_id'], right_on=['p_class'])[['passenger_id_x', 'p_class_y']] 548 | merge_df.columns = ['passenger_id', 'p_class'] 549 | sql = "SELECT passenger_id, p_class FROM titanic INNER JOIN titanic2 ON titanic.passenger_id = titanic2.p_class" 550 | query_result = data_source.query(sql) 551 | assert list(query_result.columns) == ['passenger_id', 'p_class'] 552 | values_left = merge_df[['passenger_id', 'p_class']].values 553 | values_right = query_result.values 554 | assert (values_left == values_right).all().all() 555 | 556 | def test_inner_join_col_access(self, csv_file, data_source): 557 | df = pd.read_csv(csv_file) 558 | merge_df = pd.merge(df, df, how='inner', left_on=['passenger_id'], right_on=['p_class'])[['passenger_id_x', 'p_class_y', 'sex_x']] 559 | merge_df.columns = ['passenger_id', 'p_class', 't1.sex'] 560 | sql = "SELECT passenger_id, p_class, t1.sex FROM titanic AS t1 INNER JOIN titanic AS t2 ON t1.passenger_id = t2.p_class" 561 | query_result = data_source.query(sql) 562 | assert list(query_result.columns) == ['passenger_id', 'p_class', 't1.sex'] 563 | values_left = merge_df.values 564 | values_right = query_result.values 565 | assert (values_left == values_right).all().all() 566 | 567 | sql = "SELECT passenger_id, p_class, t1.sex AS sex FROM titanic AS t1 INNER JOIN titanic AS t2 ON t1.passenger_id = t2.p_class" 568 | query_result = data_source.query(sql) 569 | assert list(query_result.columns) == ['passenger_id', 'p_class', 'sex'] 570 | values_left = merge_df.values 571 | values_right = query_result.values 572 | assert (values_left == values_right).all().all() 573 | 574 | def test_left_right_outer_joins(self, csv_file, data_source): 575 | df = pd.read_csv(csv_file) 576 | merge_df = pd.merge(df, df, how='left', left_on=['passenger_id'], right_on=['p_class'])[['passenger_id_x', 'p_class_y']] 577 | merge_df.columns = ['passenger_id', 'p_class'] 578 | sql = "SELECT passenger_id, p_class FROM titanic AS t1 LEFT JOIN titanic AS t2 ON t1.passenger_id = t2.p_class" 579 | query_result = data_source.query(sql) 580 | assert merge_df.shape == query_result.shape 581 | assert list(query_result.columns) == ['passenger_id', 'p_class'] 582 | values_left = merge_df.dropna().values 583 | values_right = query_result.dropna().values 584 | assert (values_left == values_right).all().all() 585 | 586 | merge_df = pd.merge(df, df, how='right', left_on=['passenger_id'], right_on=['p_class'])[ 587 | ['passenger_id_x', 'p_class_y']] 588 | merge_df.columns = ['passenger_id', 'p_class'] 589 | sql = "SELECT passenger_id, p_class FROM titanic AS t1 RIGHT JOIN titanic AS t2 ON t1.passenger_id = t2.p_class" 590 | query_result = data_source.query(sql) 591 | assert merge_df.shape == query_result.shape 592 | assert list(query_result.columns) == ['passenger_id', 'p_class'] 593 | values_left = merge_df.dropna().values 594 | values_right = 
574 |     def test_left_right_outer_joins(self, csv_file, data_source):
575 |         df = pd.read_csv(csv_file)
576 |         merge_df = pd.merge(df, df, how='left', left_on=['passenger_id'], right_on=['p_class'])[['passenger_id_x', 'p_class_y']]
577 |         merge_df.columns = ['passenger_id', 'p_class']
578 |         sql = "SELECT passenger_id, p_class FROM titanic AS t1 LEFT JOIN titanic AS t2 ON t1.passenger_id = t2.p_class"
579 |         query_result = data_source.query(sql)
580 |         assert merge_df.shape == query_result.shape
581 |         assert list(query_result.columns) == ['passenger_id', 'p_class']
582 |         values_left = merge_df.dropna().values
583 |         values_right = query_result.dropna().values
584 |         assert (values_left == values_right).all().all()
585 |
586 |         merge_df = pd.merge(df, df, how='right', left_on=['passenger_id'], right_on=['p_class'])[
587 |             ['passenger_id_x', 'p_class_y']]
588 |         merge_df.columns = ['passenger_id', 'p_class']
589 |         sql = "SELECT passenger_id, p_class FROM titanic AS t1 RIGHT JOIN titanic AS t2 ON t1.passenger_id = t2.p_class"
590 |         query_result = data_source.query(sql)
591 |         assert merge_df.shape == query_result.shape
592 |         assert list(query_result.columns) == ['passenger_id', 'p_class']
593 |         values_left = merge_df.dropna().values
594 |         values_right = query_result.dropna().values
595 |         assert (values_left == values_right).all().all()
596 |
597 |         merge_df = pd.merge(df, df, how='outer', left_on=['passenger_id'], right_on=['p_class'])[
598 |             ['passenger_id_x', 'p_class_y']]
599 |         merge_df.columns = ['passenger_id', 'p_class']
600 |         sql = "SELECT passenger_id, p_class FROM titanic AS t1 FULL JOIN titanic AS t2 ON t1.passenger_id = t2.p_class"
601 |         query_result = data_source.query(sql)
602 |         assert merge_df.shape == query_result.shape
603 |         assert list(query_result.columns) == ['passenger_id', 'p_class']
604 |         values_left = merge_df.dropna().values
605 |         values_right = query_result.dropna().values
606 |         assert (values_left == values_right).all().all()
607 |
608 |     def test_subquery_simple(self, csv_file, data_source):
609 |         sql = "SELECT * FROM (SELECT * FROM titanic) AS t1"
610 |         query_result = data_source.query(sql)
611 |         df = pd.read_csv(csv_file)
612 |
613 |         assert query_result.shape == df.shape
614 |         values_left = df.dropna().values
615 |         values_right = query_result.dropna().values
616 |         assert (values_left == values_right).all()
617 |
618 |     def test_subquery_groupby(self, csv_file, data_source):
619 |         sql = "SELECT survived, p_class, count(passenger_id) AS count FROM (SELECT * FROM titanic WHERE survived = 1) AS t1 GROUP BY survived, p_class"
620 |         query_result = data_source.query(sql)
621 |
622 |         df = pd.read_csv(csv_file)
623 |         df = df[df.survived == 1]
624 |         df = df.groupby(['survived', 'p_class']).agg({'passenger_id': 'count'}).reset_index()
625 |
626 |         assert query_result.shape == df.shape
627 |         values_left = df.dropna().values
628 |         values_right = query_result.dropna().values
629 |         assert (values_left == values_right).all()
630 |
631 |     def test_subquery_where(self, csv_file, data_source):
632 |         sql = "SELECT survived, p_class, passenger_id FROM titanic WHERE passenger_id IN (SELECT passenger_id FROM titanic WHERE survived = 1)"
633 |         query_result = data_source.query(sql)
634 |
635 |         df = pd.read_csv(csv_file)
636 |         df = df[df.survived == 1]
637 |         df = df[['survived', 'p_class', 'passenger_id']]
638 |
639 |         assert query_result.shape == df.shape
640 |         values_left = df.dropna().values
641 |         values_right = query_result.dropna().values
642 |         assert (values_left == values_right).all()
643 |
644 |     def test_subquery_select(self, csv_file, data_source):
645 |         sql = "SELECT survived, (SELECT passenger_id FROM titanic LIMIT 1) AS pid FROM titanic"
646 |         query_result = data_source.query(sql)
647 |         assert (query_result['pid'] == 1).all()
648 |
649 |     def test_show_tables(self, csv_file, data_source):
650 |         sql = "SHOW TABLES"
651 |         query_result = data_source.query(sql)
652 |         assert (query_result.values == np.array([['titanic', str(csv_file)]])).all()
653 |
654 |         data_source.drop_table('titanic')
655 |         query_result = data_source.query(sql)
656 |         assert query_result.empty
657 |
658 |     def test_cast(self, csv_file, data_source):
659 |         sql = "SELECT CAST (4 AS str) AS result"
660 |         query_result = data_source.query(sql)
661 |         assert query_result == "4" and isinstance(query_result, str)
662 |
663 |         sql = "SELECT CAST (\"4\" AS int) AS result"
664 |         query_result = data_source.query(sql)
665 |         assert query_result == 4 and isinstance(query_result, np.int64)
666 |
667 |         sql = "SELECT CAST (\"4\" AS float) AS result"
668 |         query_result = data_source.query(sql)
669 |         assert query_result == 4.0 and isinstance(query_result, np.float64)
670 |
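test_cast above pins down the scalar types dfsql returns; the conversions it asserts behave like plain Python/numpy casts:

    import numpy as np

    assert str(4) == "4"               # CAST (4 AS str)
    assert np.int64("4") == 4          # CAST ("4" AS int)
    assert np.float64("4") == 4.0      # CAST ("4" AS float)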
671 |     def test_count_distinct(self, csv_file, data_source):
672 |         sql = "SELECT COUNT(DISTINCT survived) AS uniq_survived FROM titanic"
673 |         query_result = data_source.query(sql)
674 |
675 |         assert query_result == 2
676 |
677 |     def test_large_where_and(self, data_source_googleplay, googleplay_csv):
678 |         df = pd.read_csv(googleplay_csv)
679 |
680 |         out_df = df[(df.Category == "FAMILY") & (df.Price == '0')][['App', 'Category']]
681 |         sql = "SELECT App, Category FROM googleplaystore WHERE Category = \"FAMILY\" AND Price = \"0\""
682 |         query_result = data_source_googleplay.query(sql)
683 |
684 |         assert (out_df.dropna().values == query_result.dropna().values).all().all()
685 |
686 |     def test_large_not(self, data_source_googleplay, googleplay_csv):
687 |         df = pd.read_csv(googleplay_csv)
688 |
689 |         out_df = df[~(df.Category == "FAMILY")][['App', 'Category']]
690 |         sql = "SELECT App, Category FROM googleplaystore WHERE NOT Category = \"FAMILY\""
691 |         query_result = data_source_googleplay.query(sql)
692 |
693 |         assert (out_df.dropna().values == query_result.dropna().values).all().all()
694 |
695 |     def test_large_order_by(self, data_source_googleplay, googleplay_csv):
696 |         df = pd.read_csv(googleplay_csv)
697 |
698 |         out_df = df.sort_values(by='App')[['App', 'Category']]
699 |         sql = "SELECT App, Category FROM googleplaystore ORDER BY App"
700 |         query_result = data_source_googleplay.query(sql)
701 |         assert (out_df.dropna().values == query_result.dropna().values).all().all()
702 |
703 |         out_df = df.sort_values(by='App', ascending=False)[['App', 'Category']]
704 |         sql = "SELECT App, Category FROM googleplaystore ORDER BY App DESC"
705 |         query_result = data_source_googleplay.query(sql)
706 |         assert (out_df.dropna().values == query_result.dropna().values).all().all()
707 |
708 |         out_df = df.sort_values(by=['App', 'Category'])[['App', 'Category']]
709 |         sql = "SELECT App, Category FROM googleplaystore ORDER BY App, Category"
710 |         query_result = data_source_googleplay.query(sql)
711 |         assert (out_df.dropna().values == query_result.dropna().values).all().all()
712 |
713 |         out_df = df.sort_values(by=['App', 'Category'], ascending=[False, False])[['App', 'Category']]
714 |         sql = "SELECT App, Category FROM googleplaystore ORDER BY App DESC, Category DESC"
715 |         query_result = data_source_googleplay.query(sql)
716 |         assert (out_df.dropna().values == query_result.dropna().values).all().all()
717 |
718 |         out_df = df.sort_values(by=['App', 'Category'], ascending=[False, True])[['App', 'Category']]
719 |         sql = "SELECT App, Category FROM googleplaystore ORDER BY App DESC, Category ASC"
720 |         query_result = data_source_googleplay.query(sql)
721 |         assert (out_df.dropna().values == query_result.dropna().values).all().all()
722 |
723 |         out_df = df.groupby(['Category']).agg({'App': 'count'}).reset_index()
724 |         out_df.columns = ['Category', 'count_app']
725 |         out_df = out_df.sort_values(by=['count_app'], ascending=[False])[:10]
726 |         sql = "SELECT Category, count(App) AS count_app FROM googleplaystore GROUP BY Category ORDER BY count_app DESC LIMIT 10"
727 |         query_result = data_source_googleplay.query(sql)
728 |         assert (out_df.dropna().values == query_result.dropna().values).all().all()
729 |
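The ORDER BY variants above map one-to-one onto sort_values, with per-key sort directions carried in the ascending list. A self-contained sketch:

    import pandas as pd

    df = pd.DataFrame({'App': ['b', 'a', 'c'], 'Category': ['y', 'x', 'z']})

    # ORDER BY App DESC, Category ASC
    out = df.sort_values(by=['App', 'Category'], ascending=[False, True])
    print(out)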
730 |     def test_string_concat(self, data_source, csv_file):
731 |         sql = "SELECT \"a\" || \"b\""
732 |         query_result = data_source.query(sql)
733 |         assert query_result == 'ab'
734 |
735 |         sql = "SELECT \"b\" || \"a\""
736 |         query_result = data_source.query(sql)
737 |         assert query_result == 'ba'
738 |
739 |         df = pd.read_csv(csv_file)
740 |         out_series = df['name'] + df['embarked']
741 |         sql = "SELECT name || embarked FROM titanic"
742 |         query_result = data_source.query(sql)
743 |         assert (query_result.values == out_series.values).all()
744 |
745 |         df = pd.read_csv(csv_file)
746 |         out_series = df['embarked'] + df['name']
747 |         sql = "SELECT embarked || name FROM titanic"
748 |         query_result = data_source.query(sql)
749 |         assert (query_result.values == out_series.values).all()
750 |
751 |         out_series = df['name'] + 'a'
752 |         sql = "SELECT name || \"a\" FROM titanic"
753 |         query_result = data_source.query(sql)
754 |         assert (query_result.values == out_series.values).all()
755 |
756 |         out_series = "a" + df['name']
757 |         sql = "SELECT \"a\" || name FROM titanic"
758 |         query_result = data_source.query(sql)
759 |         assert (query_result.values == out_series.values).all()
760 |
761 |     def test_string_upper_lower(self, data_source, csv_file):
762 |         sql = "SELECT upper(\"a\")"
763 |         query_result = data_source.query(sql)
764 |         assert query_result == 'A'
765 |
766 |         sql = "SELECT lower(\"A\")"
767 |         query_result = data_source.query(sql)
768 |         assert query_result == 'a'
769 |
770 |         df = pd.read_csv(csv_file)
771 |         out_series = df['name'].apply(lambda x: x.upper())
772 |         sql = "SELECT upper(name) FROM titanic"
773 |         query_result = data_source.query(sql)
774 |         assert (query_result.values == out_series.values).all()
775 |
776 |         out_series = df['name'].apply(lambda x: x.lower())
777 |         sql = "SELECT lower(name) FROM titanic"
778 |         query_result = data_source.query(sql)
779 |         assert (query_result.values == out_series.values).all()
780 |
781 |     def test_string_like(self, data_source, csv_file):
782 |         sql = "SELECT \"a\" LIKE \".*\" "
783 |         query_result = data_source.query(sql)
784 |         assert query_result == True
785 |
786 |         df = pd.read_csv(csv_file)
787 |         sql = "SELECT name FROM titanic WHERE name LIKE \".*\""
788 |         query_result = data_source.query(sql)
789 |         assert (query_result.values == df['name'].values).all()
790 |
791 |         sql = "SELECT name FROM titanic WHERE name LIKE \".*Owen.*\""
792 |         query_result = data_source.query(sql)
793 |         assert query_result == 'Braund, Mr. Owen Harris'
794 |
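Note that the LIKE patterns in test_string_like are regular expressions (".*Owen.*"), not SQL %-wildcards. The pandas analogue the assertions imply is regex matching, something like:

    import pandas as pd

    names = pd.Series(['Braund, Mr. Owen Harris', 'Heikkinen, Miss. Laina'])

    # name LIKE ".*Owen.*"  -- treated as a regex, judging by the tests above
    print(names[names.str.match('.*Owen.*')])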
795 |     def test_in(self, data_source, csv_file):
796 |         sql = "SELECT name FROM titanic WHERE name IN (\"Braund, Mr. Owen Harris\", \"Cumings, Mrs. John Bradley (Florence Briggs Thayer)\")"
797 |         query_result = data_source.query(sql)
798 |         assert (query_result.values == np.array(['Braund, Mr. Owen Harris', 'Cumings, Mrs. John Bradley (Florence Briggs Thayer)'])).all()
799 |
800 |     def test_custom_function_select(self, data_source, csv_file):
801 |         def custom(x):
802 |             return x + '_custom_addition'
803 |
804 |         data_source.register_function('custom', custom)
805 |         sql = "SELECT custom(\"a\")"
806 |         query_result = data_source.query(sql)
807 |         assert query_result == 'a_custom_addition'
808 |
809 |         df = pd.read_csv(csv_file)
810 |         sql = "SELECT custom(name) FROM titanic"
811 |         query_result = data_source.query(sql)
812 |         assert (query_result.values == df.name.values + '_custom_addition').all()
813 |
814 |     def test_custom_function_where(self, data_source, csv_file):
815 |         df = pd.read_csv(csv_file)
816 |
817 |         def did_survive(survived):
818 |             return survived == 1
819 |
820 |         data_source.register_function('did_survive', did_survive)
821 |         sql = "SELECT passenger_id FROM titanic WHERE did_survive(survived)"
822 |         query_result = data_source.query(sql)
823 |         assert (query_result.values == df[df.survived == 1]['passenger_id'].values).all()
824 |
825 |     def test_is_null(self, data_source_googleplay, googleplay_csv):
826 |         df = pd.read_csv(googleplay_csv)
827 |
828 |         out_df = df[df.Rating.isnull()]['App']
829 |         sql = "SELECT App FROM googleplaystore WHERE Rating IS NULL"
830 |         query_result = data_source_googleplay.query(sql)
831 |         assert (out_df.dropna().values == query_result.dropna().values).all()
832 |
833 |         out_df = df[~df.Rating.isnull()]['App']
834 |         sql = "SELECT App FROM googleplaystore WHERE Rating IS NOT NULL"
835 |         query_result = data_source_googleplay.query(sql)
836 |         assert (out_df.dropna().values == query_result.dropna().values).all()
837 |
838 |     def test_is_true(self, data_source_googleplay, googleplay_csv):
839 |         df = pd.read_csv(googleplay_csv)
840 |
841 |         out_df = df[df.Price == '0']['App']
842 |         sql = "SELECT App FROM googleplaystore WHERE (Price = '0') IS TRUE"
843 |         query_result = data_source_googleplay.query(sql)
844 |         assert (out_df.dropna().values == query_result.dropna().values).all()
845 |
846 |         out_df = df[df.Price != '0']['App']
847 |         sql = "SELECT App FROM googleplaystore WHERE (Price = '0') IS NOT TRUE"
848 |         query_result = data_source_googleplay.query(sql)
849 |         assert (out_df.dropna().values == query_result.dropna().values).all()
850 |
851 |     def test_is_false(self, data_source_googleplay, googleplay_csv):
852 |         df = pd.read_csv(googleplay_csv)
853 |
854 |         out_df = df[df.Price != '0']['App']
855 |         sql = "SELECT App FROM googleplaystore WHERE (Price = '0') IS FALSE"
856 |         query_result = data_source_googleplay.query(sql)
857 |         assert (out_df.dropna().values == query_result.dropna().values).all()
858 |
859 |         out_df = df[df.Price == '0']['App']
860 |         sql = "SELECT App FROM googleplaystore WHERE (Price = '0') IS NOT FALSE"
861 |         query_result = data_source_googleplay.query(sql)
862 |         assert (out_df.dropna().values == query_result.dropna().values).all()
863 |
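The IS NULL / IS NOT NULL tests mirror pandas null masks; in miniature, with a toy frame:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({'App': ['a', 'b'], 'Rating': [4.5, np.nan]})

    print(df[df.Rating.isnull()]['App'])     # WHERE Rating IS NULL
    print(df[df.Rating.notnull()]['App'])    # WHERE Rating IS NOT NULL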
864 |     def test_subquery_alias(self, googleplay_csv, data_source_googleplay):
865 |         df = pd.read_csv(googleplay_csv)
866 |         out_df = df.App
867 |         sql = "SELECT tab_alias.app FROM (SELECT App as app FROM googleplaystore) AS tab_alias"
868 |         query_result = data_source_googleplay.query(sql)
869 |         assert query_result.name == 'tab_alias.app'
870 |         assert (out_df.dropna().values == query_result.dropna().values).all()
871 |
872 |     def test_subquery_alias_case_insensitive(self, root_directory, googleplay_csv, tmpdir):
873 |         from dfsql import DataSource
874 |         dir_path = os.path.join(root_directory, 'tests')
875 |         data_source_googleplay = DataSource.from_dir(metadata_dir=str(tmpdir),
876 |                                                      files_dir_path=dir_path,
877 |                                                      case_sensitive=False)
878 |
879 |         df = pd.read_csv(googleplay_csv)
880 |         out_df = df.App
881 |         sql = "SELECT tab_alias.app FROM (SELECT App as APP FROM googleplaystore) AS tab_alias"
882 |         query_result = data_source_googleplay.query(sql)
883 |         assert query_result.name == 'tab_alias.app'
884 |         assert (out_df.dropna().values == query_result.dropna().values).all()
885 |
886 |     def test_multi_word_identifier(self, googleplay_csv, data_source_googleplay):
887 |         df = pd.read_csv(googleplay_csv)
888 |         out_df = df[['App', 'Content Rating']]
889 |         sql = "SELECT App, `Content Rating` FROM googleplaystore"
890 |         query_result = data_source_googleplay.query(sql)
891 |         assert (query_result.columns == out_df.columns).all()
892 |         assert (out_df.dropna().values == query_result.dropna().values).all()
893 |
--------------------------------------------------------------------------------