├── MANIFEST.in ├── panoptes_client ├── tests │ ├── __init__.py │ ├── test_set_member_subject.py │ ├── test_linkresolver.py │ ├── test_project.py │ ├── test_subject_set.py │ ├── test_inaturalist.py │ ├── test_bearer_expiry.py │ ├── test_subject.py │ ├── test_http_retries.py │ ├── test_linkcollection.py │ └── test_workflow.py ├── project_role.py ├── aggregation.py ├── set_member_subject.py ├── collection_role.py ├── __init__.py ├── subject_workflow_status.py ├── classification.py ├── organization.py ├── workflow_version.py ├── inaturalist.py ├── utils.py ├── user.py ├── collection.py ├── project_preferences.py ├── subject_set.py ├── exportable.py ├── project.py ├── caesar.py ├── subject.py └── workflow.py ├── .hound.yml ├── .flake8.ini ├── Dockerfile.stable ├── Dockerfile.stable2 ├── docs ├── modules.rst ├── index.rst ├── panoptes_client.rst ├── Makefile ├── conf.py └── user_guide.rst ├── .github ├── dependabot.yml └── workflows │ ├── run-tests.yml │ ├── publish-to-pypi.yml │ ├── publish-to-test-pypi.yml │ └── codeql-analysis.yml ├── Dockerfile.dev ├── Dockerfile.dev2 ├── .readthedocs.yaml ├── setup.py ├── docker-compose.yml ├── .gitignore ├── CONTRIBUTING.md ├── README.md ├── CHANGELOG.md └── LICENSE /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | -------------------------------------------------------------------------------- /panoptes_client/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.hound.yml: -------------------------------------------------------------------------------- 1 | python: 2 | enabled: true 3 | config_file: .flake8.ini 4 | -------------------------------------------------------------------------------- /.flake8.ini: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = BLK100,E402,E501,E722,E741,W503,E128,B006,B001 3 | max-line-length = 120 -------------------------------------------------------------------------------- /Dockerfile.stable: -------------------------------------------------------------------------------- 1 | FROM python:3-alpine 2 | 3 | RUN apk --no-cache add libmagic 4 | 5 | RUN pip install panoptes-client 6 | -------------------------------------------------------------------------------- /Dockerfile.stable2: -------------------------------------------------------------------------------- 1 | FROM python:2.7-alpine 2 | 3 | RUN apk --no-cache add libmagic 4 | 5 | RUN pip install panoptes-client 6 | -------------------------------------------------------------------------------- /docs/modules.rst: -------------------------------------------------------------------------------- 1 | panoptes_client 2 | =============== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | panoptes_client 8 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "10:00" 8 | open-pull-requests-limit: 10 9 | -------------------------------------------------------------------------------- /Dockerfile.dev: -------------------------------------------------------------------------------- 1 | FROM python:3-alpine 2 | 3 | RUN apk --no-cache add libmagic 4 | 5 | WORKDIR /usr/src/panoptes-python-client 6 | 7 | COPY setup.py . 8 | 9 | RUN pip install .[testing,docs] 10 | 11 | COPY . . 12 | 13 | RUN pip install -U .[testing,docs] 14 | -------------------------------------------------------------------------------- /Dockerfile.dev2: -------------------------------------------------------------------------------- 1 | FROM python:2.7-alpine 2 | 3 | RUN apk --no-cache add libmagic 4 | 5 | WORKDIR /usr/src/panoptes-python-client 6 | 7 | COPY setup.py . 8 | 9 | RUN pip install .[testing,docs] 10 | 11 | COPY . . 12 | 13 | RUN pip install -U .[testing,docs] 14 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | python: "3" 7 | 8 | python: 9 | install: 10 | - method: pip 11 | path: . 12 | extra_requirements: 13 | - docs 14 | sphinx: 15 | configuration: docs/conf.py 16 | -------------------------------------------------------------------------------- /panoptes_client/tests/test_set_member_subject.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from panoptes_client.set_member_subject import SetMemberSubject 4 | 5 | 6 | class TestSetMemberSubject(unittest.TestCase): 7 | def test_find_id(self): 8 | sms = SetMemberSubject.find(1000) 9 | self.assertEqual(sms.id, '1000') 10 | -------------------------------------------------------------------------------- /panoptes_client/project_role.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | from panoptes_client.panoptes import PanoptesObject, LinkResolver 4 | 5 | 6 | class ProjectRole(PanoptesObject): 7 | _api_slug = 'project_roles' 8 | _link_slug = 'project_roles' 9 | _edit_attributes = () 10 | 11 | LinkResolver.register(ProjectRole) 12 | -------------------------------------------------------------------------------- /panoptes_client/aggregation.py: -------------------------------------------------------------------------------- 1 | from panoptes_client.panoptes import PanoptesObject 2 | 3 | 4 | class Aggregation(PanoptesObject): 5 | _api_slug = 'aggregations' 6 | _link_slug = 'aggregations' 7 | _edit_attributes = ( 8 | { 9 | 'links': ( 10 | 'workflow', 11 | 'user', 12 | ) 13 | }, 14 | ) 15 | -------------------------------------------------------------------------------- /panoptes_client/set_member_subject.py: -------------------------------------------------------------------------------- 1 | from panoptes_client.panoptes import PanoptesObject, LinkResolver 2 | 3 | 4 | class SetMemberSubject(PanoptesObject): 5 | _api_slug = 'set_member_subjects' 6 | _link_slug = 'set_member_subjects' 7 | _edit_attributes = () 8 | 9 | LinkResolver.register(SetMemberSubject) 10 | LinkResolver.register(SetMemberSubject, 'set_member_subject') 11 | -------------------------------------------------------------------------------- /panoptes_client/collection_role.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | from panoptes_client.panoptes import PanoptesObject, LinkResolver 4 | 5 | 6 | class CollectionRole(PanoptesObject): 7 | _api_slug = 'collection_roles' 8 | _link_slug = 'collection_roles' 9 | _edit_attributes = ( 10 | 'roles', 11 | { 12 | 'links': ( 13 | 'collection', 14 | 'user', 15 | ), 16 | }, 17 | ) 18 | 19 | 20 | LinkResolver.register(CollectionRole) 21 | -------------------------------------------------------------------------------- /panoptes_client/tests/test_linkresolver.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import unittest 4 | import sys 5 | 6 | if sys.version_info <= (3, 0): 7 | from mock import Mock 8 | else: 9 | from unittest.mock import Mock 10 | 11 | from panoptes_client.panoptes import LinkResolver 12 | 13 | 14 | class TestLinkResolver(unittest.TestCase): 15 | def test_set_new_link(self): 16 | parent = Mock() 17 | parent.raw = {'links': {}} 18 | 19 | target = Mock() 20 | 21 | resolver = LinkResolver(parent) 22 | resolver.newlink = target 23 | self.assertEqual(parent.raw['links'].get('newlink', None), target) 24 | -------------------------------------------------------------------------------- /.github/workflows/run-tests.yml: -------------------------------------------------------------------------------- 1 | name: Run tests 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | push: 7 | branches: 8 | - master 9 | 10 | jobs: 11 | build: 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v3 16 | - name: Set up Python 3.9 17 | uses: actions/setup-python@v4 18 | with: 19 | python-version: '3.9' 20 | cache: 'pip' # caching pip dependencies 21 | cache-dependency-path: setup.py 22 | - name: Install dependencies 23 | run: | 24 | pip install -U pip 25 | pip install -U .[testing,docs] 26 | - name: Run tests 27 | run: python -m unittest discover 28 | -------------------------------------------------------------------------------- /.github/workflows/publish-to-pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish to PyPi 2 | 3 | on: workflow_dispatch 4 | 5 | jobs: 6 | build-and-publish: 7 | name: Build python package and publish to PyPi 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@master 11 | - name: Set up Python 3.9 12 | uses: actions/setup-python@v2 13 | with: 14 | python-version: 3.9 15 | - name: Install dependencies 16 | run: | 17 | python -m pip install --upgrade pip 18 | pip install setuptools wheel 19 | - name: Build 20 | run: python setup.py sdist bdist_wheel 21 | - name: Publish to PyPi 22 | uses: pypa/gh-action-pypi-publish@release/v1 23 | with: 24 | user: __token__ 25 | password: ${{ secrets.PYPI_API_TOKEN }} -------------------------------------------------------------------------------- /panoptes_client/tests/test_project.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import unittest 4 | 5 | from panoptes_client import Project 6 | from panoptes_client.panoptes import PanoptesAPIException 7 | 8 | 9 | class TestProject(unittest.TestCase): 10 | def test_find_id(self): 11 | p = Project.find(1) 12 | self.assertEqual(p.id, '1') 13 | 14 | def test_find_slug(self): 15 | p = Project.find(slug='zooniverse/snapshot-supernova') 16 | self.assertEqual(p.id, '1') 17 | 18 | def test_find_unknown_id(self): 19 | p = Project.find(0) 20 | self.assertEqual(p, None) 21 | 22 | def test_find_unknown_slug(self): 23 | with self.assertRaises(PanoptesAPIException): 24 | Project.find(slug='invalid_slug') 25 | -------------------------------------------------------------------------------- /panoptes_client/__init__.py: -------------------------------------------------------------------------------- 1 | from panoptes_client.classification import Classification 2 | from panoptes_client.collection import Collection 3 | from panoptes_client.collection_role import CollectionRole 4 | from panoptes_client.organization import Organization 5 | from panoptes_client.panoptes import Panoptes 6 | from panoptes_client.project import Project 7 | from panoptes_client.project_preferences import ProjectPreferences 8 | from panoptes_client.project_role import ProjectRole 9 | from panoptes_client.subject import Subject 10 | from panoptes_client.subject_set import SubjectSet 11 | from panoptes_client.user import User 12 | from panoptes_client.workflow import Workflow 13 | from panoptes_client.subject_workflow_status import SubjectWorkflowStatus 14 | from panoptes_client.caesar import Caesar 15 | from panoptes_client.inaturalist import Inaturalist 16 | -------------------------------------------------------------------------------- /.github/workflows/publish-to-test-pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish to Test PyPi 2 | 3 | on: workflow_dispatch 4 | 5 | jobs: 6 | build-and-publish: 7 | name: Build python package and publish to Test PyPi 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@master 11 | - name: Set up Python 3.9 12 | uses: actions/setup-python@v2 13 | with: 14 | python-version: 3.9 15 | - name: Install dependencies 16 | run: | 17 | python -m pip install --upgrade pip 18 | pip install setuptools wheel 19 | - name: Build 20 | run: python setup.py sdist bdist_wheel 21 | - name: Publish to Test PyPi 22 | uses: pypa/gh-action-pypi-publish@release/v1 23 | with: 24 | user: __token__ 25 | password: ${{ secrets.TEST_PYPI_API_TOKEN }} 26 | repository_url: https://test.pypi.org/legacy/ -------------------------------------------------------------------------------- /panoptes_client/subject_workflow_status.py: -------------------------------------------------------------------------------- 1 | from panoptes_client.panoptes import PanoptesObject 2 | 3 | 4 | class SubjectWorkflowStatus(PanoptesObject): 5 | """ 6 | Retrieve SubjectWorkflowStatus responses from Panoptes i.e. the retirement 7 | status (current state, retirement date, retirement reason) of a 8 | subject/workflow pair. 9 | 10 | Example use: 11 | 12 | Get the status of a given subject: 13 | subject_workflow_status = next( 14 | SubjectWorkflowStatus.where(subject_id='30089908') 15 | ) 16 | 17 | The .where(kwargs) method works with: 18 | - id (i.e. the id of the SubjectWorkflowStatus, which is *not* the same as 19 | the subject_id) 20 | - subject_id 21 | - workflow_id 22 | 23 | Remember that one subject may be classified on many workflows, and hence 24 | may have many SubjectWorkflowStatus' (one per subject/workflow pair). 25 | """ 26 | _api_slug = 'subject_workflow_statuses' 27 | _edit_attributes = {} 28 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. Panoptes Client documentation master file, created by 2 | sphinx-quickstart on Thu Oct 6 15:06:45 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Panoptes Client's documentation! 7 | =========================================== 8 | 9 | This package is the Python SDK for `Panoptes 10 | `_, the platform behind the 11 | `Zooniverse `_. This module is intended to allow 12 | programmatic management of projects, providing high level access to the API for 13 | common project management tasks. 14 | 15 | You can find the `source code for this package on GitHub 16 | `_. We welcome bug 17 | reports, feature requests, and pull requests. 18 | 19 | User guide 20 | ---------- 21 | 22 | .. toctree:: 23 | :maxdepth: 4 24 | 25 | user_guide 26 | 27 | Module reference 28 | ---------------- 29 | 30 | .. toctree:: 31 | :maxdepth: 2 32 | 33 | panoptes_client 34 | 35 | 36 | Indices and tables 37 | ================== 38 | 39 | * :ref:`genindex` 40 | * :ref:`modindex` 41 | * :ref:`search` 42 | 43 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | from pathlib import Path 3 | this_directory = Path(__file__).parent 4 | long_description = (this_directory / "README.md").read_text() 5 | 6 | setup( 7 | name='panoptes_client', 8 | url='https://github.com/zooniverse/panoptes-python-client', 9 | author='Adam McMaster / Zooniverse', 10 | author_email='contact@zooniverse.org', 11 | description=( 12 | 'This package is the Python SDK for Panoptes, the platform behind the Zooniverse. This module is intended to allow programmatic management of projects, providing high level access to the API for common project management tasks.' 13 | ), 14 | long_description=long_description, 15 | long_description_content_type='text/markdown', 16 | version='1.7.1', 17 | packages=find_packages(), 18 | include_package_data=True, 19 | install_requires=[ 20 | 'requests>=2.4.2', 21 | 'future>=0.16', 22 | 'python-magic>=0.4', 23 | 'redo>=1.7', 24 | 'six>=1.9', 25 | ], 26 | extras_require={ 27 | 'testing': [ 28 | 'mock>=2.0', 29 | ], 30 | 'docs': [ 31 | 'sphinx', 32 | ], 33 | ':python_version == "2.7"': ['futures'], 34 | } 35 | ) 36 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | dev: 5 | build: 6 | context: ./ 7 | dockerfile: Dockerfile.dev 8 | volumes: 9 | - ${HOME}/.panoptes/:/root/.panoptes/ 10 | - ${HOME}:${HOME} 11 | environment: 12 | - PANOPTES_DEBUG=true 13 | 14 | stable: 15 | build: 16 | context: ./ 17 | dockerfile: Dockerfile.stable 18 | volumes: 19 | - ${HOME}/.panoptes/:/root/.panoptes/ 20 | - ${HOME}:${HOME} 21 | 22 | tests: 23 | build: 24 | context: ./ 25 | dockerfile: Dockerfile.dev 26 | volumes: 27 | - ${HOME}/.panoptes/:/root/.panoptes/ 28 | - ${HOME}:${HOME} 29 | command: python -m unittest discover 30 | 31 | dev2: 32 | build: 33 | context: ./ 34 | dockerfile: Dockerfile.dev2 35 | volumes: 36 | - ${HOME}/.panoptes/:/root/.panoptes/ 37 | - ${HOME}:${HOME} 38 | environment: 39 | - PANOPTES_DEBUG=true 40 | 41 | tests2: 42 | build: 43 | context: ./ 44 | dockerfile: Dockerfile.dev2 45 | volumes: 46 | - ${HOME}/.panoptes/:/root/.panoptes/ 47 | - ${HOME}:${HOME} 48 | command: python -m unittest discover 49 | 50 | stable2: 51 | build: 52 | context: ./ 53 | dockerfile: Dockerfile.stable2 54 | volumes: 55 | - ${HOME}/.panoptes/:/root/.panoptes/ 56 | - ${HOME}:${HOME} 57 | -------------------------------------------------------------------------------- /panoptes_client/tests/test_subject_set.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import unittest 4 | import sys 5 | 6 | if sys.version_info <= (3, 0): 7 | from mock import patch, Mock 8 | else: 9 | from unittest.mock import patch, Mock 10 | 11 | from panoptes_client.subject_set import SubjectSet 12 | 13 | 14 | class TestSubjectSet(unittest.TestCase): 15 | def test_create(self): 16 | with patch('panoptes_client.panoptes.Panoptes') as pc: 17 | pc.client().post = Mock(return_value=( 18 | { 19 | 'subject_sets': [{ 20 | 'id': 0, 21 | 'display_name': '', 22 | }], 23 | }, 24 | '', 25 | )) 26 | subject_set = SubjectSet() 27 | subject_set.links.project = 1234 28 | subject_set.display_name = 'Name' 29 | subject_set.save() 30 | 31 | pc.client().post.assert_called_with( 32 | '/subject_sets', 33 | json={ 34 | 'subject_sets': { 35 | 'display_name': 'Name', 36 | 'metadata': {}, 37 | 'links': { 38 | 'project': 1234, 39 | } 40 | } 41 | }, 42 | etag=None, 43 | ) 44 | -------------------------------------------------------------------------------- /panoptes_client/classification.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | from panoptes_client.panoptes import LinkResolver, PanoptesObject 4 | 5 | 6 | class Classification(PanoptesObject): 7 | _api_slug = 'classifications' 8 | _link_slug = 'classification' 9 | _edit_attributes = ( ) 10 | 11 | @classmethod 12 | def where(cls, **kwargs): 13 | """ 14 | where(scope=None, **kwargs) 15 | 16 | Like :py:meth:`.PanoptesObject.where`, but also allows setting the 17 | query scope. 18 | 19 | - **scope** can be any of the values given in the `Classification 20 | Collection API documentation `_ 21 | without the leading slash. 22 | 23 | Examples:: 24 | 25 | my_classifications = Classification.where() 26 | my_proj_123_classifications = Classification.where(project_id=123) 27 | 28 | all_proj_123_classifications = Classification.where( 29 | scope='project', 30 | project_id=123, 31 | ) 32 | """ 33 | 34 | scope = kwargs.pop('scope', None) 35 | if not scope: 36 | return super(Classification, cls).where(**kwargs) 37 | return cls.paginated_results(*cls.http_get(scope, params=kwargs)) 38 | 39 | LinkResolver.register(Classification) 40 | -------------------------------------------------------------------------------- /panoptes_client/organization.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | from panoptes_client.panoptes import ( 4 | LinkResolver, 5 | PanoptesObject, 6 | ) 7 | from panoptes_client.project import Project 8 | from panoptes_client.utils import batchable 9 | 10 | 11 | class Organization(PanoptesObject): 12 | _api_slug = 'organizations' 13 | _link_slug = 'organization' 14 | _edit_attributes = ( 15 | 'display_name', 16 | 'description', 17 | 'tags', 18 | 'introduction', 19 | 'primary_language', 20 | ) 21 | 22 | def add(self, projects): 23 | """ 24 | A wrapper around :py:meth:`.LinkCollection.add`. Equivalent to:: 25 | 26 | organization.links.add(projects) 27 | """ 28 | 29 | return self.links.projects.add(projects) 30 | 31 | def remove(self, projects): 32 | """ 33 | A wrapper around :py:meth:`.LinkCollection.remove`. Equivalent to:: 34 | 35 | organization.links.remove(projects) 36 | """ 37 | 38 | return self.links.projects.remove(projects) 39 | 40 | def __contains__(self, project): 41 | """ 42 | A wrapper around :py:meth:`.LinkCollection.__contains__`. Equivalent 43 | to:: 44 | 45 | project in organization.links.project 46 | """ 47 | 48 | return project in self 49 | 50 | 51 | LinkResolver.register(Organization) 52 | -------------------------------------------------------------------------------- /panoptes_client/tests/test_inaturalist.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import unittest 4 | import sys 5 | 6 | if sys.version_info <= (3, 0): 7 | from mock import patch, Mock 8 | else: 9 | from unittest.mock import patch, Mock 10 | 11 | from panoptes_client.inaturalist import Inaturalist 12 | 13 | 14 | class TestInaturalist(unittest.TestCase): 15 | 16 | def test_inat_import(self): 17 | with patch('panoptes_client.panoptes.Panoptes.client') as pc: 18 | pc().post = Mock(return_value=200) 19 | Inaturalist.inat_import(16462, 4) 20 | 21 | pc().post.assert_called_with( 22 | '/inaturalist/import', 23 | json={ 24 | 'taxon_id': 16462, 25 | 'subject_set_id': 4, 26 | 'updated_since': None 27 | } 28 | ) 29 | 30 | def test_inat_import_updated_since(self): 31 | with patch('panoptes_client.panoptes.Panoptes.client') as pc: 32 | pc().post = Mock(return_value=200) 33 | Inaturalist.inat_import(16462, 4, '2022-10-31') 34 | 35 | pc().post.assert_called_with( 36 | '/inaturalist/import', 37 | json={ 38 | 'taxon_id': 16462, 39 | 'subject_set_id': 4, 40 | 'updated_since': '2022-10-31' 41 | } 42 | ) 43 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | .vscode 92 | .DS_Store 93 | -------------------------------------------------------------------------------- /panoptes_client/workflow_version.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | from panoptes_client.panoptes import ( 4 | Panoptes, 5 | PanoptesAPIException, 6 | PanoptesObject, 7 | ) 8 | from panoptes_client.workflow import Workflow 9 | 10 | 11 | class WorkflowVersion(PanoptesObject): 12 | _api_slug = 'versions' 13 | _edit_attributes = tuple() 14 | 15 | @classmethod 16 | def http_get(cls, path, params={}, headers={}): 17 | workflow = params.pop('workflow') 18 | return Panoptes.client().get( 19 | Workflow.url(workflow.id) + cls.url(path), 20 | params, 21 | headers, 22 | ) 23 | 24 | @classmethod 25 | def find(cls, _id, workflow): 26 | """ 27 | Like :py:meth:`.PanoptesObject.find` but also allows lookup by 28 | workflow. 29 | 30 | - **workflow** must be a :py:class:`.Workflow` instance. 31 | """ 32 | 33 | try: 34 | return cls.where(id=_id, workflow=workflow).next() 35 | except StopIteration: 36 | raise PanoptesAPIException( 37 | "Could not find {} with id='{}'".format(cls.__name__, _id) 38 | ) 39 | 40 | def save(self): 41 | """ 42 | Not implemented for this class. It is not possible to modify workflow 43 | versions once they are created. 44 | """ 45 | 46 | raise NotImplementedError( 47 | 'It is not possible to manually create workflow versions. ' 48 | 'Modify the workflow instead.' 49 | ) 50 | 51 | @property 52 | def workflow(self): 53 | """ 54 | The :py:class:`.Workflow` to which this version refers. 55 | """ 56 | 57 | return self.links.item 58 | -------------------------------------------------------------------------------- /panoptes_client/inaturalist.py: -------------------------------------------------------------------------------- 1 | from panoptes_client.panoptes import Panoptes 2 | 3 | 4 | class Inaturalist(object): 5 | """ 6 | The class that interacts with the Panoptes' iNaturalist functionality. 7 | Currently, this includes a single route that allows the importing of 8 | iNaturalist Observations as Zooniverse Subjects. 9 | """ 10 | 11 | def inat_import( 12 | taxon_id, 13 | subject_set_id, 14 | updated_since=None 15 | ): 16 | """ 17 | Begins an import of iNaturalist Observations as Zooniverse Subjects. 18 | Response is a 200 if Panoptes begins the import successfully. 19 | Requires owner or collaborator access to the subject set's linked project. 20 | Takes three arguments: 21 | taxon_id: the iNat taxon ID of a particular species 22 | subject_set_id: the Zoo subject set id subjects should be imported into. 23 | Updated observations will upsert their respective subjects. 24 | updated_since: a date range limiter on the iNat Observations query. 25 | Warning: defaults to None and will import ALL Observations 26 | by default. This will likely be a lot and take a while. 27 | Examples:: 28 | # Import gray squirrel observations updated during or after Halloween 2022 to subject set id 3: 29 | Inaturalist.inat_import(46017, 3, '2022-10-31') 30 | 31 | # Import all royal flycatcher observations to subject set id 4: 32 | Inaturalist.inat_import(16462, 4) 33 | """ 34 | 35 | return Panoptes.client().post( 36 | f'/inaturalist/import', 37 | json={ 38 | 'taxon_id': taxon_id, 39 | 'subject_set_id': subject_set_id, 40 | 'updated_since': updated_since 41 | } 42 | ) 43 | -------------------------------------------------------------------------------- /panoptes_client/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import range 3 | 4 | import functools 5 | 6 | 7 | ITERABLE_TYPES = ( 8 | list, 9 | set, 10 | tuple, 11 | ) 12 | 13 | MISSING_POSITIONAL_ERR = 'Required positional argument (pos 1) not found' 14 | 15 | try: 16 | from numpy import ndarray 17 | ITERABLE_TYPES = ITERABLE_TYPES + (ndarray,) 18 | except ImportError: 19 | pass 20 | 21 | 22 | def isiterable(v): 23 | return isinstance(v, ITERABLE_TYPES) 24 | 25 | 26 | def split(to_batch, batch_size): 27 | if type(to_batch) == set: 28 | to_batch = tuple(to_batch) 29 | for batch in [ 30 | to_batch[i:i + batch_size] 31 | for i in range(0, len(to_batch), batch_size) 32 | ]: 33 | yield batch 34 | 35 | 36 | def batchable(func=None, batch_size=100): 37 | @functools.wraps(func) 38 | def do_batch(*args, **kwargs): 39 | if len(args) <= 1: 40 | raise TypeError(MISSING_POSITIONAL_ERR) 41 | _batch_size = kwargs.pop('batch_size', batch_size) 42 | 43 | _self = args[0] 44 | to_batch = args[1] 45 | args = args[2:] 46 | if not isiterable(to_batch): 47 | to_batch = [to_batch] 48 | 49 | if isinstance(to_batch, set): 50 | to_batch = list(to_batch) 51 | 52 | for batch in split(to_batch, _batch_size): 53 | if _self is None: 54 | func(batch, *args, **kwargs) 55 | else: 56 | func(_self, batch, *args, **kwargs) 57 | 58 | # This avoids us having to call batchable wherever it's used, so we can 59 | # just write: 60 | # @batchable 61 | # def func(self, ...): 62 | # 63 | # Rather than: 64 | # @batchable() 65 | # def func(self, ...): 66 | # 67 | # While still allowing this: 68 | # @batchable(batch_size=10) 69 | # def func(self, ...): 70 | if func is None: 71 | return functools.partial(batchable, batch_size=batch_size) 72 | 73 | return do_batch 74 | -------------------------------------------------------------------------------- /panoptes_client/user.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | from panoptes_client.panoptes import Panoptes, PanoptesObject, LinkResolver 4 | from panoptes_client.utils import isiterable, split 5 | 6 | BATCH_SIZE = 50 7 | 8 | class User(PanoptesObject): 9 | _api_slug = 'users' 10 | _link_slug = 'users' 11 | _edit_attributes = ( 12 | 'valid_email', 13 | ) 14 | 15 | @classmethod 16 | def where(cls, **kwargs): 17 | email = kwargs.get('email') 18 | login = kwargs.get('login') 19 | 20 | if email and login: 21 | raise ValueError( 22 | 'Queries are supported on at most ONE of email and login' 23 | ) 24 | 25 | # This is a workaround for 26 | # https://github.com/zooniverse/Panoptes/issues/2733 27 | kwargs['page_size'] = BATCH_SIZE 28 | 29 | if email: 30 | if not isiterable(email): 31 | email = [email] 32 | 33 | for batch in split(email, BATCH_SIZE): 34 | kwargs['email'] = ",".join(batch) 35 | for user in super(User, cls).where(**kwargs): 36 | yield user 37 | 38 | elif login: 39 | if not isiterable(login): 40 | login = [login] 41 | 42 | for batch in split(login, BATCH_SIZE): 43 | kwargs['login'] = ",".join(batch) 44 | for user in super(User, cls).where(**kwargs): 45 | yield user 46 | 47 | else: 48 | for user in super(User, cls).where(**kwargs): 49 | yield user 50 | 51 | @property 52 | def avatar(self): 53 | """ 54 | A dict containing metadata about the user's avatar. 55 | """ 56 | 57 | return User.http_get('{}/avatar'.format(self.id))[0] 58 | 59 | def me(): 60 | """ 61 | Instantiate User for logged-in user account. 62 | """ 63 | 64 | return User.find(Panoptes.client().logged_in_user_id) 65 | 66 | LinkResolver.register(User) 67 | LinkResolver.register(User, 'owner') 68 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | We welcome pull requests from anyone, so if you have something you'd like to 4 | contribute or an idea for improving this project, that's great! Changes should 5 | generally fit into one of the following categories: 6 | 7 | - Bug fixes 8 | - Implementing additional Panoptes API functionality (there's still a lot to do 9 | here!) 10 | - Improvements/enhancements which will be generally useful to many people who 11 | use the API client. 12 | 13 | If you're unsure about whether your changes would be suitable, please feel free 14 | to open an issue to discuss them _before_ spending too much time implementing 15 | them. It's best to start talking about how (or if) you should do something 16 | early, before a lot of work goes into it. 17 | 18 | ## Getting started 19 | 20 | The first thing you should do is fork this repo and clone your fork to your 21 | local computer. Then create a feature branch for your changes (create a separate 22 | branch for each separate contribution, don't lump unrelated changes together). 23 | 24 | I'd **strongly** recommend using Docker Compose to test your development 25 | version: 26 | 27 | ``` 28 | $ docker-compose build dev 29 | $ docker-compose run dev --help 30 | ``` 31 | 32 | When you're ready, push your changes to a branch in your fork and open a pull 33 | request. After opening the PR, you may get some comments from Hound, which is an 34 | automated service which checks coding style and highlights common mistakes. 35 | Please take note of what it says and make any changes to your code as needed. 36 | 37 | ## Releasing new packages 38 | 39 | If you have access to publish new releases on PyPI, this is a general outline of 40 | the process: 41 | 42 | - Bump the version number in setup.py 43 | - Update CHANGELOG.md 44 | - Update README.md if needed 45 | - Build and upload a new package: 46 | 47 | ``` 48 | python setup.py sdist 49 | twine upload -s dist/panoptes_client-* 50 | git tag 51 | git push --tags 52 | ``` 53 | 54 | Note that you'll need to have a GPG key set up so that `twine` can sign the 55 | package. You should also make sure that your public key is published in the key 56 | servers so that people can verify the signature. 57 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Panoptes Client 2 | 3 | This package is the Python SDK for 4 | [Panoptes](https://github.com/zooniverse/Panoptes), the platform behind the 5 | [Zooniverse](https://www.zooniverse.org/). This module is intended to allow 6 | programmatic management of projects, providing high level access to the API for 7 | common project management tasks. 8 | 9 | [Full documentation is available at Read the 10 | Docs](http://panoptes-python-client.readthedocs.io/). 11 | 12 | ## Installation 13 | 14 | Install latest stable release: 15 | 16 | ``` 17 | $ pip install panoptes-client 18 | ``` 19 | 20 | Or for development or testing, you can install the development version directly 21 | from GitHub: 22 | 23 | ``` 24 | $ pip install -U git+https://github.com/zooniverse/panoptes-python-client.git 25 | ``` 26 | 27 | Upgrade an existing installation: 28 | 29 | ``` 30 | $ pip install -U panoptes-client 31 | ``` 32 | 33 | The Panoptes Client is supported on all versions of Python 2 and 3, from Python 34 | 2.7 onwards. 35 | 36 | ## Usage Examples 37 | 38 | Create a project: 39 | 40 | ```python 41 | from panoptes_client import Panoptes, Project 42 | 43 | Panoptes.connect(username='example', password='example') 44 | 45 | new_project = Project() 46 | new_project.display_name = 'My new project' 47 | new_project.description = 'A great new project!' 48 | new_project.primary_language = 'en' 49 | new_project.private = True 50 | new_project.save() 51 | ``` 52 | 53 | See the documentation for [additional 54 | examples](http://panoptes-python-client.readthedocs.io/en/latest/user_guide.html#usage-examples). 55 | 56 | ## Contributing 57 | 58 | We welcome bug reports and code contributions. Please see 59 | [CONTRIBUTING.md](https://github.com/zooniverse/panoptes-python-client/blob/master/CONTRIBUTING.md) 60 | for information about how you can get involved. 61 | 62 | ### Running the Tests 63 | 64 | You can run the tests with Docker. This will run them under Python 3 and Python 65 | 2: 66 | 67 | ``` 68 | docker-compose build tests && docker-compose run tests 69 | docker-compose build tests2 && docker-compose run tests2 70 | ``` 71 | 72 | Or you can run them directly in Python with: 73 | 74 | ```python 75 | python -m unittest discover 76 | ``` 77 | -------------------------------------------------------------------------------- /panoptes_client/tests/test_bearer_expiry.py: -------------------------------------------------------------------------------- 1 | 2 | from panoptes_client.panoptes import Panoptes 3 | 4 | import datetime 5 | import unittest 6 | import sys 7 | 8 | if sys.version_info <= (3, 0): 9 | from mock import patch 10 | else: 11 | from unittest.mock import patch 12 | 13 | 14 | class MockDate(datetime.datetime): 15 | 16 | _fake = None 17 | 18 | @classmethod 19 | def fake(cls, time): 20 | cls._fake = time 21 | 22 | @classmethod 23 | def now(cls, tz=None): 24 | return cls._fake 25 | 26 | 27 | @patch('panoptes_client.panoptes.datetime', MockDate) 28 | class TestBearer(unittest.TestCase): 29 | 30 | def test_early(self): 31 | target = datetime.datetime(2017, 1, 1, 10, 0, 0) 32 | MockDate.fake(target) 33 | 34 | client = Panoptes() 35 | client.bearer_token = True 36 | 37 | client.bearer_expires = datetime.datetime(2017, 1, 1, 12, 0, 0) 38 | 39 | assert client.valid_bearer_token() is True 40 | 41 | def test_early_2(self): 42 | target = datetime.datetime(2017, 1, 1, 11, 58, 0) 43 | MockDate.fake(target) 44 | 45 | client = Panoptes() 46 | client.bearer_token = True 47 | 48 | client.bearer_expires = datetime.datetime(2017, 1, 1, 12, 0, 0) 49 | 50 | assert client.valid_bearer_token() is True 51 | 52 | def test_late(self): 53 | target = datetime.datetime(2017, 1, 1, 14, 0, 0) 54 | MockDate.fake(target) 55 | 56 | client = Panoptes() 57 | client.bearer_token = True 58 | 59 | client.bearer_expires = datetime.datetime(2017, 1, 1, 12, 0, 0) 60 | 61 | assert client.valid_bearer_token() is False 62 | 63 | def test_late_2(self): 64 | target = datetime.datetime(2017, 1, 1, 12, 0, 1) 65 | MockDate.fake(target) 66 | 67 | client = Panoptes() 68 | client.bearer_token = True 69 | 70 | client.bearer_expires = datetime.datetime(2017, 1, 1, 12, 0, 0) 71 | 72 | assert client.valid_bearer_token() is False 73 | 74 | def test_in_buffer(self): 75 | target = datetime.datetime(2017, 1, 1, 11, 59, 0) 76 | MockDate.fake(target) 77 | 78 | client = Panoptes() 79 | client.bearer_token = True 80 | 81 | client.bearer_expires = datetime.datetime(2017, 1, 1, 12, 0, 0) 82 | 83 | assert client.valid_bearer_token() is False 84 | 85 | def test_has_token(self): 86 | client = Panoptes() 87 | client.bearer_token = True 88 | 89 | assert client.has_bearer_token() is True 90 | 91 | def test_has_no_token(self): 92 | client = Panoptes() 93 | 94 | assert client.has_bearer_token() is False 95 | -------------------------------------------------------------------------------- /panoptes_client/tests/test_subject.py: -------------------------------------------------------------------------------- 1 | import io 2 | import unittest 3 | from unittest.mock import patch, mock_open 4 | 5 | from panoptes_client.subject import Subject, UnknownMediaException 6 | import mimetypes 7 | 8 | 9 | class TestSubject(unittest.TestCase): 10 | def setUp(self): 11 | self.subject = Subject() 12 | self.subject.locations = [] 13 | self.subject._media_files = [] 14 | self.subject.modified_attributes = set() 15 | 16 | def test_add_location_with_dict(self): 17 | location_dict = {"image/png": "https://example.com/image.png"} 18 | self.subject.add_location(location_dict) 19 | self.assertIn(location_dict, self.subject.locations) 20 | self.assertIn(None, self.subject._media_files) 21 | self.assertIn("locations", self.subject.modified_attributes) 22 | 23 | def test_add_location_manual_mimetype_file_like(self): 24 | data = b"fake image data" 25 | fake_file = io.BytesIO(data) 26 | self.subject.add_location(fake_file, manual_mimetype="image/jpeg") 27 | self.assertEqual(self.subject.locations[-1], "image/jpeg") 28 | self.assertEqual(self.subject._media_files[-1], data) 29 | self.assertIn("locations", self.subject.modified_attributes) 30 | 31 | @patch("panoptes_client.subject.magic") 32 | def test_add_location_magic_detection(self, mock_magic): 33 | mock_magic.from_buffer.return_value = "image/jpeg" 34 | data = b"fake image data" 35 | fake_file = io.BytesIO(data) 36 | self.subject.add_location(fake_file) 37 | self.assertEqual(self.subject.locations[-1], "image/jpeg") 38 | self.assertEqual(self.subject._media_files[-1], data) 39 | self.assertIn("locations", self.subject.modified_attributes) 40 | mock_magic.from_buffer.assert_called_with(data, mime=True) 41 | 42 | @patch.object(mimetypes, 'guess_type', return_value=("image/jpeg", None)) 43 | def test_add_location_mimetypes_detection(self, mock_guess_type): 44 | import panoptes_client.subject as subject_module 45 | subject_module.MEDIA_TYPE_DETECTION = 'mimetypes' 46 | 47 | m = mock_open(read_data=b"fake image data") 48 | with patch("panoptes_client.subject.open", m, create=True): 49 | self.subject.add_location("dummy.jpg") 50 | 51 | self.assertEqual(self.subject.locations[-1], "image/jpeg") 52 | self.assertEqual(self.subject._media_files[-1], b"fake image data") 53 | self.assertIn("locations", self.subject.modified_attributes) 54 | 55 | def test_add_location_invalid_manual_mimetype(self): 56 | data = b"fake data" 57 | fake_file = io.BytesIO(data) 58 | with self.assertRaises(UnknownMediaException): 59 | self.subject.add_location(fake_file, manual_mimetype="application/javascript") -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "Code scanning - action" 13 | 14 | on: 15 | push: 16 | branches: 17 | - master 18 | pull_request: 19 | schedule: 20 | - cron: '0 9 * * 0' 21 | 22 | jobs: 23 | analyze: 24 | name: Analyze 25 | runs-on: ubuntu-latest 26 | permissions: 27 | actions: read 28 | contents: read 29 | security-events: write 30 | 31 | strategy: 32 | fail-fast: false 33 | matrix: 34 | language: [ 'python' ] 35 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] 36 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support 37 | 38 | steps: 39 | - name: Checkout repository 40 | uses: actions/checkout@v3 41 | 42 | # Initializes the CodeQL tools for scanning. 43 | - name: Initialize CodeQL 44 | uses: github/codeql-action/init@v2 45 | with: 46 | languages: ${{ matrix.language }} 47 | # If you wish to specify custom queries, you can do so here or in a config file. 48 | # By default, queries listed here will override any specified in a config file. 49 | # Prefix the list here with "+" to use these queries and those in the config file. 50 | 51 | # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 52 | # queries: security-extended,security-and-quality 53 | 54 | 55 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 56 | # If this step fails, then you should remove it and run the build manually (see below) 57 | - name: Autobuild 58 | uses: github/codeql-action/autobuild@v2 59 | 60 | # ℹ️ Command-line programs to run using the OS shell. 61 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 62 | 63 | # If the Autobuild fails above, remove it and uncomment the following three lines. 64 | # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 65 | 66 | # - run: | 67 | # echo "Run, Build Application using script" 68 | # ./location_of_script_within_repo/buildscript.sh 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v2 72 | with: 73 | category: "/language:${{matrix.language}}" 74 | -------------------------------------------------------------------------------- /panoptes_client/collection.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import str 3 | 4 | from panoptes_client.panoptes import ( 5 | PanoptesAPIException, 6 | PanoptesObject, 7 | ) 8 | from panoptes_client.subject import Subject 9 | from panoptes_client.utils import batchable 10 | 11 | 12 | class Collection(PanoptesObject): 13 | _api_slug = 'collections' 14 | _link_slug = 'collections' 15 | _edit_attributes = ( 16 | 'name', 17 | 'description', 18 | 'display_name', 19 | 'private', 20 | { 21 | 'links': ( 22 | 'project', 23 | ), 24 | }, 25 | ) 26 | 27 | @classmethod 28 | def find(cls, id='', slug=None): 29 | """ 30 | Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug 31 | as well as ID. 32 | 33 | Examples:: 34 | 35 | collection_1234 = Collection.find(1234) 36 | my_collection = Collection.find(slug="example/my-collection") 37 | """ 38 | 39 | if not id and not slug: 40 | return None 41 | try: 42 | return cls.where(id=id, slug=slug).next() 43 | except StopIteration: 44 | raise PanoptesAPIException( 45 | "Could not find collection with slug='{}'".format(slug) 46 | ) 47 | 48 | @property 49 | def subjects(self): 50 | """ 51 | A generator which yields each :py:class:`.Subject` in this collection. 52 | """ 53 | 54 | return self.links.subjects 55 | 56 | def add(self, subjects): 57 | """ 58 | A wrapper around :py:meth:`.LinkCollection.add`. Equivalent to:: 59 | 60 | collection.links.add(subjects) 61 | """ 62 | 63 | return self.links.subjects.add(subjects) 64 | 65 | def remove(self, subjects): 66 | """ 67 | A wrapper around :py:meth:`.LinkCollection.remove`. Equivalent to:: 68 | 69 | collection.links.remove(subjects) 70 | """ 71 | 72 | return self.links.subjects.remove(subjects) 73 | 74 | def set_default_subject(self, subject): 75 | """ 76 | Sets the subject's location media URL as a link. 77 | It displays as the default subject on PFE. 78 | 79 | - **subject** can be a single :py:class:`.Subject` instance or a single 80 | subject ID. 81 | 82 | Examples:: 83 | 84 | collection.set_default_subject(1234) 85 | collection.set_default_subject(Subject(1234)) 86 | """ 87 | if not ( 88 | isinstance(subject, Subject) 89 | or isinstance(subject, (int, str,)) 90 | ): 91 | raise TypeError 92 | if isinstance(subject, Subject): 93 | _subject_id = subject.id 94 | else: 95 | _subject_id = str(subject) 96 | 97 | self.http_post( 98 | '{}/links/default_subject'.format(self.id), 99 | json={'default_subject': _subject_id}, 100 | ) 101 | -------------------------------------------------------------------------------- /docs/panoptes_client.rst: -------------------------------------------------------------------------------- 1 | panoptes\_client package 2 | ======================== 3 | 4 | As a convenience, the following classes can be imported directly from the root 5 | of the ``panoptes_client`` package: 6 | 7 | - :py:class:`.Panoptes` 8 | - :py:class:`.Classification` 9 | - :py:class:`.Collection` 10 | - :py:class:`.Project` 11 | - :py:class:`.ProjectPreferences` 12 | - :py:class:`.Subject` 13 | - :py:class:`.SubjectSet` 14 | - :py:class:`.User` 15 | - :py:class:`.Workflow` 16 | - :py:class:`.Caesar` 17 | 18 | For example:: 19 | 20 | from panoptes_client import Panoptes, Project 21 | 22 | Panoptes.connect(username='example', password='example') 23 | 24 | new_project = Project() 25 | new_project.display_name = 'My new project' 26 | new_project.description = 'A great new project!' 27 | new_project.primary_language = 'en' 28 | new_project.private = True 29 | new_project.save() 30 | 31 | panoptes\_client\.panoptes module 32 | --------------------------------- 33 | 34 | .. automodule:: panoptes_client.panoptes 35 | :members: 36 | :show-inheritance: 37 | 38 | panoptes\_client\.classification module 39 | --------------------------------------- 40 | 41 | .. automodule:: panoptes_client.classification 42 | :members: 43 | :undoc-members: 44 | :show-inheritance: 45 | 46 | panoptes\_client\.collection module 47 | ----------------------------------- 48 | 49 | .. automodule:: panoptes_client.collection 50 | :members: 51 | :undoc-members: 52 | :show-inheritance: 53 | 54 | panoptes\_client\.exportable module 55 | ----------------------------------- 56 | 57 | .. automodule:: panoptes_client.exportable 58 | :members: 59 | :undoc-members: 60 | :show-inheritance: 61 | 62 | panoptes\_client\.project module 63 | -------------------------------- 64 | 65 | .. automodule:: panoptes_client.project 66 | :members: 67 | :undoc-members: 68 | :show-inheritance: 69 | 70 | panoptes\_client\.project\_preferences module 71 | --------------------------------------------- 72 | 73 | .. automodule:: panoptes_client.project_preferences 74 | :members: 75 | :undoc-members: 76 | :show-inheritance: 77 | 78 | panoptes\_client\.subject module 79 | -------------------------------- 80 | 81 | .. automodule:: panoptes_client.subject 82 | :members: 83 | :undoc-members: 84 | :show-inheritance: 85 | 86 | panoptes\_client\.subject\_set module 87 | ------------------------------------- 88 | 89 | .. automodule:: panoptes_client.subject_set 90 | :members: 91 | :undoc-members: 92 | :show-inheritance: 93 | 94 | panoptes\_client\.user module 95 | ----------------------------- 96 | 97 | .. automodule:: panoptes_client.user 98 | :members: 99 | :undoc-members: 100 | :show-inheritance: 101 | 102 | panoptes\_client\.workflow module 103 | --------------------------------- 104 | 105 | .. automodule:: panoptes_client.workflow 106 | :members: 107 | :undoc-members: 108 | :show-inheritance: 109 | 110 | panoptes\_client\.workflow\_version module 111 | ------------------------------------------ 112 | 113 | .. automodule:: panoptes_client.workflow_version 114 | :members: 115 | :undoc-members: 116 | :show-inheritance: 117 | :exclude-members: http_get 118 | 119 | panoptes\_client\.caesar module 120 | ------------------------------- 121 | 122 | .. automodule:: panoptes_client.caesar 123 | :members: 124 | :show-inheritance: 125 | -------------------------------------------------------------------------------- /panoptes_client/project_preferences.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import str 3 | 4 | from panoptes_client.panoptes import PanoptesObject, LinkResolver 5 | from panoptes_client.project import Project 6 | from panoptes_client.user import User 7 | 8 | class ProjectPreferences(PanoptesObject): 9 | """ 10 | Contains the settings for a :py:class:`.User` on a :py:class:`.Project`. 11 | """ 12 | 13 | _api_slug = 'project_preferences' 14 | _link_slug = 'project_preferences' 15 | _edit_attributes = ( 16 | 'preferences', 17 | 'settings', 18 | ) 19 | 20 | @classmethod 21 | def find(cls, id='', user=None, project=None): 22 | """ 23 | Like :py:meth:`.PanoptesObject.find` but can also query by user and 24 | project. 25 | 26 | - **user** and **project** can be either a :py:class:`.User` and 27 | :py:class:`.Project` instance respectively, or they can be given as 28 | IDs. If either argument is given, the other is also required. 29 | """ 30 | 31 | if not id: 32 | if not (user and project): 33 | raise ValueError('Both user and project required') 34 | if ( 35 | isinstance(user, User) 36 | and isinstance(project, Project) 37 | ): 38 | _user_id = user.id 39 | _project_id = project.id 40 | elif ( 41 | isinstance(user, (int, str,)) 42 | and isinstance(project, (int, str,)) 43 | ): 44 | _user_id = user 45 | _project_id = project 46 | else: 47 | raise TypeError 48 | id = cls.where(user_id=_user_id, project_id=_project_id).next().id 49 | return super(ProjectPreferences, cls).find(id) 50 | 51 | @classmethod 52 | def save_settings(cls, project=None, user=None, settings=None): 53 | """ 54 | Save settings for a user without first fetching their preferences. 55 | 56 | - **user** and **project** can be either a :py:class:`.User` and 57 | :py:class:`.Project` instance respectively, or they can be given as 58 | IDs. If either argument is given, the other is also required. 59 | - **settings** is a :py:class:`dict` containing the settings to be 60 | saved. 61 | """ 62 | 63 | if (isinstance(settings, dict)): 64 | _to_update = settings 65 | if ( 66 | isinstance(user, User) 67 | and isinstance(project, Project) 68 | ): 69 | _user_id = user.id 70 | _project_id = project.id 71 | elif ( 72 | isinstance(user, (int, str,)) 73 | and isinstance(project, (int, str,)) 74 | ): 75 | _user_id = user 76 | _project_id = project 77 | else: 78 | raise TypeError 79 | cls.http_post( 80 | 'update_settings', 81 | json={ 82 | 'project_preferences': { 83 | 'user_id': _user_id, 84 | 'project_id': _project_id, 85 | 'settings': _to_update, 86 | } 87 | } 88 | ) 89 | else: 90 | raise TypeError 91 | 92 | @classmethod 93 | def fetch_settings(cls, project=None, user=None): 94 | """ 95 | Fetch project preference settings for a particular project and user(optional). 96 | 97 | - **user** and **project** can be either a :py:class:`.User` and 98 | :py:class:`.Project` instance respectively, or they can be given as 99 | IDs. 100 | - **user** parameter is optional and only **project** is required 101 | 102 | Examples:: 103 | 104 | ProjectPreferences.fetch_settings(Project(1234), User(1234)) 105 | ProjectPreferences.fetch_settings(1234, 1234) 106 | ProjectPreferences.fetch_settings(Project(1234)) 107 | ProjectPreferences.fetch_settings(1234) 108 | """ 109 | 110 | _user_id = None 111 | _project_id = None 112 | 113 | if isinstance(project, Project): 114 | _project_id = project.id 115 | elif isinstance(project, (int, str,)): 116 | _project_id = project 117 | else: 118 | raise TypeError 119 | 120 | if isinstance(user, User): 121 | _user_id = user.id 122 | elif isinstance(user, (int, str,)): 123 | _user_id = user 124 | 125 | params = {'project_id': _project_id} 126 | 127 | if _user_id is not None: 128 | params['user_id'] = _user_id 129 | 130 | return cls.paginated_results(*cls.http_get( 131 | 'read_settings', 132 | params=params 133 | )) 134 | 135 | 136 | LinkResolver.register(ProjectPreferences) 137 | -------------------------------------------------------------------------------- /panoptes_client/subject_set.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import str 3 | from copy import deepcopy 4 | from panoptes_client.subject_workflow_status import SubjectWorkflowStatus 5 | 6 | from panoptes_client.panoptes import ( 7 | LinkCollection, 8 | LinkResolver, 9 | PanoptesAPIException, 10 | PanoptesObject, 11 | ) 12 | from panoptes_client.set_member_subject import SetMemberSubject 13 | from panoptes_client.subject import Subject 14 | from panoptes_client.exportable import Exportable 15 | from panoptes_client.utils import batchable 16 | 17 | from redo import retry 18 | 19 | 20 | class SubjectSetLinkCollection(LinkCollection): 21 | def __contains__(self, obj): 22 | if self._cls == Subject: 23 | if isinstance(obj, Subject): 24 | _subject_id = str(obj.id) 25 | else: 26 | _subject_id = str(obj) 27 | 28 | linked_subject_count = SetMemberSubject.where( 29 | subject_set_id=self._parent.id, 30 | subject_id=_subject_id 31 | ).object_count 32 | 33 | return linked_subject_count == 1 34 | return super(SubjectSetLinkCollection, self).__contains__(obj) 35 | 36 | def add(self, objs): 37 | from panoptes_client.workflow import Workflow 38 | if self._cls == Workflow: 39 | raise NotImplementedError( 40 | 'Workflows and SubjectSets can only be linked via ' 41 | 'Workflow.links' 42 | ) 43 | return super(SubjectSetLinkCollection, self).add(objs) 44 | 45 | def remove(self, objs): 46 | from panoptes_client.workflow import Workflow 47 | if self._cls == Workflow: 48 | raise NotImplementedError( 49 | 'Workflows and SubjectSets can only be unlinked via ' 50 | 'Workflow.links' 51 | ) 52 | return super(SubjectSetLinkCollection, self).remove(objs) 53 | 54 | 55 | class SubjectSet(PanoptesObject, Exportable): 56 | _api_slug = 'subject_sets' 57 | _link_slug = 'subject_sets' 58 | _edit_attributes = ( 59 | 'display_name', 60 | 'metadata', 61 | { 62 | 'links': ( 63 | 'project', 64 | ) 65 | }, 66 | ) 67 | _link_collection = SubjectSetLinkCollection 68 | 69 | def __init__(self, raw={}, etag=None): 70 | super(SubjectSet, self).__init__(raw, etag) 71 | if not self.metadata: 72 | self.metadata = {} 73 | self._original_metadata = {} 74 | 75 | def set_raw(self, raw, etag=None, loaded=True): 76 | super(SubjectSet, self).set_raw(raw, etag, loaded) 77 | if loaded and self.metadata: 78 | self._original_metadata = deepcopy(self.metadata) 79 | 80 | def save(self): 81 | """ 82 | Adds subject set metadata dict to the list of 83 | savable attributes if it has changed. 84 | """ 85 | if not self.metadata == self._original_metadata: 86 | self.modified_attributes.add('metadata') 87 | 88 | super(SubjectSet, self).save() 89 | 90 | @property 91 | def subjects(self): 92 | """ 93 | A generator which yields :py:class:`.Subject` objects which are in this 94 | subject set. 95 | 96 | Examples:: 97 | 98 | for subject in subject_set.subjects: 99 | print(subject.id) 100 | 101 | """ 102 | 103 | for sms in SetMemberSubject.where(subject_set_id=self.id): 104 | yield sms.links.subject 105 | 106 | def set_raw(self, raw, etag=None, loaded=True): 107 | raw.setdefault('links', {}).setdefault('subjects', []) 108 | return super(SubjectSet, self).set_raw(raw, etag, loaded) 109 | 110 | def add(self, subjects): 111 | """ 112 | A wrapper around :py:meth:`.LinkCollection.add`. Equivalent to:: 113 | 114 | subject_set.links.add(subjects) 115 | """ 116 | 117 | # reload the subject set to make sure the online version not stale 118 | self.reload() 119 | 120 | return self.links.subjects.add(subjects) 121 | 122 | def remove(self, subjects): 123 | """ 124 | A wrapper around :py:meth:`.LinkCollection.remove`. Equivalent to:: 125 | 126 | subject_set.links.remove(subjects) 127 | """ 128 | 129 | return self.links.subjects.remove(subjects) 130 | 131 | def subject_workflow_statuses(self, workflow_id): 132 | """ 133 | A generator which yields :py:class:`.SubjectWorkflowStatus` objects for subjects in this 134 | subject set and for the supplied workflow id. 135 | 136 | Examples:: 137 | 138 | for status in subject_set.subject_workflow_statuses(1234): 139 | print(status.retirement_reason) 140 | """ 141 | 142 | subject_ids = ', '.join((subject.id for subject in self.subjects)) 143 | for status in SubjectWorkflowStatus.where(subject_ids=subject_ids, workflow_id=workflow_id): 144 | yield status 145 | 146 | def __contains__(self, subject): 147 | """ 148 | A wrapper around :py:meth:`.LinkCollection.__contains__`. Equivalent 149 | to:: 150 | 151 | subject in subject_set.links.subjects 152 | """ 153 | return subject in self.links.subjects 154 | 155 | 156 | LinkResolver.register(SubjectSet) 157 | LinkResolver.register(SubjectSet, 'subject_set') 158 | -------------------------------------------------------------------------------- /panoptes_client/tests/test_http_retries.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | 4 | if sys.version_info <= (3, 0): 5 | from mock import patch, Mock 6 | else: 7 | from unittest.mock import patch, Mock 8 | 9 | from panoptes_client.panoptes import ( 10 | HTTP_RETRY_LIMIT, 11 | Panoptes, 12 | PanoptesAPIException, 13 | ) 14 | 15 | 16 | class TestRetries(unittest.TestCase): 17 | def setUp(self): 18 | self.http_result = Mock() 19 | self.client = Panoptes() 20 | self.client.valid_bearer_token = Mock() 21 | self.client.valid_bearer_token.return_value = True 22 | self.client.bearer_token = '1234' 23 | self.client.session = Mock() 24 | self.client.session.request = Mock() 25 | self.client.session.request.return_value = self.http_result 26 | 27 | def assert_retry(self, *args, **kwargs): 28 | self.assertTrue(kwargs.get('retry', False)) 29 | result = Mock() 30 | result.status_code = 204 31 | return result 32 | 33 | def assert_no_retry(self, *args, **kwargs): 34 | self.assertFalse(kwargs.get('retry', True)) 35 | result = Mock() 36 | result.status_code = 204 37 | return result 38 | 39 | @patch('panoptes_client.panoptes.RETRY_BACKOFF_INTERVAL', 1) 40 | def test_request_retry_success(self): 41 | self.http_result.status_code = 200 42 | 43 | self.assertEqual( 44 | self.client.http_request('GET', '', retry=True), 45 | self.http_result, 46 | ) 47 | self.assertEqual( 48 | self.client.session.request.call_count, 49 | 1, 50 | ) 51 | 52 | @patch('panoptes_client.panoptes.RETRY_BACKOFF_INTERVAL', 1) 53 | def test_request_retry_no_success(self): 54 | self.http_result.status_code = 500 55 | 56 | with self.assertRaises(PanoptesAPIException): 57 | self.assertEqual( 58 | self.client.http_request('GET', '', retry=True), 59 | self.http_result, 60 | ) 61 | self.assertEqual( 62 | self.client.session.request.call_count, 63 | HTTP_RETRY_LIMIT, 64 | ) 65 | 66 | @patch('panoptes_client.panoptes.RETRY_BACKOFF_INTERVAL', 1) 67 | def test_request_no_retry_success(self): 68 | self.http_result.status_code = 200 69 | 70 | self.assertEqual( 71 | self.client.http_request('GET', '', retry=False), 72 | self.http_result, 73 | ) 74 | self.assertEqual( 75 | self.client.session.request.call_count, 76 | 1, 77 | ) 78 | 79 | @patch('panoptes_client.panoptes.RETRY_BACKOFF_INTERVAL', 1) 80 | def test_request_no_retry_no_success(self): 81 | self.http_result.status_code = 500 82 | 83 | with self.assertRaises(PanoptesAPIException): 84 | self.assertEqual( 85 | self.client.http_request('GET', '', retry=False), 86 | self.http_result, 87 | ) 88 | self.assertEqual( 89 | self.client.session.request.call_count, 90 | 1, 91 | ) 92 | 93 | def test_json_retry(self): 94 | self.client.http_request = self.assert_retry 95 | self.client.json_request('', '', retry=True) 96 | 97 | def test_json_no_retry(self): 98 | self.client.http_request = self.assert_no_retry 99 | self.client.json_request('', '', retry=False) 100 | 101 | def test_get_retry(self): 102 | self.client.json_request = self.assert_retry 103 | self.client.get('', retry=True) 104 | 105 | def test_get_no_retry(self): 106 | self.client.json_request = self.assert_no_retry 107 | self.client.get('', retry=False) 108 | 109 | def test_get_request_retry(self): 110 | self.client.http_request = self.assert_retry 111 | self.client.get_request('', retry=True) 112 | 113 | def test_get_request_no_retry(self): 114 | self.client.http_request = self.assert_no_retry 115 | self.client.get_request('', retry=False) 116 | 117 | def test_put_retry(self): 118 | self.client.json_request = self.assert_retry 119 | self.client.put('', retry=True) 120 | 121 | def test_put_no_retry(self): 122 | self.client.json_request = self.assert_no_retry 123 | self.client.put('', retry=False) 124 | 125 | def test_put_request_retry(self): 126 | self.client.http_request = self.assert_retry 127 | self.client.put_request('', retry=True) 128 | 129 | def test_put_request_no_retry(self): 130 | self.client.http_request = self.assert_no_retry 131 | self.client.put_request('', retry=False) 132 | 133 | def test_post_retry(self): 134 | self.client.json_request = self.assert_retry 135 | self.client.post('', retry=True) 136 | 137 | def test_post_no_retry(self): 138 | self.client.json_request = self.assert_no_retry 139 | self.client.post('', retry=False) 140 | 141 | def test_post_request_retry(self): 142 | self.client.http_request = self.assert_retry 143 | self.client.post_request('', retry=True) 144 | 145 | def test_post_request_no_retry(self): 146 | self.client.http_request = self.assert_no_retry 147 | self.client.post_request('', retry=False) 148 | 149 | def test_delete_retry(self): 150 | self.client.json_request = self.assert_retry 151 | self.client.delete('', retry=True) 152 | 153 | def test_delete_no_retry(self): 154 | self.client.json_request = self.assert_no_retry 155 | self.client.delete('', retry=False) 156 | 157 | def test_delete_request_retry(self): 158 | self.client.http_request = self.assert_retry 159 | self.client.delete_request('', retry=True) 160 | 161 | def test_delete_request_no_retry(self): 162 | self.client.http_request = self.assert_no_retry 163 | self.client.delete_request('', retry=False) 164 | -------------------------------------------------------------------------------- /panoptes_client/exportable.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import csv 4 | import datetime 5 | import functools 6 | import time 7 | 8 | import requests 9 | 10 | from panoptes_client.panoptes import ( 11 | PanoptesAPIException, 12 | Talk, 13 | ) 14 | 15 | 16 | TALK_EXPORT_TYPES = ( 17 | 'talk_comments', 18 | 'talk_tags', 19 | ) 20 | 21 | talk = Talk() 22 | 23 | 24 | class Exportable(object): 25 | """ 26 | Abstract class containing methods for generating and downloading data 27 | exports. 28 | """ 29 | 30 | def get_export( 31 | self, 32 | export_type, 33 | generate=False, 34 | wait=False, 35 | wait_timeout=None, 36 | ): 37 | """ 38 | Downloads a data export over HTTP. Returns a `Requests Response 39 | `_ 40 | object containing the content of the export. 41 | 42 | - **export_type** is a string specifying which type of export should be 43 | downloaded. 44 | - **generate** is a boolean specifying whether to generate a new export 45 | and wait for it to be ready, or to just download the latest export. 46 | - **wait** is a boolean specifying whether to wait for an in-progress 47 | export to finish, if there is one. Has no effect if ``generate`` is 48 | ``True``. 49 | - **wait_timeout** is the number of seconds to wait if ``wait`` is 50 | ``True``. Has no effect if ``wait`` is ``False`` or if ``generate`` 51 | is ``True``. 52 | 53 | The returned :py:class:`.Response` object has two additional attributes 54 | as a convenience for working with the CSV content; **csv_reader** and 55 | **csv_dictreader**, which are wrappers for :py:meth:`.csv.reader` 56 | and :py:class:`csv.DictReader` respectively. These wrappers take care 57 | of correctly decoding the export content for the CSV parser. 58 | 59 | Example:: 60 | 61 | classification_export = Project(1234).get_export('classifications') 62 | for row in classification_export.csv_reader(): 63 | print(row) 64 | 65 | classification_export = Project(1234).get_export('classifications') 66 | for row in classification_export.csv_dictreader(): 67 | print(row) 68 | """ 69 | 70 | if generate: 71 | self.generate_export(export_type) 72 | 73 | if generate or wait: 74 | export = self.wait_export(export_type, wait_timeout) 75 | else: 76 | export = self.describe_export(export_type) 77 | 78 | if export_type in TALK_EXPORT_TYPES: 79 | media_url = export['data_requests'][0]['url'] 80 | else: 81 | media_url = export['media'][0]['src'] 82 | 83 | response = requests.get(media_url, stream=True) 84 | response.csv_reader = functools.partial( 85 | csv.reader, 86 | response.iter_lines(decode_unicode=True), 87 | ) 88 | response.csv_dictreader = functools.partial( 89 | csv.DictReader, 90 | response.iter_lines(decode_unicode=True), 91 | ) 92 | return response 93 | 94 | def wait_export( 95 | self, 96 | export_type, 97 | timeout=None, 98 | ): 99 | """ 100 | Blocks until an in-progress export is ready. 101 | 102 | - **export_type** is a string specifying which type of export to wait 103 | for. 104 | - **timeout** is the maximum number of seconds to wait. 105 | 106 | If ``timeout`` is given and the export is not ready by the time limit, 107 | :py:class:`.PanoptesAPIException` is raised. 108 | """ 109 | 110 | success = False 111 | if timeout: 112 | end_time = datetime.datetime.now() + datetime.timedelta( 113 | seconds=timeout 114 | ) 115 | 116 | while (not timeout) or (datetime.datetime.now() < end_time): 117 | export_description = self.describe_export( 118 | export_type, 119 | ) 120 | 121 | if export_type in TALK_EXPORT_TYPES: 122 | export_metadata = export_description['data_requests'][0] 123 | else: 124 | export_metadata = export_description['media'][0]['metadata'] 125 | 126 | if export_metadata.get('state', '') in ('ready', 'finished'): 127 | success = True 128 | break 129 | 130 | time.sleep(2) 131 | 132 | if not success: 133 | raise PanoptesAPIException( 134 | '{}_export not ready within {} seconds'.format( 135 | export_type, 136 | timeout 137 | ) 138 | ) 139 | 140 | return export_description 141 | 142 | def generate_export(self, export_type): 143 | """ 144 | Start a new export. 145 | 146 | - **export_type** is a string specifying which type of export to start. 147 | 148 | Returns a :py:class:`dict` containing metadata for the new export. 149 | """ 150 | 151 | if export_type in TALK_EXPORT_TYPES: 152 | return talk.post_data_request( 153 | 'project-{}'.format(self.id), 154 | export_type.replace('talk_', '') 155 | ) 156 | 157 | return self.http_post( 158 | self._export_path(export_type), 159 | json={"media": {"content_type": "text/csv"}}, 160 | )[0] 161 | 162 | def describe_export(self, export_type): 163 | """ 164 | Fetch metadata for an export. 165 | 166 | - **export_type** is a string specifying which type of export to look 167 | up. 168 | 169 | Returns a :py:class:`dict` containing metadata for the export. 170 | """ 171 | if export_type in TALK_EXPORT_TYPES: 172 | return talk.get_data_request( 173 | 'project-{}'.format(self.id), 174 | export_type.replace('talk_', '') 175 | )[0] 176 | 177 | return self.http_get( 178 | self._export_path(export_type), 179 | )[0] 180 | 181 | def _export_path(self, export_type): 182 | return '{}/{}_export'.format(self.id, export_type) 183 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 1.7.1 (2025-06-24) 2 | - New: Track logged in user 3 | - Fix: Batch aggregation links updated 4 | - Change: Set user to User.me() by default for new batch aggregation requests 5 | 6 | ## 1.7.0 (2024-03-07) 7 | - Fix: Improve MIME typing on auto detection for subject locations 8 | - New: Add MIME type assignment to subject locations 9 | - New: Add batch Aggregation 10 | - Fix: Paginate `fetch_settings` response on project preferences class 11 | - Update: Replace pkg_resources with importlib.metadata 12 | 13 | ## 1.6.2 (2024-07-11) 14 | - Fix: Documentation fixes 15 | - Fix: Caesar connect respect connection env 16 | - New: Add fetch_settings to project_preferences class 17 | - New: Add support for SubjectSet metadata as editable attribute 18 | - Fix: Remove max limit on dependency versions 19 | 20 | ## 1.6.1 (2023-07-11) 21 | Change: Manifest file now ensures the license is included on PyPI. 22 | Change: When linking subjects to a set, the set is reloaded to avoid stale object errors. 23 | 24 | ## 1.6.0 (2022-12-01) 25 | New: Import iNaturalist observations as subjects 26 | New: Import ML data extracts from Caesar 27 | 28 | ## 1.5.0 (2022-06-16) 29 | - New: Caesar connection/configuration 30 | - New: Enable SubjectSet-specific classification exports 31 | - Fix: Broken installs due to strict version constraints with requests and idna in python 3 32 | - Fix: Fix media uploads for existing subjects 33 | 34 | ## 1.4.0 (2021-06-01) 35 | - New: Subject unretirement in Workflow 36 | - New: Get SubjectWorkflowStatuses via Workflow, Subject, or SubjectSet 37 | 38 | ## 1.3.0 (2020-09-27) 39 | - Change: Allow uploads to Azure cloud, add header to cloud upload PUT requests 40 | 41 | ## 1.2 (2020-06-13) 42 | 43 | - New: Add `SubjectWorkflowStatus` class 44 | - New: Add attached image methods to `Project` 45 | - Fix: Save workflow configuration if changed 46 | - Change: Update requests requirement >=2.4.2,<2.25 47 | - Change: Update mock requirement to >=2.0,<4.1 48 | - Change: Update future requirement from to >=0.16,<0.19 49 | 50 | ## 1.1.1 (2019-02-25) 51 | 52 | - Fix: Can't save new objects with individual links 53 | - Fix: Missing 'six' dependency 54 | 55 | ## 1.1 (2019-02-08) 56 | 57 | - New: Add asynchronous multi-threaded subject creation 58 | - New: Add `LinkCollection` for managing links to multiple objects 59 | - New: Allow `Panoptes` class to act as a context manager 60 | - New: Add `Panoptes.interactive_login` method 61 | - New: Add authentication method selection (via `Panoptes.connect(login=...)`) 62 | - New: Add `CollectionRole` class 63 | - New: Add `Collection.set_default_subject()` 64 | - New: Add `SubjectSet.__contains__()` 65 | - New: Add `PanoptesObject.delete()` method 66 | - New: Add `Organization` class 67 | - New: Add `Project.avatar` 68 | - New: Allow finding `Collection`s by slug 69 | - New: Allow finding `User`s from a list of email addresses 70 | - New: Allow batched `User` lookups by login name 71 | - New: Allow editing `Collection` project links and descriptions 72 | - New: Allow editing `Workflow` `tasks`, `primary_language`, and `mobile_friendly` 73 | - New: Allow editing `User.valid_user` 74 | 75 | - Fix: Fix reloading for `User` class 76 | - Fix: Passing `set`s to batchable methods 77 | 78 | - Change: Use multiple threads for media uploads 79 | - Change: Make global client thread safe 80 | - Change: Retry all `GET` requests on server failures 81 | - Change: Log in immediately rather than waiting for first request 82 | - Change: Raise an exception if media mime type can't be determined 83 | - Change: Log a warning if libmagic is broken 84 | - Change: Use six for string type checking 85 | - Change: Raise exception when linking unsaved objects 86 | - Change: Update requests requirement to >=2.4.2,<2.22 87 | - Change: Update future requirement to >=0.16,<0.18 88 | 89 | ## 1.0.3 (2018-07-30) 90 | 91 | - Fix: TypeError when creating subjects 92 | - Update default client IDs 93 | 94 | ## 1.0.2 (2018-07-25) 95 | 96 | - Fix: Fix saving subjects with updated metadata 97 | - Fix: Fix calling `Subject.save()` when nothing has changed 98 | 99 | ## 1.0.1 (2018-06-14) 100 | 101 | - Fix: Exports are not automatically decompressed on download 102 | - Fix: Unable to `save` a Workflow 103 | - Fix: Fix typo in documentation for Classification 104 | - Fix: Fix saving objects initialised from object links 105 | 106 | ## 1.0 (2017-11-03) 107 | 108 | - New: Add methods for adding Project links 109 | - New: Enable debugging if PANOPTES_DEBUG is set in env 110 | - Fix: Fix accessing list of linked projects 111 | - Move testing dependencies to extras 112 | - Change User.avatar to be a property 113 | - Specify dependency versions 114 | 115 | ## 0.10 (2017-08-04) 116 | 117 | - Fix: Avoid reloading resource after create actions 118 | - Fix: Add buffer to bearer token expiration check 119 | - Remove default export timeout 120 | 121 | ## 0.9 (2017-06-20) 122 | 123 | - New: Add support for non-image media types (requires libmagic) 124 | - New: Allow lazy loading of objects 125 | - New: Add `WorkflowVersion` class and `Workflow.versions` property 126 | - Fix: Don't submit empty JSON by default for GET requests 127 | - Fix: Adding location paths in Python 2 128 | - Fix: Return a list of linked objects instead of a map in Python 3 129 | - Use `SetMemberSubject` for `SubjectSet.subjects` lookup to improve speed 130 | - Set default endpoint to www.zooniverse.org 131 | - Raise TypeError if positional batchable argument is missing 132 | - Convert `Collection.subjects` and `SubjectSet.subjects` to properties 133 | 134 | ## 0.8 (2017-05-11) 135 | 136 | - New: Python 3 compatibility 137 | - Fix: Fix passing sets to batchable methods 138 | - Fix: `AttributeError` in `Workflow.add_subject_sets()` 139 | 140 | ## 0.7 (2017-03-22) 141 | 142 | - New: Add Collection 143 | - New: Allow editing of workflows 144 | - New: Add method to get User's avatar 145 | - New: Add support for iterating over numpy arrays 146 | - New: Add per-Workflow exports 147 | - Fix: setting endpoint in environment variable 148 | - Fix: Stop iterating if there are no objects in the current page 149 | 150 | ## 0.6 (2017-01-11) 151 | 152 | - New: Add Project.collaborators() and ProjectRole 153 | - New: Add admin option 154 | - Fix: Raise PanoptesAPIException instead of StopIteration 155 | - Fix: Make ResultPaginator handle None responses 156 | - Fix: Raise PanoptesAPIException instead of StopIteration in PanoptesObject.where() 157 | 158 | ## 0.5 (2016-11-21) 159 | 160 | - New: Send SubjectSet.remove() requests in batches 161 | - Fix: Raise PanoptesAPIException instead of StopIteration in Project.find() 162 | - Fix: Don't read the image file on every upload attempt 163 | 164 | ## 0.4.1 (2016-09-21) 165 | 166 | - Fix: Bearer token checking only occurs when necessary 167 | 168 | ## 0.4 (2016-09-02) 169 | 170 | - New: Support for all data exports 171 | - New: Project owners can update `ProjectPreference` settings 172 | - New: Removed `subject_sets` method and `SetMemberSubject` (now in links) 173 | - New: Add set to iterable types 174 | - Fix: Only save links if it's been modified 175 | - Fix: Specify minimum requests version 176 | 177 | ## 0.3 (2016-08-04) 178 | 179 | - New: Add User model 180 | - New: Add option for env vars for auth 181 | - New: Add scope kwarg to Classification.where() 182 | - New: Add SetMemberSubject class 183 | - New: Submit subject links in batches in SubjectSet.add() 184 | - New: oauth for client apps 185 | - Fix: Skip trying to read export state if description was empty 186 | - Fix: Don't rely on the response having a Content-Length header 187 | 188 | ## 0.2 (2016-07-21) 189 | 190 | - New: Automatically retry failed image uploads 191 | - New: Project classifications export 192 | - New: Subject retirement in Workflow 193 | - New: Add client ID for panoptes-staging.zooniverse.org 194 | - New: Add ProjectPreferences 195 | - New: Add Classification 196 | - New: Removing subject set links 197 | - Fix: IOError: Too many open files (in subject.py, line 64) #6 198 | 199 | ## 0.1 (2016-06-16) 200 | 201 | - Initial release! 202 | - Allows creating and modifying projects, subjects, subject sets 203 | -------------------------------------------------------------------------------- /panoptes_client/project.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from copy import deepcopy 3 | 4 | from panoptes_client.panoptes import ( 5 | LinkCollection, 6 | LinkResolver, 7 | PanoptesAPIException, 8 | PanoptesObject, 9 | ) 10 | from panoptes_client.project_role import ProjectRole 11 | from panoptes_client.exportable import Exportable 12 | from panoptes_client.utils import batchable 13 | 14 | class ProjectLinkCollection(LinkCollection): 15 | def add(self, objs): 16 | from panoptes_client.workflow import Workflow 17 | from panoptes_client.subject_set import SubjectSet 18 | 19 | result = super(ProjectLinkCollection, self).add(objs) 20 | 21 | # Some classes are copied into the project as new objects 22 | # So we reload to pick those up. 23 | if self._cls in (SubjectSet, Workflow): 24 | self._parent.reload() 25 | 26 | return result 27 | 28 | 29 | class Project(PanoptesObject, Exportable): 30 | _api_slug = 'projects' 31 | _link_slug = 'project' 32 | _edit_attributes = ( 33 | 'display_name', 34 | 'description', 35 | 'tags', 36 | 'introduction', 37 | 'private', 38 | 'primary_language', 39 | 'configuration', 40 | ) 41 | _link_collection = ProjectLinkCollection 42 | 43 | def __init__(self, raw={}, etag=None): 44 | super(Project, self).__init__(raw, etag) 45 | if not self.configuration: 46 | self.configuration = {} 47 | self._original_configuration = {} 48 | 49 | def set_raw(self, raw, etag=None, loaded=True): 50 | super(Project, self).set_raw(raw, etag, loaded) 51 | if loaded and self.configuration: 52 | self._original_configuration = deepcopy(self.configuration) 53 | elif loaded: 54 | self._original_configuration = None 55 | 56 | def save(self): 57 | """ 58 | Adds project configuration to the list of savable attributes 59 | if it has changed. 60 | """ 61 | if not self.configuration == self._original_configuration: 62 | self.modified_attributes.add('configuration') 63 | 64 | super(Project, self).save() 65 | 66 | @classmethod 67 | def find(cls, id='', slug=None): 68 | """ 69 | Similar to :py:meth:`.PanoptesObject.find`, but allows lookup by slug 70 | as well as ID. 71 | 72 | Examples:: 73 | 74 | project_1234 = Project.find(1234) 75 | galaxy_zoo = Project.find(slug="zooniverse/galaxy-zoo") 76 | """ 77 | 78 | if not id and not slug: 79 | return None 80 | try: 81 | return cls.where(id=id, slug=slug).next() 82 | except StopIteration: 83 | raise PanoptesAPIException( 84 | "Could not find project with slug='{}'".format(slug) 85 | ) 86 | 87 | def collaborators(self, *roles): 88 | """ 89 | Returns a list of :py:class:`.User` who are collaborators on this 90 | project. 91 | 92 | Zero or more role arguments can be passed as strings to narrow down the 93 | results. If any roles are given, users who possess at least one of the 94 | given roles are returned. 95 | 96 | Examples:: 97 | 98 | all_collabs = project.collaborators() 99 | moderators = project.collaborators("moderators") 100 | moderators_and_translators = project.collaborators( 101 | "moderators", 102 | "translators", 103 | ) 104 | """ 105 | 106 | return [ 107 | r.links.owner for r in ProjectRole.where(project_id=self.id) 108 | if len(roles) == 0 or len(set(roles) & set(r.roles)) > 0 109 | ] 110 | 111 | @batchable 112 | def _add_links(self, linked_objects, link_type): 113 | object_ids = [] 114 | 115 | for linked_object in linked_objects: 116 | if hasattr(linked_object, 'id'): 117 | object_ids.append(linked_object.id) 118 | else: 119 | object_ids.append(str(linked_object)) 120 | 121 | self.http_post( 122 | '{}/links/{}'.format(self.id, link_type), 123 | json={ 124 | link_type: object_ids 125 | } 126 | ) 127 | 128 | def add_subject_sets(self, subject_sets): 129 | """ 130 | Links the given subject sets to this project. New subject sets are 131 | created as copies of the given sets. 132 | 133 | - **subject_sets** can be a list of :py:class:`.SubjectSet` 134 | instances, a list of subject set IDs, a single 135 | :py:class:`.SubjectSet` instance, or a single subject set ID. 136 | 137 | Examples:: 138 | 139 | project.add_subject_sets(1234) 140 | project.add_subject_sets([1,2,3,4]) 141 | project.add_subject_sets(SubjectSet(1234)) 142 | project.add_subject_sets([SubjectSet(12), SubjectSet(34)]) 143 | """ 144 | 145 | return self._add_links( 146 | subject_sets, 147 | 'subject_sets', 148 | ) 149 | 150 | def add_workflows(self, workflows): 151 | """ 152 | Links the given workflows to this project. New workflows are 153 | created as copies of the given workflows. 154 | 155 | - **workflows** can be a list of :py:class:`.Workflow` instances, 156 | a list of workflow IDs, a single :py:class:`.Workflow` 157 | instance, or a single workflow ID. 158 | 159 | Examples:: 160 | 161 | project.add_workflows(1234) 162 | project.add_workflows([1,2,3,4]) 163 | project.add_workflows(Workflow(1234)) 164 | project.add_workflows([Workflow(12), Workflow(34)]) 165 | """ 166 | return self._add_links( 167 | workflows, 168 | 'workflows', 169 | ) 170 | 171 | @property 172 | def avatar(self): 173 | """ 174 | A dict containing metadata about the project's avatar. 175 | """ 176 | return self.http_get('{}/avatar'.format(self.id))[0] 177 | 178 | @property 179 | def attached_images(self): 180 | return self.http_get('{}/attached_images'.format(self.id))[0] 181 | 182 | def add_attached_image( 183 | self, 184 | src, 185 | content_type='image/png', 186 | external_link=True, 187 | metadata={}, 188 | ): 189 | return self.http_post( 190 | '{}/attached_images'.format(self.id), 191 | json={'media': { 192 | 'src': src, 193 | 'content_type': content_type, 194 | 'external_link': external_link, 195 | 'metadata': metadata, 196 | }}, 197 | ) 198 | 199 | def copy(self, new_subject_set_name=None): 200 | """ 201 | Copy this project to a new project that will be owned by the 202 | currently authenticated user. 203 | 204 | A new_subject_set_name string argument can be passed which will be 205 | used to name a new SubjectSet for the copied project. 206 | This is useful for having an upload target straight after cloning. 207 | 208 | Requirements: the source project must not be live (in development mode) and 209 | project.configuration must have a `template` key set to "true". 210 | 211 | Examples:: 212 | 213 | project.copy() 214 | project.copy("My new subject set for uploading") 215 | """ 216 | payload = {} 217 | if new_subject_set_name: 218 | payload['create_subject_set'] = new_subject_set_name 219 | 220 | response = self.http_post( 221 | '{}/copy'.format(self.id), 222 | json=payload, 223 | ) 224 | 225 | # find the API resource response in the response tuple 226 | resource_response = response[0] 227 | # save the etag from the copied project response 228 | etag = response[1] 229 | # extract the raw copied project resource response 230 | raw_resource_response = resource_response[self._api_slug][0] 231 | 232 | # convert it into a new project model representation 233 | # ensure we provide the etag - without it the resource won't be savable 234 | copied_project = Project(raw_resource_response, etag) 235 | 236 | return copied_project 237 | 238 | 239 | LinkResolver.register(Project) 240 | LinkResolver.register(Project, 'projects') 241 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 16 | 17 | .PHONY: help 18 | help: 19 | @echo "Please use \`make ' where is one of" 20 | @echo " html to make standalone HTML files" 21 | @echo " dirhtml to make HTML files named index.html in directories" 22 | @echo " singlehtml to make a single large HTML file" 23 | @echo " pickle to make pickle files" 24 | @echo " json to make JSON files" 25 | @echo " htmlhelp to make HTML files and a HTML help project" 26 | @echo " qthelp to make HTML files and a qthelp project" 27 | @echo " applehelp to make an Apple Help Book" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " epub3 to make an epub3" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 34 | @echo " text to make text files" 35 | @echo " man to make manual pages" 36 | @echo " texinfo to make Texinfo files" 37 | @echo " info to make Texinfo files and run them through makeinfo" 38 | @echo " gettext to make PO message catalogs" 39 | @echo " changes to make an overview of all changed/added/deprecated items" 40 | @echo " xml to make Docutils-native XML files" 41 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 42 | @echo " linkcheck to check all external links for integrity" 43 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 44 | @echo " coverage to run coverage check of the documentation (if enabled)" 45 | @echo " dummy to check syntax errors of document sources" 46 | 47 | .PHONY: clean 48 | clean: 49 | rm -rf $(BUILDDIR)/* 50 | 51 | .PHONY: html 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | .PHONY: dirhtml 58 | dirhtml: 59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 62 | 63 | .PHONY: singlehtml 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | .PHONY: pickle 70 | pickle: 71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 72 | @echo 73 | @echo "Build finished; now you can process the pickle files." 74 | 75 | .PHONY: json 76 | json: 77 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 78 | @echo 79 | @echo "Build finished; now you can process the JSON files." 80 | 81 | .PHONY: htmlhelp 82 | htmlhelp: 83 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 84 | @echo 85 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 86 | ".hhp project file in $(BUILDDIR)/htmlhelp." 87 | 88 | .PHONY: qthelp 89 | qthelp: 90 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 91 | @echo 92 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 93 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 94 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PanoptesClient.qhcp" 95 | @echo "To view the help file:" 96 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PanoptesClient.qhc" 97 | 98 | .PHONY: applehelp 99 | applehelp: 100 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 101 | @echo 102 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 103 | @echo "N.B. You won't be able to view it unless you put it in" \ 104 | "~/Library/Documentation/Help or install it in your application" \ 105 | "bundle." 106 | 107 | .PHONY: devhelp 108 | devhelp: 109 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 110 | @echo 111 | @echo "Build finished." 112 | @echo "To view the help file:" 113 | @echo "# mkdir -p $$HOME/.local/share/devhelp/PanoptesClient" 114 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PanoptesClient" 115 | @echo "# devhelp" 116 | 117 | .PHONY: epub 118 | epub: 119 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 120 | @echo 121 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 122 | 123 | .PHONY: epub3 124 | epub3: 125 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 126 | @echo 127 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 128 | 129 | .PHONY: latex 130 | latex: 131 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 132 | @echo 133 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 134 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 135 | "(use \`make latexpdf' here to do that automatically)." 136 | 137 | .PHONY: latexpdf 138 | latexpdf: 139 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 140 | @echo "Running LaTeX files through pdflatex..." 141 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 142 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 143 | 144 | .PHONY: latexpdfja 145 | latexpdfja: 146 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 147 | @echo "Running LaTeX files through platex and dvipdfmx..." 148 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 149 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 150 | 151 | .PHONY: text 152 | text: 153 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 154 | @echo 155 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 156 | 157 | .PHONY: man 158 | man: 159 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 160 | @echo 161 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 162 | 163 | .PHONY: texinfo 164 | texinfo: 165 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 166 | @echo 167 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 168 | @echo "Run \`make' in that directory to run these through makeinfo" \ 169 | "(use \`make info' here to do that automatically)." 170 | 171 | .PHONY: info 172 | info: 173 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 174 | @echo "Running Texinfo files through makeinfo..." 175 | make -C $(BUILDDIR)/texinfo info 176 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 177 | 178 | .PHONY: gettext 179 | gettext: 180 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 181 | @echo 182 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 183 | 184 | .PHONY: changes 185 | changes: 186 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 187 | @echo 188 | @echo "The overview file is in $(BUILDDIR)/changes." 189 | 190 | .PHONY: linkcheck 191 | linkcheck: 192 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 193 | @echo 194 | @echo "Link check complete; look for any errors in the above output " \ 195 | "or in $(BUILDDIR)/linkcheck/output.txt." 196 | 197 | .PHONY: doctest 198 | doctest: 199 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 200 | @echo "Testing of doctests in the sources finished, look at the " \ 201 | "results in $(BUILDDIR)/doctest/output.txt." 202 | 203 | .PHONY: coverage 204 | coverage: 205 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 206 | @echo "Testing of coverage in the sources finished, look at the " \ 207 | "results in $(BUILDDIR)/coverage/python.txt." 208 | 209 | .PHONY: xml 210 | xml: 211 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 212 | @echo 213 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 214 | 215 | .PHONY: pseudoxml 216 | pseudoxml: 217 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 218 | @echo 219 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 220 | 221 | .PHONY: dummy 222 | dummy: 223 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy 224 | @echo 225 | @echo "Build finished. Dummy builder generates no files." 226 | -------------------------------------------------------------------------------- /panoptes_client/tests/test_linkcollection.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import str 3 | 4 | import unittest 5 | import sys 6 | 7 | if sys.version_info <= (3, 0): 8 | from mock import Mock, patch 9 | else: 10 | from unittest.mock import Mock, patch 11 | 12 | from panoptes_client.panoptes import LinkCollection, ObjectNotSavedException 13 | 14 | 15 | LINKED_OBJECT_IDS = ('1', '2', '3', '4') 16 | 17 | 18 | class MockPanoptesObject(Mock): 19 | def __init__(self, raw=None, etag=None): 20 | r = super(MockPanoptesObject, self).__init__() 21 | self.id = str(raw) 22 | return r 23 | 24 | 25 | class TestLinkCollection(unittest.TestCase): 26 | def link_collection(self, ids=LINKED_OBJECT_IDS, parent=None): 27 | if parent: 28 | mock_parent = parent 29 | else: 30 | mock_parent = Mock() 31 | 32 | mock_slug = Mock() 33 | lc = LinkCollection( 34 | cls=MockPanoptesObject, 35 | slug=mock_slug, 36 | parent=mock_parent, 37 | linked_objects=[str(_id) for _id in ids], 38 | ) 39 | return lc, mock_parent, mock_slug 40 | 41 | def test_contains_id_int(self): 42 | self.assertTrue(1 in self.link_collection()[0]) 43 | 44 | def test_contains_id_str(self): 45 | self.assertTrue('1' in self.link_collection()[0]) 46 | 47 | def test_contains_obj(self): 48 | mock_obj = MockPanoptesObject(1) 49 | self.assertTrue(mock_obj in self.link_collection()[0]) 50 | 51 | def test_not_contains_id_int(self): 52 | self.assertFalse(9 in self.link_collection()[0]) 53 | 54 | def test_not_contains_id_str(self): 55 | self.assertFalse('9' in self.link_collection()[0]) 56 | 57 | def test_not_contains_obj(self): 58 | mock_obj = MockPanoptesObject(9) 59 | self.assertFalse(mock_obj in self.link_collection()[0]) 60 | 61 | def test_getitem_exists(self): 62 | lc = self.link_collection()[0] 63 | for i, _id in zip(range(len(LINKED_OBJECT_IDS)), LINKED_OBJECT_IDS): 64 | self.assertEqual(lc[i].id, _id) 65 | 66 | def test_getitem_doesnt_exist(self): 67 | with self.assertRaises(IndexError): 68 | self.link_collection()[0][len(LINKED_OBJECT_IDS)] 69 | 70 | def test_iter_empty(self): 71 | m = Mock() 72 | for _ in self.link_collection([])[0]: 73 | m() 74 | m.assert_not_called() 75 | 76 | def test_iter_full(self): 77 | m = Mock() 78 | for _ in self.link_collection()[0]: 79 | m() 80 | self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS)) 81 | 82 | def test_add_empty_noop(self): 83 | m = Mock() 84 | lc, parent, slug = self.link_collection([]) 85 | lc.add([]) 86 | parent.http_post.assert_not_called() 87 | for _ in lc: 88 | m() 89 | m.assert_not_called() 90 | 91 | def test_add_id_single(self): 92 | lc, parent, slug = self.link_collection([]) 93 | lc.add(1) 94 | parent.http_post.assert_called_with( 95 | '{}/links/{}'.format(parent.id, slug), 96 | json={slug: ['1']}, 97 | retry=True, 98 | ) 99 | m = Mock() 100 | for obj in lc: 101 | self.assertEqual(obj.id, '1') 102 | m() 103 | self.assertEqual(m.call_count, 1) 104 | 105 | def test_add_id_list(self): 106 | lc, parent, slug = self.link_collection([]) 107 | lc.add(LINKED_OBJECT_IDS) 108 | parent.http_post.assert_called_with( 109 | '{}/links/{}'.format(parent.id, slug), 110 | json={slug: list(LINKED_OBJECT_IDS)}, 111 | retry=True, 112 | ) 113 | m = Mock() 114 | for obj, _id in zip(lc, LINKED_OBJECT_IDS): 115 | self.assertEqual(obj.id, _id) 116 | m() 117 | self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS)) 118 | 119 | def test_add_object_single(self): 120 | lc, parent, slug = self.link_collection([]) 121 | lc.add(MockPanoptesObject(1)) 122 | parent.http_post.assert_called_with( 123 | '{}/links/{}'.format(parent.id, slug), 124 | json={slug: ['1']}, 125 | retry=True, 126 | ) 127 | m = Mock() 128 | for obj in lc: 129 | self.assertEqual(obj.id, '1') 130 | m() 131 | self.assertEqual(m.call_count, 1) 132 | 133 | def test_add_object_list(self): 134 | lc, parent, slug = self.link_collection([]) 135 | lc.add([MockPanoptesObject(_id) for _id in LINKED_OBJECT_IDS]) 136 | parent.http_post.assert_called_with( 137 | '{}/links/{}'.format(parent.id, slug), 138 | json={slug: list(LINKED_OBJECT_IDS)}, 139 | retry=True, 140 | ) 141 | m = Mock() 142 | for obj, _id in zip(lc, LINKED_OBJECT_IDS): 143 | self.assertEqual(obj.id, _id) 144 | m() 145 | self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS)) 146 | 147 | def test_add_readonly(self): 148 | with patch('panoptes_client.panoptes.LinkResolver') as lr: 149 | lr.isreadonly = lambda s: True 150 | lc = self.link_collection()[0] 151 | with self.assertRaises(NotImplementedError): 152 | lc.add(1) 153 | 154 | def test_add_not_saved(self): 155 | parent = MockPanoptesObject() 156 | parent.id = None 157 | lc = self.link_collection(parent=parent)[0] 158 | with self.assertRaises(ObjectNotSavedException): 159 | lc.add(1) 160 | 161 | def test_remove_empty_noop(self): 162 | m = Mock() 163 | lc, parent, slug = self.link_collection() 164 | lc.remove([]) 165 | parent.http_delete.assert_not_called() 166 | for obj, _id in zip(lc, LINKED_OBJECT_IDS): 167 | self.assertEqual(obj.id, _id) 168 | m() 169 | self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS)) 170 | 171 | def test_remove_id_single(self): 172 | m = Mock() 173 | lc, parent, slug = self.link_collection() 174 | lc.remove(LINKED_OBJECT_IDS[0]) 175 | parent.http_delete.assert_called_with( 176 | '{}/links/{}/1'.format(parent.id, slug), 177 | retry=True, 178 | ) 179 | for obj, _id in zip(lc, LINKED_OBJECT_IDS[1:]): 180 | self.assertEqual(obj.id, _id) 181 | m() 182 | self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS) - 1) 183 | 184 | def test_remove_id_list(self): 185 | m = Mock() 186 | removed_ids = LINKED_OBJECT_IDS[:-1] 187 | lc, parent, slug = self.link_collection() 188 | lc.remove(removed_ids) 189 | parent.http_delete.assert_called_with( 190 | '{}/links/{}/{}'.format( 191 | parent.id, 192 | slug, 193 | ",".join(removed_ids), 194 | ), 195 | retry=True, 196 | ) 197 | for obj, _id in zip(lc, LINKED_OBJECT_IDS[-1:]): 198 | self.assertEqual(obj.id, _id) 199 | m() 200 | self.assertEqual(m.call_count, 1) 201 | 202 | def test_remove_object_single(self): 203 | m = Mock() 204 | lc, parent, slug = self.link_collection() 205 | lc.remove(MockPanoptesObject(LINKED_OBJECT_IDS[0])) 206 | parent.http_delete.assert_called_with( 207 | '{}/links/{}/1'.format(parent.id, slug), 208 | retry=True, 209 | ) 210 | for obj, _id in zip(lc, LINKED_OBJECT_IDS[1:]): 211 | self.assertEqual(obj.id, _id) 212 | m() 213 | self.assertEqual(m.call_count, len(LINKED_OBJECT_IDS) - 1) 214 | 215 | def test_remove_object_list(self): 216 | m = Mock() 217 | removed_ids = LINKED_OBJECT_IDS[:-1] 218 | lc, parent, slug = self.link_collection() 219 | lc.remove([MockPanoptesObject(_id) for _id in removed_ids]) 220 | parent.http_delete.assert_called_with( 221 | '{}/links/{}/{}'.format( 222 | parent.id, 223 | slug, 224 | ",".join(removed_ids), 225 | ), 226 | retry=True, 227 | ) 228 | for obj, _id in zip(lc, LINKED_OBJECT_IDS[-1:]): 229 | self.assertEqual(obj.id, _id) 230 | m() 231 | self.assertEqual(m.call_count, 1) 232 | 233 | def test_remove_readonly(self): 234 | with patch('panoptes_client.panoptes.LinkResolver') as lr: 235 | lr.isreadonly = lambda s: True 236 | lc = self.link_collection()[0] 237 | with self.assertRaises(NotImplementedError): 238 | lc.remove(1) 239 | 240 | def test_remove_not_saved(self): 241 | parent = MockPanoptesObject() 242 | parent.id = None 243 | lc = self.link_collection(parent=parent)[0] 244 | with self.assertRaises(ObjectNotSavedException): 245 | lc.remove(1) 246 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Panoptes Client documentation build configuration file, created by 4 | # sphinx-quickstart on Thu Oct 6 15:06:45 2016. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | import datetime 20 | import os 21 | import importlib.metadata 22 | import sys 23 | sys.path.insert(0, os.path.abspath('..')) 24 | 25 | # -- General configuration ------------------------------------------------ 26 | 27 | # If your documentation needs a minimal Sphinx version, state it here. 28 | # 29 | # needs_sphinx = '1.0' 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 34 | extensions = [ 35 | 'sphinx.ext.autodoc', 36 | 'sphinx.ext.viewcode', 37 | ] 38 | 39 | # Add any paths that contain templates here, relative to this directory. 40 | templates_path = ['_templates'] 41 | 42 | # The suffix(es) of source filenames. 43 | # You can specify multiple suffix as a list of string: 44 | # 45 | # source_suffix = ['.rst', '.md'] 46 | source_suffix = '.rst' 47 | 48 | # The encoding of source files. 49 | # 50 | # source_encoding = 'utf-8-sig' 51 | 52 | # The master toctree document. 53 | master_doc = 'index' 54 | 55 | # General information about the project. 56 | project = u'Panoptes Client' 57 | copyright = u'2016-{}, Zooniverse'.format(datetime.datetime.now().year) 58 | author = u'Zooniverse' 59 | 60 | # The version info for the project you're documenting, acts as replacement for 61 | # |version| and |release|, also used in various other places throughout the 62 | # built documents. 63 | # 64 | # The full version, including alpha/beta/rc tags. 65 | release = importlib.metadata.version("panoptes_client") 66 | # The short X.Y version. 67 | version = '.'.join(release.split('.')[:2]) 68 | 69 | 70 | # The language for content autogenerated by Sphinx. Refer to documentation 71 | # for a list of supported languages. 72 | # 73 | # This is also used if you do content translation via gettext catalogs. 74 | # Usually you set "language" from the command line for these cases. 75 | language = None 76 | 77 | # There are two options for replacing |today|: either, you set today to some 78 | # non-false value, then it is used: 79 | # 80 | # today = '' 81 | # 82 | # Else, today_fmt is used as the format for a strftime call. 83 | # 84 | # today_fmt = '%B %d, %Y' 85 | 86 | # List of patterns, relative to source directory, that match files and 87 | # directories to ignore when looking for source files. 88 | # This patterns also effect to html_static_path and html_extra_path 89 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 90 | 91 | # The reST default role (used for this markup: `text`) to use for all 92 | # documents. 93 | # 94 | # default_role = None 95 | 96 | # If true, '()' will be appended to :func: etc. cross-reference text. 97 | # 98 | # add_function_parentheses = True 99 | 100 | # If true, the current module name will be prepended to all description 101 | # unit titles (such as .. function::). 102 | # 103 | # add_module_names = True 104 | 105 | # If true, sectionauthor and moduleauthor directives will be shown in the 106 | # output. They are ignored by default. 107 | # 108 | # show_authors = False 109 | 110 | # The name of the Pygments (syntax highlighting) style to use. 111 | pygments_style = 'sphinx' 112 | 113 | # A list of ignored prefixes for module index sorting. 114 | # modindex_common_prefix = [] 115 | 116 | # If true, keep warnings as "system message" paragraphs in the built documents. 117 | # keep_warnings = False 118 | 119 | # If true, `todo` and `todoList` produce output, else they produce nothing. 120 | todo_include_todos = False 121 | 122 | 123 | # -- Options for HTML output ---------------------------------------------- 124 | 125 | # The theme to use for HTML and HTML Help pages. See the documentation for 126 | # a list of builtin themes. 127 | # 128 | html_theme = 'classic' 129 | 130 | # Theme options are theme-specific and customize the look and feel of a theme 131 | # further. For a list of options available for each theme, see the 132 | # documentation. 133 | # 134 | # html_theme_options = {} 135 | 136 | # Add any paths that contain custom themes here, relative to this directory. 137 | # html_theme_path = [] 138 | 139 | # The name for this set of Sphinx documents. 140 | # " v documentation" by default. 141 | # 142 | # html_title = u'Panoptes Client v0.4.1' 143 | 144 | # A shorter title for the navigation bar. Default is the same as html_title. 145 | # 146 | # html_short_title = None 147 | 148 | # The name of an image file (relative to this directory) to place at the top 149 | # of the sidebar. 150 | # 151 | # html_logo = None 152 | 153 | # The name of an image file (relative to this directory) to use as a favicon of 154 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 155 | # pixels large. 156 | # 157 | # html_favicon = None 158 | 159 | # Add any paths that contain custom static files (such as style sheets) here, 160 | # relative to this directory. They are copied after the builtin static files, 161 | # so a file named "default.css" will overwrite the builtin "default.css". 162 | html_static_path = ['_static'] 163 | 164 | # Add any extra paths that contain custom files (such as robots.txt or 165 | # .htaccess) here, relative to this directory. These files are copied 166 | # directly to the root of the documentation. 167 | # 168 | # html_extra_path = [] 169 | 170 | # If not None, a 'Last updated on:' timestamp is inserted at every page 171 | # bottom, using the given strftime format. 172 | # The empty string is equivalent to '%b %d, %Y'. 173 | # 174 | # html_last_updated_fmt = None 175 | 176 | # If true, SmartyPants will be used to convert quotes and dashes to 177 | # typographically correct entities. 178 | # 179 | # html_use_smartypants = True 180 | 181 | # Custom sidebar templates, maps document names to template names. 182 | # 183 | # html_sidebars = {} 184 | 185 | # Additional templates that should be rendered to pages, maps page names to 186 | # template names. 187 | # 188 | # html_additional_pages = {} 189 | 190 | # If false, no module index is generated. 191 | # 192 | # html_domain_indices = True 193 | 194 | # If false, no index is generated. 195 | # 196 | # html_use_index = True 197 | 198 | # If true, the index is split into individual pages for each letter. 199 | # 200 | # html_split_index = False 201 | 202 | # If true, links to the reST sources are added to the pages. 203 | # 204 | # html_show_sourcelink = True 205 | 206 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 207 | # 208 | # html_show_sphinx = True 209 | 210 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 211 | # 212 | # html_show_copyright = True 213 | 214 | # If true, an OpenSearch description file will be output, and all pages will 215 | # contain a tag referring to it. The value of this option must be the 216 | # base URL from which the finished HTML is served. 217 | # 218 | # html_use_opensearch = '' 219 | 220 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 221 | # html_file_suffix = None 222 | 223 | # Language to be used for generating the HTML full-text search index. 224 | # Sphinx supports the following languages: 225 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 226 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' 227 | # 228 | # html_search_language = 'en' 229 | 230 | # A dictionary with options for the search language support, empty by default. 231 | # 'ja' uses this config value. 232 | # 'zh' user can custom change `jieba` dictionary path. 233 | # 234 | # html_search_options = {'type': 'default'} 235 | 236 | # The name of a javascript file (relative to the configuration directory) that 237 | # implements a search results scorer. If empty, the default will be used. 238 | # 239 | # html_search_scorer = 'scorer.js' 240 | 241 | # Output file base name for HTML help builder. 242 | htmlhelp_basename = 'PanoptesClientdoc' 243 | 244 | # -- Options for LaTeX output --------------------------------------------- 245 | 246 | latex_elements = { 247 | # The paper size ('letterpaper' or 'a4paper'). 248 | # 249 | # 'papersize': 'letterpaper', 250 | 251 | # The font size ('10pt', '11pt' or '12pt'). 252 | # 253 | # 'pointsize': '10pt', 254 | 255 | # Additional stuff for the LaTeX preamble. 256 | # 257 | # 'preamble': '', 258 | 259 | # Latex figure (float) alignment 260 | # 261 | # 'figure_align': 'htbp', 262 | } 263 | 264 | # Grouping the document tree into LaTeX files. List of tuples 265 | # (source start file, target name, title, 266 | # author, documentclass [howto, manual, or own class]). 267 | latex_documents = [ 268 | (master_doc, 'PanoptesClient.tex', u'Panoptes Client Documentation', 269 | u'Zooniverse', 'manual'), 270 | ] 271 | 272 | # The name of an image file (relative to this directory) to place at the top of 273 | # the title page. 274 | # 275 | # latex_logo = None 276 | 277 | # For "manual" documents, if this is true, then toplevel headings are parts, 278 | # not chapters. 279 | # 280 | # latex_use_parts = False 281 | 282 | # If true, show page references after internal links. 283 | # 284 | # latex_show_pagerefs = False 285 | 286 | # If true, show URL addresses after external links. 287 | # 288 | # latex_show_urls = False 289 | 290 | # Documents to append as an appendix to all manuals. 291 | # 292 | # latex_appendices = [] 293 | 294 | # It false, will not define \strong, \code, itleref, \crossref ... but only 295 | # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added 296 | # packages. 297 | # 298 | # latex_keep_old_macro_names = True 299 | 300 | # If false, no module index is generated. 301 | # 302 | # latex_domain_indices = True 303 | 304 | 305 | # -- Options for manual page output --------------------------------------- 306 | 307 | # One entry per manual page. List of tuples 308 | # (source start file, name, description, authors, manual section). 309 | man_pages = [ 310 | (master_doc, 'panoptesclient', u'Panoptes Client Documentation', 311 | [author], 1) 312 | ] 313 | 314 | # If true, show URL addresses after external links. 315 | # 316 | # man_show_urls = False 317 | 318 | 319 | # -- Options for Texinfo output ------------------------------------------- 320 | 321 | # Grouping the document tree into Texinfo files. List of tuples 322 | # (source start file, target name, title, author, 323 | # dir menu entry, description, category) 324 | texinfo_documents = [ 325 | (master_doc, 'PanoptesClient', u'Panoptes Client Documentation', 326 | author, 'PanoptesClient', 'One line description of project.', 327 | 'Miscellaneous'), 328 | ] 329 | 330 | # Documents to append as an appendix to all manuals. 331 | # 332 | # texinfo_appendices = [] 333 | 334 | # If false, no module index is generated. 335 | # 336 | # texinfo_domain_indices = True 337 | 338 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 339 | # 340 | # texinfo_show_urls = 'footnote' 341 | 342 | # If true, do not generate a @detailmenu in the "Top" node's menu. 343 | # 344 | # texinfo_no_detailmenu = False 345 | -------------------------------------------------------------------------------- /panoptes_client/tests/test_workflow.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | from panoptes_client.panoptes import PanoptesAPIException 4 | from panoptes_client.workflow import Workflow 5 | from panoptes_client.caesar import Caesar 6 | from panoptes_client.aggregation import Aggregation 7 | 8 | if sys.version_info <= (3, 0): 9 | from mock import patch, MagicMock 10 | else: 11 | from unittest.mock import patch, MagicMock 12 | 13 | 14 | class TestWorkflow(unittest.TestCase): 15 | 16 | def setUp(self): 17 | super().setUp() 18 | caesar_post_patch = patch.object(Caesar, 'http_post') 19 | caesar_put_patch = patch.object(Caesar, 'http_put') 20 | caesar_get_patch = patch.object(Caesar, 'http_get') 21 | self.caesar_post_mock = caesar_post_patch.start() 22 | self.caesar_put_mock = caesar_put_patch.start() 23 | self.caesar_get_mock = caesar_get_patch.start() 24 | self.addCleanup(caesar_post_patch.stop) 25 | self.addCleanup(caesar_get_patch.stop) 26 | self.addCleanup(caesar_put_patch.stop) 27 | 28 | def test_save_to_caesar_update(self): 29 | workflow = Workflow(1) 30 | workflow.save_to_caesar() 31 | 32 | self.caesar_put_mock.assert_called_once() 33 | self.caesar_put_mock.assert_called_with('workflows/1', json={ 34 | 'workflow': { 35 | 'id': workflow.id, 36 | 'public_extracts': False, 37 | 'public_reductions': False 38 | } 39 | }) 40 | 41 | def test_save_to_caesar_create(self): 42 | self.caesar_get_mock.side_effect = PanoptesAPIException("Couldn't find Workflow with 'id'=1") 43 | 44 | workflow = Workflow(1) 45 | workflow.save_to_caesar() 46 | 47 | self.caesar_post_mock.assert_called_once() 48 | self.caesar_post_mock.assert_called_with('workflows', json={ 49 | 'workflow': { 50 | 'id': workflow.id, 51 | 'public_extracts': False, 52 | 'public_reductions': False 53 | } 54 | }) 55 | 56 | def test_save_to_caesar_raises_err(self): 57 | self.caesar_get_mock.side_effect = PanoptesAPIException("Some other error not workflow_id missing error") 58 | 59 | with self.assertRaises(PanoptesAPIException): 60 | workflow = Workflow(1) 61 | workflow.save_to_caesar() 62 | 63 | self.caesar_post_mock.assert_not_called() 64 | self.caesar_put_mock.assert_not_called() 65 | 66 | def test_caesar_subject_extracts(self): 67 | workflow = Workflow(1) 68 | workflow.caesar_subject_extracts(1234) 69 | 70 | self.caesar_get_mock.assert_called_with( 71 | f'workflows/{workflow.id}/extractors/all/extracts', params={'subject_id': 1234}) 72 | 73 | def test_caesar_subject_reductions_get_all_reductions(self): 74 | workflow = Workflow(1) 75 | workflow.caesar_subject_reductions(1234) 76 | 77 | self.caesar_get_mock.assert_called_with( 78 | f'workflows/{workflow.id}/subjects/1234/reductions') 79 | 80 | def test_caesar_subject_reductions_filter_by_reducer_key(self): 81 | workflow = Workflow(1) 82 | workflow.caesar_subject_reductions(1234, 'test_reducer_key') 83 | 84 | self.caesar_get_mock.assert_called_with( 85 | f'workflows/{workflow.id}/subjects/1234/reductions?reducer_key=test_reducer_key') 86 | 87 | def test_caesar_extractors(self): 88 | workflow = Workflow(1) 89 | workflow.caesar_extractors() 90 | 91 | self.caesar_get_mock.assert_called_with( 92 | f'workflows/{workflow.id}/extractors') 93 | 94 | def test_caesar_reducers(self): 95 | workflow = Workflow(1) 96 | workflow.caesar_reducers() 97 | 98 | self.caesar_get_mock.assert_called_with( 99 | f'workflows/{workflow.id}/reducers') 100 | 101 | def test_caesar_rules_subject_rules(self): 102 | workflow = Workflow(1) 103 | workflow.caesar_rules('subject') 104 | 105 | self.caesar_get_mock.assert_called_with( 106 | f'workflows/{workflow.id}/subject_rules') 107 | 108 | def test_caesar_rules_user_rules(self): 109 | workflow = Workflow(1) 110 | workflow.caesar_rules('user') 111 | 112 | self.caesar_get_mock.assert_called_with( 113 | f'workflows/{workflow.id}/user_rules') 114 | 115 | def test_caesar_effects_subject_rule_effects(self): 116 | workflow = Workflow(1) 117 | workflow.caesar_effects('subject', 123) 118 | 119 | self.caesar_get_mock.assert_called_with( 120 | f'workflows/{workflow.id}/subject_rules/123/subject_rule_effects') 121 | 122 | def test_caesar_effects_user_rule_effects(self): 123 | workflow = Workflow(1) 124 | workflow.caesar_effects('user', 123) 125 | self.caesar_get_mock.assert_called_with( 126 | f'workflows/{workflow.id}/user_rules/123/user_rule_effects') 127 | 128 | def test_add_caesar_extractor_valid_extractor(self): 129 | workflow = Workflow(1) 130 | workflow.add_caesar_extractor('external', 'alice') 131 | 132 | self.caesar_post_mock.assert_called_with(f'workflows/{workflow.id}/extractors', json={ 133 | 'extractor': { 134 | 'type': 'external', 135 | 'key': 'alice', 136 | 'task_key': 'T0' 137 | } 138 | }) 139 | 140 | def test_add_caesar_extractor_invalid_extractor(self): 141 | with self.assertRaises(ValueError) as extractor_error: 142 | workflow = Workflow(1) 143 | workflow.add_caesar_extractor('invalid_extractor_type', 'invalid') 144 | 145 | self.caesar_post_mock.assert_not_called() 146 | self.assertEqual('Invalid extractor type', 147 | str(extractor_error.exception)) 148 | 149 | def test_add_caesar_reducer_valid_reducer(self): 150 | workflow = Workflow(1) 151 | workflow.add_caesar_reducer('count', 'count_key') 152 | 153 | self.caesar_post_mock.assert_called_with(f'workflows/{workflow.id}/reducers', json={ 154 | 'reducer': { 155 | 'type': 'count', 156 | 'key': 'count_key' 157 | } 158 | }) 159 | 160 | def test_add_caesar_reducer_invalid_reducer(self): 161 | with self.assertRaises(ValueError) as invalid_reducer_err: 162 | workflow = Workflow(1) 163 | workflow.add_caesar_reducer('invalid_reducer_type', 'key') 164 | 165 | self.caesar_post_mock.assert_not_called() 166 | self.assertEqual('Invalid reducer type', str( 167 | invalid_reducer_err.exception)) 168 | 169 | def test_add_caesar_rule_valid_rule_type(self): 170 | workflow = Workflow(1) 171 | condition_string = '["gte", ["lookup", "complete.0", 0], ["const", 3]]' 172 | workflow.add_caesar_rule(condition_string, 'subject') 173 | 174 | self.caesar_post_mock.assert_called_with(f'workflows/{workflow.id}/subject_rules', json={ 175 | 'subject_rule': { 176 | 'condition_string': condition_string 177 | } 178 | }) 179 | 180 | def test_add_caesar_rule_invalid_rule_type(self): 181 | with self.assertRaises(ValueError) as invalid_rule_type_err: 182 | workflow = Workflow(1) 183 | condition_string = '["gte", ["lookup", "complete.0", 0], ["const", 3]]' 184 | invalid_rule_type = 'invalid_type' 185 | workflow.add_caesar_rule(condition_string, invalid_rule_type) 186 | 187 | self.caesar_post_mock.assert_not_called() 188 | expected_message = f'Invalid rule type: {invalid_rule_type}. Rule types can either be by "subject" or "user"' 189 | self.assertEqual(expected_message, str(invalid_rule_type_err.exception)) 190 | 191 | def test_add_caesar_rule_effect_valid_effect(self): 192 | workflow = Workflow(1) 193 | retire_reason = { 194 | 'reason': 'other' 195 | } 196 | workflow.add_caesar_rule_effect('subject', 12, 'retire_subject', retire_reason) 197 | expected_endpoint = f'workflows/{workflow.id}/subject_rules/{12}/subject_rule_effects' 198 | self.caesar_post_mock.assert_called_with(expected_endpoint, json={ 199 | 'subject_rule_effect': { 200 | 'action': 'retire_subject', 201 | 'config': retire_reason 202 | } 203 | }) 204 | 205 | def test_add_caesar_rule_effect_invalid_effect(self): 206 | with self.assertRaises(ValueError) as invalid_effect_err: 207 | workflow = Workflow(1) 208 | workflow.add_caesar_rule_effect('subject', 12, 'promote_user', {'some': 'config'}) 209 | 210 | self.caesar_post_mock.assert_not_called() 211 | self.assertEqual('Invalid action for rule type', str(invalid_effect_err.exception)) 212 | 213 | 214 | class TestAggregation(unittest.TestCase): 215 | def setUp(self): 216 | self.instance = Workflow(1) 217 | self.mock_user_id = 1 218 | 219 | @patch.object(Aggregation, 'where') 220 | def test_run_aggregation_existing(self, mock_where): 221 | mock_current_agg = MagicMock() 222 | mock_current_agg.delete = MagicMock() 223 | 224 | mock_aggregations = MagicMock() 225 | mock_aggregations.object_count = 1 226 | mock_aggregations.__next__.return_value = mock_current_agg 227 | mock_where.return_value = mock_aggregations 228 | 229 | result = self.instance.run_aggregation(self.mock_user_id, False) 230 | 231 | mock_current_agg.delete.assert_not_called() 232 | self.assertEqual(result, mock_current_agg) 233 | 234 | @patch.object(Aggregation, 'where') 235 | @patch.object(Aggregation, 'save') 236 | def test_run_aggregation_existing_and_delete(self, mock_save, mock_where): 237 | mock_current_agg = MagicMock() 238 | mock_current_agg.delete = MagicMock() 239 | 240 | mock_aggregations = MagicMock() 241 | mock_aggregations.object_count = 1 242 | mock_aggregations.__next__.return_value = mock_current_agg 243 | mock_where.return_value = mock_aggregations 244 | 245 | mock_save_func = MagicMock() 246 | mock_save.return_value = mock_save_func() 247 | 248 | result = self.instance.run_aggregation(self.mock_user_id, True) 249 | 250 | mock_current_agg.delete.assert_called_once() 251 | mock_save_func.assert_called_once() 252 | self.assertNotEqual(result, mock_current_agg) 253 | 254 | @patch.object(Aggregation, 'where') 255 | def test_get_batch_aggregation(self, mock_where): 256 | mock_current_agg = MagicMock() 257 | mock_aggregations = MagicMock() 258 | mock_aggregations.__next__.return_value = mock_current_agg 259 | mock_where.return_value = mock_aggregations 260 | 261 | result = self.instance.get_batch_aggregation() 262 | 263 | self.assertEqual(result, mock_current_agg) 264 | 265 | @patch.object(Aggregation, 'where') 266 | def test_get_batch_aggregation_failure(self, mock_where): 267 | mock_where.return_value = iter([]) 268 | 269 | with self.assertRaises(PanoptesAPIException): 270 | self.instance.get_batch_aggregation() 271 | 272 | @patch.object(Workflow, 'get_batch_aggregation') 273 | def test_get_agg_property(self, mock_get_batch_aggregation): 274 | mock_aggregation = MagicMock() 275 | mock_aggregation.test_property = 'returned_test_value' 276 | 277 | mock_get_batch_aggregation.return_value = mock_aggregation 278 | 279 | result = self.instance._get_agg_property('test_property') 280 | 281 | self.assertEqual(result, 'returned_test_value') 282 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /panoptes_client/caesar.py: -------------------------------------------------------------------------------- 1 | from panoptes_client.panoptes import Panoptes, PanoptesAPIException 2 | 3 | 4 | class Caesar(object): 5 | """ 6 | The low-level Caesar HTTP client class. Use this class to interact with the 7 | Caesar API. User credentials are shared with Panoptes, so log in via 8 | :py:meth:`.Panoptes.connect` before use. 9 | """ 10 | EXTRACTOR_TYPES = ['blank', 'external', 'question', 'survey', 'who', 'pluck_field', 'shape'] 11 | REDUCER_TYPES = [ 12 | 'consensus', 'count', 'placeholder', 'external', 'first_extract', 'stats', 13 | 'unique_count', 'rectangle', 'sqs' 14 | ] 15 | RULE_TO_ACTION_TYPES = { 16 | 'subject': ['retire_subject', 'add_subject_to_set', 'add_to_collection', 'external', 'external_with_basic_auth'], 17 | 'user': ['promote_user'] 18 | } 19 | 20 | def __init__( 21 | self, 22 | endpoint=None, 23 | redirect_url='https://caesar.zooniverse.org/auth/zooniverse/callback' 24 | ): 25 | _default_endpoint = 'https://caesar.zooniverse.org' 26 | if Panoptes.client().endpoint == 'https://panoptes-staging.zooniverse.org': 27 | _default_endpoint = 'https://caesar-staging.zooniverse.org' 28 | 29 | if endpoint is None: 30 | endpoint = _default_endpoint 31 | 32 | self.endpoint = endpoint 33 | self.headers = { 34 | 'Accept': 'application/json' 35 | } 36 | 37 | def http_get(self, *args, **kwargs): 38 | kwargs['endpoint'] = self.endpoint 39 | kwargs['headers'] = self.headers 40 | return Panoptes.client().get(*args, **kwargs) 41 | 42 | def http_post(self, *args, **kwargs): 43 | kwargs['endpoint'] = self.endpoint 44 | kwargs['headers'] = { 45 | 'Accept': 'application/json', 46 | 'Content-Type': 'application/json' 47 | } 48 | return Panoptes.client().post(*args, **kwargs) 49 | 50 | def http_put(self, *args, **kwargs): 51 | kwargs['endpoint'] = self.endpoint 52 | kwargs['headers'] = { 53 | 'Accept': 'application/json', 54 | 'Content-Type': 'application/json' 55 | } 56 | return Panoptes.client().put(*args, **kwargs) 57 | 58 | def http_delete(self, *args, **kwargs): 59 | kwargs['endpoint'] = self.endpoint 60 | return Panoptes.client().delete(*args, **kwargs) 61 | 62 | def get_workflow(self, workflow_id): 63 | """ 64 | Returns workflow object if exists in Caesar 65 | """ 66 | return self.http_get(f'workflows/{workflow_id}')[0] 67 | 68 | def get_reductions_by_workflow_and_subject(self, workflow_id, subject_id): 69 | """ 70 | Returns a list of all subject reductions as dicts from Caesar given the ids of the workflow and subject. 71 | """ 72 | return self.http_get(f'workflows/{workflow_id}/subjects/{subject_id}/reductions')[0] 73 | 74 | def get_workflow_extractors(self, workflow_id): 75 | """ 76 | Returns a list of extractors as dicts from Caesar for workflow with provided workflow_id 77 | """ 78 | return self.http_get(f'workflows/{workflow_id}/extractors')[0] 79 | 80 | def get_workflow_reducers(self, workflow_id): 81 | """ 82 | Returns a list of reducers as dicts from Caesar for workflow with provided workflow_id 83 | """ 84 | return self.http_get(f'workflows/{workflow_id}/reducers')[0] 85 | 86 | def get_extracts_by_workflow_and_subject(self, workflow_id, subject_id): 87 | """ 88 | Returns a list of extracts as dicts from Caesar for workflow with provided workflow_id 89 | """ 90 | return self.http_get( 91 | f'workflows/{workflow_id}/extractors/extractor/extracts', params={'subject_id': subject_id})[0] 92 | 93 | def save_workflow(self, workflow_id, public_extracts=False, public_reductions=False): 94 | """ 95 | Adds/updates workflow with provided workflow_id to Caesar. Checks to see if workflow exists in Caesar, if not 96 | then creates workflow and returns workflow as a dict from Caesar if created. 97 | If workflow is already in Caesar, will update the Caesar workflow. 98 | 99 | Examples:: 100 | 101 | Caesar().save_workflow(123, public_extracts=True, public_reductions=True) 102 | """ 103 | try: 104 | self.get_workflow(workflow_id) 105 | except PanoptesAPIException as err: 106 | if "couldn't find workflow with 'id'" in str(err).lower(): 107 | return self.http_post('workflows', json={ 108 | 'workflow': { 109 | 'id': workflow_id, 110 | 'public_extracts': public_extracts, 111 | 'public_reductions': public_reductions 112 | } 113 | })[0] 114 | else: 115 | raise err 116 | else: 117 | return self.http_put(f'workflows/{workflow_id}', json={ 118 | 'workflow': { 119 | 'id': workflow_id, 120 | 'public_extracts': public_extracts, 121 | 'public_reductions': public_reductions 122 | } 123 | })[0] 124 | 125 | def create_workflow_extractor(self, workflow_id, extractor_key, 126 | extractor_type, task_key='T0', other_extractor_attributes=None): 127 | """ 128 | Adds a Caesar extractor for workflow with id workflow_id. Will return extractor as a dict with 'id' if success. 129 | 130 | - **extractor_type** can be one of the following: 'blank', 'external', 'question', 'survey', 'who', 'pluck_field', or 'shape' 131 | - **extractor_key** is the unique key that you want to give to the extractor. The key will be used to track this specific reducer within Caesar. 132 | 133 | Examples:: 134 | 135 | Caesar().create_workflow_extractor(12, 'question', 'complete', 'T0', {'if_missing': ignore }) 136 | """ 137 | 138 | self.validate_extractor_type(extractor_type) 139 | if other_extractor_attributes is None: 140 | other_extractor_attributes = {} 141 | 142 | payload = { 143 | 'extractor': { 144 | 'type': extractor_type, 145 | 'key': extractor_key, 146 | 'task_key': task_key, 147 | **other_extractor_attributes 148 | } 149 | } 150 | return self.http_post(f'workflows/{workflow_id}/extractors', json=payload)[0] 151 | 152 | def create_workflow_reducer(self, workflow_id, reducer_type, key, other_reducer_attributes=None): 153 | """ 154 | Adds a Caesar reducer for given workflow. Will return reducer as dict with 'id' if successful. 155 | 156 | - **reducer_type** can be one of the following: 157 | 'consensus', 'count', 'placeholder', 'external', 'first_extract', 158 | 'stats', 'unique_count', 'rectangle', 'sqs' 159 | - **key** is a unique name for your reducer. This key will be used to track this specific reducer within Caesar. 160 | 161 | Examples:: 162 | 163 | Caesar().create_workflow_reducer(1234, 'count', 'count', {'filters' : {'extractor_keys': ['complete']}}) 164 | """ 165 | 166 | self.validate_reducer_type(reducer_type) 167 | if other_reducer_attributes is None: 168 | other_reducer_attributes = {} 169 | 170 | payload = { 171 | 'reducer': { 172 | 'type': reducer_type, 173 | 'key': key, 174 | **other_reducer_attributes 175 | } 176 | } 177 | 178 | return self.http_post(f'workflows/{workflow_id}/reducers', json=payload)[0] 179 | 180 | def create_workflow_rule(self, workflow_id, rule_type, condition_string='[]'): 181 | """ 182 | Adds a Caesar rule for given workflow. Will return rule as a dict with 'id' if successful. 183 | 184 | - **condition_string** is a string that represents a single operation (sometimes nested). 185 | The general syntax is like if you'd write Lisp in json. 186 | It is a stringified array with the first item being a string identifying the operator. 187 | See for examples of condition strings https://zooniverse.github.io/caesar/#rules 188 | - **rule_type** can either be 'subject' or 'user' 189 | 190 | Examples:: 191 | 192 | caesar = Caesar() 193 | workflow = Workflow(1234) 194 | caesar.create_workflow_rule(workflow.id, 'subject','["gte", ["lookup", "complete.0", 0], ["const", 3]]') 195 | 196 | """ 197 | 198 | self.validate_rule_type(rule_type) 199 | payload = { 200 | f'{rule_type}_rule': { 201 | 'condition_string': condition_string 202 | } 203 | } 204 | return self.http_post(f'workflows/{workflow_id}/{rule_type}_rules', json=payload)[0] 205 | 206 | def create_workflow_rule_effect(self, workflow_id, rule_type, rule_id, action, config=None): 207 | """ 208 | Adds a Caesar effect for workflow with id `workflow_id` and rule with id `rule_id`. 209 | Method will return effect as a dict with 'id' if successful. 210 | 211 | - **rule_type** can either be 'subject' or 'user' 212 | - **rule_id** is the id of the subject rule or user rule that the effect should run 213 | - **action** can be one of the following: 214 | - **(actions for subject rules)** - 'retire_subject', 'add_subject_to_set', 'add_to_collection', 'external' 215 | - **(actions for user rules)** - 'promote_user' 216 | 217 | Examples:: 218 | 219 | retirement_config = {'reason': 'classification_count'} 220 | Caesar().create_workflow_rule_effect(1, 'subject', subject_rule['id'], 'retire_subject', retirement_config) 221 | """ 222 | 223 | self.validate_rule_type(rule_type) 224 | self.validate_action(rule_type, action) 225 | if config is None: 226 | config = {} 227 | payload = { 228 | f'{rule_type}_rule_effect': { 229 | 'action': action, 230 | 'config': config 231 | } 232 | } 233 | 234 | request_url = f'workflows/{workflow_id}/{rule_type}_rules/{rule_id}/{rule_type}_rule_effects' 235 | return self.http_post(request_url, json=payload)[0] 236 | 237 | def import_data_extracts(self, workflow_id, csv_source): 238 | """ 239 | Imports machine-learnt data extracts into Caesar. 240 | 241 | - **csv_source** must be a publicly accessible csv at the time of import. 242 | Eg. csv can be hosted via an AWS S3 Bucket, Azure Blob Storage, or Panoptes media item. 243 | See `this csv `_ as an example. 244 | `csv_source`'s csv must have header/titles/rows of the following: 245 | 246 | - `extractor_key` (key corresponding to the extractor in Caesar) 247 | - `subject_id` 248 | - `data` (the machine learnt data for the corresponding subject). This entry should be JSON. 249 | 250 | Example:: 251 | 252 | caesar = Caesar(endpoint='https://caesar-staging.zooniverse.org') 253 | caesar.import_data_extracts(1234, 'https://panoptes-uploads-staging.zooniverse.org/project_attached_image/f1ab241f-2896-4efc-a1bc-3baaff64d783.csv') 254 | """ 255 | return self.http_post(f'workflows/{workflow_id}/extracts/import', json={'file': csv_source}) 256 | 257 | def validate_rule_type(self, rule_type): 258 | if rule_type not in self.RULE_TO_ACTION_TYPES.keys(): 259 | raise ValueError(f'Invalid rule type: {rule_type}. Rule types can either be by "subject" or "user"') 260 | 261 | def validate_reducer_type(self, reducer_type): 262 | if reducer_type not in self.REDUCER_TYPES: 263 | raise ValueError('Invalid reducer type') 264 | 265 | def validate_extractor_type(self, extractor_type): 266 | if extractor_type not in self.EXTRACTOR_TYPES: 267 | raise ValueError('Invalid extractor type') 268 | 269 | def validate_action(self, rule_type, action): 270 | if action not in self.RULE_TO_ACTION_TYPES[rule_type]: 271 | raise ValueError('Invalid action for rule type') 272 | -------------------------------------------------------------------------------- /panoptes_client/subject.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from panoptes_client.subject_workflow_status import SubjectWorkflowStatus 3 | 4 | _OLD_STR_TYPES = (str,) 5 | try: 6 | _OLD_STR_TYPES = _OLD_STR_TYPES + (unicode,) 7 | except NameError: 8 | pass 9 | 10 | from builtins import range, str 11 | 12 | import logging 13 | import requests 14 | import threading 15 | import time 16 | 17 | from copy import deepcopy 18 | from concurrent.futures import ThreadPoolExecutor 19 | import mimetypes 20 | 21 | try: 22 | import magic 23 | MEDIA_TYPE_DETECTION = 'magic' 24 | except ImportError: 25 | import importlib.metadata 26 | try: 27 | importlib.metadata.version("python-magic") 28 | logging.getLogger('panoptes_client').info( 29 | 'libmagic not operational, likely due to lack of shared libraries. ' 30 | 'Media MIME type determination will be based on file extensions.' 31 | ) 32 | except importlib.metadata.PackageNotFoundError: 33 | pass 34 | MEDIA_TYPE_DETECTION = 'mimetypes' 35 | 36 | from panoptes_client.panoptes import ( 37 | LinkResolver, 38 | ObjectNotSavedException, 39 | Panoptes, 40 | PanoptesAPIException, 41 | PanoptesObject, 42 | ) 43 | from redo import retry 44 | 45 | UPLOAD_RETRY_LIMIT = 5 46 | RETRY_BACKOFF_INTERVAL = 5 47 | ASYNC_SAVE_THREADS = 5 48 | 49 | ALLOWED_MIME_TYPES = [ 50 | "image/jpeg", 51 | "image/png", 52 | "image/gif", 53 | "image/svg+xml", 54 | "audio/mpeg", 55 | "video/mp4", 56 | "audio/mp4", 57 | "video/mpeg", 58 | "text/plain", 59 | "application/json", 60 | ] 61 | 62 | class Subject(PanoptesObject): 63 | _api_slug = 'subjects' 64 | _link_slug = 'subjects' 65 | _edit_attributes = ( 66 | 'locations', 67 | 'metadata', 68 | { 69 | 'links': ( 70 | 'project', 71 | ), 72 | }, 73 | ) 74 | _local = threading.local() 75 | 76 | @classmethod 77 | def async_saves(cls): 78 | """ 79 | Returns a context manager to allow asynchronously creating subjects 80 | or creating and uploading subject attached images/media. 81 | 82 | Using this context manager will create a pool of threads which will 83 | create multiple subjects at once and upload any local files 84 | simultaneously. 85 | 86 | The recommended way to use this is with the `with` statement:: 87 | 88 | with Subject.async_saves(): 89 | local_files = [...] 90 | for filename in local_files: 91 | s = Subject() 92 | s.links.project = 1234 93 | s.add_location(filename) 94 | s.save() 95 | 96 | with Subject.async_saves(): 97 | local_files = [...] 98 | for filename in local_files: 99 | s = Subject(1234) 100 | s.save_attached_image(local_file) 101 | 102 | Alternatively, you can manually shut down the thread pool:: 103 | 104 | pool = Subject.async_saves() 105 | local_files = [...] 106 | try: 107 | for filename in local_files: 108 | s = Subject() 109 | s.links.project = 1234 110 | s.add_location(filename) 111 | s.save() 112 | finally: 113 | pool.shutdown() 114 | """ 115 | cls._local.save_exec = ThreadPoolExecutor( 116 | max_workers=ASYNC_SAVE_THREADS 117 | ) 118 | return cls._local.save_exec 119 | 120 | def __init__(self, raw={}, etag=None): 121 | super(Subject, self).__init__(raw, etag) 122 | if not self.locations: 123 | self.locations = [] 124 | if not self.metadata: 125 | self.metadata = {} 126 | self._original_metadata = {} 127 | self._media_files = [None] * len(self.locations) 128 | 129 | def save(self, client=None): 130 | """ 131 | Like :py:meth:`.PanoptesObject.save`, but also uploads any local files 132 | which have previosly been added to the subject with 133 | :py:meth:`add_location`. Automatically retries uploads on error. 134 | 135 | If multiple local files are to be uploaded, several files will be 136 | uploaded simultaneously to save time. 137 | """ 138 | if not client: 139 | client = Panoptes.client() 140 | 141 | async_save = hasattr(self._local, 'save_exec') 142 | 143 | with client: 144 | if async_save: 145 | try: 146 | # The recursive call will exec in a new thread, so 147 | # self._local.save_exec will be undefined above 148 | self._async_future = self._local.save_exec.submit( 149 | self.save, 150 | client=client, 151 | ) 152 | return 153 | except RuntimeError: 154 | del self._local.save_exec 155 | async_save = False 156 | 157 | if not self.metadata == self._original_metadata: 158 | self.modified_attributes.add('metadata') 159 | 160 | response = retry( 161 | super(Subject, self).save, 162 | attempts=UPLOAD_RETRY_LIMIT, 163 | sleeptime=RETRY_BACKOFF_INTERVAL, 164 | retry_exceptions=(PanoptesAPIException,), 165 | log_args=False, 166 | ) 167 | 168 | if not response: 169 | return 170 | 171 | try: 172 | if async_save: 173 | upload_exec = self._local.save_exec 174 | else: 175 | upload_exec = ThreadPoolExecutor( 176 | max_workers=ASYNC_SAVE_THREADS, 177 | ) 178 | 179 | for location, media_data in zip( 180 | response['subjects'][0]['locations'], 181 | self._media_files 182 | ): 183 | if not media_data: 184 | continue 185 | 186 | for media_type, url in location.items(): 187 | upload_exec.submit( 188 | retry, 189 | self._upload_media, 190 | args=(url, media_data, media_type), 191 | attempts=UPLOAD_RETRY_LIMIT, 192 | sleeptime=RETRY_BACKOFF_INTERVAL, 193 | retry_exceptions=( 194 | requests.exceptions.RequestException, 195 | ), 196 | log_args=False, 197 | ) 198 | 199 | self._media_files = [None] * len(self.locations) 200 | 201 | finally: 202 | if not async_save: 203 | upload_exec.shutdown() 204 | 205 | def _upload_media(self, url, media_data, media_type): 206 | upload_response = requests.put( 207 | url, 208 | headers={ 209 | 'Content-Type': media_type, 210 | 'x-ms-blob-type': 'BlockBlob', 211 | }, 212 | data=media_data, 213 | ) 214 | upload_response.raise_for_status() 215 | return upload_response 216 | 217 | def _detect_media_type(self, media_data=None, manual_mimetype=None): 218 | if manual_mimetype is not None: 219 | return manual_mimetype 220 | 221 | if MEDIA_TYPE_DETECTION == 'magic': 222 | return magic.from_buffer(media_data, mime=True) 223 | 224 | media_type = mimetypes.guess_type(media_data)[0] 225 | if not media_type: 226 | raise UnknownMediaException( 227 | 'Could not detect file type. Please try installing ' 228 | 'libmagic: https://panoptes-python-client.readthedocs.' 229 | 'io/en/latest/user_guide.html#uploading-non-image-' 230 | 'media-types' 231 | ) 232 | return media_type 233 | 234 | def _validate_media_type(self, media_type=None): 235 | if media_type not in ALLOWED_MIME_TYPES: 236 | raise UnknownMediaException(f"File type {media_type} is not allowed.") 237 | 238 | @property 239 | def async_save_result(self): 240 | """ 241 | Retrieves the result of this subject's asynchronous save. 242 | 243 | - Returns `True` if the subject was saved successfully. 244 | - Raises `concurrent.futures.CancelledError` if the save was cancelled. 245 | - If the save failed, raises the relevant exception. 246 | - Returns `False` if the subject hasn't finished saving or if the 247 | subject has not been queued for asynchronous save. 248 | """ 249 | if hasattr(self, "_async_future") and self._async_future.done(): 250 | self._async_future.result() 251 | return True 252 | else: 253 | return False 254 | 255 | @property 256 | def attached_images(self): 257 | """ 258 | A dict containing attached images/media of a subject. This should NOT 259 | be confused with subject locations. A subject_location is a media 260 | record that saves the location of the media that will be classified in a project's classifier. 261 | A subject_attached_image is a media record that serves as 262 | ancillary/auxiliary media to the subject and will be shown on a subject's Talk page. 263 | """ 264 | if self.id is None: 265 | raise ObjectNotSavedException 266 | return self.http_get('{}/attached_images'.format(self.id))[0] 267 | 268 | def set_raw(self, raw, etag=None, loaded=True): 269 | super(Subject, self).set_raw(raw, etag, loaded) 270 | if loaded and self.metadata: 271 | self._original_metadata = deepcopy(self.metadata) 272 | 273 | def subject_workflow_status(self, workflow_id): 274 | """ 275 | Returns SubjectWorkflowStatus of Subject in Workflow 276 | 277 | Example:: 278 | 279 | subject.subject_workflow_status(4321) 280 | """ 281 | return next(SubjectWorkflowStatus.where(subject_id=self.id, workflow_id=workflow_id)) 282 | 283 | def add_location(self, location, manual_mimetype=None): 284 | """ 285 | Add a media location to this subject. 286 | 287 | - **location** can be an open :py:class:`file` object, a path to a 288 | local file, or a :py:class:`dict` containing MIME types and URLs for 289 | remote media. 290 | 291 | - **manual_mimetype** optional, passes in a specific MIME type for media item. 292 | 293 | Examples:: 294 | 295 | subject.add_location(my_file) 296 | subject.add_location('/data/image.jpg') 297 | subject.add_location({'image/png': 'https://example.com/image.png'}) 298 | subject.add_location(my_file, manual_mimetype='image/png') 299 | """ 300 | if type(location) is dict: 301 | self.locations.append(location) 302 | self._media_files.append(None) 303 | self.modified_attributes.add('locations') 304 | return 305 | elif type(location) in (str,) + _OLD_STR_TYPES: 306 | f = open(location, 'rb') 307 | else: 308 | f = location 309 | 310 | try: 311 | media_data = f.read() 312 | media_type = self._detect_media_type(media_data, manual_mimetype) 313 | 314 | self._validate_media_type(media_type) 315 | 316 | self.locations.append(media_type) 317 | self._media_files.append(media_data) 318 | self.modified_attributes.add('locations') 319 | finally: 320 | f.close() 321 | 322 | def _add_attached_image( 323 | self, 324 | src=None, 325 | content_type='image/png', 326 | external_link=True, 327 | metadata=None, 328 | client=None, 329 | ): 330 | if self.id is None: 331 | raise ObjectNotSavedException 332 | metadata = metadata or {} 333 | media_data = { 334 | 'content_type': content_type, 335 | 'external_link': external_link, 336 | 'metadata': metadata, 337 | } 338 | if src: 339 | media_data['src'] = src 340 | 341 | if not client: 342 | client = Panoptes.client() 343 | 344 | with client: 345 | json_response, _ = self.http_post('{}/attached_images'.format(self.id), json={'media': media_data}) 346 | 347 | return json_response['media'][0]['src'] 348 | 349 | def _save_attached_image(self, attached_media, manual_mimetype=None, metadata=None, client=None): 350 | if not client: 351 | client = Panoptes.client() 352 | 353 | with client: 354 | metadata = metadata or {} 355 | 356 | if type(attached_media) is dict: 357 | for content_type, url in attached_media.items(): 358 | self._add_attached_image( 359 | src=url, 360 | content_type=content_type, 361 | metadata=metadata, 362 | external_link=True, 363 | ) 364 | return 365 | elif type(attached_media) in (str,) + _OLD_STR_TYPES: 366 | f = open(attached_media, 'rb') 367 | else: 368 | f = attached_media 369 | 370 | media_type = None 371 | try: 372 | media_data = f.read() 373 | media_type = self._detect_media_type(media_data, manual_mimetype) 374 | self._validate_media_type(media_type) 375 | finally: 376 | f.close() 377 | file_url = self._add_attached_image( 378 | src=None, 379 | content_type=media_type, 380 | metadata=metadata, 381 | external_link=False, 382 | ) 383 | self._upload_media(file_url, media_data, media_type) 384 | 385 | def save_attached_image( 386 | self, 387 | attached_media, 388 | manual_mimetype=None, 389 | metadata=None, 390 | client=None 391 | ): 392 | """ 393 | Add a attached_media to this subject. 394 | NOTE: This should NOT be confused with subject location. 395 | A subject location is the content of the subject that a volunteer will classify. 396 | A subject attached_media is ancillary data associated to the subject that get displayed on the Subject's Talk Page. 397 | 398 | - **attached_media** can be an open :py:class:`file` object, a path to a 399 | local file, or a :py:class:`dict` containing MIME types and URLs for 400 | remote media. 401 | - **manual_mimetype** optional string, passes in a specific MIME type for media item. 402 | - **metadata** can be a :py:class:`dict` that stores additional info on attached_media. 403 | - **client** optional Panoptes.client() instance. Sent as a parameter for threading purposes for parallelization so that thread uses the correct client context. 404 | 405 | Examples:: 406 | 407 | # Upload image by sending in a :py:class:`file` object 408 | subject.save_attached_image(my_file) 409 | # Upload local image by passing path to file 410 | subject.save_attached_image('/data/image.jpg') 411 | # Upload local image and set mimetype and record's metadata 412 | subject.save_attached_image(attached_media=my_file, manual_mimetype='image/jpg', metadata={'metadata_test': 'Object 1'}) 413 | # Upload externally hosted image 414 | subject.save_attached_image({"image/png": "https://example.com/test.png"}) 415 | 416 | We can utilize `async_saves` to upload/save attached_images in parallel. 417 | 418 | Examples:: 419 | from concurrent.futures import as_completed 420 | subject = Subject(1234) 421 | 422 | # list of file locations 423 | local_files = [...] 424 | 425 | with Subject.async_saves(): 426 | future_to_file = {subject.save_attached_image(file_location): file_location for file_location in local_files} 427 | for future in as_completed(future_to_file): 428 | local_file = future_to_file[future] 429 | try: 430 | future.result() 431 | except Exception as exc: 432 | print(f"Upload failed for {local_file}") 433 | 434 | """ 435 | if not client: 436 | client = Panoptes.client() 437 | 438 | async_save = hasattr(self._local, 'save_exec') 439 | 440 | future_result = None 441 | with client: 442 | metadata = metadata or {} 443 | 444 | try: 445 | if async_save: 446 | upload_exec = self._local.save_exec 447 | else: 448 | upload_exec = ThreadPoolExecutor(max_workers=ASYNC_SAVE_THREADS) 449 | future_result = upload_exec.submit( 450 | retry, 451 | self._save_attached_image, 452 | args=( 453 | attached_media, 454 | manual_mimetype, 455 | metadata, 456 | client 457 | ), 458 | attempts=UPLOAD_RETRY_LIMIT, 459 | sleeptime=RETRY_BACKOFF_INTERVAL, 460 | retry_exceptions=( 461 | requests.exceptions.RequestException 462 | ), 463 | log_args=False, 464 | ) 465 | finally: 466 | if not async_save: 467 | # Shuts down and waits for the task if this isn't being used in a `async_saves` block 468 | upload_exec.shutdown(wait=True) 469 | return future_result 470 | 471 | 472 | class UnknownMediaException(Exception): 473 | pass 474 | 475 | 476 | LinkResolver.register(Subject) 477 | LinkResolver.register(Subject, 'subject') 478 | -------------------------------------------------------------------------------- /docs/user_guide.rst: -------------------------------------------------------------------------------- 1 | User Guide 2 | ========== 3 | 4 | Introduction 5 | ------------ 6 | 7 | The Panoptes Client provides high level access to the HTTP API for common 8 | project management tasks. The client module provides a set of classes which act 9 | as an object-relational mapping (ORM) layer on top of the API, allowing you to 10 | perform tasks such as creating/uploading subjects, retiring subjects, and 11 | downloading data exports without having to concern yourself with the low level 12 | detail of how the API functions. 13 | 14 | Most of the classes you'll need can be imported directly from the 15 | ``panoptes_client`` package; see the :doc:`module reference ` 16 | for a complete list. 17 | 18 | Of special note is the :py:class:`.Panoptes` class which provides the 19 | :py:meth:`.Panoptes.connect` method. This method must be called to log into the 20 | API before you can perform any privileged, project owner-specific actions 21 | (though some things, such as listing public projects or available workflows, 22 | can be done anonymously, without logging in). 23 | 24 | Most of the classes you'll be using are subclasses of the abstract 25 | :py:class:`.PanoptesObject` class. Any references in this documentation to 26 | "Panoptes object classes" or "model classes" refer to these subclasses. You 27 | should familiarise yourself with the methods that Panoptes object classes all 28 | have, in particular :py:meth:`.PanoptesObject.save` and 29 | :py:meth:`.PanoptesObject.where`. 30 | 31 | You might also want to refer to the `Panoptes API documentation 32 | `_ as this lists the full options and allowed 33 | values for many of the methods in this module -- many method arguments are 34 | simply passed to the API as-is, with the API performing server-side validation. 35 | The API documentation also lists the full attributes for each class; these are 36 | not included in this documentation. 37 | 38 | Installation 39 | ------------ 40 | 41 | Install latest stable release:: 42 | 43 | $ pip install panoptes-client 44 | 45 | Or for development or testing, you can install the development version directly 46 | from GitHub:: 47 | 48 | $ pip install -U git+https://github.com/zooniverse/panoptes-python-client.git 49 | 50 | Upgrade an existing installation:: 51 | 52 | $ pip install -U panoptes-client 53 | 54 | The Panoptes Client is supported on all versions of Python 2 and 3, from Python 55 | 2.7 onwards. 56 | 57 | Uploading non-image media types 58 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 59 | 60 | If you wish to upload subjects with non-image media (e.g. audio or video), 61 | it is desirable to have the ``libmagic`` library installed for type detection. 62 | If you don't already have ``libmagic``, please see the `dependency information 63 | for python-magic `_ for 64 | more details. 65 | 66 | If `libmagic` is not installed, assignment of MIME types (e.g., image/jpeg, 67 | video/mp4, text/plain, application/json, etc) will be based on file extensions. 68 | Be aware that if file names and extension aren't accurate, this could lead to 69 | issues when the media is loaded. 70 | 71 | Usage Examples 72 | -------------- 73 | 74 | Tutorial: Creating a new project 75 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 76 | 77 | Once you have the client installed, you can import the modules you need from the 78 | ``panoptes_client`` package. For this tutorial, we're going to log into the 79 | Panoptes API, create a project, create a subject set, and finally create some 80 | subjects to go in the new set. Let's start by importing all the classes we'll 81 | need for that:: 82 | 83 | from panoptes_client import Panoptes, Project, SubjectSet, Subject 84 | 85 | Now that we've imported all that, we can use the :py:meth:`.Panoptes.connect` 86 | method to log in:: 87 | 88 | Panoptes.connect(username='example', password='example') 89 | 90 | Next we will create our new project. All we need to do is instantiate a new 91 | instance of :py:class:`.Project`, set some required attributes, and then save 92 | it, like so:: 93 | 94 | tutorial_project = Project() 95 | 96 | tutorial_project.display_name = 'Tutorial Project' 97 | tutorial_project.description = 'My first project created in Python' 98 | tutorial_project.primary_language = 'en' 99 | tutorial_project.private = True 100 | 101 | tutorial_project.save() 102 | 103 | 104 | Now if you log into the `Zooniverse project builder 105 | `_ you should see the new project listed there. 106 | Next we will create a subject set in the same way:: 107 | 108 | subject_set = SubjectSet() 109 | 110 | subject_set.links.project = tutorial_project 111 | subject_set.display_name = 'Tutorial subject set' 112 | 113 | subject_set.save() 114 | 115 | Here you'll notice we set the ``subject_set.links.project`` attribute. ``links`` 116 | is a special attribute that handles connecting related Panoptes objects to each 117 | other. You can directly assign a Panoptes object instance, as above, or you can 118 | assign an object's ID number if you have it. As well as assigning objects, you 119 | can use ``links`` to access related objects. Now that we've created our new 120 | subject set, we will also see a link to it on ``tutorial_project`` if we reload 121 | it:: 122 | 123 | tutorial_project.reload() 124 | print(tutorial_project.links.subject_sets) 125 | 126 | This would output something like this:: 127 | 128 | [] 129 | 130 | Showing a list of the linked subject sets (containing only our new set in this 131 | case). Here ``1234`` is the internal ID number of the subject set (also 132 | accessible as ``subject_set.id``), so the exact result you get will be slightly 133 | different. 134 | 135 | Now that we have a subject set, let's create some subjects and add them to it. 136 | For this tutorial, we'll assume you have a :py:class:`dict` containing filenames 137 | and subject metadata. In reality you might load this from a CSV file, or query a 138 | database, or generate it in any number of different ways, but this would be 139 | outside the scope of this tutorial:: 140 | 141 | subject_metadata = { 142 | '/Users/me/file1.png': { 143 | 'subject_reference': 1, 144 | 'date': '2017-01-01', 145 | }, 146 | '/Users/me/file2.png': { 147 | 'subject_reference': 2, 148 | 'date': '2017-01-02', 149 | }, 150 | '/Users/me/file3.png': { 151 | 'subject_reference': 3, 152 | 'date': '2017-01-03', 153 | }, 154 | } 155 | 156 | Now we create a :py:class:`.Subject` instance for each one:: 157 | 158 | new_subjects = [] 159 | 160 | for filename, metadata in subject_metadata.items(): 161 | subject = Subject() 162 | 163 | subject.links.project = tutorial_project 164 | subject.add_location(filename) 165 | 166 | subject.metadata.update(metadata) 167 | 168 | subject.save() 169 | new_subjects.append(subject) 170 | 171 | Saving the subject will create the subject in Panoptes and then upload the 172 | image file. The :py:meth:`.Subject.add_location` method prepares files to be 173 | uploaded. You can give it a string, as above, to point to a path on the local 174 | filesystem, or you can give it an open :py:class:`file` object, or a 175 | :py:class:`dict` for remote URLs. See the :py:meth:`.Subject.add_location` 176 | documentation for examples. 177 | 178 | 179 | Note that by default the ``metadata`` attribute is an empty :py:class:`dict`, 180 | so in this example we just call :py:meth:`dict.update()` to merge it with our 181 | existing metadata. You can also set individual keys as normal:: 182 | 183 | subject.metadata['my_metadata'] = 'abcd' 184 | 185 | Or you can leave it empty if you don't need to set anything. 186 | 187 | All that's left to do now is to link our new subjects to our new subject set. 188 | That can be done with the :py:meth:`.SubjectSet.add` method:: 189 | 190 | subject_set.add(new_subjects) 191 | 192 | That takes the list of subjects and links them all in one go. This is the 193 | preferred way of doing it if you have several subjects to link (because it's 194 | faster than making several separate calls), but you can also link subjects one 195 | at a time if you need to:: 196 | 197 | subject_set.add(subject1) 198 | subject_set.add(subject2) 199 | 200 | And that's all there is to it! Your new subjects are now linked to the new 201 | subject set. 202 | 203 | Tutorial: Adding a Workflow to Caesar 204 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 205 | For this tutorial, we will connect to Caesar and add workflow to Caesar in 2 ways (via Caesar or via Workflow). We start by importing all the classes we'll need:: 206 | 207 | from panoptes_client import Panoptes, Workflow, Caesar 208 | 209 | Now that we've imported all that, we can use the :py:meth:`.Panoptes.connect` 210 | method to log in (see above tutorial). 211 | 212 | Next we can instantiate an instance of :py:class`.Caesar`:: 213 | 214 | caesar = Caesar() 215 | 216 | Note that the token from coming from :py:meth:`.Panoptes.connect` will also get us connected to Caesar. 217 | 218 | We can add workflow to Caesar using this instace of :py:class`.Caesar`, assuming you have a `workflow_id` handy:: 219 | 220 | caesar.save_workflow(1234) 221 | 222 | Another way we can do this is via :py:class`.Workflow`. We can do this by first instantiating an instance of :py:class`.Workflow` with provided `workflow_id`:: 223 | 224 | workflow = Workflow(1234) 225 | 226 | We can then add this workflow to Caesar:: 227 | 228 | workflow.save_to_caesar() 229 | 230 | 231 | 232 | Tutorial: Retiring and Unretiring Subjects 233 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 234 | For this tutorial, we're going to retire and unretire subjects in a given workflow. We start by importing all the classes we'll need:: 235 | 236 | from panoptes_client import Panoptes, Workflow, Subject, SubjectSet 237 | 238 | Now that we've imported all that, we can use the :py:meth:`.Panoptes.connect` 239 | method to log in (see above tutorial) 240 | 241 | Next we can instantiate an instance of :py:class`.Workflow`, assuming you have a `workflow_id` handy:: 242 | 243 | workflow = Workflow('1234') 244 | 245 | We can retire subjects by doing any one of the following, for these examples, we have a Subject with id `4321`:: 246 | 247 | workflow.retire_subjects(4321) 248 | workflow.retire_subjects([4321]) 249 | workflow.retire_subjects(Subject(4321)) 250 | workflow.retire_subjects([Subject(4321)]) 251 | 252 | Similarly, we allow the ability to unretire subjects by subject by doing any one of the following, for these examples, we use a `Subject` with id `4321`:: 253 | 254 | workflow.unretire_subjects(4321) 255 | workflow.unretire_subjects([4321]) 256 | workflow.unretire_subjects(Subject(4321)) 257 | workflow.unretire_subjects([Subject(4321)]) 258 | 259 | We also allow the ability to unretire subjects by `SubjectSet` by doing any on of the following, for these examples, we use a `SubjectSet` with id `5678`:: 260 | 261 | workflow.unretire_subjects_by_subject_set(5678) 262 | workflow.unretire_subjects_by_subject_set([5678]) 263 | workflow.unretire_subjects_by_subject_set(SubjectSet(5678)) 264 | workflow.unretire_subjects_by_subject_set([SubjectSet(5678)]) 265 | 266 | Other examples 267 | ~~~~~~~~~~~~~~ 268 | 269 | Print all project titles:: 270 | 271 | for project in Project.where(): 272 | print(project.title) 273 | 274 | Find a project by slug and print all its workflow names:: 275 | 276 | project = Project.find(slug='zooniverse/example') 277 | for workflow in project.links.workflows: 278 | print(workflow.display_name) 279 | 280 | List the subjects in a subject_set:: 281 | 282 | subject_set = SubjectSet.find(1234) 283 | for subject in subject_set.subjects: 284 | print(subject.id) 285 | 286 | Add subject set to first workflow in project:: 287 | 288 | workflow = project.links.workflows[0] 289 | workflow.links.subject_sets.add(subject_set) 290 | 291 | Look up user resource according to login / username:: 292 | 293 | user_results = User.where(login='username') 294 | user = next(user_results) 295 | 296 | Look up user resource for current logged in user:: 297 | 298 | user = User.me() 299 | 300 | Project owners and collaborators can update their users' project settings 301 | (workflow_id only; for use with leveling up feature):: 302 | 303 | user = User.find("1234") 304 | project = Project.find("1234") 305 | new_settings = {"workflow_id": "1234"} 306 | 307 | ProjectPreferences.save_settings( 308 | project=project, 309 | user=user, 310 | settings=new_settings, 311 | ) 312 | 313 | Alternatively, the project ID and user ID can be passed in directly if they are 314 | already known:: 315 | 316 | ProjectPreferences.save_settings( 317 | project=project_id, 318 | user=user_id, 319 | settings=new_settings, 320 | ) 321 | 322 | Project owner/collaborator can also fetch all project settings for a project:: 323 | 324 | project = Project.find("1234") 325 | 326 | pp_all = ProjectPreferences.fetch_settings(project=project) 327 | 328 | for pp in pp_all: 329 | print('Workflow ID: {}, User ID: {}'.format(pp.settings['workflow_id'], pp.raw['links']['user'])) 330 | 331 | Or the project settings for a particular user:: 332 | 333 | project = Project.find("1234") 334 | user = User.find("1234") 335 | 336 | pp_all = ProjectPreferences.fetch_settings(project=project, user=user) 337 | 338 | pp = next(pp_all) 339 | print('Workflow ID: {}, User ID: {}'.format(pp.settings['workflow_id'], pp.raw['links']['user'])) 340 | 341 | Project settings can also be fetched with the project ID and user ID 342 | directly if already known:: 343 | 344 | pp_all = ProjectPreferences.fetch_settings(project=project_id, user=user_id) 345 | 346 | pp = next(pp_all) 347 | print('Workflow ID: {}, User ID: {}'.format(pp.settings['workflow_id'], pp.raw['links']['user'])) 348 | 349 | iNaturalist Imports 350 | ~~~~~~~~~~~~~~~~~~~ 351 | Importing iNaturalist observations to Panoptes as subjects is possible via an 352 | API endpoint. Project owners and collaborators can use this client to send 353 | a request to begin that import process:: 354 | 355 | # The ID of the iNat taxon to be imported 356 | taxon_id = 1234 357 | 358 | # The subject set to which new subjects will be added 359 | subject_set_id = 5678 360 | 361 | Inaturalist.inat_import(taxon_id, subject_set_id) 362 | 363 | As an optional parameter, the updated_since timestamp string can be included 364 | and will filter obeservations by that parameter:: 365 | 366 | Inaturalist.inat_import(taxon_id, subject_set_id, '2022-10-31') 367 | 368 | Be aware that this command only initiates a background job on the Zooniverse 369 | to import Observations. The request will return a 200 upon success, but there 370 | is no progress to observe. You can refresh the subject set in the project builder 371 | to see how far along it is, and the authenticated user will receive an email 372 | when this job is completed. 373 | 374 | Caesar features by Workflow 375 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 376 | Most Caesar use cases are usually through a workflow: the following are examples of Caesar functions that can be done via Workflow. 377 | 378 | Add Caesar Extractor by Workflow:: 379 | 380 | workflow = Workflow(1234) 381 | workflow.add_extractor('question', 'complete', 'T1', {'if_missing' : 'ignore'}) 382 | 383 | Add Reducer by Workflow:: 384 | 385 | external_reducer_attributes = { 386 | 'url': 'https://aggregation-caesar.zooniverse.org/reducers/optics_line_text_reducer', 387 | 'filters': { 388 | 'extractor_keys': ['alice'] 389 | } 390 | } 391 | workflow.add_caesar_reducer('external', 'alice', external_reducer_attributes) 392 | 393 | Adding Subject Rules by Workflow. When creating a rule, the `condition_string` argumentis a stringified array with the first item being a string identifying the operator. See https://zooniverse.github.io/caesar/#rules for examples of condition strings:: 394 | 395 | condition_string = '["gte", ["lookup", "complete.0", 0], ["const", 30]]' 396 | workflow.add_caesar_rule(condition_string, 'subject') 397 | 398 | Adding Subject Effect for a Subject Rule with id `1234` by Workflow. Ths particular effect being created will retire subjects early due to a consensus. :: 399 | 400 | workflow.add_caesar_rule_effect('subject', 1234, 'retire_subject', {'reason' : 'consensus'}) 401 | 402 | Project Copier 403 | ~~~~~~~~~~~~~~ 404 | The project copier feature clones an existing template project (i.e., projects which have the project.configuration `template` flag set as true and are not live). 405 | 406 | You can set the template flag using the Project.save() method. See example below:: 407 | 408 | project = Project(project_id) 409 | project.configuration = {"template": True} 410 | project.save() 411 | 412 | **How to use** 413 | 414 | This functionality can be accessed by the Panoptes python client. It exists on the Project module and can be called with the `copy` method:: 415 | 416 | Project(project_id).copy() 417 | 418 | You can also pass an optional `new_subject_set_name` parameter and this would be used to create a new SubjectSet for the newly cloned project:: 419 | 420 | Project(project_id).copy(new_subject_set_name='My New Subject Set') 421 | 422 | Data Exports 423 | ~~~~~~~~~~~~ 424 | The Panoptes Python Client allows you to generate, describe, and download data exports (e.g., classifications, subjects, workflows) via the Python ``panoptes_client`` library. 425 | 426 | Multiple types of exports can be generated using the Python Client, including project-level products (classifications, subjects, workflows) and smaller scale classification exports (for workflows and subject sets). 427 | For the examples below, we will demonstrate commands for a project wide classifications export, but these functions work for any export type. 428 | 429 | **Get Exports** 430 | 431 | As the name implies, this method downloads a data export over HTTP. This uses the `get_export` method and can be called by passing in the following parameters: 432 | 433 | * *export_type*: string specifying which type of export should be downloaded. 434 | * *generate*: a boolean specifying whether to generate a new export and wait for it to be ready, or to just download the latest existing export. Default is False. 435 | * *wait*: a boolean specifying whether to wait for an in-progress export to finish, if there is one. Has no effect if `generate` is true (wait will occur in this case). Default is False. 436 | * *wait_timeout*: the number of seconds to wait if `wait` is True or `generate` is True. Has no effect if `wait` and `generate` are both False. Default is None (wait indefinetly). 437 | 438 | Examples:: 439 | 440 | # Fetch existing export 441 | classification_export = Project(1234).get_export('classifications') 442 | 443 | # Generate export, wait indefinetly for result to complete 444 | classification_export = Project(1234).get_export('classifications', generate=True) 445 | 446 | # Fetch export currently being processed, wait up to 600 seconds for export to complete 447 | classification_export = Project(1234).get_export('classifications', wait=True, wait_timeout=600) 448 | 449 | The returned Response object has two additional attributes as a convenience for working with the CSV content; `csv_reader` and `csv_dictreader`, which are wrappers for `csv.reader()` and `csv.DictReader` respectively. 450 | These wrappers take care of correctly decoding the export content for the CSV parser:: 451 | 452 | classification_export = Project(1234).get_export('classifications') 453 | for row in classification_export.csv_dictreader(): 454 | print(row) 455 | 456 | **Generate Exports** 457 | 458 | As the name implies, this method generates/starts a data export. This uses the `generate_export` method and can be called by passing in the `export_type` parameter:: 459 | 460 | export_info = Project(1234).generate_export('classifications') 461 | 462 | This kick off the export generation process and returns `export_info` as a dictionary containing the metadata on the selected export. 463 | 464 | **Describing Exports** 465 | 466 | This method fetches information/metadata about a specific type of export. This uses the `describe_export` method and can be called by passing the `export_type` (e.g., classifications, subjects) this way:: 467 | 468 | export_info = Project(1234).describe_export('classifications') 469 | 470 | This would return `export_info` as a dictionary containing the metadata on the selected export. 471 | 472 | Subject Set Classification Exports 473 | ++++++++++++++++++++++++++++++++++ 474 | 475 | As mentioned above, it is possible to request a classifications export for project, workflow, or subject set scope. 476 | For the subject set classification export, classifications are included in the export if they satisfy two selection criteria: 477 | 478 | 1. The subject referenced in the classification is a member of the relevant subject set. 479 | 2. The relevant subject set is currently linked to the workflow referenced in the classification. 480 | 481 | Example Usage:: 482 | 483 | # For a SubjectSet, check which Workflows to which it is currently linked 484 | subject_set = SubjectSet.find(1234) 485 | for wf in subject_set.links.workflows: 486 | print(wf.id, wf.display_name) 487 | 488 | # Generate Export 489 | subject_set_classification_export = subject_set.get_export('classifications', generate=True) 490 | 491 | Automated Aggregation of Classifications 492 | ++++++++++++++++++++++++++++++++++++++++ 493 | 494 | The Zooniverse supports research teams by maintaining the ``panoptes_aggregation`` Python package 495 | (see `docs `_ and `repo `_). 496 | This software requires local installation to run, which can be a deterrent for its use. 497 | As an alternative to installing and running this aggregation code, we provide a Zooniverse-hosted service for producing aggregated results for simple datasets. 498 | This "batch aggregation" feature is built to perform simple workflow-level data aggregation that uses baseline extractors and reducers without any custom configuration. 499 | Please see :py:meth:`.Workflow.run_aggregation` and :py:meth:`.Workflow.get_batch_aggregation_links` docstrings for full details. 500 | 501 | Example Usage:: 502 | 503 | # Generate input data exports: workflow-level classification export and project-level workflows export 504 | Workflow(1234).generate_export('classification') 505 | Project(2345).generate_export('workflows') 506 | 507 | # Request batch aggregation data product 508 | Workflow(1234).run_aggregation() 509 | 510 | # Fetch batch aggregation download URLs 511 | urls = Workflow(1234).get_batch_aggregation_links() 512 | print(urls) 513 | 514 | # Load Reductions CSV using Pandas 515 | pd.read_csv(urls['reductions']) 516 | -------------------------------------------------------------------------------- /panoptes_client/workflow.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | from builtins import str 3 | from copy import deepcopy 4 | from panoptes_client.set_member_subject import SetMemberSubject 5 | from panoptes_client.subject_workflow_status import SubjectWorkflowStatus 6 | 7 | from panoptes_client.exportable import Exportable 8 | from panoptes_client.panoptes import Panoptes, PanoptesObject, LinkResolver, PanoptesAPIException 9 | from panoptes_client.subject import Subject 10 | from panoptes_client.subject_set import SubjectSet 11 | from panoptes_client.utils import batchable 12 | 13 | from panoptes_client.caesar import Caesar 14 | from panoptes_client.user import User 15 | from panoptes_client.aggregation import Aggregation 16 | import logging 17 | 18 | class Workflow(PanoptesObject, Exportable): 19 | _api_slug = 'workflows' 20 | _link_slug = 'workflows' 21 | _edit_attributes = ( 22 | 'active', 23 | 'configuration', 24 | 'display_name', 25 | 'first_task', 26 | 'mobile_friendly', 27 | 'primary_language', 28 | 'retirement', 29 | 'tasks', 30 | { 31 | 'links': ( 32 | 'project', 33 | ) 34 | }, 35 | ) 36 | 37 | def __init__(self, raw={}, etag=None): 38 | super(Workflow, self).__init__(raw, etag) 39 | if not self.configuration: 40 | self.configuration = {} 41 | self._original_configuration = {} 42 | if not self.retirement: 43 | self.retirement = {} 44 | self._original_retirement = {} 45 | if not self.tasks: 46 | self.tasks = {} 47 | self._original_tasks = {} 48 | 49 | def set_raw(self, raw, etag=None, loaded=True): 50 | super(Workflow, self).set_raw(raw, etag, loaded) 51 | if loaded: 52 | if self.configuration: 53 | self._original_configuration = deepcopy(self.configuration) 54 | if self.retirement: 55 | self._original_retirement = deepcopy(self.retirement) 56 | if self.tasks: 57 | self._original_tasks = deepcopy(self.tasks) 58 | elif loaded: 59 | self._original_configuration = None 60 | self._original_retirement = None 61 | self._original_tasks = None 62 | 63 | def save(self): 64 | """ 65 | Adds workflow configuration, retirement, and tasks dicts to the list of 66 | savable attributes if it has changed. 67 | """ 68 | if not self.configuration == self._original_configuration: 69 | self.modified_attributes.add('configuration') 70 | if not self.retirement == self._original_retirement: 71 | self.modified_attributes.add('retirement') 72 | if not self.tasks == self._original_tasks: 73 | self.modified_attributes.add('tasks') 74 | 75 | super(Workflow, self).save() 76 | 77 | @batchable 78 | def retire_subjects(self, subjects, reason='other'): 79 | """ 80 | Retires subjects in this workflow. 81 | 82 | - **subjects** can be a list of :py:class:`Subject` instances, a list 83 | of subject IDs, a single :py:class:`Subject` instance, or a single 84 | subject ID. 85 | - **reason** gives the reason the :py:class:`Subject` has been retired. 86 | Defaults to **other**. 87 | 88 | Examples:: 89 | 90 | workflow.retire_subjects(1234) 91 | workflow.retire_subjects([1,2,3,4]) 92 | workflow.retire_subjects(Subject(1234)) 93 | workflow.retire_subjects([Subject(12), Subject(34)]) 94 | """ 95 | 96 | subjects = [s.id if isinstance(s, Subject) else s for s in subjects] 97 | 98 | return Workflow.http_post( 99 | '{}/retired_subjects'.format(self.id), 100 | json={ 101 | 'subject_ids': subjects, 102 | 'retirement_reason': reason 103 | }, 104 | ) 105 | 106 | @batchable 107 | def unretire_subjects(self, subjects): 108 | """ 109 | Un-retires subjects in this workflow by subjects. 110 | 111 | - **subjects** can be a list of :py:class:`Subject` instances, a list 112 | of subject IDs, a single :py:class:`Subject` instance, or a single 113 | subject ID. 114 | """ 115 | 116 | subjects = [s.id if isinstance(s, Subject) else s for s in subjects] 117 | return Workflow.http_post( 118 | '{}/unretire_subjects'.format(self.id), 119 | json={ 120 | 'subject_ids': subjects 121 | }, 122 | ) 123 | 124 | @batchable 125 | def unretire_subjects_by_subject_set(self, subject_sets): 126 | """ 127 | Un-retires subjects in this workflow by subject_sets. 128 | 129 | - **subjects_sets** can be a list of :py:class:`SubjectSet` instances, a 130 | list of subject_set IDs, a single :py:class:`SubjectSet` instance, or 131 | a single subject_set ID. 132 | """ 133 | subject_sets = [s.id if isinstance( 134 | s, SubjectSet) else s for s in subject_sets] 135 | return Workflow.http_post( 136 | '{}/unretire_subjects'.format(self.id), 137 | json={ 138 | 'subject_set_ids': subject_sets 139 | }, 140 | ) 141 | 142 | def add_subject_sets(self, subject_sets): 143 | """ 144 | A wrapper around :py:meth:`.LinkCollection.add`. Equivalent to:: 145 | 146 | workflow.links.subject_sets.add(subject_sets) 147 | """ 148 | 149 | return self.links.subject_sets.add(subject_sets) 150 | 151 | def remove_subject_sets(self, subject_sets): 152 | """ 153 | A wrapper around :py:meth:`.LinkCollection.remove`. Equivalent to:: 154 | 155 | workflow.links.subject_sets.remove(subject_sets) 156 | """ 157 | 158 | return self.links.subject_sets.remove(subject_sets) 159 | 160 | def subject_workflow_status(self, subject_id): 161 | """ 162 | Returns SubjectWorkflowStatus of the current workflow given subject_id 163 | 164 | Example:: 165 | 166 | workflow.subject_workflow_status(1234) 167 | """ 168 | return next(SubjectWorkflowStatus.where(subject_id=subject_id, workflow_id=self.id)) 169 | 170 | def subject_workflow_statuses(self, subject_set_id): 171 | """ 172 | A generator which yields :py:class:`.SubjectWorkflowStatus` objects for subjects in the 173 | subject set of the given workflow 174 | 175 | Examples:: 176 | 177 | for status in workflow.subject_workflow_statuses(1234): 178 | print(status.retirement_reason) 179 | """ 180 | subject_ids = [] 181 | for sms in SetMemberSubject.where(subject_set_id=subject_set_id): 182 | subject_ids.append(sms.links.subject.id) 183 | 184 | subject_ids = ','.join(map(str, subject_ids)) 185 | for status in SubjectWorkflowStatus.where(subject_ids=subject_ids, workflow_id=self.id): 186 | yield status 187 | 188 | """ CAESAR METHODS """ 189 | 190 | def save_to_caesar(self, public_extracts=False, public_reductions=False): 191 | """ 192 | Adds/updates selected Workflow to Caesar. Returns workflow as a dict from Caesar if created. 193 | 194 | - **public_extracts** set to True to Enable Public Extracts, Defaults to False 195 | - **public_reductions** set to True to Enable Public Reductions. Defaults to False 196 | 197 | Examples:: 198 | 199 | workflow.save_to_caesar() 200 | workflow.save_to_caesar(public_extracts=True, public_reductions=True) 201 | 202 | """ 203 | return Caesar().save_workflow(self.id, public_extracts, public_reductions) 204 | 205 | def caesar_subject_extracts(self, subject_id): 206 | """ 207 | Returns a list of subject extracts as a dict from Caesar for a given subject. 208 | 209 | Examples:: 210 | 211 | workflow.caesar_subject_extracts(1234) 212 | 213 | s = Subject(1234) 214 | workflow.caesar_subject_extracts(s.id) 215 | """ 216 | url = f'{self._api_slug}/{self.id}/extractors/all/extracts' 217 | return Caesar().http_get(url, params={'subject_id': subject_id})[0] 218 | 219 | def caesar_subject_reductions(self, subject_id, reducer_key=""): 220 | """ 221 | Returns a list of subject reductions as dicts from Caesar for a given subject. 222 | Defaults to return all subject reductions for a given subject. 223 | 224 | - **reducer_key** If given, will filter and return reductions for the reducer with that reducer_key. 225 | 226 | Examples:: 227 | 228 | workflow.caesar_subject_reductions(1234) 229 | workflow.caesar_subject_reductions(1234,'points') 230 | """ 231 | url = f'{self._api_slug}/{self.id}/subjects/{subject_id}/reductions' 232 | if reducer_key.strip(): 233 | url += f'?reducer_key={reducer_key.strip()}' 234 | return Caesar().http_get(url)[0] 235 | 236 | def caesar_extractors(self): 237 | """ 238 | Returns a list of extractors as dicts from Caesar for particular workflow. 239 | 240 | Examples:: 241 | 242 | workflow.caesar_extractors() 243 | """ 244 | return Caesar().http_get(f'{self._api_slug}/{self.id}/extractors')[0] 245 | 246 | def caesar_reducers(self): 247 | """ 248 | Returns a list of reducers as dicts from Caesar for particular workflow. 249 | 250 | Examples:: 251 | 252 | workflow.caesar_reducers() 253 | """ 254 | return Caesar().http_get(f'{self._api_slug}/{self.id}/reducers')[0] 255 | 256 | def caesar_rules(self, rule_type): 257 | """ 258 | Returns a list of Caesar workflow rules as dicts. 259 | 260 | - **rule_type** can either be 'subject' or 'user'; 261 | if 'subject' will return subject rules, 262 | if 'user' will return user rules 263 | 264 | Examples:: 265 | 266 | workflow.caesar_rules('subject') 267 | workflow.caesar_rules('user') 268 | """ 269 | return Caesar().http_get(f'{self._api_slug}/{self.id}/{rule_type}_rules')[0] 270 | 271 | def caesar_effects(self, rule_type, rule_id): 272 | """ 273 | Returns a list of Caesar workflow effects as dicts for the workflow rule with id `rule_id`. 274 | 275 | - **rule_type** can either be 'subject' or 'user'; 276 | if 'subject' will return effects of subject rules with id `rule_id`, 277 | if 'user' will return will return effects of user rules with id `rule_id` 278 | 279 | Examples:: 280 | 281 | workflow.caesar_effects('subject', 123) 282 | workflow.caesar_effects('user', 321) 283 | """ 284 | return Caesar().http_get(f'{self._api_slug}/{self.id}/{rule_type}_rules/{rule_id}/{rule_type}_rule_effects')[0] 285 | 286 | def add_caesar_extractor(self, extractor_type, extractor_key, task_key='T0', extractor_other_attributes=None): 287 | """ 288 | Adds a Caesar extractor for given workflow. Will return extractor as a dict with 'id' if successful. 289 | 290 | - **extractor_type** can be one of the following: 291 | 'blank', 'external', 'question', 'survey', 'who', 'pluck_field', or 'shape' 292 | - **extractor_key** is the unique key that you want to give to the extractor. 293 | The key will be used to track this specific reducer within Caesar. 294 | 295 | Examples:: 296 | 297 | workflow.add_caesar_extractor('question', 'complete', 'T0', {'if_missing': ignore }) 298 | """ 299 | caesar = Caesar() 300 | caesar.validate_extractor_type(extractor_type) 301 | if extractor_other_attributes is None: 302 | extractor_other_attributes = {} 303 | payload = { 304 | 'extractor': { 305 | 'type': extractor_type, 306 | 'key': extractor_key, 307 | 'task_key': task_key, 308 | **extractor_other_attributes 309 | } 310 | } 311 | return caesar.http_post(f'{self._api_slug}/{self.id}/extractors', json=payload)[0] 312 | 313 | def add_caesar_reducer(self, reducer_type, key, other_reducer_attributes=None): 314 | """ 315 | Adds a Caesar reducer for given workflow. Will return reducer as dict with 'id' if successful. 316 | 317 | - **reducer_type** can be one of the following: 318 | 'consensus', 'count', 'placeholder', 'external', 'first_extract', 319 | 'stats', 'unique_count', 'rectangle', 'sqs' 320 | - **key** is a unique name for your reducer. This key will be used to track this specific reducer within Caesar. 321 | 322 | Examples:: 323 | 324 | workflow.add_caesar_reducer('count', 'count', {'filters' : {'extractor_keys': ['complete']}}) 325 | """ 326 | caesar = Caesar() 327 | caesar.validate_reducer_type(reducer_type) 328 | if other_reducer_attributes is None: 329 | other_reducer_attributes = {} 330 | payload = { 331 | 'reducer': { 332 | 'type': reducer_type, 333 | 'key': key, 334 | **other_reducer_attributes 335 | } 336 | } 337 | return caesar.http_post(f'{self._api_slug}/{self.id}/reducers', json=payload)[0] 338 | 339 | def add_caesar_rule(self, condition_string, rule_type): 340 | """ 341 | Adds a Caesar rule for given workflow. Will return rule as a dict with 'id' if successful. 342 | 343 | - **condition_string** is a string that represents a single operation (sometimes nested). 344 | The general syntax is like if you'd write Lisp in json. 345 | It is a stringified array with the first item being a string identifying the operator. 346 | See https://zooniverse.github.io/caesar/#rules for examples of condition strings 347 | - **rule_type** can either be 'subject' or 'user' 348 | 349 | Examples:: 350 | 351 | workflow.add_caesar_rule('["gte", ["lookup", "complete.0", 0], ["const", 3]]', 'subject') 352 | 353 | """ 354 | caesar = Caesar() 355 | caesar.validate_rule_type(rule_type) 356 | payload = {f'{rule_type}_rule': { 357 | 'condition_string': condition_string 358 | }} 359 | return caesar.http_post(f'{self._api_slug}/{self.id}/{rule_type}_rules', json=payload)[0] 360 | 361 | def add_caesar_rule_effect(self, rule_type, rule_id, action, effect_config=None): 362 | """ 363 | Adds a Caesar effect for workflow and given the workflow rule with id rule_id. 364 | Method will return effect as a dict with 'id' if successful. 365 | 366 | - **rule_type** can either be 'subject' or 'user' 367 | - **rule_id** is the id of the subject rule or user rule that the effect should run 368 | - **action** can be one of the following: 369 | - **(actions for subject rules)** - 'retire_subject', 'add_subject_to_set', 'add_to_collection', 'external' 370 | - **(actions for user rules)** - 'promote_user' 371 | 372 | Examples:: 373 | 374 | workflow.add_caesar_rule_effect('subject', subject_rule['id'], 'retire_subject', 375 | {'reason': 'classification_count'}) 376 | """ 377 | caesar = Caesar() 378 | caesar.validate_rule_type(rule_type) 379 | caesar.validate_action(rule_type, action) 380 | if effect_config is None: 381 | effect_config = {} 382 | 383 | payload = { 384 | f'{rule_type}_rule_effect': { 385 | 'action': action, 386 | 'config': effect_config 387 | } 388 | } 389 | return caesar.http_post( 390 | f'{self._api_slug}/{self.id}/{rule_type}_rules/{rule_id}/{rule_type}_rule_effects', 391 | json=payload 392 | )[0] 393 | 394 | def import_caesar_data_extracts(self, csv_source): 395 | """ 396 | Imports machine-learnt data as extracts into Caesar. 397 | 398 | - **csv_source** must be a publicly accessible csv at the time of import. 399 | Eg. csv can be hosted via an AWS S3 Bucket, Azure Blob Storage, or Panoptes media item. 400 | See `this csv `_ as an example. 401 | `csv_source`'s csv must have header/titles/rows of the following: 402 | 403 | - `extractor_key` (key corresponding to the extractor in Caesar) 404 | - `subject_id` 405 | - `data` (the machine learnt data for the corresponding subject) 406 | 407 | Example:: 408 | 409 | workflow.import_caesar_data_extracts('https://panoptes-uploads-staging.zooniverse.org/project_attached_image/f1ab241f-2896-4efc-a1bc-3baaff64d783.csv') 410 | """ 411 | return Caesar().http_post(f'{self._api_slug}/{self.id}/extracts/import', json={'file': csv_source}) 412 | 413 | def add_alice_extractors(self, alice_task_key='T0', question_task_key='T1', 414 | question_extractor_if_missing='ignore', 415 | other_question_extractor_attrib=None, 416 | other_alice_extractor_attrib=None): 417 | """ 418 | Adds ALICE Extractors (two extractors: Question and External). 419 | 420 | - QuestionExtractor getting created will have a key of `complete` 421 | - **question_task_key** - Task ID that reflects placement of: 422 | “Have all the volunteer-made underline marks turned grey?” step. Defaults to T1 423 | - ExternalExtractor getting created will have a key of `alice` 424 | - **alice_task_key** - Task ID that reflects placement of Transcription Task step (Defaults to T0) 425 | 426 | Examples:: 427 | 428 | workflow.add_alice_extractors() 429 | """ 430 | if other_question_extractor_attrib is None: 431 | other_question_extractor_attrib = {} 432 | 433 | if other_alice_extractor_attrib is None: 434 | other_alice_extractor_attrib = {} 435 | 436 | question_extractor_attributes = { 437 | 'if_missing': question_extractor_if_missing, 438 | **other_question_extractor_attrib 439 | } 440 | 441 | alice_extractor_attributes = { 442 | 'url': f'https://aggregation-caesar.zooniverse.org/extractors/line_text_extractor?task={alice_task_key}', 443 | **other_alice_extractor_attrib 444 | } 445 | 446 | self.add_caesar_extractor('question', 'complete', question_task_key, question_extractor_attributes) 447 | self.add_caesar_extractor('external', 'alice', alice_task_key, alice_extractor_attributes) 448 | 449 | def add_alice_reducers(self, alice_min_views=5, low_consensus_threshold=3): 450 | """ 451 | Adds ALICE Reducers for given workflow (three reducers: External, Stats, Count). 452 | 453 | - **alice_min_views** - This is the threshold number of classifications in order to "gray-out" a transcribed line. 454 | Default is 5. 455 | - **low_consensus_threshold** - This is the threshold number of classifications in agreement for good consensus. 456 | Default is 3 457 | """ 458 | external_reducer_url = 'https://aggregation-caesar.zooniverse.org/reducers/optics_line_text_reducer' 459 | if alice_min_views or low_consensus_threshold: 460 | external_reducer_url += f'?minimum_views={alice_min_views}&' 461 | external_reducer_url += f'low_consensus_threshold={low_consensus_threshold}' 462 | 463 | default_filter_attribs = { 464 | 'repeated_classifications': 'keep_first' 465 | } 466 | external_reducer_attributes = { 467 | 'url': external_reducer_url, 468 | 'filters': { 469 | 'extractor_keys': ['alice'], 470 | **default_filter_attribs 471 | } 472 | } 473 | self.add_caesar_reducer('external', 'alice', external_reducer_attributes) 474 | 475 | complete_reducer_attribs = { 476 | 'filters': { 477 | 'extractor_keys': ['complete'], 478 | **default_filter_attribs 479 | } 480 | } 481 | self.add_caesar_reducer('stats', 'complete', complete_reducer_attribs) 482 | 483 | self.add_caesar_reducer('count', 'count', complete_reducer_attribs) 484 | 485 | def add_alice_rules_and_effects(self, question_retirement_limit=3, count_retirement_limit=30): 486 | """ 487 | Adds subject rules and corresponding effects for ALICE configuration of the given workflow. 488 | Two subject rules are created that will trigger retirement: a Question rule and a Count rule. 489 | A total of 4 subject rule effects should get created. 490 | There should be 2 effects related to the Question Rule condition 491 | (one to send to ALICE and the other to retire subject). 492 | There should also be 2 effects related to the Count Rule condition 493 | (one to send to alice and the other to retire subject) 494 | 495 | - **question_retirement_limit** - Question subject rule created will trigger retirement when the answer to: 496 | "is this complete" question reaches this threshhold limit (defaults to 3) 497 | - **count_retirement_limit** - Count Subject Rule created will trigger retirement when the classification count reaches this limit (defaults to 30) 498 | 499 | """ 500 | question_subject_rule = self.add_caesar_rule( 501 | f'["gte", ["lookup", "complete.0", 0], ["const", {question_retirement_limit}]]', 502 | 'subject' 503 | ) 504 | send_to_alice_effect_config = { 505 | 'url': 'https://tove.zooniverse.org/import', 506 | 'reducer_key': 'alice' 507 | } 508 | self.add_caesar_rule_effect('subject', question_subject_rule['id'], 'external', send_to_alice_effect_config) 509 | self.add_caesar_rule_effect('subject', question_subject_rule['id'], 'retire_subject', {'reason': 'consensus'}) 510 | 511 | count_subject_rule = self.add_caesar_rule( 512 | f'["gte", ["lookup", "count.classifications", 0], ["const", {count_retirement_limit}]]', 513 | 'subject' 514 | ) 515 | self.add_caesar_rule_effect('subject', count_subject_rule['id'], 'external', send_to_alice_effect_config) 516 | self.add_caesar_rule_effect('subject', count_subject_rule['id'], 'retire_subject', {'reason': 'classification_count'}) 517 | 518 | def configure_for_alice(self): 519 | """ 520 | Configures workflow for ALICE/TOVE. 521 | 522 | - This method will add workflow to Caesar 523 | - This method will create Caesar Extractors needed for ALICE with defaults. 524 | - This method will also create Caesar Reducers needed for ALICE with defaults. 525 | (In particular, `minimum_views` = 5, and `low_consensus_threshold` = 3) 526 | - And this method will also create Caesar Subject Rules and Effects needed for ALICE with defaults. 527 | (In particular, Question-based retirement's retirement limit is 3 and Count-based retirement default is 30.) 528 | 529 | """ 530 | self.save_to_caesar(public_extracts=True, public_reductions=True) 531 | self.add_alice_extractors() 532 | self.add_alice_reducers() 533 | self.add_alice_rules_and_effects() 534 | 535 | def run_aggregation(self, user=None, delete_if_exists=False): 536 | """ 537 | This method will start a new batch aggregation run, Will return a dict with the created aggregation if successful. 538 | 539 | - **user** can be either a :py:class:`.User` or an ID. Defaults to logged in user if not set. 540 | - **delete_if_exists** parameter is optional; if true, deletes any previous instance. 541 | 542 | Examples:: 543 | 544 | Workflow(1234).run_aggregation() 545 | Workflow(1234).run_aggregation(user=1234, delete_if_exists=True) 546 | """ 547 | 548 | if(isinstance(user, User)): 549 | _user_id = user.id 550 | elif (isinstance(user, (int, str,))): 551 | _user_id = user 552 | elif User.me(): 553 | _user_id = User.me().id 554 | else: 555 | raise TypeError('Invalid user parameter. Provide user ID or login.') 556 | 557 | try: 558 | workflow_aggs = Aggregation.where(workflow_id=self.id) 559 | if workflow_aggs.object_count > 0: 560 | current_wf_agg = next(workflow_aggs) 561 | if delete_if_exists: 562 | current_wf_agg.delete() 563 | return self._create_agg(_user_id) 564 | else: 565 | logging.getLogger('panoptes_client').info( 566 | 'Aggregation exists for Workflow {}. '.format(self.id) + 567 | 'Set delete_if_exists to True to create new aggregation.' 568 | ) 569 | return current_wf_agg 570 | else: 571 | return self._create_agg(_user_id) 572 | except PanoptesAPIException as err: 573 | raise err 574 | 575 | def get_batch_aggregation(self): 576 | """ 577 | This method will fetch existing aggregation resource, if any. 578 | """ 579 | try: 580 | return next(Aggregation.where(workflow_id=self.id)) 581 | except StopIteration: 582 | raise PanoptesAPIException( 583 | 'Could not find Aggregation for Workflow {}'.format(self.id) 584 | ) 585 | 586 | def _create_agg(self, user_id): 587 | new_agg = Aggregation() 588 | new_agg.links.workflow = self.id 589 | new_agg.links.user = user_id 590 | new_agg.save() 591 | return new_agg 592 | 593 | def _get_agg_property(self, param): 594 | return getattr(self.get_batch_aggregation(), param, None) 595 | 596 | def get_batch_aggregation_status(self): 597 | """ 598 | This method will fetch existing aggregation status, if any. 599 | """ 600 | return self._get_agg_property('status') 601 | 602 | def get_batch_aggregation_links(self): 603 | """ 604 | This method will fetch existing aggregation links, if any. 605 | 606 | Data product options, returned as dictionary of type/URL key-value pairs: 607 | 1. reductions: subject-level reductions results CSV 608 | 2. aggregation: a ZIP file containing all inputs (workflow-level classification export, project-level workflows export) and outputs (extracts, reductions) 609 | """ 610 | uuid = self._get_agg_property('uuid') 611 | aggregation_url = 'https://aggregationdata.blob.core.windows.net' 612 | env = 'production' 613 | if Panoptes.client().endpoint == 'https://panoptes-staging.zooniverse.org': 614 | env = 'staging' 615 | return {'reductions': f'{aggregation_url}/{env}/{uuid}/{self.id}_reductions.csv', 616 | 'aggregation': f'{aggregation_url}/{env}/{uuid}/{self.id}_aggregation.zip'} 617 | 618 | @property 619 | def versions(self): 620 | """ 621 | A generator which yields all :py:class:`.WorkflowVersion` instances for 622 | this workflow. 623 | """ 624 | 625 | return WorkflowVersion.where(workflow=self) 626 | 627 | 628 | LinkResolver.register(Workflow) 629 | LinkResolver.register(Workflow, 'active_workflows', readonly=True) 630 | 631 | # Keep import WorkflowVersion import on bottom to avoid circular import 632 | from panoptes_client.workflow_version import WorkflowVersion 633 | --------------------------------------------------------------------------------