├── MANIFEST.in ├── pyproject.toml ├── PYAOGMANEO_LICENSE.md ├── branch_switch.sh ├── examples ├── cartpole_env_runner.py ├── lunarlander_env_runner.py ├── EXAMPLES.md ├── cartpole_manual.py ├── wavy_line_prediction.py └── env_runner.py ├── source └── pyaogmaneo │ ├── py_helpers.cpp │ ├── py_helpers.h │ ├── py_image_encoder.h │ ├── py_hierarchy.h │ ├── py_image_encoder.cpp │ ├── py_module.cpp │ └── py_hierarchy.cpp ├── CMake └── FindAOgmaNeo.cmake ├── CONTRIBUTING.md ├── CMakeLists.txt ├── README.md ├── .github └── workflows │ └── build_publish.yml ├── setup.py └── LICENSE.md /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md LICENSE 2 | global-include CMakeLists.txt *.cmake 3 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ "setuptools", "wheel", "cmake" ] 3 | build-backend = "setuptools.build_meta" 4 | -------------------------------------------------------------------------------- /PYAOGMANEO_LICENSE.md: -------------------------------------------------------------------------------- 1 | PyAOgmaNeo - Copyright (c) 2020-2025 Ogma Intelligent Systems Corp. https://ogma.ai 2 | 3 | This program is published under the Attribution-NonCommercial-ShareAlike 4.0 4 | International (CC BY-NC-SA 4.0) License, in the hope that it will be useful, 5 | but WITHOUT ANY WARRANTY; without even the implied warranty of 6 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 7 | 8 | You should have received a copy of the CC BY-NC-SA 4.0 along with this 9 | program. If not, see https://creativecommons.org/licenses/by-nc-sa/4.0/ 10 | 11 | Please contact licenses@ogmacorp.com regarding uses not covered by this license. 
12 | -------------------------------------------------------------------------------- /branch_switch.sh: -------------------------------------------------------------------------------- 1 | AOGMANEO_LOCATION="../AOgmaNeo" 2 | PYAOGMANEO_LOCATION="$PWD" 3 | 4 | echo -n "Enter branch name to switch to and build: " 5 | read branch_name 6 | 7 | echo "Switching to branch $branch_name" 8 | 9 | cd $AOGMANEO_LOCATION 10 | 11 | git fetch origin $branch_name 12 | git checkout $branch_name 13 | git merge origin/$branch_name 14 | 15 | cd build 16 | cmake -DBUILD_SHARED_LIBS=On .. 17 | sudo make install 18 | 19 | cd $PYAOGMANEO_LOCATION 20 | 21 | git fetch origin $branch_name 22 | git checkout $branch_name 23 | git merge origin/$branch_name 24 | 25 | export USE_SYSTEM_AOGMANEO=1 26 | 27 | python -m pip install . 28 | 29 | echo "Done switching!" 30 | -------------------------------------------------------------------------------- /examples/cartpole_env_runner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # ---------------------------------------------------------------------------- 4 | # PyAOgmaNeo 5 | # Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 6 | # 7 | # This copy of PyAOgmaNeo is licensed to you under the terms described 8 | # in the PYAOGMANEO_LICENSE.md file included in this distribution. 
9 | # ---------------------------------------------------------------------------- 10 | 11 | # Simple Cart-Pole example using EnvRunner 12 | 13 | import gymnasium as gym 14 | from env_runner import EnvRunner # EnvRunner automatically creates an AOgmaNeo hierarchy and appropriate encoders for most Gymnasium environments 15 | 16 | env = gym.make('CartPole-v1') 17 | 18 | runner = EnvRunner(env, terminal_reward=-100.0, reward_scale=0.0) # Cart-Pole environment always returns a reward of 1, so use a custom reward function: -100 if the episode ends, 0 otherwise 19 | 20 | for episode in range(10000): 21 | env.reset() 22 | 23 | # Timesteps 24 | for t in range(500): 25 | done, _ = runner.act() # Step the environment and agent 26 | 27 | if done: 28 | print(f"Episode {episode + 1} finished after {t + 1} timesteps") 29 | break 30 | -------------------------------------------------------------------------------- /source/pyaogmaneo/py_helpers.cpp: -------------------------------------------------------------------------------- 1 | // ---------------------------------------------------------------------------- 2 | // PyAOgmaNeo 3 | // Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 4 | // 5 | // This copy of PyAOgmaNeo is licensed to you under the terms described 6 | // in the PYAOGMANEO_LICENSE.md file included in this distribution. 
7 | // ---------------------------------------------------------------------------- 8 | 9 | #include "py_helpers.h" 10 | 11 | #include 12 | 13 | using namespace pyaon; 14 | 15 | void File_Reader::read(void* data, long len) { 16 | ins.read(static_cast(data), len); 17 | } 18 | 19 | void File_Writer::write(const void* data, long len) { 20 | outs.write(static_cast(data), len); 21 | } 22 | 23 | void Buffer_Reader::read(void* data, long len) { 24 | auto view = buffer->unchecked(); 25 | 26 | for (long i = 0; i < len; i++) 27 | static_cast(data)[i] = view(start + i); 28 | 29 | start += len; 30 | } 31 | 32 | void Buffer_Writer::write(const void* data, long len) { 33 | assert(buffer.size() >= start + len); 34 | 35 | auto view = buffer.mutable_unchecked(); 36 | 37 | for (long i = 0; i < len; i++) 38 | view[start + i] = static_cast(data)[i]; 39 | 40 | start += len; 41 | } 42 | -------------------------------------------------------------------------------- /CMake/FindAOgmaNeo.cmake: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------- 2 | # AOgmaNeo 3 | # Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 4 | # 5 | # This copy of AOgmaNeo is licensed to you under the terms described 6 | # in the AOGMANEO_LICENSE.md file included in this distribution. 
7 | # -------------------------------------------------------------------------- 8 | 9 | # Locate AOgmaNeo library 10 | # 11 | # This module defines 12 | # AOGMANEO_LIBRARY, the name of the library to link against 13 | # AOGMANEO_FOUND, if false, do not try to link to AOgmaNeo 14 | # AOGMANEO_INCLUDE_DIR, where to find AOgmaNeo headers 15 | 16 | if(AOGMANEO_INCLUDE_DIR) 17 | # Already in cache, be silent 18 | set(AOGMANEO_FIND_QUIETLY TRUE) 19 | endif(AOGMANEO_INCLUDE_DIR) 20 | 21 | find_path(AOGMANEO_INCLUDE_DIR aogmaneo/hierarchy.h) 22 | 23 | set(AOGMANEO_NAMES aogmaneo AOgmaNeo AOGMANEO) 24 | 25 | find_library(AOGMANEO_LIBRARY NAMES ${AOGMANEO_NAMES}) 26 | 27 | # Per-recommendation 28 | set(AOGMANEO_INCLUDE_DIRS "${AOGMANEO_INCLUDE_DIR}") 29 | set(AOGMANEO_LIBRARIES "${AOGMANEO_LIBRARY}") 30 | 31 | # handle the QUIETLY and REQUIRED arguments and set AOGMANEO_FOUND to TRUE if 32 | # all listed variables are TRUE 33 | include(FindPackageHandleStandardArgs) 34 | find_package_handle_standard_args(AOgmaNeo DEFAULT_MSG AOGMANEO_LIBRARY AOGMANEO_INCLUDE_DIR) 35 | -------------------------------------------------------------------------------- /examples/lunarlander_env_runner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # ---------------------------------------------------------------------------- 4 | # PyAOgmaNeo 5 | # Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 6 | # 7 | # This copy of PyAOgmaNeo is licensed to you under the terms described 8 | # in the PYAOGMANEO_LICENSE.md file included in this distribution. 
9 | # ---------------------------------------------------------------------------- 10 | 11 | # Lunar lander environment with EnvRunner 12 | 13 | import gymnasium as gym 14 | from env_runner import EnvRunner # EnvRunner automatically creates an AOgmaNeo hierarchy and appropriate encoders for most Gymnasium environments 15 | 16 | env = gym.make('LunarLander-v3')#, render_mode='human') 17 | 18 | runner = EnvRunner(env, terminal_reward=0.0, reward_scale=1.0) 19 | 20 | average_reward = 0.0 21 | max_reward = 0.0 22 | 23 | for episode in range(5000): 24 | env.reset() 25 | 26 | total_reward = 0.0 27 | 28 | # timesteps 29 | for t in range(10000): 30 | done, reward = runner.act() # step the environment and agent 31 | 32 | total_reward += reward 33 | 34 | if done: 35 | if episode == 0: 36 | average_reward = total_reward 37 | max_reward = total_reward 38 | else: 39 | average_reward = 0.99 * average_reward + 0.01 * total_reward 40 | max_reward = max(max_reward, total_reward) 41 | 42 | print(f"Episode {episode + 1} finished after {t + 1} timesteps, receiving {total_reward} reward. Average: {average_reward} Max: {max_reward}") 43 | break 44 | -------------------------------------------------------------------------------- /examples/EXAMPLES.md: -------------------------------------------------------------------------------- 1 | 8 | 9 | # Examples 10 | 11 | ## The EnvRunner 12 | 13 | The env_runner is a simple way to automatically create AOgmaNeo systems for [Gymnasium](https://github.com/Farama-Foundation/Gymnasium) tasks. It will automatically create the hierarchy and appropriate pre-encoders, by guessing reasonable settings. This is good enough for initial experimentation, but further control requires manual usage. Image-based environments require [tinyscaler](https://github.com/Farama-Foundation/TinyScaler) in order to scale images to appropriate sizes. 14 | 15 | ## CartPole examples 16 | 17 | The CartPole examples are simple tests on the Gymnasium CartPole environment. 
The manual one sets up a hierarchy manually, while the EnvRunner one does it automatically using EnvRunner. 18 | 19 | ## wavy_line_prediction example 20 | 21 | The wavy_line_prediction example shows how to use an SPH for prediction of a simple waveform. Requires matplotlib. 22 | 23 | ## lunarlander_env_runner example 24 | 25 | The lunar lander example shows a slightly more complicated reinforcement learning environment using EnvRunner. The Gymnasium LunarLander environment features a landing module that the agent must maneuver to the landing pad. 26 | 27 | ## License and Copyright 28 | 29 | Creative Commons License<br
The work in this repository is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. See the [PYAOGMANEO_LICENSE.md](./PYAOGMANEO_LICENSE.md) and [LICENSE.md](./LICENSE.md) file for further information. 30 | 31 | Contact Ogma via licenses@ogmacorp.com to discuss commercial use and licensing options. 32 | 33 | PyAOgmaNeo Copyright (c) 2020-2025 [Ogma Intelligent Systems Corp](https://ogmacorp.com). All rights reserved. 34 | -------------------------------------------------------------------------------- /examples/cartpole_manual.py: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------- 2 | # PyAOgmaNeo 3 | # Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 4 | # 5 | # This copy of PyAOgmaNeo is licensed to you under the terms described 6 | # in the PYAOGMANEO_LICENSE.md file included in this distribution. 7 | # ---------------------------------------------------------------------------- 8 | 9 | # -*- coding: utf-8 -*- 10 | 11 | import pyaogmaneo as neo 12 | import gymnasium as gym 13 | import numpy as np 14 | 15 | # squashing function 16 | def sigmoid(x): 17 | return np.tanh(x * 0.5) * 0.5 + 0.5 18 | 19 | # create the environment 20 | env = gym.make('CartPole-v1') 21 | 22 | # get observation size 23 | num_obs = env.observation_space.shape[0] # 4 values for Cart-Pole 24 | num_actions = env.action_space.n # N actions (1 discrete value) 25 | input_resolution = 32 26 | 27 | # set the number of threads 28 | neo.set_num_threads(4) 29 | 30 | # define layer descriptors: Parameters of each layer upon creation 31 | lds = [] 32 | 33 | for i in range(2): # layers with exponential memory. 
Not much memory is needed for Cart-Pole, so we only use 2 layers 34 | ld = neo.LayerDesc() 35 | 36 | # set some layer structural parameters 37 | ld.hidden_size = (5, 5, 32) 38 | 39 | lds.append(ld) 40 | 41 | # create the hierarchy 42 | h = neo.Hierarchy([ neo.IODesc((2, 2, input_resolution), neo.none), neo.IODesc((1, 1, num_actions), neo.action) ], lds) 43 | 44 | reward = 0.0 45 | 46 | for episode in range(1000): 47 | obs, _ = env.reset() 48 | 49 | # timesteps 50 | for t in range(500): 51 | # sensory CSDR creation through "squash and bin" method 52 | csdr = (sigmoid(obs * 3.0) * (input_resolution - 1) + 0.5).astype(np.int32) 53 | 54 | h.step([ csdr, h.get_prediction_cis(1) ], True, reward) 55 | 56 | # retrieve the action, the hierarchy already automatically applied exploration 57 | action = h.get_prediction_cis(1)[0] # First and only column 58 | 59 | obs, reward, term, trunc, _ = env.step(action) 60 | 61 | # re-define reward so that it is 0 normally and then -10 if terminated 62 | if term: 63 | reward = -10.0 64 | else: 65 | reward = 0.0 66 | 67 | if term or trunc: 68 | print(f"Episode {episode + 1} finished after {t + 1} timesteps") 69 | 70 | break 71 | -------------------------------------------------------------------------------- /source/pyaogmaneo/py_helpers.h: -------------------------------------------------------------------------------- 1 | // ---------------------------------------------------------------------------- 2 | // PyAOgmaNeo 3 | // Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 4 | // 5 | // This copy of PyAOgmaNeo is licensed to you under the terms described 6 | // in the PYAOGMANEO_LICENSE.md file included in this distribution. 
7 | // ---------------------------------------------------------------------------- 8 | 9 | #pragma once 10 | 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | 19 | #include 20 | #include 21 | #include 22 | 23 | namespace py = pybind11; 24 | 25 | namespace pyaon { 26 | inline void set_num_threads( 27 | int num_threads 28 | ) { 29 | aon::set_num_threads(num_threads); 30 | } 31 | 32 | inline int get_num_threads() { 33 | return aon::get_num_threads(); 34 | } 35 | 36 | inline void set_global_state( 37 | unsigned long state 38 | ) { 39 | aon::global_state = state; 40 | } 41 | 42 | inline unsigned long get_global_state() { 43 | return aon::global_state; 44 | } 45 | 46 | class File_Reader : public aon::Stream_Reader { 47 | public: 48 | std::ifstream ins; 49 | 50 | void read( 51 | void* data, 52 | long len 53 | ) override; 54 | }; 55 | 56 | class File_Writer : public aon::Stream_Writer { 57 | public: 58 | std::ofstream outs; 59 | 60 | void write( 61 | const void* data, 62 | long len 63 | ) override; 64 | }; 65 | 66 | class Buffer_Reader : public aon::Stream_Reader { 67 | public: 68 | int start; 69 | const py::array_t* buffer; 70 | 71 | Buffer_Reader() 72 | : 73 | start(0), 74 | buffer(nullptr) 75 | {} 76 | 77 | void read( 78 | void* data, 79 | long len 80 | ) override; 81 | }; 82 | 83 | class Buffer_Writer : public aon::Stream_Writer { 84 | public: 85 | long start; 86 | py::array_t buffer; 87 | 88 | Buffer_Writer( 89 | long buffer_size 90 | ) 91 | : 92 | start(0), 93 | buffer(buffer_size) 94 | {} 95 | 96 | void write( 97 | const void* data, 98 | long len 99 | ) override; 100 | }; 101 | } 102 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to PyAOgmaNeo 2 | 3 | Thank you for your interest in contributing to PyAOgmaNeo! 
4 | 5 | ## How to Contribute 6 | 7 | Follow these steps to contribute code or documentation to this project: 8 | 9 | 1. Download and review carefully our [Contributor Agreement](https://ogma.ai/wp-content/uploads/2016/09/OgmaContributorAgreement.pdf), and send us a completed and signed copy. 10 | 11 | Please sign this agreement with a pen, as we do not accept electronic signatures.
12 | Then email a scanned copy of this agreement to contributing@ogmacorp.com, or send us an original paper copy by [snail mail](https://ogma.ai/contact-ogma-ai/). 13 | 14 | You need to do this only once, before your first contribution. 15 | 16 | 2. For large contributions (see below), make sure to first discuss your ideas with us via a GitHub issue or by sending us an email at contributing@ogmacorp.com 17 | 18 | 3. Fork this repository. 19 | 20 | 4. Follow this project's naming and coding conventions when implementing your contribution - we want to keep all our source code consistent. 21 | 22 | 5. Update the test suite if appropriate for your contribution. 23 | 24 | 6. Submit a pull request. 25 | 26 | We review carefully any contribution that we accept, and these reviews may take some time. Please keep in mind there is no guarantee your contribution will be accepted: we may reject a pull request for any reason, or no reason. 27 | 28 | ## Our Favorite Contributions 29 | 30 | We prefer small contributions as they are easier to review and integrate. If you want to contribute but don't know where to start, consider one of these areas: 31 | 32 | * New sample programs 33 | 34 | * Small bug fixes that affect only one or a few source files 35 | 36 | * Fixes for the documentation 37 | 38 | * Ports to new platforms and compilers 39 | 40 | ## Large Contributions 41 | 42 | Please don't spend weeks or months on a new feature without checking with us first! 43 | 44 | Some contributions are troublesome and therefore difficult to accept: 45 | 46 | * New features that may be useful for your project but are not obviously useful to other projects
47 | We want to keep our software lean and focused! 48 | 49 | * Any update that breaks source compatibility with the most recent release 50 | 51 | ## Contact 52 | 53 | * Website: https://ogma.ai/contact-ogma-ai/ 54 | * Twitter: https://twitter.com/ogmacorp 55 | * Gitter: https://gitter.im/ogmaneo 56 | * Email: contributing@ogmacorp.com 57 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # ---------------------------------------------------------------------------- 2 | # PyAOgmaNeo 3 | # Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 4 | # 5 | # This copy of PyAOgmaNeo is licensed to you under the terms described 6 | # in the PYAOGMANEO_LICENSE.md file included in this distribution. 7 | # ---------------------------------------------------------------------------- 8 | 9 | cmake_minimum_required(VERSION 3.24) 10 | 11 | project(pyaogmaneo) 12 | 13 | include(FetchContent) 14 | 15 | list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake/") 16 | 17 | set(CMAKE_VERBOSE_MAKEFILE OFF) 18 | 19 | set(CMAKE_CXX_STANDARD 14) 20 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 21 | 22 | if(NOT CMAKE_BUILD_TYPE) 23 | message("CMAKE_BUILD_TYPE not set, setting it to Release") 24 | set(CMAKE_BUILD_TYPE Release) 25 | endif() 26 | 27 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 28 | 29 | if(USE_SYSTEM_AOGMANEO) 30 | message(STATUS "Using system installation of AOgmaNeo") 31 | 32 | find_package(AOgmaNeo) 33 | else() 34 | message(STATUS "Not using system installation of AOgmaNeo, will download from repository") 35 | 36 | FetchContent_Declare( 37 | AOgmaNeo 38 | GIT_REPOSITORY https://github.com/ogmacorp/AOgmaNeo.git 39 | GIT_TAG 906c958201b76b0cc34165bd2d0d6b1c6ed98d81 40 | ) 41 | 42 | FetchContent_MakeAvailable(AOgmaNeo) 43 | endif() 44 | 45 | FetchContent_Declare( 46 | pybind11 47 | GIT_REPOSITORY 
https://github.com/pybind/pybind11 48 | GIT_TAG origin/master 49 | ) 50 | 51 | FetchContent_MakeAvailable(pybind11) 52 | 53 | include_directories(${AOgmaNeo_SOURCE_DIR}/source) 54 | 55 | find_package(OpenMP REQUIRED) 56 | 57 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") 58 | 59 | ############################################################################ 60 | # Add the pyaogmaneo 61 | 62 | set(PYAOGMANEO_INCLUDE_DIR "source/pyaogmaneo;") 63 | 64 | include_directories(${PYAOGMANEO_INCLUDE_DIR}) 65 | 66 | set(PYAOGMANEO_SRC 67 | "source/pyaogmaneo/py_module.cpp" 68 | "source/pyaogmaneo/py_helpers.cpp" 69 | "source/pyaogmaneo/py_hierarchy.cpp" 70 | "source/pyaogmaneo/py_image_encoder.cpp" 71 | ) 72 | 73 | pybind11_add_module(pyaogmaneo ${PYAOGMANEO_SRC}) 74 | 75 | if(USE_SYSTEM_AOGMANEO) 76 | message(STATUS ${AOGMANEO_LIBRARIES}) 77 | target_link_libraries(pyaogmaneo PUBLIC ${AOGMANEO_LIBRARIES} ${OpenMP_CXX_FLAGS}) 78 | else() 79 | target_link_libraries(pyaogmaneo PUBLIC AOgmaNeo ${OpenMP_CXX_FLAGS}) 80 | endif() 81 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 8 | 9 | # PyAOgmaNeo 10 | 11 | [![Join the chat at https://gitter.im/ogmaneo/Lobby](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/ogmaneo/Lobby) 12 | 13 | ## Introduction 14 | 15 | Welcome to the [Ogma](https://ogmacorp.com) PyAOgmaNeo library, which contains Python bindings to the [AOgmaNeo](https://github.com/ogmacorp/AOgmaNeo) library. 16 | 17 | ## Requirements 18 | 19 | - OpenMP (this will likely already be installed on your system) 20 | - pybind11 (will automatically install if not present) 21 | - cmake 22 | 23 | ## Installation 24 | 25 | You may install from pypi: 26 | 27 | > pip install pyaogmaneo 28 | 29 | Or from this directory: 30 | 31 | > pip install . 
32 | 33 | This will download the AOgmaNeo library these bindings depend on automatically, and compile it. 34 | 35 | Note that the branch of AOgmaNeo that will be used for building is based on the current branch of this repository (PyAOgmaNeo). 36 | The build system will automatically download the AOgmaNeo branch of the same name as that currently checked out in this repository (using a specific commit id). 37 | 38 | If you would like to use an existing system install of AOgmaNeo, set the following environment variable: 39 | 40 | > export USE_SYSTEM_AOGMANEO 41 | 42 | before installing. 43 | 44 | ## Importing and Setup 45 | 46 | The PyAOgmaNeo module can be imported using: 47 | 48 | ```python 49 | import pyaogmaneo 50 | ``` 51 | 52 | Refer to [the examples](./examples) for usage. 53 | 54 | ## Contributions 55 | 56 | Refer to the [CONTRIBUTING.md](./CONTRIBUTING.md) file for information on making contributions to PyAOgmaNeo. 57 | 58 | ## License and Copyright 59 | 60 | Creative Commons License
The work in this repository is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. See the [PYAOGMANEO_LICENSE.md](./PYAOGMANEO_LICENSE.md) and [LICENSE.md](./LICENSE.md) file for further information. 61 | 62 | Contact Ogma via licenses@ogmacorp.com to discuss commercial use and licensing options. 63 | 64 | PyAOgmaNeo Copyright (c) 2020-2025 [Ogma Intelligent Systems Corp](https://ogmacorp.com). All rights reserved. 65 | -------------------------------------------------------------------------------- /.github/workflows/build_publish.yml: -------------------------------------------------------------------------------- 1 | name: build_publish 2 | 3 | on: 4 | #push: 5 | # branches: [master] 6 | #pull_request: 7 | # branches: [master] 8 | release: 9 | types: [published] 10 | 11 | jobs: 12 | build_wheels: 13 | name: Build wheels on ${{ matrix.os }} 14 | runs-on: ${{ matrix.os }} 15 | strategy: 16 | matrix: 17 | os: [ubuntu-22.04, windows-latest] 18 | 19 | steps: 20 | - uses: actions/checkout@v4 21 | 22 | - name: Set up QEMU 23 | if: runner.os == 'Linux' 24 | uses: docker/setup-qemu-action@v3 25 | with: 26 | platforms: all 27 | 28 | - name: Set up Python 29 | uses: actions/setup-python@v5 30 | with: 31 | python-version: "3.x" 32 | 33 | - name: Install dependencies 34 | run: python -m pip install --upgrade setuptools wheel cmake build 35 | 36 | - name: Build wheels 37 | uses: pypa/cibuildwheel@v2.22.0 38 | env: 39 | # configure cibuildwheel to build native archs ('auto'), and some emulated ones 40 | CIBW_ARCHS_LINUX: auto64 aarch64 41 | CIBW_PROJECT_REQUIRES_PYTHON: ">=3.9" # limit to 3.9 and up since build takes forever otherwise 42 | CIBW_SKIP: pp* # disable building PyPy wheels on all platforms 43 | CIBW_BUILD_VERBOSITY: 3 44 | 45 | - uses: actions/upload-artifact@v4 46 | with: 47 | name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} 48 | path: ./wheelhouse/*.whl 49 | 50 | build_sdist: 51 | name: Build source 
distribution 52 | runs-on: ubuntu-22.04 53 | steps: 54 | - uses: actions/checkout@v4 55 | 56 | - name: Set up Python 57 | uses: actions/setup-python@v5 58 | with: 59 | python-version: "3.x" 60 | 61 | - name: Install dependencies 62 | run: python -m pip install --upgrade setuptools wheel cmake build 63 | 64 | - name: Build sdist 65 | run: pipx run build --sdist 66 | 67 | - uses: actions/upload-artifact@v4 68 | with: 69 | name: cibw-sdist 70 | path: dist/*.tar.gz 71 | 72 | publish: 73 | runs-on: ubuntu-22.04 74 | needs: [build_wheels, build_sdist] 75 | if: github.event_name == 'release' && github.event.action == 'published' 76 | steps: 77 | - name: Download 78 | uses: actions/download-artifact@v4 79 | with: 80 | # unpacks all CIBW artifacts into dist/ 81 | pattern: cibw-* 82 | path: dist 83 | merge-multiple: true 84 | 85 | - name: Publish 86 | uses: pypa/gh-action-pypi-publish@release/v1 87 | with: 88 | password: ${{ secrets.PYPI_API_TOKEN }} 89 | -------------------------------------------------------------------------------- /source/pyaogmaneo/py_image_encoder.h: -------------------------------------------------------------------------------- 1 | // ---------------------------------------------------------------------------- 2 | // PyAOgmaNeo 3 | // Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 4 | // 5 | // This copy of PyAOgmaNeo is licensed to you under the terms described 6 | // in the PYAOGMANEO_LICENSE.md file included in this distribution. 
7 | // ---------------------------------------------------------------------------- 8 | 9 | #pragma once 10 | 11 | #include "py_helpers.h" 12 | #include 13 | 14 | namespace py = pybind11; 15 | 16 | namespace pyaon { 17 | struct Image_Visible_Layer_Desc { 18 | std::tuple size; 19 | 20 | int radius; 21 | 22 | Image_Visible_Layer_Desc( 23 | const std::tuple &size, 24 | int radius 25 | ) 26 | : 27 | size(size), 28 | radius(radius) 29 | {} 30 | 31 | void check_in_range() const; 32 | }; 33 | 34 | class Image_Encoder { 35 | private: 36 | aon::Image_Encoder enc; 37 | 38 | aon::Array c_inputs_backing; 39 | aon::Array c_inputs; 40 | 41 | void init_random( 42 | const std::tuple &hidden_size, 43 | const std::vector &visible_layer_descs 44 | ); 45 | 46 | void init_from_file( 47 | const std::string &file_name 48 | ); 49 | 50 | void init_from_buffer( 51 | const py::array_t &buffer 52 | ); 53 | 54 | public: 55 | aon::Image_Encoder::Params params; 56 | 57 | Image_Encoder( 58 | const std::tuple &hidden_size, 59 | const std::vector &visible_layer_descs, 60 | const std::string &file_name, 61 | const py::array_t &buffer 62 | ); 63 | 64 | void save_to_file( 65 | const std::string &file_name 66 | ); 67 | 68 | void set_state_from_buffer( 69 | const py::array_t &buffer 70 | ); 71 | 72 | void set_weights_from_buffer( 73 | const py::array_t &buffer 74 | ); 75 | 76 | py::array_t serialize_to_buffer(); 77 | 78 | py::array_t serialize_state_to_buffer(); 79 | 80 | py::array_t serialize_weights_to_buffer(); 81 | 82 | long get_size() const { 83 | return enc.size(); 84 | } 85 | 86 | long get_state_size() const { 87 | return enc.state_size(); 88 | } 89 | 90 | long get_weights_size() const { 91 | return enc.weights_size(); 92 | } 93 | 94 | void step( 95 | const std::vector> &inputs, 96 | bool learn_enabled, 97 | bool learn_recon 98 | ); 99 | 100 | void reconstruct( 101 | const py::array_t &recon_cis 102 | ); 103 | 104 | int get_num_visible_layers() const { 105 | return enc.get_num_visible_layers(); 
106 | } 107 | 108 | py::array_t get_reconstruction( 109 | int i 110 | ) const; 111 | 112 | py::array_t get_hidden_cis() const; 113 | 114 | std::tuple get_hidden_size() const { 115 | aon::Int3 size = enc.get_hidden_size(); 116 | 117 | return { size.x, size.y, size.z }; 118 | } 119 | 120 | std::tuple get_visible_size( 121 | int i 122 | ) const { 123 | aon::Int3 size = enc.get_visible_layer_desc(i).size; 124 | 125 | return { size.x, size.y, size.z }; 126 | } 127 | 128 | std::tuple, std::tuple> get_receptive_field( 129 | int vli, 130 | const std::tuple &pos 131 | ); 132 | }; 133 | } 134 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # ---------------------------------------------------------------------------- 4 | # PyAOgmaNeo 5 | # Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 6 | # 7 | # This copy of PyAOgmaNeo is licensed to you under the terms described 8 | # in the PYAOGMANEO_LICENSE.md file included in this distribution. 
9 | # ---------------------------------------------------------------------------- 10 | 11 | import os 12 | import re 13 | import sys 14 | import platform 15 | import subprocess 16 | 17 | from setuptools import setup, Extension 18 | from setuptools.command.build_ext import build_ext 19 | from distutils.version import LooseVersion 20 | 21 | # For developers, set to use system install of AOgmaNeo 22 | use_system_aogmaneo = True if "USE_SYSTEM_AOGMANEO" in os.environ else False 23 | 24 | class CMakeExtension(Extension): 25 | def __init__(self, name, sourcedir=''): 26 | Extension.__init__(self, name, sources=[ 27 | "source/pyaogmaneo/py_helpers.h", 28 | "source/pyaogmaneo/py_helpers.cpp", 29 | "source/pyaogmaneo/py_hierarchy.h", 30 | "source/pyaogmaneo/py_hierarchy.cpp", 31 | "source/pyaogmaneo/py_image_encoder.h", 32 | "source/pyaogmaneo/py_image_encoder.cpp", 33 | "source/pyaogmaneo/py_module.cpp", 34 | ]) 35 | 36 | self.sourcedir = os.path.abspath(sourcedir) 37 | 38 | class CMakeBuild(build_ext): 39 | def run(self): 40 | try: 41 | out = subprocess.check_output(['cmake', '--version']) 42 | except OSError: 43 | raise RuntimeError("CMake must be installed to build the following extensions: " + 44 | ", ".join(e.name for e in self.extensions)) 45 | 46 | if platform.system() == "Windows": 47 | cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1)) 48 | 49 | if cmake_version < '3.1.0': 50 | raise RuntimeError("CMake >= 3.1.0 is required on Windows") 51 | 52 | for ext in self.extensions: 53 | self.build_extension(ext) 54 | 55 | def build_extension(self, ext): 56 | extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) 57 | 58 | if not extdir.endswith(os.path.sep): 59 | extdir += os.path.sep 60 | 61 | cmake_args = [ '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir, 62 | '-DPYBIND11_FINDPYTHON=ON', 63 | '-DUSE_SYSTEM_AOGMANEO=' + ('On' if use_system_aogmaneo else 'Off') ] 64 | 65 | cfg = 'Debug' if self.debug else 'Release' 66 | 
build_args = [ '--config', cfg ] 67 | 68 | if platform.system() == "Windows": 69 | cmake_args += [ '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir) ] 70 | 71 | if sys.maxsize > 2**32: 72 | cmake_args += ['-A', 'x64'] 73 | 74 | build_args += ['--', '/m'] 75 | else: 76 | cmake_args += [ '-DCMAKE_BUILD_TYPE=' + cfg ] 77 | build_args += [ '--', '-j2' ] 78 | 79 | env = os.environ.copy() 80 | 81 | env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''), self.distribution.get_version()) 82 | 83 | if not os.path.exists(self.build_temp): 84 | os.makedirs(self.build_temp) 85 | 86 | subprocess.check_call([ 'cmake', ext.sourcedir ] + cmake_args, cwd=self.build_temp, env=env) 87 | subprocess.check_call([ 'cmake', '--build', '.' ] + build_args, cwd=self.build_temp) 88 | 89 | setup( 90 | name="pyaogmaneo", 91 | version="2.14.4", 92 | description="Python bindings for the AOgmaNeo library", 93 | long_description='https://github.com/ogmacorp/PyAOgmaNeo', 94 | author='Ogma Intelligent Systems Corp', 95 | author_email='info@ogmacorp.com', 96 | url='https://ogmacorp.com/', 97 | license='Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License', 98 | classifiers=[ 99 | "Development Status :: 5 - Production/Stable", 100 | "Environment :: Console", 101 | "Intended Audience :: Science/Research", 102 | "License :: Other/Proprietary License", 103 | "Operating System :: POSIX :: Linux", 104 | "Programming Language :: Python", 105 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 106 | "Topic :: Software Development :: Libraries :: Python Modules" 107 | ], 108 | ext_modules=[ CMakeExtension("pyaogmaneo") ], 109 | cmdclass={ 110 | 'build_ext': CMakeBuild, 111 | }, 112 | zip_safe=False, 113 | include_package_data=True, 114 | ) 115 | -------------------------------------------------------------------------------- /examples/wavy_line_prediction.py: 
# scalar encoding used in this example: take a byte and convert each group of
# 4 consecutive bits into a one-hot column with 16 cells

def unorm8_to_csdr(x: float):
    """Encode a scalar in [0, 1] as two CSDR columns of 16 cells each.

    The value is quantized to one byte; the low nibble becomes column 0 and
    the high nibble becomes column 1.

    Raises:
        ValueError: if x lies outside [0, 1].
    """
    # explicit validation instead of assert, which is stripped under python -O
    if not 0.0 <= x <= 1.0:
        raise ValueError("x must be in [0.0, 1.0], got " + str(x))

    i = int(x * 255.0 + 0.5) & 0xff

    return [i & 0x0f, (i & 0xf0) >> 4]

# reverse transform of unorm8_to_csdr
def csdr_to_unorm8(csdr):
    """Decode two 16-cell columns (low nibble, high nibble) back into [0, 1]."""
    return (csdr[0] | (csdr[1] << 4)) / 255.0

# some other ways of encoding individual scalars:

# multi-scale embedding: each successive column encodes the residual left
# after the previous columns, at a scale shrunk by scale_factor per column
def f_to_csdr(x, num_columns, cells_per_column, scale_factor=0.25):
    """Encode scalar x into num_columns CSDR columns of cells_per_column cells.

    Column i quantizes the residual of x at scale scale_factor**i; the
    quantized reconstruction is subtracted before moving to the next column.
    """
    csdr = []

    scale = 1.0

    for i in range(num_columns):
        # wrap the scaled residual into a unit interval matching x's sign
        s = (x / scale) % (1.0 if x > 0.0 else -1.0)

        csdr.append(int((s * 0.5 + 0.5) * (cells_per_column - 1) + 0.5))

        # subtract this column's reconstruction so the next (finer) column
        # only has to encode the remaining error
        rec = scale * (float(csdr[i]) / float(cells_per_column - 1) * 2.0 - 1.0)
        x -= rec

        scale *= scale_factor

    return csdr

def csdr_to_f(csdr, cells_per_column, scale_factor=0.25):
    """Inverse of f_to_csdr: sum each column's contribution at its scale."""
    x = 0.0

    scale = 1.0

    for i in range(len(csdr)):
        x += scale * (float(csdr[i]) / float(cells_per_column - 1) * 2.0 - 1.0)

        scale *= scale_factor

    return x
67 | # convert an ieee float to 8 columns with 16 cells each (similar to first approach but on floating-point data) 68 | def ieee_to_csdr(x : float): 69 | b = struct.pack("> 4) 76 | 77 | return csdr 78 | 79 | def csdr_to_ieee(csdr): 80 | bs = [] 81 | 82 | for i in range(4): 83 | bs.append(csdr[i * 2 + 0] | (csdr[i * 2 + 1] << 4)) 84 | 85 | return struct.unpack(" 0.5: 127 | msg = ">>>>>>>>>>>>>>>>>" 128 | 129 | msg += str(h.get_hidden_cis(0)) 130 | 131 | print(msg) 132 | 133 | # print progress 134 | if t % 100 == 0: 135 | print(t) 136 | 137 | # recall the sequence and plot the result 138 | ts = [] # time step 139 | vs = [] # predicted value 140 | 141 | trgs = [] # true value 142 | 143 | for t2 in range(1000): 144 | t = t2 + iters # get "continued" timestep (relative to previous training iterations) 145 | 146 | value_to_encode = wave(t) 147 | 148 | csdr = unorm8_to_csdr(float(value_to_encode)) 149 | 150 | # run off of own predictions with learning disabled 151 | h.step([ h.get_prediction_cis(0) ], False) # learning disabled for recall 152 | 153 | # decode value from latest prediction 154 | value = csdr_to_unorm8(h.get_prediction_cis(0)) 155 | 156 | # append to plot data 157 | ts.append(t2) 158 | vs.append(value + 1.1) # offset the plot by 1.1 so we can see it better 159 | 160 | trgs.append(value_to_encode) 161 | 162 | # show predicted value 163 | #print(value) 164 | 165 | # show plot 166 | plt.plot(ts, vs, ts, trgs) 167 | 168 | plt.show() 169 | 170 | 171 | -------------------------------------------------------------------------------- /source/pyaogmaneo/py_hierarchy.h: -------------------------------------------------------------------------------- 1 | // ---------------------------------------------------------------------------- 2 | // PyAOgmaNeo 3 | // Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 
4 | // 5 | // This copy of PyAOgmaNeo is licensed to you under the terms described 6 | // in the PYAOGMANEO_LICENSE.md file included in this distribution. 7 | // ---------------------------------------------------------------------------- 8 | 9 | #pragma once 10 | 11 | #include "py_helpers.h" 12 | #include 13 | 14 | namespace py = pybind11; 15 | 16 | namespace pyaon { 17 | enum IO_Type { 18 | none = 0, 19 | prediction = 1, 20 | action = 2 21 | }; 22 | 23 | struct IO_Desc { 24 | std::tuple size; 25 | IO_Type type; 26 | 27 | int num_dendrites_per_cell; 28 | 29 | int up_radius; 30 | int down_radius; 31 | 32 | int value_size; 33 | int value_num_dendrites_per_cell; 34 | int history_capacity; 35 | 36 | IO_Desc( 37 | const std::tuple &size, 38 | IO_Type type, 39 | int num_dendrites_per_cell, 40 | int up_radius, 41 | int down_radius, 42 | int value_size, 43 | int value_num_dendrites_per_cell, 44 | int history_capacity 45 | ) 46 | : 47 | size(size), 48 | type(type), 49 | num_dendrites_per_cell(num_dendrites_per_cell), 50 | up_radius(up_radius), 51 | down_radius(down_radius), 52 | value_size(value_size), 53 | value_num_dendrites_per_cell(value_num_dendrites_per_cell), 54 | history_capacity(history_capacity) 55 | {} 56 | 57 | void check_in_range() const; 58 | }; 59 | 60 | struct Layer_Desc { 61 | std::tuple hidden_size; 62 | 63 | int num_dendrites_per_cell; 64 | 65 | int up_radius; 66 | int recurrent_radius; 67 | int down_radius; 68 | 69 | Layer_Desc( 70 | const std::tuple &hidden_size, 71 | int num_dendrites_per_cell, 72 | int up_radius, 73 | int recurrent_radius, 74 | int down_radius 75 | ) 76 | : 77 | hidden_size(hidden_size), 78 | num_dendrites_per_cell(num_dendrites_per_cell), 79 | up_radius(up_radius), 80 | recurrent_radius(recurrent_radius), 81 | down_radius(down_radius) 82 | {} 83 | 84 | void check_in_range() const; 85 | }; 86 | 87 | struct Params { 88 | std::vector layers; 89 | std::vector ios; 90 | 91 | bool anticipation; 92 | }; 93 | 94 | class Hierarchy { 95 | 
private: 96 | aon::Hierarchy h; 97 | 98 | aon::Array c_input_cis_backing; 99 | aon::Array c_input_cis; 100 | 101 | void init_random( 102 | const std::vector &io_descs, 103 | const std::vector &layer_descs 104 | ); 105 | 106 | void init_from_file( 107 | const std::string &file_name 108 | ); 109 | 110 | void init_from_buffer( 111 | const py::array_t &buffer 112 | ); 113 | 114 | void copy_params_to_h(); 115 | 116 | public: 117 | Params params; 118 | 119 | Hierarchy( 120 | const std::vector &io_descs, 121 | const std::vector &layer_descs, 122 | const std::string &file_name, 123 | const py::array_t &buffer 124 | ); 125 | 126 | void save_to_file( 127 | const std::string &file_name 128 | ); 129 | 130 | void set_state_from_buffer( 131 | const py::array_t &buffer 132 | ); 133 | 134 | void set_weights_from_buffer( 135 | const py::array_t &buffer 136 | ); 137 | 138 | py::array_t serialize_to_buffer(); 139 | 140 | py::array_t serialize_state_to_buffer(); 141 | 142 | py::array_t serialize_weights_to_buffer(); 143 | 144 | long get_size() const { 145 | return h.size(); 146 | } 147 | 148 | long get_state_size() const { 149 | return h.state_size(); 150 | } 151 | 152 | long get_weights_size() const { 153 | return h.weights_size(); 154 | } 155 | 156 | void step( 157 | const std::vector> &input_cis, 158 | bool learn_enabled, 159 | float reward, 160 | float mimic 161 | ); 162 | 163 | void clear_state() { 164 | h.clear_state(); 165 | } 166 | 167 | int get_num_layers() const { 168 | return h.get_num_layers(); 169 | } 170 | 171 | py::array_t get_prediction_cis( 172 | int i 173 | ) const; 174 | 175 | py::array_t get_layer_prediction_cis( 176 | int l 177 | ) const; 178 | 179 | py::array_t get_prediction_acts( 180 | int i 181 | ) const; 182 | 183 | py::array_t sample_prediction( 184 | int i, 185 | float temperature 186 | ) const; 187 | 188 | py::array_t get_hidden_cis( 189 | int l 190 | ); 191 | 192 | std::tuple get_hidden_size( 193 | int l 194 | ) { 195 | if (l < 0 || l >= 
h.get_num_layers()) 196 | throw std::runtime_error("error: " + std::to_string(l) + " is not a valid layer index!"); 197 | 198 | aon::Int3 size = h.get_encoder(l).get_hidden_size(); 199 | 200 | return { size.x, size.y, size.z }; 201 | } 202 | 203 | int get_num_encoder_visible_layers( 204 | int l 205 | ) { 206 | if (l < 0 || l >= h.get_num_layers()) 207 | throw std::runtime_error("error: " + std::to_string(l) + " is not a valid layer index!"); 208 | 209 | return h.get_num_encoder_visible_layers(l); 210 | } 211 | 212 | int get_num_io() const { 213 | return h.get_num_io(); 214 | } 215 | 216 | std::tuple get_io_size( 217 | int i 218 | ) const { 219 | if (i < 0 || i >= h.get_num_io()) 220 | throw std::runtime_error("error: " + std::to_string(i) + " is not a valid input index!"); 221 | 222 | aon::Int3 size = h.get_io_size(i); 223 | 224 | return { size.x, size.y, size.z }; 225 | } 226 | 227 | IO_Type get_io_type( 228 | int i 229 | ) const { 230 | if (i < 0 || i >= h.get_num_io()) 231 | throw std::runtime_error("error: " + std::to_string(i) + " is not a valid input index!"); 232 | 233 | return static_cast(h.get_io_type(i)); 234 | } 235 | 236 | // retrieve additional parameters on the sph's structure 237 | int get_up_radius( 238 | int l 239 | ) const { 240 | if (l < 0 || l >= h.get_num_layers()) 241 | throw std::runtime_error("error: " + std::to_string(l) + " is not a valid layer index!"); 242 | 243 | return h.get_encoder(l).get_visible_layer_desc(0).radius; 244 | } 245 | 246 | int get_down_radius( 247 | int l, 248 | int i 249 | ) const { 250 | if (l < 0 || l >= h.get_num_layers()) 251 | throw std::runtime_error("error: " + std::to_string(l) + " is not a valid layer index!"); 252 | 253 | if (l == 0 && i < 0 || i >= h.get_num_io()) 254 | throw std::runtime_error("error: " + std::to_string(i) + " is not a valid input index!"); 255 | 256 | if (h.get_io_type(i) == aon::action) 257 | return h.get_actor(i).get_visible_layer_desc(0).radius; 258 | 259 | return h.get_decoder(l, 
i).get_visible_layer_desc(0).radius; 260 | } 261 | 262 | std::tuple, std::tuple> get_encoder_receptive_field( 263 | int l, 264 | int vli, 265 | const std::tuple &pos 266 | ); 267 | }; 268 | } 269 | -------------------------------------------------------------------------------- /source/pyaogmaneo/py_image_encoder.cpp: -------------------------------------------------------------------------------- 1 | // ---------------------------------------------------------------------------- 2 | // PyAOgmaNeo 3 | // Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 4 | // 5 | // This copy of PyAOgmaNeo is licensed to you under the terms described 6 | // in the PYAOGMANEO_LICENSE.md file included in this distribution. 7 | // ---------------------------------------------------------------------------- 8 | 9 | #include "py_image_encoder.h" 10 | 11 | using namespace pyaon; 12 | 13 | void Image_Visible_Layer_Desc::check_in_range() const { 14 | if (std::get<0>(size) < 1) 15 | throw std::runtime_error("error: size[0] < 1 is not allowed!"); 16 | 17 | if (std::get<1>(size) < 1) 18 | throw std::runtime_error("error: size[1] < 1 is not allowed!"); 19 | 20 | if (std::get<2>(size) < 1) 21 | throw std::runtime_error("error: size[2] < 1 is not allowed!"); 22 | 23 | if (radius < 0) 24 | throw std::runtime_error("error: radius < 0 is not allowed!"); 25 | } 26 | 27 | Image_Encoder::Image_Encoder( 28 | const std::tuple &hidden_size, 29 | const std::vector &visible_layer_descs, 30 | const std::string &file_name, 31 | const py::array_t &buffer 32 | ) { 33 | if (buffer.unchecked().size() > 0) 34 | init_from_buffer(buffer); 35 | else if (!file_name.empty()) 36 | init_from_file(file_name); 37 | else { 38 | if (visible_layer_descs.empty()) 39 | throw std::runtime_error("error: Image_Encoder constructor requires some non-empty arguments!"); 40 | 41 | init_random(hidden_size, visible_layer_descs); 42 | } 43 | 44 | // copy params 45 | params = enc.params; 46 | 47 | 
c_inputs_backing.resize(enc.get_num_visible_layers()); 48 | c_inputs.resize(enc.get_num_visible_layers()); 49 | 50 | for (int i = 0; i < c_inputs_backing.size(); i++) 51 | c_inputs_backing[i].resize(enc.get_visible_layer_desc(i).size.x * enc.get_visible_layer_desc(i).size.y * enc.get_visible_layer_desc(i).size.z); 52 | } 53 | 54 | void Image_Encoder::init_random( 55 | const std::tuple &hidden_size, 56 | const std::vector &visible_layer_descs 57 | ) { 58 | bool all_in_range = true; 59 | 60 | aon::Array c_visible_layer_descs(visible_layer_descs.size()); 61 | 62 | for (int v = 0; v < visible_layer_descs.size(); v++) { 63 | visible_layer_descs[v].check_in_range(); 64 | 65 | c_visible_layer_descs[v].size = aon::Int3(std::get<0>(visible_layer_descs[v].size), std::get<1>(visible_layer_descs[v].size), std::get<2>(visible_layer_descs[v].size)); 66 | c_visible_layer_descs[v].radius = visible_layer_descs[v].radius; 67 | } 68 | 69 | if (std::get<0>(hidden_size) < 1) 70 | throw std::runtime_error("error: hidden_size[0] < 1 is not allowed!"); 71 | 72 | if (std::get<1>(hidden_size) < 1) 73 | throw std::runtime_error("error: hidden_size[1] < 1 is not allowed!"); 74 | 75 | if (std::get<2>(hidden_size) < 1) 76 | throw std::runtime_error("error: hidden_size[2] < 1 is not allowed!"); 77 | 78 | if (!all_in_range) 79 | throw std::runtime_error(" - Image_Encoder: some parameters out of range!"); 80 | 81 | enc.init_random(aon::Int3(std::get<0>(hidden_size), std::get<1>(hidden_size), std::get<2>(hidden_size)), c_visible_layer_descs); 82 | } 83 | 84 | void Image_Encoder::init_from_file( 85 | const std::string &file_name 86 | ) { 87 | File_Reader reader; 88 | reader.ins.open(file_name, std::ios::binary); 89 | 90 | enc.read(reader); 91 | } 92 | 93 | void Image_Encoder::init_from_buffer( 94 | const py::array_t &buffer 95 | ) { 96 | Buffer_Reader reader; 97 | reader.buffer = &buffer; 98 | 99 | enc.read(reader); 100 | } 101 | 102 | void Image_Encoder::save_to_file( 103 | const std::string 
&file_name 104 | ) { 105 | File_Writer writer; 106 | writer.outs.open(file_name, std::ios::binary); 107 | 108 | enc.write(writer); 109 | } 110 | 111 | void Image_Encoder::set_state_from_buffer( 112 | const py::array_t &buffer 113 | ) { 114 | Buffer_Reader reader; 115 | reader.buffer = &buffer; 116 | 117 | enc.read_state(reader); 118 | } 119 | 120 | void Image_Encoder::set_weights_from_buffer( 121 | const py::array_t &buffer 122 | ) { 123 | Buffer_Reader reader; 124 | reader.buffer = &buffer; 125 | 126 | enc.read_weights(reader); 127 | } 128 | 129 | py::array_t Image_Encoder::serialize_to_buffer() { 130 | Buffer_Writer writer(enc.size() + sizeof(int)); 131 | 132 | enc.write(writer); 133 | 134 | return writer.buffer; 135 | } 136 | 137 | py::array_t Image_Encoder::serialize_state_to_buffer() { 138 | Buffer_Writer writer(enc.state_size()); 139 | 140 | enc.write_state(writer); 141 | 142 | return writer.buffer; 143 | } 144 | 145 | py::array_t Image_Encoder::serialize_weights_to_buffer() { 146 | Buffer_Writer writer(enc.weights_size()); 147 | 148 | enc.write_weights(writer); 149 | 150 | return writer.buffer; 151 | } 152 | 153 | void Image_Encoder::step( 154 | const std::vector> &inputs, 155 | bool learn_enabled, 156 | bool learn_recon 157 | ) { 158 | if (inputs.size() != enc.get_num_visible_layers()) 159 | throw std::runtime_error("incorrect number of inputs given to Image_Encoder! expected " + std::to_string(enc.get_num_visible_layers()) + ", got " + std::to_string(inputs.size())); 160 | 161 | // copy params 162 | enc.params = params; 163 | 164 | for (int i = 0; i < inputs.size(); i++) { 165 | auto view = inputs[i].unchecked(); 166 | 167 | if (view.size() != c_inputs_backing[i].size()) 168 | throw std::runtime_error("incorrect image size given to Image_Encoder! 
expected " + std::to_string(c_inputs_backing[i].size()) + " inputs at input index " + std::to_string(i) + ", got " + std::to_string(view.size())); 169 | 170 | for (int j = 0; j < view.size(); j++) 171 | c_inputs_backing[i][j] = view(j); 172 | 173 | c_inputs[i] = c_inputs_backing[i]; 174 | } 175 | 176 | enc.step(c_inputs, learn_enabled, learn_recon); 177 | } 178 | 179 | void Image_Encoder::reconstruct( 180 | const py::array_t &recon_cis 181 | ) { 182 | if (recon_cis.size() != enc.get_hidden_cis().size()) 183 | throw std::runtime_error("error: recon_cis must match the output_size of the Image_Encoder!"); 184 | 185 | auto view = recon_cis.unchecked(); 186 | 187 | aon::Int_Buffer c_recon_cis_backing(view.size()); 188 | 189 | for (int j = 0; j < view.size(); j++) { 190 | if (view(j) < 0 || view(j) >= enc.get_hidden_size().z) 191 | throw std::runtime_error("recon csdr (recon_cis) has an out-of-bounds column index (" + std::to_string(view(j)) + ") at column index " + std::to_string(j) + ". it must be in the range [0, " + std::to_string(enc.get_hidden_size().z - 1) + "]"); 192 | 193 | c_recon_cis_backing[j] = view(j); 194 | } 195 | 196 | enc.reconstruct(c_recon_cis_backing); 197 | } 198 | 199 | py::array_t Image_Encoder::get_reconstruction( 200 | int i 201 | ) const { 202 | if (i < 0 || i >= enc.get_num_visible_layers()) 203 | throw std::runtime_error("cannot get reconstruction at index " + std::to_string(i) + " - out of bounds [0, " + std::to_string(enc.get_num_visible_layers()) + "]"); 204 | 205 | py::array_t reconstruction(enc.get_reconstruction(i).size()); 206 | 207 | auto view = reconstruction.mutable_unchecked(); 208 | 209 | for (int j = 0; j < view.size(); j++) 210 | view(j) = enc.get_reconstruction(i)[j]; 211 | 212 | return reconstruction; 213 | } 214 | 215 | py::array_t Image_Encoder::get_hidden_cis() const { 216 | py::array_t hidden_cis(enc.get_hidden_cis().size()); 217 | 218 | auto view = hidden_cis.mutable_unchecked(); 219 | 220 | for (int j = 0; j < 
view.size(); j++) 221 | view(j) = enc.get_hidden_cis()[j]; 222 | 223 | return hidden_cis; 224 | } 225 | 226 | std::tuple, std::tuple> Image_Encoder::get_receptive_field( 227 | int vli, 228 | const std::tuple &pos 229 | ) { 230 | int num_visible_layers = enc.get_num_visible_layers(); 231 | 232 | if (vli < 0 || vli >= num_visible_layers) 233 | throw std::runtime_error("visible layer index " + std::to_string(vli) + " out of range [0, " + std::to_string(num_visible_layers - 1) + "]!"); 234 | 235 | const aon::Int3 &hidden_size = enc.get_hidden_size(); 236 | 237 | if (std::get<0>(pos) < 0 || std::get<0>(pos) >= hidden_size.x || 238 | std::get<1>(pos) < 0 || std::get<1>(pos) >= hidden_size.y || 239 | std::get<2>(pos) < 0 || std::get<2>(pos) >= hidden_size.z) { 240 | throw std::runtime_error("position (" + std::to_string(std::get<0>(pos)) + ", " + std::to_string(std::get<1>(pos)) + ", " + std::to_string(std::get<2>(pos)) + ") " + 241 | + " not in size (" + std::to_string(hidden_size.x) + ", " + std::to_string(hidden_size.y) + ", " + std::to_string(hidden_size.z) + ")!"); 242 | } 243 | 244 | const aon::Image_Encoder::Visible_Layer &vl = enc.get_visible_layer(vli); 245 | const aon::Image_Encoder::Visible_Layer_Desc &vld = enc.get_visible_layer_desc(vli); 246 | 247 | int diam = vld.radius * 2 + 1; 248 | int area = diam * diam; 249 | 250 | aon::Int2 column_pos(std::get<0>(pos), std::get<1>(pos)); 251 | 252 | int hidden_column_index = aon::address2(column_pos, aon::Int2(hidden_size.x, hidden_size.y)); 253 | int hidden_cells_start = hidden_size.z * hidden_column_index; 254 | 255 | // projection 256 | aon::Float2 h_to_v = aon::Float2(static_cast(vld.size.x) / static_cast(hidden_size.x), 257 | static_cast(vld.size.y) / static_cast(hidden_size.y)); 258 | 259 | aon::Int2 visible_center = project(column_pos, h_to_v); 260 | 261 | // lower corner 262 | aon::Int2 field_lower_bound(visible_center.x - vld.radius, visible_center.y - vld.radius); 263 | 264 | // bounds of receptive field, 
clamped to input size 265 | aon::Int2 iter_lower_bound(aon::max(0, field_lower_bound.x), aon::max(0, field_lower_bound.y)); 266 | aon::Int2 iter_upper_bound(aon::min(vld.size.x - 1, visible_center.x + vld.radius), aon::min(vld.size.y - 1, visible_center.y + vld.radius)); 267 | 268 | int hidden_stride = vld.size.z * diam * diam; 269 | 270 | int field_count = area * vld.size.z; 271 | 272 | py::array_t field(field_count); 273 | 274 | auto view = field.mutable_unchecked(); 275 | 276 | // first clear 277 | for (int i = 0; i < field_count; i++) 278 | view(i) = 0; 279 | 280 | int hidden_cell_index = std::get<2>(pos) + hidden_cells_start; 281 | 282 | for (int ix = iter_lower_bound.x; ix <= iter_upper_bound.x; ix++) 283 | for (int iy = iter_lower_bound.y; iy <= iter_upper_bound.y; iy++) { 284 | int visible_column_index = address2(aon::Int2(ix, iy), aon::Int2(vld.size.x, vld.size.y)); 285 | 286 | aon::Int2 offset(ix - field_lower_bound.x, iy - field_lower_bound.y); 287 | 288 | int wi_start_partial = vld.size.z * (offset.y + diam * (offset.x + diam * hidden_column_index)); 289 | 290 | for (int vc = 0; vc < vld.size.z; vc++) { 291 | int wi = std::get<2>(pos) + hidden_size.z * (vc + wi_start_partial); 292 | 293 | view(vc + vld.size.z * (offset.y + diam * offset.x)) = vl.weights[wi]; 294 | } 295 | } 296 | 297 | std::tuple field_size(diam, diam, vld.size.z); 298 | 299 | return std::make_tuple(field, field_size); 300 | } 301 | -------------------------------------------------------------------------------- /source/pyaogmaneo/py_module.cpp: -------------------------------------------------------------------------------- 1 | // ---------------------------------------------------------------------------- 2 | // PyAOgmaNeo 3 | // Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 4 | // 5 | // This copy of PyAOgmaNeo is licensed to you under the terms described 6 | // in the PYAOGMANEO_LICENSE.md file included in this distribution. 
7 | // ---------------------------------------------------------------------------- 8 | 9 | #include "py_hierarchy.h" 10 | #include "py_image_encoder.h" 11 | 12 | namespace py = pybind11; 13 | 14 | PYBIND11_MODULE(pyaogmaneo, m) { 15 | m.def("set_num_threads", &pyaon::set_num_threads); 16 | m.def("get_num_threads", &pyaon::get_num_threads); 17 | 18 | m.def("set_global_state", &pyaon::set_global_state); 19 | m.def("get_global_state", &pyaon::get_global_state); 20 | 21 | py::enum_(m, "IOType") 22 | .value("none", pyaon::none) 23 | .value("prediction", pyaon::prediction) 24 | .value("action", pyaon::action) 25 | .export_values(); 26 | 27 | py::class_(m, "IODesc") 28 | .def(py::init< 29 | std::tuple, 30 | pyaon::IO_Type, 31 | int, 32 | int, 33 | int, 34 | int, 35 | int, 36 | int 37 | >(), 38 | py::arg("size") = std::tuple({ 5, 5, 16 }), 39 | py::arg("io_type") = pyaon::prediction, 40 | py::arg("num_dendrites_per_cell") = 4, 41 | py::arg("up_radius") = 2, 42 | py::arg("down_radius") = 2, 43 | py::arg("value_size") = 128, 44 | py::arg("value_num_dendrites_per_cell") = 4, 45 | py::arg("history_capacity") = 512 46 | ) 47 | .def_readwrite("size", &pyaon::IO_Desc::size) 48 | .def_readwrite("io_type", &pyaon::IO_Desc::type) 49 | .def_readwrite("num_dendrites_per_cell", &pyaon::IO_Desc::num_dendrites_per_cell) 50 | .def_readwrite("up_radius", &pyaon::IO_Desc::up_radius) 51 | .def_readwrite("down_radius", &pyaon::IO_Desc::down_radius) 52 | .def_readwrite("value_size", &pyaon::IO_Desc::value_size) 53 | .def_readwrite("value_num_dendrites_per_cell", &pyaon::IO_Desc::value_num_dendrites_per_cell) 54 | .def_readwrite("history_capacity", &pyaon::IO_Desc::history_capacity) 55 | .def("__copy__", 56 | [](const pyaon::IO_Desc &other) { 57 | return other; 58 | } 59 | ) 60 | .def("__deepcopy__", 61 | [](const pyaon::IO_Desc &other) { 62 | return other; 63 | } 64 | ); 65 | 66 | py::class_(m, "LayerDesc") 67 | .def(py::init< 68 | std::tuple, 69 | int, 70 | int, 71 | int, 72 | int 73 | >(), 
74 | py::arg("hidden_size") = std::tuple({ 5, 5, 16 }), 75 | py::arg("num_dendrites_per_cell") = 4, 76 | py::arg("up_radius") = 2, 77 | py::arg("recurrent_radius") = 0, 78 | py::arg("down_radius") = 2 79 | ) 80 | .def_readwrite("hidden_size", &pyaon::Layer_Desc::hidden_size) 81 | .def_readwrite("num_dendrites_per_cell", &pyaon::Layer_Desc::num_dendrites_per_cell) 82 | .def_readwrite("up_radius", &pyaon::Layer_Desc::up_radius) 83 | .def_readwrite("recurrent_radius", &pyaon::Layer_Desc::recurrent_radius) 84 | .def_readwrite("down_radius", &pyaon::Layer_Desc::down_radius) 85 | .def("__copy__", 86 | [](const pyaon::Layer_Desc &other) { 87 | return other; 88 | } 89 | ) 90 | .def("__deepcopy__", 91 | [](const pyaon::Layer_Desc &other) { 92 | return other; 93 | } 94 | ); 95 | 96 | // bind params 97 | py::class_(m, "EncoderParams") 98 | .def(py::init<>()) 99 | .def_readwrite("choice", &aon::Encoder::Params::choice) 100 | .def_readwrite("vigilance", &aon::Encoder::Params::vigilance) 101 | .def_readwrite("lr", &aon::Encoder::Params::lr) 102 | .def_readwrite("active_ratio", &aon::Encoder::Params::active_ratio) 103 | .def_readwrite("l_radius", &aon::Encoder::Params::l_radius); 104 | 105 | py::class_(m, "DecoderParams") 106 | .def(py::init<>()) 107 | .def_readwrite("scale", &aon::Decoder::Params::scale) 108 | .def_readwrite("lr", &aon::Decoder::Params::lr); 109 | 110 | py::class_(m, "ActorParams") 111 | .def(py::init<>()) 112 | .def_readwrite("vlr", &aon::Actor::Params::vlr) 113 | .def_readwrite("plr", &aon::Actor::Params::plr) 114 | .def_readwrite("smoothing", &aon::Actor::Params::smoothing) 115 | .def_readwrite("discount", &aon::Actor::Params::discount) 116 | .def_readwrite("td_scale_decay", &aon::Actor::Params::td_scale_decay) 117 | .def_readwrite("value_range", &aon::Actor::Params::value_range) 118 | .def_readwrite("min_steps", &aon::Actor::Params::min_steps) 119 | .def_readwrite("history_iters", &aon::Actor::Params::history_iters); 120 | 121 | py::class_(m, "LayerParams") 
122 | .def(py::init<>()) 123 | .def_readwrite("encoder", &aon::Hierarchy::Layer_Params::encoder) 124 | .def_readwrite("decoder", &aon::Hierarchy::Layer_Params::decoder) 125 | .def_readwrite("recurrent_importance", &aon::Hierarchy::Layer_Params::recurrent_importance); 126 | 127 | py::class_(m, "IOParams") 128 | .def(py::init<>()) 129 | .def_readwrite("decoder", &aon::Hierarchy::IO_Params::decoder) 130 | .def_readwrite("actor", &aon::Hierarchy::IO_Params::actor) 131 | .def_readwrite("importance", &aon::Hierarchy::IO_Params::importance); 132 | 133 | py::class_(m, "Params") 134 | .def(py::init<>()) 135 | .def_readwrite("layers", &pyaon::Params::layers) 136 | .def_readwrite("ios", &pyaon::Params::ios) 137 | .def_readwrite("anticipation", &pyaon::Params::anticipation); 138 | 139 | py::class_(m, "Hierarchy") 140 | .def(py::init< 141 | const std::vector&, 142 | const std::vector&, 143 | const std::string&, 144 | const py::array_t& 145 | >(), 146 | py::arg("io_descs") = std::vector(), 147 | py::arg("layer_descs") = std::vector(), 148 | py::arg("file_name") = std::string(), 149 | py::arg("buffer") = py::array_t() 150 | ) 151 | .def_readwrite("params", &pyaon::Hierarchy::params) 152 | .def("save_to_file", &pyaon::Hierarchy::save_to_file) 153 | .def("set_state_from_buffer", &pyaon::Hierarchy::set_state_from_buffer) 154 | .def("set_weights_from_buffer", &pyaon::Hierarchy::set_weights_from_buffer) 155 | .def("serialize_to_buffer", &pyaon::Hierarchy::serialize_to_buffer) 156 | .def("serialize_state_to_buffer", &pyaon::Hierarchy::serialize_state_to_buffer) 157 | .def("serialize_weights_to_buffer", &pyaon::Hierarchy::serialize_weights_to_buffer) 158 | .def("get_size", &pyaon::Hierarchy::get_size) 159 | .def("get_state_size", &pyaon::Hierarchy::get_state_size) 160 | .def("get_weights_size", &pyaon::Hierarchy::get_weights_size) 161 | .def("step", &pyaon::Hierarchy::step, 162 | py::arg("input_cis"), 163 | py::arg("learn_enabled") = true, 164 | py::arg("reward") = 0.0f, 165 | 
py::arg("mimic") = 0.0f 166 | ) 167 | .def("clear_state", &pyaon::Hierarchy::clear_state) 168 | .def("get_num_layers", &pyaon::Hierarchy::get_num_layers) 169 | .def("get_prediction_cis", &pyaon::Hierarchy::get_prediction_cis) 170 | .def("get_layer_prediction_cis", &pyaon::Hierarchy::get_layer_prediction_cis) 171 | .def("get_prediction_acts", &pyaon::Hierarchy::get_prediction_acts) 172 | .def("sample_prediction", &pyaon::Hierarchy::sample_prediction) 173 | .def("get_hidden_cis", &pyaon::Hierarchy::get_hidden_cis) 174 | .def("get_hidden_size", &pyaon::Hierarchy::get_hidden_size) 175 | .def("get_num_encoder_visible_layers", &pyaon::Hierarchy::get_num_encoder_visible_layers) 176 | .def("get_num_io", &pyaon::Hierarchy::get_num_io) 177 | .def("get_io_size", &pyaon::Hierarchy::get_io_size) 178 | .def("get_io_type", &pyaon::Hierarchy::get_io_type) 179 | .def("get_up_radius", &pyaon::Hierarchy::get_up_radius) 180 | .def("get_down_radius", &pyaon::Hierarchy::get_down_radius) 181 | .def("get_encoder_receptive_field", &pyaon::Hierarchy::get_encoder_receptive_field) 182 | .def("__copy__", 183 | [](const pyaon::Hierarchy &other) { 184 | return other; 185 | } 186 | ) 187 | .def("__deepcopy__", 188 | [](const pyaon::Hierarchy &other) { 189 | return other; 190 | } 191 | ); 192 | 193 | py::class_(m, "ImageVisibleLayerDesc") 194 | .def(py::init< 195 | std::tuple, 196 | int 197 | >(), 198 | py::arg("size") = std::tuple({ 5, 5, 16 }), 199 | py::arg("radius") = 4 200 | ) 201 | .def_readwrite("size", &pyaon::Image_Visible_Layer_Desc::size) 202 | .def_readwrite("radius", &pyaon::Image_Visible_Layer_Desc::radius); 203 | 204 | // bind params 205 | py::class_(m, "ImageEncoderParams") 206 | .def(py::init<>()) 207 | .def_readwrite("falloff", &aon::Image_Encoder::Params::falloff) 208 | .def_readwrite("lr", &aon::Image_Encoder::Params::lr) 209 | .def_readwrite("scale", &aon::Image_Encoder::Params::scale) 210 | .def_readwrite("rr", &aon::Image_Encoder::Params::rr) 211 | .def_readwrite("n_radius", 
&aon::Image_Encoder::Params::n_radius); 212 | 213 | py::class_(m, "ImageEncoder") 214 | .def(py::init< 215 | const std::tuple&, 216 | const std::vector&, 217 | const std::string&, 218 | const py::array_t& 219 | >(), 220 | py::arg("hidden_size") = std::tuple({ 5, 5, 16 }), 221 | py::arg("visible_layer_descs") = std::vector(), 222 | py::arg("file_name") = std::string(), 223 | py::arg("buffer") = py::array_t() 224 | ) 225 | .def_readwrite("params", &pyaon::Image_Encoder::params) 226 | .def("save_to_file", &pyaon::Image_Encoder::save_to_file) 227 | .def("set_state_from_buffer", &pyaon::Image_Encoder::set_state_from_buffer) 228 | .def("set_weights_from_buffer", &pyaon::Image_Encoder::set_weights_from_buffer) 229 | .def("serialize_to_buffer", &pyaon::Image_Encoder::serialize_to_buffer) 230 | .def("serialize_state_to_buffer", &pyaon::Image_Encoder::serialize_state_to_buffer) 231 | .def("serialize_weights_to_buffer", &pyaon::Image_Encoder::serialize_weights_to_buffer) 232 | .def("get_size", &pyaon::Image_Encoder::get_size) 233 | .def("get_state_size", &pyaon::Image_Encoder::get_state_size) 234 | .def("get_weights_size", &pyaon::Image_Encoder::get_weights_size) 235 | .def("step", &pyaon::Image_Encoder::step, 236 | py::arg("inputs"), 237 | py::arg("learn_enabled") = true, 238 | py::arg("learn_recon") = false 239 | ) 240 | .def("reconstruct", &pyaon::Image_Encoder::reconstruct) 241 | .def("get_num_visible_layers", &pyaon::Image_Encoder::get_num_visible_layers) 242 | .def("get_reconstruction", &pyaon::Image_Encoder::get_reconstruction) 243 | .def("get_hidden_cis", &pyaon::Image_Encoder::get_hidden_cis) 244 | .def("get_hidden_size", &pyaon::Image_Encoder::get_hidden_size) 245 | .def("get_visible_size", &pyaon::Image_Encoder::get_visible_size) 246 | .def("get_receptive_field", &pyaon::Image_Encoder::get_receptive_field) 247 | .def("__copy__", 248 | [](const pyaon::Image_Encoder &other) { 249 | return other; 250 | } 251 | ) 252 | .def("__deepcopy__", 253 | [](const 
pyaon::Image_Encoder &other) { 254 | return other; 255 | } 256 | ); 257 | } 258 | -------------------------------------------------------------------------------- /examples/env_runner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # ---------------------------------------------------------------------------- 4 | # PyAOgmaNeo 5 | # Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 6 | # 7 | # This copy of PyAOgmaNeo is licensed to you under the terms described 8 | # in the PYAOGMANEO_LICENSE.md file included in this distribution. 9 | # ---------------------------------------------------------------------------- 10 | 11 | import pyaogmaneo as neo 12 | import numpy as np 13 | import gymnasium as gym 14 | import tinyscaler 15 | import os 16 | import time 17 | 18 | def sigmoid(x): 19 | return np.tanh(x * 0.5) * 0.5 + 0.5 20 | 21 | input_type_none = neo.none 22 | input_type_prediction = neo.prediction 23 | input_type_action = neo.action 24 | 25 | class EnvRunner: 26 | def _handle_nodict_obs_space(self, obs_space, obs_resolution, hidden_size, image_scale, image_radius, key=None): 27 | match type(obs_space): 28 | case gym.spaces.Discrete: 29 | self.input_sizes.append((1, 1, obs_space.n)) 30 | self.input_types.append(input_type_none) 31 | self.input_lows.append([0.0]) 32 | self.input_highs.append([0.0]) 33 | self.input_encs.append(-1) 34 | case gym.spaces.multi_discrete: 35 | square_size = int(np.ceil(np.sqrt(len(obs_space.nvec)))) 36 | high = np.max(obs_space.nvec) 37 | 38 | self.input_sizes.append((square_size, square_size, high)) 39 | self.input_types.append(input_type_none) 40 | self.input_lows.append([0.0]) 41 | self.input_highs.append([0.0]) 42 | self.input_encs.append(-1) 43 | case gym.spaces.Box: 44 | match obs_space.shape: 45 | case (): 46 | return 47 | 48 | if len(obs_space.shape) == 1 or len(obs_space.shape) == 0: 49 | square_size = 
int(np.ceil(np.sqrt(len(obs_space.low)))) 50 | self.input_sizes.append((square_size, square_size, obs_resolution)) 51 | self.input_types.append(input_type_none) 52 | lows = obs_space.low 53 | highs = obs_space.high 54 | 55 | # detect large numbers/inf 56 | for i in range(len(lows)): 57 | if abs(lows[i]) > 10000 or abs(highs[i]) > 10000: 58 | # indicate inf by making low greater than high 59 | lows[i] = 1.0 60 | highs[i] = -1.0 61 | 62 | self.input_lows.append(lows) 63 | self.input_highs.append(highs) 64 | self.input_encs.append(-1) 65 | elif len(obs_space.shape) == 2 or len(obs_space.shape) == 3: 66 | scaled_size = (int(obs_space.shape[0] * image_scale), int(obs_space.shape[1] * image_scale), 1 if len(obs_space.shape) == 2 else obs_space.shape[2]) 67 | 68 | self.image_sizes.append(scaled_size) 69 | 70 | image_enc = neo.ImageEncoder(hidden_size, [neo.ImageVisibleLayerDesc(scaled_size, image_radius)]) 71 | 72 | self.input_sizes.append(hidden_size) 73 | self.input_types.append(input_type_none) 74 | self.input_lows.append([0.0]) 75 | self.input_highs.append([1.0]) 76 | self.input_encs.append(len(self.image_encs)) 77 | 78 | self.image_encs.append(image_enc) 79 | else: 80 | raise Exception("unsupported Box input: dimensions too high " + str(obs_space.shape)) 81 | case _: 82 | raise Exception("unsupported input type " + str(type(obs_space))) 83 | 84 | self.input_keys.append(key) 85 | 86 | def __init__(self, env, layer_sizes=2 * [(5, 5, 64)], 87 | num_dendrites_per_cell=4, 88 | input_radius=2, layer_radius=2, hidden_size=(10, 10, 16), 89 | image_radius=8, image_scale=0.5, obs_resolution=16, action_resolution=16, action_importance=0.5, 90 | reward_scale=1.0, terminal_reward=0.0, inf_sensitivity=2.0, n_threads=4 91 | ): 92 | self.env = env 93 | 94 | neo.set_num_threads(n_threads) 95 | neo.set_global_state(int(time.time())) 96 | 97 | self.im_enc = None 98 | self.im_enc_index = -1 99 | 100 | self.input_sizes = [] 101 | self.input_lows = [] 102 | self.input_highs = [] 103 | 
self.input_types = [] 104 | self.input_keys = [] 105 | self.input_encs = [] 106 | self.image_encs = [] 107 | self.image_sizes = [] 108 | self.action_indices = [] 109 | 110 | self.reward_scale = reward_scale 111 | self.terminal_reward = terminal_reward 112 | 113 | self.inf_sensitivity = inf_sensitivity 114 | 115 | obs_space = env.observation_space 116 | 117 | if type(obs_space) is gym.spaces.Dict: 118 | for key, value in obs_space.items(): 119 | self._handle_nodict_obs_space(value, obs_resolution, hidden_size, image_scale, image_radius, key=key) 120 | else: 121 | self._handle_nodict_obs_space(obs_space, obs_resolution, hidden_size, image_scale, image_radius) 122 | 123 | # actions 124 | if type(self.env.action_space) is gym.spaces.Discrete: 125 | self.action_indices.append(len(self.input_sizes)) 126 | self.input_sizes.append((1, 1, self.env.action_space.n)) 127 | self.input_types.append(input_type_action) 128 | self.input_lows.append([0.0]) 129 | self.input_highs.append([0.0]) 130 | self.input_encs.append(-1) 131 | self.input_keys.append(None) 132 | elif type(self.env.action_space) is gym.spaces.multi_discrete: 133 | square_size = int(np.ceil(np.sqrt(len(self.env.action_space.nvec)))) 134 | high = np.max(self.env.action_space.nvec) 135 | 136 | self.action_indices.append(len(self.input_sizes)) 137 | self.input_sizes.append((square_size, square_size, high)) 138 | self.input_types.append(input_type_action) 139 | self.input_lows.append([0.0]) 140 | self.input_highs.append([0.0]) 141 | self.input_encs.append(-1) 142 | self.input_keys.append(None) 143 | elif type(self.env.action_space) is gym.spaces.Box: 144 | if len(self.env.action_space.shape) < 3: 145 | if len(self.env.action_space.shape) == 2: 146 | self.action_indices.append(len(self.input_sizes)) 147 | self.input_sizes.append((self.env.action_space.shape[0], self.env.action_space.shape[1], action_resolution)) 148 | self.input_types.append(input_type_action) 149 | self.input_keys.append(None) 150 | lows = 
self.env.action_space.low 151 | highs = self.env.action_space.high 152 | 153 | self.input_lows.append(lows) 154 | self.input_highs.append(highs) 155 | self.input_encs.append(-1) 156 | else: 157 | square_size = int(np.ceil(np.sqrt(len(self.env.action_space.low)))) 158 | self.action_indices.append(len(self.input_sizes)) 159 | self.input_sizes.append((square_size, square_size, action_resolution)) 160 | self.input_types.append(input_type_action) 161 | self.input_keys.append(None) 162 | lows = self.env.action_space.low 163 | highs = self.env.action_space.high 164 | 165 | self.input_lows.append(lows) 166 | self.input_highs.append(highs) 167 | self.input_encs.append(-1) 168 | else: 169 | raise Exception("unsupported Box action: dimensions too high " + str(self.env.action_space.shape)) 170 | else: 171 | raise Exception("unsupported action type " + str(type(self.env.action_space))) 172 | 173 | lds = [] 174 | 175 | for i in range(len(layer_sizes)): 176 | ld = neo.LayerDesc() 177 | 178 | ld.hidden_size = layer_sizes[i] 179 | ld.num_dendrites_per_cell = num_dendrites_per_cell 180 | ld.up_radius = layer_radius 181 | ld.down_radius = layer_radius 182 | 183 | lds.append(ld) 184 | 185 | io_descs = [] 186 | 187 | for i in range(len(self.input_sizes)): 188 | io_descs.append(neo.IODesc(self.input_sizes[i], self.input_types[i], num_dendrites_per_cell=num_dendrites_per_cell, up_radius=input_radius, down_radius=layer_radius)) 189 | 190 | self.h = neo.Hierarchy(io_descs, lds) 191 | 192 | self.actions = [] 193 | 194 | for i in range(len(self.action_indices)): 195 | index = self.action_indices[i] 196 | 197 | self.h.params.ios[index].importance = action_importance 198 | 199 | size = self.h.get_io_size(index)[0] * self.h.get_io_size(index)[1] 200 | 201 | start_act = [] 202 | 203 | for _ in range(size): 204 | start_act.append(np.random.randint(0, self.input_sizes[index][2])) 205 | 206 | self.actions.append(start_act) 207 | 208 | self.actions = np.array(self.actions, np.int32) 209 | 210 | 
self.obs_space = obs_space 211 | 212 | self.learn_enabled = True 213 | 214 | def _feed_observation(self, obs): 215 | self.inputs = [] 216 | 217 | action_index = 0 218 | image_enc_index = 0 219 | 220 | for i in range(len(self.input_sizes)): 221 | sub_obs = obs 222 | 223 | if self.input_keys[i] is not None: 224 | sub_obs = sub_obs[self.input_keys[i]] 225 | 226 | if self.input_types[i] == input_type_action: 227 | self.inputs.append(self.actions[action_index]) 228 | 229 | action_index += 1 230 | elif self.input_encs[i] != -1: 231 | # format image 232 | img = tinyscaler.scale((sub_obs - self.input_lows[i]) / (self.input_highs[i][0] - self.input_lows[i][0]), 233 | (self.image_sizes[image_enc_index][1], self.image_sizes[image_enc_index][0])) 234 | 235 | # encode image 236 | self.image_encs[image_enc_index].step([img.astype(np.uint8).ravel()], True) 237 | 238 | self.inputs.append(self.image_encs[image_enc_index].get_hidden_cis()) 239 | 240 | image_enc_index += 1 241 | else: 242 | sub_obs = sub_obs.ravel() 243 | 244 | indices = [] 245 | 246 | for j in range(len(self.input_lows[i])): 247 | if self.input_lows[i][j] < self.input_highs[i][j]: 248 | # rescale 249 | #indices.append(int(min(1.0, max(0.0, (sub_obs[j] - self.input_lows[i][j]) / (self.input_highs[i][j] - self.input_lows[i][j]))) * (self.input_sizes[i][2] - 1) + 0.5)) 250 | indices.append(int(sigmoid(sub_obs[j] * self.inf_sensitivity) * (self.input_sizes[i][2] - 1) + 0.5)) 251 | elif self.input_lows[i][j] > self.input_highs[i][j]: # Inf 252 | # Rescale 253 | indices.append(int(sigmoid(sub_obs[j] * self.inf_sensitivity) * (self.input_sizes[i][2] - 1) + 0.5)) 254 | else: 255 | if type(self.env.observation_space) is gym.spaces.multi_discrete: 256 | indices.append(int(sub_obs[j]) % self.sub_obs_space.nvec[j]) 257 | else: 258 | indices.append(int(sub_obs[j])) 259 | 260 | if len(indices) < self.input_sizes[i][0] * self.input_sizes[i][1]: 261 | indices += ((self.input_sizes[i][0] * self.input_sizes[i][1]) - len(indices)) * 
[int(0)] 262 | 263 | self.inputs.append(np.array(indices, dtype=np.int32)) 264 | 265 | def act(self, epsilon=0.02, obs_preprocess=None): 266 | feed_actions = [] 267 | 268 | for i in range(len(self.action_indices)): 269 | index = self.action_indices[i] 270 | 271 | assert(self.input_types[index] == input_type_action) 272 | 273 | if self.input_lows[index][0] < self.input_highs[index][0]: 274 | feed_action = [] 275 | 276 | # explore 277 | for j in range(len(self.input_lows[index])): 278 | if np.random.rand() < epsilon: 279 | self.actions[i][j] = np.random.randint(0, self.input_sizes[index][2]) 280 | 281 | if self.input_lows[index][j] < self.input_highs[index][j]: 282 | feed_action.append(self.actions[i][j] / float(self.input_sizes[index][2] - 1) * (self.input_highs[index][j] - self.input_lows[index][j]) + self.input_lows[index][j]) 283 | else: 284 | feed_action.append(self.actions[i][j]) 285 | 286 | feed_actions.append(feed_action) 287 | else: 288 | if type(self.env.action_space) is gym.spaces.multi_discrete: 289 | for j in range(len(self.env.action_space.nvec)): 290 | if np.random.rand() < epsilon: 291 | self.actions[i][j] = np.random.randint(0, self.input_sizes[index][2]) 292 | 293 | feed_actions.append(int(self.actions[i][j])) 294 | else: 295 | if np.random.rand() < epsilon: 296 | self.actions[i][0] = np.random.randint(0, self.input_sizes[index][2]) 297 | 298 | feed_actions.append(int(self.actions[i][0])) 299 | 300 | # remove outer array if needed 301 | if len(feed_actions) == 1: 302 | feed_actions = feed_actions[0] 303 | 304 | obs, reward, term, trunc, info = self.env.step(feed_actions) 305 | 306 | if obs_preprocess is not None: 307 | obs = obs_preprocess(obs) 308 | 309 | if type(obs) is not np.array: 310 | obs = np.array(obs) 311 | 312 | self._feed_observation(obs) 313 | 314 | r = reward * self.reward_scale + float(term) * self.terminal_reward 315 | 316 | start_time = time.perf_counter() 317 | 318 | self.h.step(self.inputs, self.learn_enabled, r) 319 | 320 | 
end_time = time.perf_counter() 321 | 322 | #if term or trunc: 323 | # print((end_time - start_time) * 1000.0) 324 | 325 | # retrieve actions 326 | for i in range(len(self.action_indices)): 327 | index = self.action_indices[i] 328 | 329 | assert self.input_types[index] == input_type_action 330 | 331 | self.actions[i] = self.h.get_prediction_cis(index) 332 | 333 | return term or trunc, reward 334 | -------------------------------------------------------------------------------- /source/pyaogmaneo/py_hierarchy.cpp: -------------------------------------------------------------------------------- 1 | // ---------------------------------------------------------------------------- 2 | // PyAOgmaNeo 3 | // Copyright(c) 2020-2025 Ogma Intelligent Systems Corp. All rights reserved. 4 | // 5 | // This copy of PyAOgmaNeo is licensed to you under the terms described 6 | // in the PYAOGMANEO_LICENSE.md file included in this distribution. 7 | // ---------------------------------------------------------------------------- 8 | 9 | #include "py_hierarchy.h" 10 | 11 | using namespace pyaon; 12 | 13 | void IO_Desc::check_in_range() const { 14 | if (std::get<0>(size) < 1) 15 | throw std::runtime_error("error: size[0] < 1 is not allowed!"); 16 | 17 | if (std::get<1>(size) < 1) 18 | throw std::runtime_error("error: size[1] < 1 is not allowed!"); 19 | 20 | if (std::get<2>(size) < 1) 21 | throw std::runtime_error("error: size[2] < 1 is not allowed!"); 22 | 23 | if (num_dendrites_per_cell < 1) 24 | throw std::runtime_error("error: num_dendrites_per_cell < 1 is not allowed!"); 25 | 26 | if (up_radius < 0) 27 | throw std::runtime_error("error: up_radius < 0 is not allowed!"); 28 | 29 | if (down_radius < 0) 30 | throw std::runtime_error("error: down_radius < 0 is not allowed!"); 31 | 32 | if (value_size < 2) 33 | throw std::runtime_error("error: value_size < 2 is not allowed!"); 34 | 35 | if (value_num_dendrites_per_cell < 1) 36 | throw std::runtime_error("error: value_num_dendrites_per_cell < 
1 is not allowed!"); 37 | 38 | if (history_capacity < 2) 39 | throw std::runtime_error("error: history_capacity < 2 is not allowed!"); 40 | } 41 | 42 | void Layer_Desc::check_in_range() const { 43 | if (std::get<0>(hidden_size) < 1) 44 | throw std::runtime_error("error: hidden_size[0] < 1 is not allowed!"); 45 | 46 | if (std::get<1>(hidden_size) < 1) 47 | throw std::runtime_error("error: hidden_size[1] < 1 is not allowed!"); 48 | 49 | if (std::get<2>(hidden_size) < 1) 50 | throw std::runtime_error("error: hidden_size[2] < 1 is not allowed!"); 51 | 52 | if (num_dendrites_per_cell < 1) 53 | throw std::runtime_error("error: num_dendrites_per_cell < 1 is not allowed!"); 54 | 55 | if (up_radius < 0) 56 | throw std::runtime_error("error: up_radius < 0 is not allowed!"); 57 | 58 | if (recurrent_radius < -1) 59 | throw std::runtime_error("error: recurrent_radius < -1 is not allowed!"); 60 | 61 | if (down_radius < 0) 62 | throw std::runtime_error("error: down_radius < 0 is not allowed!"); 63 | } 64 | 65 | Hierarchy::Hierarchy( 66 | const std::vector &io_descs, 67 | const std::vector &layer_descs, 68 | const std::string &file_name, 69 | const py::array_t &buffer 70 | ) { 71 | if (buffer.unchecked().size() > 0) 72 | init_from_buffer(buffer); 73 | else if (!file_name.empty()) 74 | init_from_file(file_name); 75 | else { 76 | if (io_descs.empty() || layer_descs.empty()) 77 | throw std::runtime_error("error: Hierarchy constructor requires some non-empty arguments!"); 78 | 79 | init_random(io_descs, layer_descs); 80 | } 81 | 82 | // copy params 83 | params.ios.resize(h.get_num_io()); 84 | 85 | for (int i = 0; i < h.get_num_io(); i++) 86 | params.ios[i] = h.params.ios[i]; 87 | 88 | // copy params 89 | params.layers.resize(h.get_num_layers()); 90 | 91 | for (int l = 0; l < h.get_num_layers(); l++) 92 | params.layers[l] = h.params.layers[l]; 93 | 94 | params.anticipation = h.params.anticipation; 95 | 96 | c_input_cis_backing.resize(h.get_num_io()); 97 | 
c_input_cis.resize(h.get_num_io()); 98 | 99 | for (int i = 0; i < c_input_cis_backing.size(); i++) 100 | c_input_cis_backing[i].resize(h.get_io_size(i).x * h.get_io_size(i).y); 101 | } 102 | 103 | void Hierarchy::init_random( 104 | const std::vector &io_descs, 105 | const std::vector &layer_descs 106 | ) { 107 | aon::Array c_io_descs(io_descs.size()); 108 | 109 | for (int i = 0; i < io_descs.size(); i++) { 110 | io_descs[i].check_in_range(); 111 | 112 | c_io_descs[i] = aon::Hierarchy::IO_Desc( 113 | aon::Int3(std::get<0>(io_descs[i].size), std::get<1>(io_descs[i].size), std::get<2>(io_descs[i].size)), 114 | static_cast(io_descs[i].type), 115 | io_descs[i].num_dendrites_per_cell, 116 | io_descs[i].up_radius, 117 | io_descs[i].down_radius, 118 | io_descs[i].value_size, 119 | io_descs[i].value_num_dendrites_per_cell, 120 | io_descs[i].history_capacity 121 | ); 122 | } 123 | 124 | aon::Array c_layer_descs(layer_descs.size()); 125 | 126 | for (int l = 0; l < layer_descs.size(); l++) { 127 | layer_descs[l].check_in_range(); 128 | 129 | c_layer_descs[l] = aon::Hierarchy::Layer_Desc( 130 | aon::Int3(std::get<0>(layer_descs[l].hidden_size), std::get<1>(layer_descs[l].hidden_size), std::get<2>(layer_descs[l].hidden_size)), 131 | layer_descs[l].num_dendrites_per_cell, 132 | layer_descs[l].up_radius, 133 | layer_descs[l].recurrent_radius, 134 | layer_descs[l].down_radius 135 | ); 136 | } 137 | 138 | h.init_random(c_io_descs, c_layer_descs); 139 | } 140 | 141 | void Hierarchy::init_from_file( 142 | const std::string &file_name 143 | ) { 144 | File_Reader reader; 145 | reader.ins.open(file_name, std::ios::binary); 146 | 147 | h.read(reader); 148 | } 149 | 150 | void Hierarchy::init_from_buffer( 151 | const py::array_t &buffer 152 | ) { 153 | Buffer_Reader reader; 154 | reader.buffer = &buffer; 155 | 156 | h.read(reader); 157 | } 158 | 159 | void Hierarchy::save_to_file( 160 | const std::string &file_name 161 | ) { 162 | File_Writer writer; 163 | writer.outs.open(file_name, 
std::ios::binary); 164 | 165 | h.write(writer); 166 | } 167 | 168 | void Hierarchy::set_state_from_buffer( 169 | const py::array_t &buffer 170 | ) { 171 | Buffer_Reader reader; 172 | reader.buffer = &buffer; 173 | 174 | h.read_state(reader); 175 | } 176 | 177 | void Hierarchy::set_weights_from_buffer( 178 | const py::array_t &buffer 179 | ) { 180 | Buffer_Reader reader; 181 | reader.buffer = &buffer; 182 | 183 | h.read_weights(reader); 184 | } 185 | 186 | py::array_t Hierarchy::serialize_to_buffer() { 187 | Buffer_Writer writer(h.size() + sizeof(int)); 188 | 189 | h.write(writer); 190 | 191 | return writer.buffer; 192 | } 193 | 194 | py::array_t Hierarchy::serialize_state_to_buffer() { 195 | Buffer_Writer writer(h.state_size()); 196 | 197 | h.write_state(writer); 198 | 199 | return writer.buffer; 200 | } 201 | 202 | py::array_t Hierarchy::serialize_weights_to_buffer() { 203 | Buffer_Writer writer(h.weights_size()); 204 | 205 | h.write_weights(writer); 206 | 207 | return writer.buffer; 208 | } 209 | 210 | void Hierarchy::step( 211 | const std::vector> &input_cis, 212 | bool learn_enabled, 213 | float reward, 214 | float mimic 215 | ) { 216 | if (input_cis.size() != h.get_num_io()) 217 | throw std::runtime_error("incorrect number of input_cis passed to step! 
received " + std::to_string(input_cis.size()) + ", need " + std::to_string(h.get_num_io())); 218 | 219 | copy_params_to_h(); 220 | 221 | for (int i = 0; i < input_cis.size(); i++) { 222 | auto view = input_cis[i].unchecked(); 223 | 224 | int num_columns = h.get_io_size(i).x * h.get_io_size(i).y; 225 | 226 | if (view.size() != num_columns) 227 | throw std::runtime_error("incorrect csdr size at index " + std::to_string(i) + " - expected " + std::to_string(num_columns) + " columns, got " + std::to_string(view.size())); 228 | 229 | for (int j = 0; j < view.size(); j++) { 230 | if (view(j) < 0 || view(j) >= h.get_io_size(i).z) 231 | throw std::runtime_error("input csdr at input index " + std::to_string(i) + " has an out-of-bounds column index (" + std::to_string(view(j)) + ") at column index " + std::to_string(j) + ". it must be in the range [0, " + std::to_string(h.get_io_size(i).z - 1) + "]"); 232 | 233 | c_input_cis_backing[i][j] = view(j); 234 | } 235 | 236 | c_input_cis[i] = c_input_cis_backing[i]; 237 | } 238 | 239 | h.step(c_input_cis, learn_enabled, reward, mimic); 240 | } 241 | 242 | py::array_t Hierarchy::get_prediction_cis( 243 | int i 244 | ) const { 245 | if (i < 0 || i >= h.get_num_io()) 246 | throw std::runtime_error("prediction index " + std::to_string(i) + " out of range [0, " + std::to_string(h.get_num_io() - 1) + "]!"); 247 | 248 | if (!h.io_layer_exists(i) || h.get_io_type(i) == aon::none) 249 | throw std::runtime_error("no decoder exists at index " + std::to_string(i) + " - did you set it to the correct type?"); 250 | 251 | py::array_t predictions(h.get_prediction_cis(i).size()); 252 | 253 | auto view = predictions.mutable_unchecked(); 254 | 255 | for (int j = 0; j < view.size(); j++) 256 | view(j) = h.get_prediction_cis(i)[j]; 257 | 258 | return predictions; 259 | } 260 | 261 | py::array_t Hierarchy::get_layer_prediction_cis( 262 | int l 263 | ) const { 264 | if (l < 1 || l >= h.get_num_layers()) 265 | throw std::runtime_error("layer index " + 
std::to_string(l) + " out of range [1, " + std::to_string(h.get_num_layers() - 1) + "]!"); 266 | 267 | const aon::Int_Buffer &cis = h.get_decoder(l, 0).get_hidden_cis(); 268 | 269 | py::array_t predictions(cis.size()); 270 | 271 | auto view = predictions.mutable_unchecked(); 272 | 273 | for (int j = 0; j < view.size(); j++) 274 | view(j) = cis[j]; 275 | 276 | return predictions; 277 | } 278 | 279 | py::array_t Hierarchy::get_prediction_acts( 280 | int i 281 | ) const { 282 | if (i < 0 || i >= h.get_num_io()) 283 | throw std::runtime_error("prediction index " + std::to_string(i) + " out of range [0, " + std::to_string(h.get_num_io() - 1) + "]!"); 284 | 285 | if (!h.io_layer_exists(i) || h.get_io_type(i) == aon::none) 286 | throw std::runtime_error("no decoder or actor exists at index " + std::to_string(i) + " - did you set it to the correct type?"); 287 | 288 | py::array_t predictions(h.get_prediction_acts(i).size()); 289 | 290 | auto view = predictions.mutable_unchecked(); 291 | 292 | for (int j = 0; j < view.size(); j++) 293 | view(j) = h.get_prediction_acts(i)[j]; 294 | 295 | return predictions; 296 | } 297 | 298 | py::array_t Hierarchy::sample_prediction( 299 | int i, 300 | float temperature 301 | ) const { 302 | if (temperature == 0.0f) 303 | return get_prediction_cis(i); 304 | 305 | if (i < 0 || i >= h.get_num_io()) 306 | throw std::runtime_error("prediction index " + std::to_string(i) + " out of range [0, " + std::to_string(h.get_num_io() - 1) + "]!"); 307 | 308 | if (!h.io_layer_exists(i) || h.get_io_type(i) == aon::none) 309 | throw std::runtime_error("no decoder or actor exists at index " + std::to_string(i) + " - did you set it to the correct type?"); 310 | 311 | py::array_t sample(h.get_prediction_cis(i).size()); 312 | 313 | auto view = sample.mutable_unchecked(); 314 | 315 | int size_z = h.get_io_size(i).z; 316 | 317 | float temperature_inv = 1.0f / temperature; 318 | 319 | for (int j = 0; j < view.size(); j++) { 320 | float total = 0.0f; 321 | 322 | 
for (int k = 0; k < size_z; k++) 323 | total += aon::powf(h.get_prediction_acts(i)[k + j * size_z], temperature_inv); 324 | 325 | float cusp = aon::randf() * total; 326 | 327 | float sum_so_far = 0.0f; 328 | 329 | for (int k = 0; k < size_z; k++) { 330 | sum_so_far += aon::powf(h.get_prediction_acts(i)[k + j * size_z], temperature_inv); 331 | 332 | if (sum_so_far >= cusp) { 333 | view(j) = k; 334 | 335 | break; 336 | } 337 | } 338 | } 339 | 340 | return sample; 341 | } 342 | 343 | py::array_t Hierarchy::get_hidden_cis( 344 | int l 345 | ) { 346 | if (l < 0 || l >= h.get_num_layers()) 347 | throw std::runtime_error("error: " + std::to_string(l) + " is not a valid layer index!"); 348 | 349 | py::array_t hidden_cis(h.get_encoder(l).get_hidden_cis().size()); 350 | 351 | auto view = hidden_cis.mutable_unchecked(); 352 | 353 | for (int j = 0; j < view.size(); j++) 354 | view(j) = h.get_encoder(l).get_hidden_cis()[j]; 355 | 356 | return hidden_cis; 357 | } 358 | 359 | void Hierarchy::copy_params_to_h() { 360 | if (params.ios.size() != h.params.ios.size()) 361 | throw std::runtime_error("ios parameter size mismatch - did you modify the length of params.ios?"); 362 | 363 | if (params.layers.size() != h.params.layers.size()) 364 | throw std::runtime_error("layers parameter size mismatch - did you modify the length of params.layers?"); 365 | 366 | // copy params 367 | for (int i = 0; i < params.ios.size(); i++) 368 | h.params.ios[i] = params.ios[i]; 369 | 370 | // copy params 371 | for (int l = 0; l < params.layers.size(); l++) 372 | h.params.layers[l] = params.layers[l]; 373 | 374 | h.params.anticipation = params.anticipation; 375 | } 376 | 377 | std::tuple, std::tuple> Hierarchy::get_encoder_receptive_field( 378 | int l, 379 | int vli, 380 | const std::tuple &pos 381 | ) { 382 | if (l < 0 || l >= h.get_num_layers()) 383 | throw std::runtime_error("layer index " + std::to_string(l) + " out of range [0, " + std::to_string(h.get_num_layers() - 1) + "]!"); 384 | 385 | const 
aon::Encoder &enc = h.get_encoder(l); 386 | 387 | int num_visible_layers = enc.get_num_visible_layers(); 388 | 389 | if (vli < 0 || vli >= num_visible_layers) 390 | throw std::runtime_error("visible layer index " + std::to_string(vli) + " out of range [0, " + std::to_string(num_visible_layers - 1) + "]!"); 391 | 392 | const aon::Int3 &hidden_size = enc.get_hidden_size(); 393 | 394 | if (std::get<0>(pos) < 0 || std::get<0>(pos) >= hidden_size.x || 395 | std::get<1>(pos) < 0 || std::get<1>(pos) >= hidden_size.y || 396 | std::get<2>(pos) < 0 || std::get<2>(pos) >= hidden_size.z) { 397 | throw std::runtime_error("position (" + std::to_string(std::get<0>(pos)) + ", " + std::to_string(std::get<1>(pos)) + ", " + std::to_string(std::get<2>(pos)) + ") " + 398 | + " not in size (" + std::to_string(hidden_size.x) + ", " + std::to_string(hidden_size.y) + ", " + std::to_string(hidden_size.z) + ")!"); 399 | } 400 | 401 | const aon::Encoder::Visible_Layer &vl = enc.get_visible_layer(vli); 402 | const aon::Encoder::Visible_Layer_Desc &vld = enc.get_visible_layer_desc(vli); 403 | 404 | int diam = vld.radius * 2 + 1; 405 | int area = diam * diam; 406 | 407 | aon::Int2 column_pos(std::get<0>(pos), std::get<1>(pos)); 408 | 409 | int hidden_column_index = aon::address2(column_pos, aon::Int2(hidden_size.x, hidden_size.y)); 410 | int hidden_cells_start = hidden_size.z * hidden_column_index; 411 | 412 | // projection 413 | aon::Float2 h_to_v = aon::Float2(static_cast(vld.size.x) / static_cast(hidden_size.x), 414 | static_cast(vld.size.y) / static_cast(hidden_size.y)); 415 | 416 | aon::Int2 visible_center = project(column_pos, h_to_v); 417 | 418 | // lower corner 419 | aon::Int2 field_lower_bound(visible_center.x - vld.radius, visible_center.y - vld.radius); 420 | 421 | // bounds of receptive field, clamped to input size 422 | aon::Int2 iter_lower_bound(aon::max(0, field_lower_bound.x), aon::max(0, field_lower_bound.y)); 423 | aon::Int2 iter_upper_bound(aon::min(vld.size.x - 1, 
visible_center.x + vld.radius), aon::min(vld.size.y - 1, visible_center.y + vld.radius)); 424 | 425 | int field_count = area * vld.size.z; 426 | 427 | py::array_t field(field_count); 428 | 429 | auto view = field.mutable_unchecked(); 430 | 431 | // first clear 432 | for (int i = 0; i < field_count; i++) 433 | view(i) = 0; 434 | 435 | for (int ix = iter_lower_bound.x; ix <= iter_upper_bound.x; ix++) 436 | for (int iy = iter_lower_bound.y; iy <= iter_upper_bound.y; iy++) { 437 | int visible_column_index = address2(aon::Int2(ix, iy), aon::Int2(vld.size.x, vld.size.y)); 438 | 439 | aon::Int2 offset(ix - field_lower_bound.x, iy - field_lower_bound.y); 440 | 441 | for (int vc = 0; vc < vld.size.z; vc++) { 442 | int wi = std::get<2>(pos) + hidden_size.z * (offset.y + diam * (offset.x + diam * (vc + vld.size.z * hidden_column_index))); 443 | 444 | view(vc + vld.size.z * (offset.y + diam * offset.x)) = vl.weights[wi]; 445 | } 446 | } 447 | 448 | std::tuple field_size(diam, diam, vld.size.z); 449 | 450 | return std::make_tuple(field, field_size); 451 | } 452 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 2 | Public License 3 | 4 | By exercising the Licensed Rights (defined below), You accept and agree 5 | to be bound by the terms and conditions of this Creative Commons 6 | Attribution-NonCommercial-ShareAlike 4.0 International Public License 7 | ("Public License"). To the extent this Public License may be 8 | interpreted as a contract, You are granted the Licensed Rights in 9 | consideration of Your acceptance of these terms and conditions, and the 10 | Licensor grants You such rights in consideration of benefits the 11 | Licensor receives from making the Licensed Material available under 12 | these terms and conditions. 13 | 14 | 15 | Section 1 -- Definitions. 
16 | 17 | a. Adapted Material means material subject to Copyright and Similar 18 | Rights that is derived from or based upon the Licensed Material 19 | and in which the Licensed Material is translated, altered, 20 | arranged, transformed, or otherwise modified in a manner requiring 21 | permission under the Copyright and Similar Rights held by the 22 | Licensor. For purposes of this Public License, where the Licensed 23 | Material is a musical work, performance, or sound recording, 24 | Adapted Material is always produced where the Licensed Material is 25 | synched in timed relation with a moving image. 26 | 27 | b. Adapter's License means the license You apply to Your Copyright 28 | and Similar Rights in Your contributions to Adapted Material in 29 | accordance with the terms and conditions of this Public License. 30 | 31 | c. BY-NC-SA Compatible License means a license listed at 32 | creativecommons.org/compatiblelicenses, approved by Creative 33 | Commons as essentially the equivalent of this Public License. 34 | 35 | d. Copyright and Similar Rights means copyright and/or similar rights 36 | closely related to copyright including, without limitation, 37 | performance, broadcast, sound recording, and Sui Generis Database 38 | Rights, without regard to how the rights are labeled or 39 | categorized. For purposes of this Public License, the rights 40 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 41 | Rights. 42 | 43 | e. Effective Technological Measures means those measures that, in the 44 | absence of proper authority, may not be circumvented under laws 45 | fulfilling obligations under Article 11 of the WIPO Copyright 46 | Treaty adopted on December 20, 1996, and/or similar international 47 | agreements. 48 | 49 | f. Exceptions and Limitations means fair use, fair dealing, and/or 50 | any other exception or limitation to Copyright and Similar Rights 51 | that applies to Your use of the Licensed Material. 52 | 53 | g. 
License Elements means the license attributes listed in the name 54 | of a Creative Commons Public License. The License Elements of this 55 | Public License are Attribution, NonCommercial, and ShareAlike. 56 | 57 | h. Licensed Material means the artistic or literary work, database, 58 | or other material to which the Licensor applied this Public 59 | License. 60 | 61 | i. Licensed Rights means the rights granted to You subject to the 62 | terms and conditions of this Public License, which are limited to 63 | all Copyright and Similar Rights that apply to Your use of the 64 | Licensed Material and that the Licensor has authority to license. 65 | 66 | j. Licensor means the individual(s) or entity(ies) granting rights 67 | under this Public License. 68 | 69 | k. NonCommercial means not primarily intended for or directed towards 70 | commercial advantage or monetary compensation. For purposes of 71 | this Public License, the exchange of the Licensed Material for 72 | other material subject to Copyright and Similar Rights by digital 73 | file-sharing or similar means is NonCommercial provided there is 74 | no payment of monetary compensation in connection with the 75 | exchange. 76 | 77 | l. Share means to provide material to the public by any means or 78 | process that requires permission under the Licensed Rights, such 79 | as reproduction, public display, public performance, distribution, 80 | dissemination, communication, or importation, and to make material 81 | available to the public including in ways that members of the 82 | public may access the material from a place and at a time 83 | individually chosen by them. 84 | 85 | m. Sui Generis Database Rights means rights other than copyright 86 | resulting from Directive 96/9/EC of the European Parliament and of 87 | the Council of 11 March 1996 on the legal protection of databases, 88 | as amended and/or succeeded, as well as other essentially 89 | equivalent rights anywhere in the world. 90 | 91 | n. 
You means the individual or entity exercising the Licensed Rights 92 | under this Public License. Your has a corresponding meaning. 93 | 94 | 95 | Section 2 -- Scope. 96 | 97 | a. License grant. 98 | 99 | 1. Subject to the terms and conditions of this Public License, 100 | the Licensor hereby grants You a worldwide, royalty-free, 101 | non-sublicensable, non-exclusive, irrevocable license to 102 | exercise the Licensed Rights in the Licensed Material to: 103 | 104 | a. reproduce and Share the Licensed Material, in whole or 105 | in part, for NonCommercial purposes only; and 106 | 107 | b. produce, reproduce, and Share Adapted Material for 108 | NonCommercial purposes only. 109 | 110 | 2. Exceptions and Limitations. For the avoidance of doubt, where 111 | Exceptions and Limitations apply to Your use, this Public 112 | License does not apply, and You do not need to comply with 113 | its terms and conditions. 114 | 115 | 3. Term. The term of this Public License is specified in Section 116 | 6(a). 117 | 118 | 4. Media and formats; technical modifications allowed. The 119 | Licensor authorizes You to exercise the Licensed Rights in 120 | all media and formats whether now known or hereafter created, 121 | and to make technical modifications necessary to do so. The 122 | Licensor waives and/or agrees not to assert any right or 123 | authority to forbid You from making technical modifications 124 | necessary to exercise the Licensed Rights, including 125 | technical modifications necessary to circumvent Effective 126 | Technological Measures. For purposes of this Public License, 127 | simply making modifications authorized by this Section 2(a) 128 | (4) never produces Adapted Material. 129 | 130 | 5. Downstream recipients. 131 | 132 | a. Offer from the Licensor -- Licensed Material. 
Every 133 | recipient of the Licensed Material automatically 134 | receives an offer from the Licensor to exercise the 135 | Licensed Rights under the terms and conditions of this 136 | Public License. 137 | 138 | b. Additional offer from the Licensor -- Adapted Material. 139 | Every recipient of Adapted Material from You 140 | automatically receives an offer from the Licensor to 141 | exercise the Licensed Rights in the Adapted Material 142 | under the conditions of the Adapter's License You apply. 143 | 144 | c. No downstream restrictions. You may not offer or impose 145 | any additional or different terms or conditions on, or 146 | apply any Effective Technological Measures to, the 147 | Licensed Material if doing so restricts exercise of the 148 | Licensed Rights by any recipient of the Licensed 149 | Material. 150 | 151 | 6. No endorsement. Nothing in this Public License constitutes or 152 | may be construed as permission to assert or imply that You 153 | are, or that Your use of the Licensed Material is, connected 154 | with, or sponsored, endorsed, or granted official status by, 155 | the Licensor or others designated to receive attribution as 156 | provided in Section 3(a)(1)(A)(i). 157 | 158 | b. Other rights. 159 | 160 | 1. Moral rights, such as the right of integrity, are not 161 | licensed under this Public License, nor are publicity, 162 | privacy, and/or other similar personality rights; however, to 163 | the extent possible, the Licensor waives and/or agrees not to 164 | assert any such rights held by the Licensor to the limited 165 | extent necessary to allow You to exercise the Licensed 166 | Rights, but not otherwise. 167 | 168 | 2. Patent and trademark rights are not licensed under this 169 | Public License. 170 | 171 | 3. 
To the extent possible, the Licensor waives any right to 172 | collect royalties from You for the exercise of the Licensed 173 | Rights, whether directly or through a collecting society 174 | under any voluntary or waivable statutory or compulsory 175 | licensing scheme. In all other cases the Licensor expressly 176 | reserves any right to collect such royalties, including when 177 | the Licensed Material is used other than for NonCommercial 178 | purposes. 179 | 180 | 181 | Section 3 -- License Conditions. 182 | 183 | Your exercise of the Licensed Rights is expressly made subject to the 184 | following conditions. 185 | 186 | a. Attribution. 187 | 188 | 1. If You Share the Licensed Material (including in modified 189 | form), You must: 190 | 191 | a. retain the following if it is supplied by the Licensor 192 | with the Licensed Material: 193 | 194 | i. identification of the creator(s) of the Licensed 195 | Material and any others designated to receive 196 | attribution, in any reasonable manner requested by 197 | the Licensor (including by pseudonym if 198 | designated); 199 | 200 | ii. a copyright notice; 201 | 202 | iii. a notice that refers to this Public License; 203 | 204 | iv. a notice that refers to the disclaimer of 205 | warranties; 206 | 207 | v. a URI or hyperlink to the Licensed Material to the 208 | extent reasonably practicable; 209 | 210 | b. indicate if You modified the Licensed Material and 211 | retain an indication of any previous modifications; and 212 | 213 | c. indicate the Licensed Material is licensed under this 214 | Public License, and include the text of, or the URI or 215 | hyperlink to, this Public License. 216 | 217 | 2. You may satisfy the conditions in Section 3(a)(1) in any 218 | reasonable manner based on the medium, means, and context in 219 | which You Share the Licensed Material. 
For example, it may be 220 | reasonable to satisfy the conditions by providing a URI or 221 | hyperlink to a resource that includes the required 222 | information. 223 | 3. If requested by the Licensor, You must remove any of the 224 | information required by Section 3(a)(1)(A) to the extent 225 | reasonably practicable. 226 | 227 | b. ShareAlike. 228 | 229 | In addition to the conditions in Section 3(a), if You Share 230 | Adapted Material You produce, the following conditions also apply. 231 | 232 | 1. The Adapter's License You apply must be a Creative Commons 233 | license with the same License Elements, this version or 234 | later, or a BY-NC-SA Compatible License. 235 | 236 | 2. You must include the text of, or the URI or hyperlink to, the 237 | Adapter's License You apply. You may satisfy this condition 238 | in any reasonable manner based on the medium, means, and 239 | context in which You Share Adapted Material. 240 | 241 | 3. You may not offer or impose any additional or different terms 242 | or conditions on, or apply any Effective Technological 243 | Measures to, Adapted Material that restrict exercise of the 244 | rights granted under the Adapter's License You apply. 245 | 246 | 247 | Section 4 -- Sui Generis Database Rights. 248 | 249 | Where the Licensed Rights include Sui Generis Database Rights that 250 | apply to Your use of the Licensed Material: 251 | 252 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 253 | to extract, reuse, reproduce, and Share all or a substantial 254 | portion of the contents of the database for NonCommercial purposes 255 | only; 256 | 257 | b. if You include all or a substantial portion of the database 258 | contents in a database in which You have Sui Generis Database 259 | Rights, then the database in which You have Sui Generis Database 260 | Rights (but not its individual contents) is Adapted Material, 261 | including for purposes of Section 3(b); and 262 | 263 | c. 
You must comply with the conditions in Section 3(a) if You Share 264 | all or a substantial portion of the contents of the database. 265 | 266 | For the avoidance of doubt, this Section 4 supplements and does not 267 | replace Your obligations under this Public License where the Licensed 268 | Rights include other Copyright and Similar Rights. 269 | 270 | 271 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 272 | 273 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 274 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 275 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 276 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 277 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 278 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 279 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 280 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 281 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 282 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 283 | 284 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 285 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 286 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 287 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 288 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 289 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 290 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 291 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 292 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 293 | 294 | c. The disclaimer of warranties and limitation of liability provided 295 | above shall be interpreted in a manner that, to the extent 296 | possible, most closely approximates an absolute disclaimer and 297 | waiver of all liability. 
298 | 299 | 300 | Section 6 -- Term and Termination. 301 | 302 | a. This Public License applies for the term of the Copyright and 303 | Similar Rights licensed here. However, if You fail to comply with 304 | this Public License, then Your rights under this Public License 305 | terminate automatically. 306 | 307 | b. Where Your right to use the Licensed Material has terminated under 308 | Section 6(a), it reinstates: 309 | 310 | 1. automatically as of the date the violation is cured, provided 311 | it is cured within 30 days of Your discovery of the 312 | violation; or 313 | 314 | 2. upon express reinstatement by the Licensor. 315 | 316 | For the avoidance of doubt, this Section 6(b) does not affect any 317 | right the Licensor may have to seek remedies for Your violations 318 | of this Public License. 319 | 320 | c. For the avoidance of doubt, the Licensor may also offer the 321 | Licensed Material under separate terms or conditions or stop 322 | distributing the Licensed Material at any time; however, doing so 323 | will not terminate this Public License. 324 | 325 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 326 | License. 327 | 328 | 329 | Section 7 -- Other Terms and Conditions. 330 | 331 | a. The Licensor shall not be bound by any additional or different 332 | terms or conditions communicated by You unless expressly agreed. 333 | 334 | b. Any arrangements, understandings, or agreements regarding the 335 | Licensed Material not stated herein are separate from and 336 | independent of the terms and conditions of this Public License. 337 | 338 | 339 | Section 8 -- Interpretation. 340 | 341 | a. For the avoidance of doubt, this Public License does not, and 342 | shall not be interpreted to, reduce, limit, restrict, or impose 343 | conditions on any use of the Licensed Material that could lawfully 344 | be made without permission under this Public License. 345 | 346 | b. 
To the extent possible, if any provision of this Public License is 347 | deemed unenforceable, it shall be automatically reformed to the 348 | minimum extent necessary to make it enforceable. If the provision 349 | cannot be reformed, it shall be severed from this Public License 350 | without affecting the enforceability of the remaining terms and 351 | conditions. 352 | 353 | c. No term or condition of this Public License will be waived and no 354 | failure to comply consented to unless expressly agreed to by the 355 | Licensor. 356 | 357 | d. Nothing in this Public License constitutes or may be interpreted 358 | as a limitation upon, or waiver of, any privileges and immunities 359 | that apply to the Licensor or You, including from the legal 360 | processes of any jurisdiction or authority. 361 | 362 | ======================================================================= 363 | 364 | Creative Commons is not a party to its public 365 | licenses. Notwithstanding, Creative Commons may elect to apply one of 366 | its public licenses to material it publishes and in those instances 367 | will be considered the “Licensor.” The text of the Creative Commons 368 | public licenses is dedicated to the public domain under the CC0 Public 369 | Domain Dedication. Except for the limited purpose of indicating that 370 | material is shared under a Creative Commons public license or as 371 | otherwise permitted by the Creative Commons policies published at 372 | creativecommons.org/policies, Creative Commons does not authorize the 373 | use of the trademark "Creative Commons" or any other trademark or logo 374 | of Creative Commons without its prior written consent including, 375 | without limitation, in connection with any unauthorized modifications 376 | to any of its public licenses or any other arrangements, 377 | understandings, or agreements concerning use of licensed material. For 378 | the avoidance of doubt, this paragraph does not form part of the 379 | public licenses. 
380 | 381 | Creative Commons may be contacted at creativecommons.org. 382 | --------------------------------------------------------------------------------