├── test ├── __init__.py ├── config │ ├── database_global.json │ └── database_config.json ├── test_port_util.py ├── sonic_db_dump_load_test.py └── test_moduleLoad.py ├── setup.cfg ├── pytest.ini ├── .github ├── codeql │ └── codeql-config.yml └── workflows │ ├── semgrep.yml │ └── codeql-analysis.yml ├── MAINTAINERS ├── LICENSE ├── src └── swsssdk │ ├── __init__.py │ ├── config │ └── database_config.json │ ├── exceptions.py │ ├── util.py │ ├── sonic_db_dump_load.py │ ├── port_util.py │ ├── dbconnector.py │ ├── interface.py │ └── configdb.py ├── setup.py ├── README.md ├── azure-pipelines.yml ├── .gitignore └── ThirdPartyLicenses.txt /test/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --cov=src --cov-report html --cov-report term --cov-report xml --junitxml=test-results.xml -vv 3 | -------------------------------------------------------------------------------- /.github/codeql/codeql-config.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL config" 2 | queries: 3 | - uses: security-and-quality 4 | - uses: security-extended 5 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | # This file describes the maintainers for sonic-py-swsssdk 2 | # See the SONiC project governance document for more information 3 | 4 | Name = "Thomas Booth" 5 | Email = "thomasbo@microsoft.com" 6 | Github = tombo315 7 | Mailinglist = sonicproject@googlegroups.com 8 | -------------------------------------------------------------------------------- /.github/workflows/semgrep.yml: -------------------------------------------------------------------------------- 1 | name: Semgrep 2 | 3 | on: 4 | pull_request: {} 5 | push: 6 | branches: 7 | - master 8 | - '201[7-9][0-1][0-9]' 9 | - '202[0-9][0-1][0-9]' 10 | 11 | jobs: 12 | semgrep: 13 | if: github.repository_owner == 'sonic-net' 14 | name: Semgrep 15 | runs-on: ubuntu-latest 16 | container: 17 | image: returntocorp/semgrep 18 | steps: 19 | - uses: actions/checkout@v3 20 | - run: semgrep ci 21 | env: 22 | SEMGREP_RULES: p/default 23 | -------------------------------------------------------------------------------- /test/config/database_global.json: -------------------------------------------------------------------------------- 1 | { 2 | "INCLUDES" : [ 3 | { 4 | "include" : "database_config.json" 5 | }, 6 | { 7 | "namespace" : "asic0", 8 | "include" : "../config/database_config.json" 9 | }, 10 | { 11 | "namespace" : "asic1", 12 | "include" : "../config/database_config.json" 13 | }, 14 | { 15 | "namespace" : "asic2", 16 | "include" : "database_config.json" 17 | } 18 | ], 19 | "VERSION" : "1.0" 20 | } 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2016 Microsoft, Inc 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /test/test_port_util.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | if sys.version_info.major == 3: 5 | from unittest import mock 6 | else: 7 | import mock 8 | 9 | modules_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 10 | sys.path.insert(0, os.path.join(modules_path, 'src')) 11 | 12 | class TestPortUtil: 13 | def test_get_vlan_interface_oid_map(self): 14 | db = mock.MagicMock() 15 | db.get_all = mock.MagicMock() 16 | db.get_all.return_value = {} 17 | 18 | from swsssdk.port_util import get_vlan_interface_oid_map 19 | assert not get_vlan_interface_oid_map(db, True) 20 | -------------------------------------------------------------------------------- /src/swsssdk/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility library for Switch-state Redis database access and syslog reporting. 3 | """ 4 | import sys 5 | import logging 6 | 7 | logger = logging.getLogger(__name__) 8 | logger.setLevel(logging.INFO) 9 | logger.addHandler(logging.NullHandler()) 10 | 11 | if ('unittest' not in sys.modules.keys() and 12 | 'mockredis' not in sys.modules.keys() and 13 | 'mock' not in sys.modules.keys()): 14 | msg = "sonic-py-swsssdk been deprecated, please switch to sonic-swss-common." 15 | logger.exception(msg) 16 | raise ImportError("sonic-py-swsssdk been deprecated, please switch to sonic-swss-common.") 17 | 18 | try: 19 | from .dbconnector import SonicDBConfig, SonicV2Connector 20 | from .configdb import ConfigDBConnector, ConfigDBPipeConnector 21 | from .sonic_db_dump_load import sonic_db_dump_load 22 | except (KeyError, ValueError): 23 | msg = "Failed to database connector objects -- incorrect database config schema." 24 | logger.exception(msg) 25 | raise RuntimeError(msg) 26 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For more infomation, please visit: https://github.com/github/codeql-action 2 | 3 | name: "CodeQL" 4 | 5 | on: 6 | push: 7 | branches: 8 | - 'master' 9 | - '202[0-9][0-9][0-9]' 10 | pull_request_target: 11 | branches: 12 | - 'master' 13 | - '202[0-9][0-9][0-9]' 14 | 15 | jobs: 16 | analyze: 17 | name: Analyze 18 | runs-on: ubuntu-latest 19 | permissions: 20 | actions: read 21 | contents: read 22 | security-events: write 23 | 24 | strategy: 25 | fail-fast: false 26 | matrix: 27 | language: [ 'python' ] 28 | 29 | steps: 30 | - name: Checkout repository 31 | uses: actions/checkout@v3 32 | 33 | # Initializes the CodeQL tools for scanning. 
34 | - name: Initialize CodeQL 35 | uses: github/codeql-action/init@v2 36 | with: 37 | config-file: ./.github/codeql/codeql-config.yml 38 | languages: ${{ matrix.language }} 39 | 40 | - name: Perform CodeQL Analysis 41 | uses: github/codeql-action/analyze@v2 42 | with: 43 | category: "/language:${{matrix.language}}" 44 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | dependencies = [ 4 | 'redis>=4.5.4;python_version >= "3.0"', 5 | 'redis>=3.5.3;python_version < "3.0"', 6 | 'redis-dump-load', 7 | ] 8 | 9 | high_performance_deps = [ 10 | 'hiredis>=0.1.4' 11 | ] 12 | 13 | console_scripts={ 14 | 'sonic-db-load = swsssdk:sonic_db_dump_load', 15 | 'sonic-db-dump = swsssdk:sonic_db_dump_load', 16 | } 17 | 18 | setup( 19 | name='swsssdk', 20 | version='2.0.1', 21 | package_dir={'swsssdk': 'src/swsssdk'}, 22 | packages=['swsssdk'], 23 | package_data={'swsssdk': ['config/*.json']}, 24 | scripts=[], 25 | license='Apache 2.0', 26 | author='SONiC Team', 27 | author_email='linuxnetdev@microsoft.com', 28 | maintainer="Thomas Booth", 29 | maintainer_email='thomasbo@microsoft.com', 30 | description='Switch State service Python utility library.', 31 | install_requires=dependencies, 32 | extras_require={ 33 | 'high_perf': high_performance_deps 34 | }, 35 | entry_points={}, 36 | classifiers=[ 37 | 'Intended Audience :: Developers', 38 | 'Operating System :: Linux', 39 | 'Programming Language :: Python :: 2.7', 40 | 'Programming Language :: Python :: 3.5', 41 | 'Programming Language :: Python', 42 | ] 43 | ) 44 | -------------------------------------------------------------------------------- /test/sonic_db_dump_load_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import pytest 4 | if sys.version_info.major == 3: 5 | from unittest import mock 6 | else: 7 | import mock 8 | from mock import patch, MagicMock 9 | 10 | modules_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 11 | sys.path.insert(0, os.path.join(modules_path, 'src')) 12 | import swsssdk 13 | 14 | class TestSonicDbDumpLoad(object): 15 | def setup(self): 16 | print("SETUP") 17 | 18 | @patch('optparse.OptionParser.print_help') 19 | @patch('optparse.OptionParser.parse_args', MagicMock(return_value=('options', ['-p']))) 20 | @patch('sys.argv', ['dump']) 21 | def test_sonic_db_dump_exit(self, mock_print_help): 22 | with pytest.raises(SystemExit) as e: 23 | swsssdk.sonic_db_dump_load() 24 | mock_print_help.assert_called_once() 25 | assert e.value.code == 4 26 | 27 | @patch('optparse.OptionParser.print_help') 28 | @patch('optparse.OptionParser.parse_args', MagicMock(return_value=('options', ['-p', '-o']))) 29 | @patch('sys.argv', ['load']) 30 | def test_sonic_db_load_exit(self, mock_print_help): 31 | with pytest.raises(SystemExit) as e: 32 | swsssdk.sonic_db_dump_load() 33 | mock_print_help.assert_called_once() 34 | assert e.value.code == 4 35 | 36 | def teardown(self): 37 | print("TEARDOWN") 38 | 39 | -------------------------------------------------------------------------------- /test/config/database_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "INSTANCES": { 3 | "redis":{ 4 | "hostname" : "127.0.0.1", 5 | "port" : 6379, 6 | "unix_socket_path" : "/var/run/redis/redis.sock" 7 | } 8 | }, 9 | "DATABASES" : { 10 | "APPL_DB" : { 11 | "id" : 0, 12 
| "separator": ":", 13 | "instance" : "redis" 14 | }, 15 | "ASIC_DB" : { 16 | "id" : 1, 17 | "separator": ":", 18 | "instance" : "redis" 19 | }, 20 | "COUNTERS_DB" : { 21 | "id" : 2, 22 | "separator": ":", 23 | "instance" : "redis" 24 | }, 25 | "CONFIG_DB" : { 26 | "id" : 4, 27 | "separator": "|", 28 | "instance" : "redis" 29 | }, 30 | "PFC_WD_DB" : { 31 | "id" : 5, 32 | "separator": ":", 33 | "instance" : "redis" 34 | }, 35 | "FLEX_COUNTER_DB" : { 36 | "id" : 5, 37 | "separator": ":", 38 | "instance" : "redis" 39 | }, 40 | "STATE_DB" : { 41 | "id" : 6, 42 | "separator": "|", 43 | "instance" : "redis" 44 | }, 45 | "SNMP_OVERLAY_DB" : { 46 | "id" : 7, 47 | "separator": "|", 48 | "instance" : "redis" 49 | } 50 | }, 51 | "VERSION" : "1.0" 52 | } 53 | -------------------------------------------------------------------------------- /src/swsssdk/config/database_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "INSTANCES": { 3 | "redis":{ 4 | "hostname" : "127.0.0.1", 5 | "port" : 6379, 6 | "unix_socket_path" : "/var/run/redis/redis.sock" 7 | } 8 | }, 9 | "DATABASES" : { 10 | "APPL_DB" : { 11 | "id" : 0, 12 | "separator": ":", 13 | "instance" : "redis" 14 | }, 15 | "ASIC_DB" : { 16 | "id" : 1, 17 | "separator": ":", 18 | "instance" : "redis" 19 | }, 20 | "COUNTERS_DB" : { 21 | "id" : 2, 22 | "separator": ":", 23 | "instance" : "redis" 24 | }, 25 | "CONFIG_DB" : { 26 | "id" : 4, 27 | "separator": "|", 28 | "instance" : "redis" 29 | }, 30 | "PFC_WD_DB" : { 31 | "id" : 5, 32 | "separator": ":", 33 | "instance" : "redis" 34 | }, 35 | "FLEX_COUNTER_DB" : { 36 | "id" : 5, 37 | "separator": ":", 38 | "instance" : "redis" 39 | }, 40 | "STATE_DB" : { 41 | "id" : 6, 42 | "separator": "|", 43 | "instance" : "redis" 44 | }, 45 | "SNMP_OVERLAY_DB" : { 46 | "id" : 7, 47 | "separator": "|", 48 | "instance" : "redis" 49 | } 50 | }, 51 | "VERSION" : "1.0" 52 | } 53 | -------------------------------------------------------------------------------- /src/swsssdk/exceptions.py: -------------------------------------------------------------------------------- 1 | class SwSSQueryError(Exception): 2 | """ Base exception class """ 3 | 4 | 5 | class UnavailableDataError(SwSSQueryError): 6 | def __init__(self, message, data, *args, **kwargs): 7 | super(UnavailableDataError, self).__init__(message, *args, **kwargs) 8 | """ 9 | In Python2: 10 | # default strings are ascii (or byte [b'']) strings 11 | >>> type(b'port_name_map') 12 | 13 | >>> type('port_name_map') 14 | 15 | >>> 'port_name_map'.encode('ascii') 16 | 'port_name_map' 17 | >>> type('test') is bytes 18 | True 19 | 20 | In Python3: 21 | # default strings are unicode 22 | >>> type('port_name_map') 23 | 24 | >>> type(b'port_name_map') 25 | 26 | >>> 'port_name_map'.encode('ascii') 27 | b'port_name_map' 28 | >>> type(b'test') is bytes 29 | True 30 | 31 | 32 | Redis /always/ utilizes byte-strings regardless of Python version. 33 | This cast ensures consistency across all platforms when waiting for available data. 34 | 35 | { 36 | 'data': b'hset', 37 | 'channel': b'__keyspace@0__:0000000100000020', 38 | 'pattern': b'__key*__:*', 39 | 'type': 'pmessage' 40 | } 41 | """ 42 | self.data = data if type(data) is bytes else data.encode('ascii') 43 | 44 | 45 | class MissingClientError(SwSSQueryError): 46 | """ Raised when a queried client wasn't found. 
""" 47 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Total alerts](https://img.shields.io/lgtm/alerts/g/Azure/sonic-py-swsssdk.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-py-swsssdk/alerts/) 2 | [![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Azure/sonic-py-swsssdk.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-py-swsssdk/context:python) 3 | 4 | # Python SwSS SDK 5 | Python utility library for SONiC Switch State Service database access 6 | 7 | See the [SONiC Website](http://azure.github.io/SONiC/) for more information about the SONiC project 8 | 9 | Database names are defined by Switch State Service. See the [sonic-swss-common](https://github.com/Azure/sonic-swss-common/blob/master/common/schema.h) project. 10 | 11 | ### Example Usage 12 | ```python 13 | >>> import swsssdk 14 | >>> swss = swsssdk.SonicV2Connector() 15 | >>> swss.db_list 16 | dict_keys(['COUNTERS_DB', 'ASIC_DB', 'APPL_DB']) 17 | >>> dir(swss) 18 | ['APPL_DB', 'ASIC_DB', 'CONNECT_RETRY_WAIT_TIME', 'COUNTERS_DB', 'DATA_RETRIEVAL_WAIT_TIME', 'KEYSPACE_EVENTS', 'KEYSPACE_PATTERN', 'PUB_SUB_MAXIMUM_DATA_WAIT', 'PUB_SUB_NOTIFICATION_TIMEOUT', 'REDIS_HOST', 'REDIS_PORT', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_connection_error_handler', '_onetime_connect', '_persistent_connect', '_subscribe_keyspace_notification', '_unavailable_data_handler', 'close', 'connect', 'db_list', 'db_map', 'get', 'get_all', 'get_dbid', 'get_redis_client', 'keys', 'keyspace_notification_channels', 'redis_clients', 'set'] 19 | >>> swss.connect(swss.APPL_DB) 20 | >>> swss.keys(swss.APPL_DB) 21 | [b'PORT_TABLE:Ethernet8', b'INTF_TABLE:Ethernet16:10.0.0.8/31', b'LLDP_ENTRY_TABLE:Ethernet4', b'PORT_TABLE:Ethernet76', b'PORT_TABLE_VALUE_QUEUE', b'NEIGH_TABLE:eth0:10.3.147.40', ...] 22 | ``` 23 | -------------------------------------------------------------------------------- /test/test_moduleLoad.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | modules_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 5 | sys.path.insert(0, os.path.join(modules_path, 'src')) 6 | 7 | from unittest import TestCase 8 | import subprocess 9 | 10 | class Test_load_sonic_db_config(TestCase): 11 | def test__db_map_attributes(self): 12 | import swsssdk 13 | db = swsssdk.SonicV2Connector() 14 | self.assertTrue(all(hasattr(db, db_name) for db_name in db.get_db_list())) 15 | 16 | # This is the test to check if the global config file extraction of namespace is correct. 
17 | def test__namespace_list(self): 18 | import swsssdk 19 | dbConfig = swsssdk.SonicDBConfig() 20 | filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), './config', 'database_global.json') 21 | dbConfig.load_sonic_global_db_config(global_db_file_path=filepath) 22 | ns_input = ['', 'asic0', 'asic1', 'asic2'] 23 | ns_list = list(dbConfig.get_ns_list()) 24 | ns_input.sort() 25 | ns_list.sort() 26 | self.assertEqual(ns_input, ns_list) 27 | 28 | # This is the test to check if the global config file and get the correct DB in a namespace 29 | def test__dbConfig(self): 30 | import swsssdk 31 | dbConfig = swsssdk.SonicDBConfig() 32 | filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), './config', 'database_global.json') 33 | dbConfig.load_sonic_global_db_config(global_db_file_path=filepath) 34 | for namespace in list(dbConfig.get_ns_list()): 35 | self.assertEqual(dbConfig.get_dbid('PFC_WD_DB', namespace), 5) 36 | self.assertEqual(dbConfig.get_dbid('APPL_DB', namespace), 0) 37 | 38 | def test_BlockUseSwsssdk(): 39 | # Import swsssdk will throw exception with deprecated message. 40 | swsssdk_path = os.path.join(modules_path, 'src') 41 | result = None 42 | python_command = "python" 43 | 44 | if sys.version_info.major == 3: 45 | python_command = "python3" 46 | 47 | try: 48 | subprocess.check_output([python_command, "-c", "import swsssdk;exit()"], stderr=subprocess.STDOUT, cwd=swsssdk_path) 49 | except subprocess.CalledProcessError as e: 50 | result = e.output.decode("utf-8") 51 | 52 | assert "deprecated" in result 53 | -------------------------------------------------------------------------------- /src/swsssdk/util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Syslog and daemon script utility library. 3 | """ 4 | 5 | from __future__ import print_function 6 | import json 7 | import logging 8 | import logging.config 9 | import sys 10 | from getopt import getopt 11 | 12 | 13 | # TODO: move to dbsync project. 14 | def usage(script_name): 15 | print('Usage: python ', script_name, 16 | '-t [host] -p [port] -s [unix_socket_path] -d [logging_level] -f [update_frequency] -h [help]') 17 | 18 | 19 | # TODO: move to dbsync project. 20 | def process_options(script_name): 21 | """ 22 | Process command line options 23 | """ 24 | options, remainders = getopt(sys.argv[1:], "t:p:s:d:f:h", ["host=", "port=", "unix_socket_path=", "debug=", "frequency=", "help"]) 25 | 26 | args = {} 27 | for (opt, arg) in options: 28 | try: 29 | if opt in ('-d', '--debug'): 30 | args['log_level'] = int(arg) 31 | elif opt in ('-t', '--host'): 32 | args['host'] = arg 33 | elif opt in ('-p', '--port'): 34 | args['port'] = int(arg) 35 | elif opt in ('-s', 'unix_socket_path'): 36 | args['unix_socket_path'] = arg 37 | elif opt in ('-f', '--frequency'): 38 | args['update_frequency'] = int(arg) 39 | elif opt in ('-h', '--help'): 40 | usage(script_name) 41 | except ValueError as e: 42 | print('Invalid option for {}: {}'.format(opt, e)) 43 | sys.exit(1) 44 | 45 | return args 46 | 47 | 48 | # TODO: move 49 | def setup_logging(config_file_path, log_level=logging.INFO): 50 | """ 51 | Logging configuration helper. 52 | 53 | :param config_file_path: file path to logging configuration file. 54 | https://docs.python.org/3/library/logging.config.html#object-connections 55 | :param log_level: defaults to logging.INFO 56 | :return: None - access the logger by name as described in the config--or the "root" logger as a backup. 
57 | """ 58 | try: 59 | with open(config_file_path, 'rt') as f: 60 | config = json.load(f) 61 | logging.config.dictConfig(config) 62 | except (ValueError, IOError, OSError): 63 | # json.JSONDecodeError is throwable in Python3.5+ -- subclass of ValueError 64 | logging.basicConfig(log_level=log_level) 65 | logging.root.exception( 66 | "Could not load specified logging configuration '{}'. Verify the filepath exists and is compliant with: " 67 | "[https://docs.python.org/3/library/logging.config.html#object-connections]".format(config_file_path)) 68 | -------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | # Starter pipeline 2 | # Start with a minimal pipeline that you can customize to build and deploy your code. 3 | # Add steps that build, run tests, deploy, and more: 4 | # https://aka.ms/yaml 5 | 6 | trigger: 7 | branches: 8 | include: 9 | - master 10 | 11 | pr: 12 | branches: 13 | include: 14 | - master 15 | 16 | stages: 17 | - stage: Build 18 | jobs: 19 | - job: 20 | displayName: "build" 21 | timeoutInMinutes: 60 22 | variables: 23 | DIFF_COVER_CHECK_THRESHOLD: 80 24 | DIFF_COVER_ENABLE: 'true' 25 | pool: 26 | vmImage: ubuntu-latest 27 | 28 | container: 29 | image: sonicdev-microsoft.azurecr.io:443/sonic-slave-buster:latest 30 | 31 | steps: 32 | - checkout: self 33 | clean: true 34 | submodules: recursive 35 | displayName: 'Checkout code' 36 | - script: | 37 | set -x 38 | wget https://packages.microsoft.com/config/debian/10/packages-microsoft-prod.deb -O packages-microsoft-prod.deb 39 | sudo dpkg -i packages-microsoft-prod.deb 40 | sudo apt-get update 41 | sudo apt-get install -y dotnet-sdk-5.0 42 | # Note: pytest-azurepipelines 1.0.3 requires new version of pytest (dist version 3.10.1), which will break pytest-cov 2.6.0 43 | sudo pip3 install pytest pytest-azurepipelines==0.8.0 44 | sudo pip3 install pytest-cov 45 | displayName: "Install build tools" 46 | - script: | 47 | set -ex 48 | 49 | python3 setup.py bdist_wheel 50 | mkdir -p $(Build.ArtifactStagingDirectory)/target/python-wheels 51 | cp dist/*.whl $(Build.ArtifactStagingDirectory)/target/python-wheels/ 52 | displayName: "Build" 53 | - script: | 54 | set -ex 55 | 56 | sudo python3 -m pip install dist/swsssdk-2.0.1-py3-none-any.whl 57 | python3 -m pytest --doctest-modules --junitxml=junit/test-results.xml --cov=. 
--cov-report=xml 58 | displayName: "Unit tests" 59 | - publish: $(Build.ArtifactStagingDirectory) 60 | artifact: sonic-py-swsssdk 61 | displayName: "Archive artifacts" 62 | - task: PublishTestResults@2 63 | inputs: 64 | testResultsFiles: '**/test-*.xml' 65 | testRunTitle: Python 3 66 | failTaskOnFailedTests: true 67 | condition: succeededOrFailed() 68 | displayName: 'Publish Python 3 test results' 69 | - task: PublishCodeCoverageResults@1 70 | inputs: 71 | codeCoverageTool: Cobertura 72 | summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml' 73 | additionalCodeCoverageFiles: '$(System.DefaultWorkingDirectory)/**/*.coverage' 74 | displayName: 'Publish Python 3 test coverage' 75 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | ### Python template 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | env/ 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *,cover 48 | .hypothesis/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | 58 | # Flask stuff: 59 | instance/ 60 | .webassets-cache 61 | 62 | # Scrapy stuff: 63 | .scrapy 64 | 65 | # Sphinx documentation 66 | docs/_build/ 67 | 68 | # PyBuilder 69 | target/ 70 | 71 | # IPython Notebook 72 | .ipynb_checkpoints 73 | 74 | # pyenv 75 | .python-version 76 | 77 | # celery beat schedule file 78 | celerybeat-schedule 79 | 80 | # dotenv 81 | .env 82 | 83 | # virtualenv 84 | venv/ 85 | ENV/ 86 | 87 | # Spyder project settings 88 | .spyderproject 89 | 90 | # Rope project settings 91 | .ropeproject 92 | 93 | ### JetBrains template 94 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 95 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 96 | 97 | # User-specific stuff: 98 | .idea/workspace.xml 99 | .idea/tasks.xml 100 | .idea/dictionaries 101 | .idea/vcs.xml 102 | .idea/jsLibraryMappings.xml 103 | 104 | # Sensitive or high-churn files: 105 | .idea/dataSources.ids 106 | .idea/dataSources.xml 107 | .idea/dataSources.local.xml 108 | .idea/sqlDataSources.xml 109 | .idea/dynamic.xml 110 | .idea/uiDesigner.xml 111 | 112 | # Gradle: 113 | .idea/gradle.xml 114 | .idea/libraries 115 | 116 | # Mongo Explorer plugin: 117 | .idea/mongoSettings.xml 118 | 119 | ## File-based project format: 120 | *.iws 121 | 122 | ## Plugin-specific files: 123 | 124 | # IntelliJ 125 | /out/ 126 | 127 | # mpeltonen/sbt-idea plugin 128 | .idea_modules/ 129 | 130 | # JIRA plugin 131 | atlassian-ide-plugin.xml 132 | 133 | # Crashlytics plugin (for Android Studio and IntelliJ) 134 | com_crashlytics_export_strings.xml 135 | crashlytics.properties 136 | crashlytics-build.properties 137 | fabric.properties 
138 | 139 | -------------------------------------------------------------------------------- /ThirdPartyLicenses.txt: -------------------------------------------------------------------------------- 1 | Third Party Notices 2 | 3 | This Microsoft Open Source project incorporates material from the project(s) listed below 4 | (Third Party Code). Microsoft is not the original author of the Third Party Code. Microsoft 5 | reserves all other rights not expressly granted, whether by implication, estoppel or otherwise. 6 | 7 | 1. redis-py https://github.com/andymccurdy/redis-py 8 | 2. pyagentx https://github.com/rayed/pyagentx 9 | 3. redis-dump-load https://github.com/p/redis-dump-load 10 | 11 | 1. redis-py https://github.com/andymccurdy/redis-py 12 | Copyright (c) 2012 Andy McCurdy 13 | 14 | Permission is hereby granted, free of charge, to any person 15 | obtaining a copy of this software and associated documentation 16 | files (the "Software"), to deal in the Software without 17 | restriction, including without limitation the rights to use, 18 | copy, modify, merge, publish, distribute, sublicense, and/or sell 19 | copies of the Software, and to permit persons to whom the 20 | Software is furnished to do so, subject to the following 21 | conditions: 22 | 23 | The above copyright notice and this permission notice shall be 24 | included in all copies or substantial portions of the Software. 25 | 26 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 28 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 30 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 31 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 32 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 33 | OTHER DEALINGS IN THE SOFTWARE. 34 | 35 | 36 | 2. pyagentx https://github.com/rayed/pyagentx 37 | Copyright (c) 2013, Rayed A Alrashed 38 | All rights reserved. 39 | 40 | Redistribution and use in source and binary forms, with or without 41 | modification, are permitted provided that the following conditions are met: 42 | 43 | 1. Redistributions of source code must retain the above copyright notice, this 44 | list of conditions and the following disclaimer. 45 | 2. Redistributions in binary form must reproduce the above copyright notice, 46 | this list of conditions and the following disclaimer in the documentation 47 | and/or other materials provided with the distribution. 48 | 49 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 50 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 51 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 52 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 53 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 54 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 55 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 56 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 57 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 58 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 59 | 60 | 61 | 3. redis-dump-load https://github.com/p/redis-dump-load 62 | Copyright (c) 2011-2016 Oleg Pudeyev 63 | All rights reserved. 
64 | 65 | Redistribution and use in source and binary forms, with or without 66 | modification, are permitted provided that the following conditions are met: 67 | 68 | 1. Redistributions of source code must retain the above copyright notice, this 69 | list of conditions and the following disclaimer. 70 | 71 | 2. Redistributions in binary form must reproduce the above copyright notice, 72 | this list of conditions and the following disclaimer in the documentation 73 | and/or other materials provided with the distribution. 74 | 75 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 76 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 77 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 78 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 79 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 80 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 81 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 82 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 83 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 84 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 85 | -------------------------------------------------------------------------------- /src/swsssdk/sonic_db_dump_load.py: -------------------------------------------------------------------------------- 1 | ## ref: https://github.com/p/redis-dump-load/blob/7bbdb1eaea0a51ed4758d3ce6ca01d497a4e7429/redisdl.py 2 | 3 | def sonic_db_dump_load(): 4 | import optparse 5 | import os.path 6 | import re 7 | import sys 8 | from redisdl import dump, load 9 | from swsssdk import SonicDBConfig 10 | 11 | DUMP = 1 12 | LOAD = 2 13 | 14 | def options_to_kwargs(options): 15 | args = {} 16 | if options.password: 17 | args['password'] = options.password 18 | if options.encoding: 19 | args['encoding'] = options.encoding 20 | # dump only 21 | if hasattr(options, 'pretty') and options.pretty: 22 | args['pretty'] = True 23 | if hasattr(options, 'keys') and options.keys: 24 | args['keys'] = options.keys 25 | # load only 26 | if hasattr(options, 'use_expireat') and options.use_expireat: 27 | args['use_expireat'] = True 28 | if hasattr(options, 'empty') and options.empty: 29 | args['empty'] = True 30 | if hasattr(options, 'backend') and options.backend: 31 | args['streaming_backend'] = options.backend 32 | if hasattr(options, 'dbname') and options.dbname: 33 | if options.conntype == 'tcp': 34 | args['host'] = SonicDBConfig.get_hostname(options.dbname) 35 | args['port'] = SonicDBConfig.get_port(options.dbname) 36 | args['db'] = SonicDBConfig.get_dbid(options.dbname) 37 | args['unix_socket_path'] = None 38 | elif options.conntype == "unix_socket": 39 | args['host'] = None 40 | args['port'] = None 41 | args['db'] = SonicDBConfig.get_dbid(options.dbname) 42 | args['unix_socket_path'] = SonicDBConfig.get_socket(options.dbname) 43 | else: 44 | raise TypeError('redis connection type is tcp or unix_socket') 45 | 46 | return args 47 | 48 | def do_dump(options): 49 | if options.output: 50 | output = open(options.output, 'w') 51 | else: 52 | output = sys.stdout 53 | 54 | kwargs = options_to_kwargs(options) 55 | dump(output, **kwargs) 56 | 57 | if options.output: 58 | output.close() 59 | 60 | def do_load(options, args): 61 | if len(args) > 0: 62 | input = open(args[0], 'rb') 63 | else: 64 | input = sys.stdin 65 | 66 
| kwargs = options_to_kwargs(options) 67 | load(input, **kwargs) 68 | 69 | if len(args) > 0: 70 | input.close() 71 | 72 | script_name = os.path.basename(sys.argv[0]) 73 | if re.search(r'load(?:$|\.)', script_name): 74 | action = help = LOAD 75 | elif re.search(r'dump(?:$|\.)', script_name): 76 | action = help = DUMP 77 | else: 78 | # default is dump, however if dump is specifically requested 79 | # we don't show help text for toggling between dumping and loading 80 | action = DUMP 81 | help = None 82 | 83 | if help == LOAD: 84 | usage = "Usage: %prog [options] [FILE]" 85 | usage += "\n\nLoad data from FILE (which must be a JSON dump previously created" 86 | usage += "\nby redisdl) into specified or default redis." 87 | usage += "\n\nIf FILE is omitted standard input is read." 88 | elif help == DUMP: 89 | usage = "Usage: %prog [options]" 90 | usage += "\n\nDump data from specified or default redis." 91 | usage += "\n\nIf no output file is specified, dump to standard output." 92 | else: 93 | usage = "Usage: %prog [options]" 94 | usage += "\n %prog -l [options] [FILE]" 95 | usage += "\n\nDump data from redis or load data into redis." 96 | usage += "\n\nIf input or output file is specified, dump to standard output and load" 97 | usage += "\nfrom standard input." 98 | parser = optparse.OptionParser(usage=usage) 99 | parser.add_option('-w', '--password', help='connect with PASSWORD') 100 | if help == DUMP: 101 | parser.add_option('-n', '--dbname', help='dump DATABASE (APPL_DB/ASIC_DB...)') 102 | parser.add_option('-t', '--conntype', help='indicate redis connection type (tcp[default] or unix_socket)', default='tcp') 103 | parser.add_option('-k', '--keys', help='dump only keys matching specified glob-style pattern') 104 | parser.add_option('-o', '--output', help='write to OUTPUT instead of stdout') 105 | parser.add_option('-y', '--pretty', help='split output on multiple lines and indent it', action='store_true') 106 | parser.add_option('-E', '--encoding', help='set encoding to use while decoding data from redis', default='utf-8') 107 | elif help == LOAD: 108 | parser.add_option('-n', '--dbname', help='dump DATABASE (APPL_DB/ASIC_DB...)') 109 | parser.add_option('-t', '--conntype', help='indicate redis connection type (tcp[default] or unix_socket)', default='tcp') 110 | parser.add_option('-e', '--empty', help='delete all keys in destination db prior to loading', action='store_true') 111 | parser.add_option('-E', '--encoding', help='set encoding to use while encoding data to redis', default='utf-8') 112 | parser.add_option('-B', '--backend', help='use specified streaming backend') 113 | parser.add_option('-A', '--use-expireat', help='use EXPIREAT rather than TTL/EXPIRE', action='store_true') 114 | else: 115 | parser.add_option('-l', '--load', help='load data into redis (default is to dump data from redis)', action='store_true') 116 | parser.add_option('-n', '--dbname', help='dump DATABASE (APPL_DB/ASIC_DB/COUNTERS_DB/CONFIG_DB...)') 117 | parser.add_option('-t', '--conntype', help='indicate redis connection type (tcp[default] or unix_socket)', default='tcp') 118 | parser.add_option('-k', '--keys', help='dump only keys matching specified glob-style pattern') 119 | parser.add_option('-o', '--output', help='write to OUTPUT instead of stdout (dump mode only)') 120 | parser.add_option('-y', '--pretty', help='split output on multiple lines and indent it (dump mode only)', action='store_true') 121 | parser.add_option('-e', '--empty', help='delete all keys in destination db prior to loading (load mode 
only)', action='store_true') 122 | parser.add_option('-E', '--encoding', help='set encoding to use while decoding data from redis', default='utf-8') 123 | parser.add_option('-A', '--use-expireat', help='use EXPIREAT rather than TTL/EXPIRE', action='store_true') 124 | parser.add_option('-B', '--backend', help='use specified streaming backend (load mode only)') 125 | options, args = parser.parse_args() 126 | 127 | if hasattr(options, 'load') and options.load: 128 | action = LOAD 129 | 130 | if action == DUMP: 131 | if len(args) > 0: 132 | parser.print_help() 133 | sys.exit(4) 134 | do_dump(options) 135 | else: 136 | if len(args) > 1: 137 | parser.print_help() 138 | sys.exit(4) 139 | do_load(options, args) 140 | -------------------------------------------------------------------------------- /src/swsssdk/port_util.py: -------------------------------------------------------------------------------- 1 | """ 2 | Bridge/Port mapping utility library. 3 | """ 4 | import swsssdk 5 | import re 6 | 7 | 8 | SONIC_ETHERNET_RE_PATTERN = "^Ethernet(\d+)$" 9 | """ 10 | Ethernet-BP refers to BackPlane interfaces 11 | in multi-asic platform. 12 | """ 13 | SONIC_ETHERNET_BP_RE_PATTERN = "^Ethernet-BP(\d+)$" 14 | SONIC_VLAN_RE_PATTERN = "^Vlan(\d+)$" 15 | SONIC_PORTCHANNEL_RE_PATTERN = "^PortChannel(\d+)$" 16 | SONIC_MGMT_PORT_RE_PATTERN = "^eth(\d+)$" 17 | SONIC_ETHERNET_IB_RE_PATTERN = "^Ethernet-IB(\d+)$" 18 | SONIC_ETHERNET_REC_RE_PATTERN = "^Ethernet-Rec(\d+)$" 19 | 20 | class BaseIdx: 21 | ethernet_base_idx = 1 22 | vlan_interface_base_idx = 2000 23 | ethernet_bp_base_idx = 9000 24 | portchannel_base_idx = 1000 25 | mgmt_port_base_idx = 10000 26 | ethernet_ib_base_idx = 11000 27 | ethernet_rec_base_idx = 12000 28 | 29 | def get_index(if_name): 30 | """ 31 | OIDs are 1-based, interfaces are 0-based, return the 1-based index 32 | Ethernet N = N + 1 33 | Vlan N = N + 2000 34 | Ethernet_BP N = N + 9000 35 | PortChannel N = N + 1000 36 | eth N = N + 10000 37 | Ethernet_IB N = N + 11000 38 | Ethernet_Rec N = N + 12000 39 | """ 40 | return get_index_from_str(if_name.decode()) 41 | 42 | 43 | def get_index_from_str(if_name): 44 | """ 45 | OIDs are 1-based, interfaces are 0-based, return the 1-based index 46 | Ethernet N = N + 1 47 | Vlan N = N + 2000 48 | Ethernet_BP N = N + 9000 49 | PortChannel N = N + 1000 50 | eth N = N + 10000 51 | Ethernet_IB N = N + 11000 52 | Ethernet_Rec N = N + 12000 53 | """ 54 | patterns = { 55 | SONIC_ETHERNET_RE_PATTERN: BaseIdx.ethernet_base_idx, 56 | SONIC_ETHERNET_BP_RE_PATTERN: BaseIdx.ethernet_bp_base_idx, 57 | SONIC_VLAN_RE_PATTERN: BaseIdx.vlan_interface_base_idx, 58 | SONIC_PORTCHANNEL_RE_PATTERN: BaseIdx.portchannel_base_idx, 59 | SONIC_MGMT_PORT_RE_PATTERN: BaseIdx.mgmt_port_base_idx, 60 | SONIC_ETHERNET_IB_RE_PATTERN: BaseIdx.ethernet_ib_base_idx, 61 | SONIC_ETHERNET_REC_RE_PATTERN: BaseIdx.ethernet_rec_base_idx 62 | } 63 | 64 | for pattern, baseidx in patterns.items(): 65 | match = re.match(pattern, if_name) 66 | if match: 67 | return int(match.group(1)) + baseidx 68 | 69 | def get_interface_oid_map(db, blocking=True): 70 | """ 71 | Get the Interface names from Counters DB 72 | """ 73 | db.connect('COUNTERS_DB') 74 | if_name_map = db.get_all('COUNTERS_DB', 'COUNTERS_PORT_NAME_MAP', blocking=blocking) 75 | if_lag_name_map = db.get_all('COUNTERS_DB', 'COUNTERS_LAG_NAME_MAP', blocking=blocking) 76 | if_name_map.update(if_lag_name_map) 77 | 78 | if not if_name_map: 79 | return {}, {} 80 | 81 | oid_pfx = len("oid:0x") 82 | if_name_map = {if_name: sai_oid[oid_pfx:] for 
if_name, sai_oid in if_name_map.items()} 83 | 84 | # TODO: remove the first branch after all SonicV2Connector are migrated to decode_responses 85 | if isinstance(db, swsssdk.SonicV2Connector) and db.dbintf.redis_kwargs.get('decode_responses', False) == False: 86 | get_index_func = get_index 87 | else: 88 | get_index_func = get_index_from_str 89 | 90 | if_id_map = {sai_oid: if_name for if_name, sai_oid in if_name_map.items() 91 | # only map the interface if it's a style understood to be a SONiC interface. 92 | if get_index_func(if_name) is not None} 93 | 94 | return if_name_map, if_id_map 95 | 96 | def get_bridge_port_map(db): 97 | """ 98 | Get the Bridge port mapping from ASIC DB 99 | """ 100 | db.connect('ASIC_DB') 101 | br_port_str = db.keys('ASIC_DB', "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT:*") 102 | if not br_port_str: 103 | return {} 104 | 105 | if_br_oid_map = {} 106 | offset = len("ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT:") 107 | oid_pfx = len("oid:0x") 108 | for br_s in br_port_str: 109 | # Example output: ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT:oid:0x3a000000000616 110 | br_port_id = br_s[(offset + oid_pfx):] 111 | ent = db.get_all('ASIC_DB', br_s, blocking=True) 112 | # TODO: remove the first branch after all SonicV2Connector are migrated to decode_responses 113 | if isinstance(db, swsssdk.SonicV2Connector) and db.dbintf.redis_kwargs.get('decode_responses', False) == False: 114 | if b"SAI_BRIDGE_PORT_ATTR_PORT_ID" in ent: 115 | port_id = ent[b"SAI_BRIDGE_PORT_ATTR_PORT_ID"][oid_pfx:] 116 | if_br_oid_map[br_port_id] = port_id 117 | else: 118 | if "SAI_BRIDGE_PORT_ATTR_PORT_ID" in ent: 119 | port_id = ent["SAI_BRIDGE_PORT_ATTR_PORT_ID"][oid_pfx:] 120 | if_br_oid_map[br_port_id] = port_id 121 | 122 | return if_br_oid_map 123 | 124 | def get_vlan_id_from_bvid(db, bvid): 125 | """ 126 | Get the Vlan Id from Bridge Vlan Object 127 | """ 128 | db.connect('ASIC_DB') 129 | vlan_obj = db.keys('ASIC_DB', str("ASIC_STATE:SAI_OBJECT_TYPE_VLAN:" + bvid)) 130 | vlan_entry = db.get_all('ASIC_DB', vlan_obj[0], blocking=True) 131 | vlan_id = None 132 | # TODO: remove the first branch after all SonicV2Connector are migrated to decode_responses 133 | if isinstance(db, swsssdk.SonicV2Connector) and db.dbintf.redis_kwargs.get('decode_responses', False) == False: 134 | if b"SAI_VLAN_ATTR_VLAN_ID" in vlan_entry: 135 | vlan_id = vlan_entry[b"SAI_VLAN_ATTR_VLAN_ID"] 136 | else: 137 | if "SAI_VLAN_ATTR_VLAN_ID" in vlan_entry: 138 | vlan_id = vlan_entry["SAI_VLAN_ATTR_VLAN_ID"] 139 | 140 | return vlan_id 141 | 142 | def get_rif_port_map(db): 143 | """ 144 | Get the RIF port mapping from ASIC DB 145 | """ 146 | db.connect('ASIC_DB') 147 | rif_keys_str = db.keys('ASIC_DB', "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:*") 148 | if not rif_keys_str: 149 | return {} 150 | 151 | rif_port_oid_map = {} 152 | for rif_s in rif_keys_str: 153 | rif_id = rif_s[len("ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x"):] 154 | ent = db.get_all('ASIC_DB', rif_s, blocking=True) 155 | # TODO: remove the first branch after all SonicV2Connector are migrated to decode_responses 156 | if isinstance(db, swsssdk.SonicV2Connector) and db.dbintf.redis_kwargs.get('decode_responses', False) == False: 157 | if b"SAI_ROUTER_INTERFACE_ATTR_PORT_ID" in ent: 158 | port_id = ent[b"SAI_ROUTER_INTERFACE_ATTR_PORT_ID"].lstrip(b"oid:0x") 159 | rif_port_oid_map[rif_id] = port_id 160 | else: 161 | if "SAI_ROUTER_INTERFACE_ATTR_PORT_ID" in ent: 162 | port_id = ent["SAI_ROUTER_INTERFACE_ATTR_PORT_ID"].lstrip("oid:0x") 163 | rif_port_oid_map[rif_id] = 
port_id 164 | 165 | return rif_port_oid_map 166 | 167 | def get_vlan_interface_oid_map(db, blocking=True): 168 | """ 169 | Get Vlan Interface names and sai oids 170 | """ 171 | db.connect('COUNTERS_DB') 172 | 173 | rif_name_map = db.get_all('COUNTERS_DB', 'COUNTERS_RIF_NAME_MAP', blocking=blocking) 174 | rif_type_name_map = db.get_all('COUNTERS_DB', 'COUNTERS_RIF_TYPE_MAP', blocking=blocking) 175 | 176 | if not rif_name_map or not rif_type_name_map: 177 | return {} 178 | 179 | oid_pfx = len("oid:0x") 180 | vlan_if_name_map = {} 181 | 182 | # TODO: remove the first branch after all SonicV2Connector are migrated to decode_responses 183 | if isinstance(db, swsssdk.SonicV2Connector) and db.dbintf.redis_kwargs.get('decode_responses', False) == False: 184 | get_index_func = get_index 185 | else: 186 | get_index_func = get_index_from_str 187 | 188 | for if_name, sai_oid in rif_name_map.items(): 189 | # Check if RIF is l3 vlan interface 190 | # TODO: remove the first candidate after all SonicV2Connector are migrated to decode_responses 191 | if rif_type_name_map[sai_oid] in (b'SAI_ROUTER_INTERFACE_TYPE_VLAN', 'SAI_ROUTER_INTERFACE_TYPE_VLAN'): 192 | # Check if interface name is in style understood to be a SONiC interface 193 | if get_index_func(if_name): 194 | vlan_if_name_map[sai_oid[oid_pfx:]] = if_name 195 | 196 | return vlan_if_name_map 197 | -------------------------------------------------------------------------------- /src/swsssdk/dbconnector.py: -------------------------------------------------------------------------------- 1 | """ 2 | Database connection module for SwSS 3 | """ 4 | import os 5 | import sys 6 | import json 7 | from . import logger 8 | from .interface import DBInterface 9 | 10 | PY3K = sys.version_info >= (3, 0) 11 | 12 | # FIXME: Convert to metaclasses when Py2 support is removed. Metaclasses have unique interfaces to Python2/Python3. 13 | 14 | class SonicDBConfig(object): 15 | SONIC_DB_GLOBAL_CONFIG_FILE = "/var/run/redis/sonic-db/database_global.json" 16 | SONIC_DB_CONFIG_FILE = "/var/run/redis/sonic-db/database_config.json" 17 | _sonic_db_config_dir = "/var/run/redis/sonic-db" 18 | _sonic_db_global_config_init = False 19 | _sonic_db_config_init = False 20 | _sonic_db_config = {} 21 | 22 | """This is the database_global.json parse and load API. This file has the namespace name and 23 | the corresponding database_config.json file. The global file is significant for the 24 | applications running in the linux host namespace, like eg: config/show cli, snmp etc which 25 | needs to connect to databases running in other namespaces. If the "namespace" attribute is not 26 | specified for an "include" attribute, it refers to the linux host namespace. 27 | If the user passes namespace parameter, this API loads json file for that namespace alone. 
28 | """ 29 | @staticmethod 30 | def load_sonic_global_db_config(global_db_file_path=SONIC_DB_GLOBAL_CONFIG_FILE, namespace=None): 31 | """ 32 | Parse and load the global database config json file 33 | """ 34 | if SonicDBConfig._sonic_db_global_config_init: 35 | return 36 | 37 | if os.path.isfile(global_db_file_path): 38 | global_db_config_dir = os.path.dirname(global_db_file_path) 39 | with open(global_db_file_path, "r") as read_file: 40 | all_ns_dbs = json.load(read_file) 41 | for entry in all_ns_dbs['INCLUDES']: 42 | if 'namespace' not in entry.keys(): 43 | # If the user already invoked load_sonic_db_config() explicitly to load the 44 | # database_config.json file for current namesapce, skip loading the file 45 | # referenced here in the global config file. 46 | if SonicDBConfig._sonic_db_config_init: 47 | continue 48 | ns = '' 49 | else: 50 | ns = entry['namespace'] 51 | 52 | # If API is called with a namespace parameter, load the json file only for that namespace. 53 | if namespace is not None and ns != namespace: 54 | continue 55 | 56 | # Check if _sonic_db_config already have this namespace present 57 | if ns in SonicDBConfig._sonic_db_config: 58 | msg = "The database_config for this namespace '{}' is already parsed. !!".format(ns) 59 | logger.warning(msg) 60 | continue 61 | 62 | db_include_file = os.path.join(global_db_config_dir, entry['include']) 63 | 64 | # Not finding the database_config.json file for the namespace 65 | if not os.path.isfile(db_include_file): 66 | msg = "'{}' file is not found !!".format(db_include_file) 67 | logger.warning(msg) 68 | continue 69 | 70 | # As we load the database_config.json file for current namesapce, 71 | # set the _sonic_db_config_init flag to True to prevent loading again 72 | # by the API load_sonic_db_config() 73 | if not ns: 74 | SonicDBConfig._sonic_db_config_init = True 75 | 76 | with open(db_include_file, "r") as inc_file: 77 | SonicDBConfig._sonic_db_config[ns] = json.load(inc_file) 78 | 79 | # If API is called with a namespace parameter,we break here as we loaded the json file. 80 | if namespace is not None and ns == namespace: 81 | break 82 | 83 | SonicDBConfig._sonic_db_global_config_init = True 84 | 85 | @staticmethod 86 | def load_sonic_db_config(sonic_db_file_path=SONIC_DB_CONFIG_FILE): 87 | """ 88 | Get multiple database config from the database_config.json 89 | """ 90 | if SonicDBConfig._sonic_db_config_init: 91 | return 92 | 93 | try: 94 | if not os.path.isfile(sonic_db_file_path): 95 | msg = "'{}' is not found, it is not expected in production devices!!".format(sonic_db_file_path) 96 | logger.warning(msg) 97 | sonic_db_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config', 'database_config.json') 98 | with open(sonic_db_file_path, "r") as read_file: 99 | # The database_config.json is loaded with '' as key. This refers to the local namespace. 100 | SonicDBConfig._sonic_db_config[''] = json.load(read_file) 101 | except (OSError, IOError): 102 | msg = "Could not open sonic database config file '{}'".format(sonic_db_file_path) 103 | logger.exception(msg) 104 | raise RuntimeError(msg) 105 | SonicDBConfig._sonic_db_config_init = True 106 | 107 | @staticmethod 108 | def isInit(): 109 | return SonicDBConfig._sonic_db_config_init 110 | 111 | @staticmethod 112 | def namespace_validation(namespace): 113 | # Check the namespace is valid. 
114 | if namespace is None: 115 | msg = "invalid namespace name given as input" 116 | logger.warning(msg) 117 | raise RuntimeError(msg) 118 | 119 | # Check if the global config is loaded entirely or for the namespace 120 | if namespace != '' and not SonicDBConfig._sonic_db_global_config_init: 121 | msg = "Load the global DB config first using API load_sonic_global_db_config" 122 | logger.warning(msg) 123 | raise RuntimeError(msg) 124 | 125 | if not SonicDBConfig._sonic_db_config_init: 126 | SonicDBConfig.load_sonic_db_config() 127 | 128 | if namespace not in SonicDBConfig._sonic_db_config: 129 | msg = "{} is not a valid namespace name in configuration file".format(namespace) 130 | logger.warning(msg) 131 | raise RuntimeError(msg) 132 | 133 | @staticmethod 134 | def EMPTY_NAMESPACE(ns): 135 | if ns is None: 136 | return '' 137 | else: 138 | return ns 139 | 140 | @staticmethod 141 | def db_name_validation(db_name, namespace=None): 142 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 143 | if not SonicDBConfig._sonic_db_config_init: 144 | SonicDBConfig.load_sonic_db_config() 145 | SonicDBConfig.namespace_validation(namespace) 146 | db = SonicDBConfig._sonic_db_config[namespace]["DATABASES"] 147 | if db_name not in db: 148 | msg = "{} is not a valid database name in configuration file".format(db_name) 149 | logger.warning(msg) 150 | raise RuntimeError(msg) 151 | 152 | @staticmethod 153 | def inst_name_validation(inst_name, namespace=None): 154 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 155 | if not SonicDBConfig._sonic_db_config_init: 156 | SonicDBConfig.load_sonic_db_config() 157 | SonicDBConfig.namespace_validation(namespace) 158 | instances = SonicDBConfig._sonic_db_config[namespace]["INSTANCES"] 159 | if inst_name not in instances: 160 | msg = "{} is not a valid instance name in configuration file".format(inst_name) 161 | logger.warning(msg) 162 | raise RuntimeError(msg) 163 | 164 | @staticmethod 165 | def get_dblist(namespace=None): 166 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 167 | if not SonicDBConfig._sonic_db_config_init: 168 | SonicDBConfig.load_sonic_db_config() 169 | SonicDBConfig.namespace_validation(namespace) 170 | return SonicDBConfig._sonic_db_config[namespace]["DATABASES"].keys() 171 | 172 | @staticmethod 173 | def get_ns_list(): 174 | if not SonicDBConfig._sonic_db_config_init: 175 | SonicDBConfig.load_sonic_db_config() 176 | return SonicDBConfig._sonic_db_config.keys() 177 | 178 | @staticmethod 179 | def get_instance(db_name, namespace=None): 180 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 181 | inst_name = SonicDBConfig.get_instancename(db_name, namespace) 182 | return SonicDBConfig._sonic_db_config[namespace]["INSTANCES"][inst_name] 183 | 184 | @staticmethod 185 | def get_instancename(db_name, namespace=None): 186 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 187 | if not SonicDBConfig._sonic_db_config_init: 188 | SonicDBConfig.load_sonic_db_config() 189 | SonicDBConfig.db_name_validation(db_name, namespace) 190 | inst_name = SonicDBConfig._sonic_db_config[namespace]["DATABASES"][db_name]["instance"] 191 | SonicDBConfig.inst_name_validation(inst_name, namespace) 192 | return inst_name 193 | 194 | @staticmethod 195 | def get_instancelist(namespace=None): 196 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 197 | if not SonicDBConfig._sonic_db_config_init: 198 | SonicDBConfig.load_sonic_db_config() 199 | SonicDBConfig.namespace_validation(namespace) 200 | return SonicDBConfig._sonic_db_config[namespace]["INSTANCES"] 
201 | 202 | @staticmethod 203 | def get_socket(db_name, namespace=None): 204 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 205 | if not SonicDBConfig._sonic_db_config_init: 206 | SonicDBConfig.load_sonic_db_config() 207 | return SonicDBConfig.get_instance(db_name, namespace)["unix_socket_path"] 208 | 209 | @staticmethod 210 | def get_hostname(db_name, namespace=None): 211 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 212 | if not SonicDBConfig._sonic_db_config_init: 213 | SonicDBConfig.load_sonic_db_config() 214 | return SonicDBConfig.get_instance(db_name, namespace)["hostname"] 215 | 216 | @staticmethod 217 | def get_port(db_name, namespace=None): 218 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 219 | if not SonicDBConfig._sonic_db_config_init: 220 | SonicDBConfig.load_sonic_db_config() 221 | return SonicDBConfig.get_instance(db_name, namespace)["port"] 222 | 223 | @staticmethod 224 | def get_dbid(db_name, namespace=None): 225 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 226 | if not SonicDBConfig._sonic_db_config_init: 227 | SonicDBConfig.load_sonic_db_config() 228 | SonicDBConfig.db_name_validation(db_name, namespace) 229 | return SonicDBConfig._sonic_db_config[namespace]["DATABASES"][db_name]["id"] 230 | 231 | @staticmethod 232 | def get_separator(db_name, namespace=None): 233 | namespace = SonicDBConfig.EMPTY_NAMESPACE(namespace) 234 | if not SonicDBConfig._sonic_db_config_init: 235 | SonicDBConfig.load_sonic_db_config() 236 | SonicDBConfig.db_name_validation(db_name, namespace) 237 | return SonicDBConfig._sonic_db_config[namespace]["DATABASES"][db_name]["separator"] 238 | 239 | class SonicV2Connector(object): 240 | def __init__(self, use_unix_socket_path=False, namespace=None, decode_responses=True, **kwargs): 241 | if PY3K: 242 | if not decode_responses: 243 | raise NotImplementedError('SonicV2Connector with decode_responses=False is not supported in python3') 244 | kwargs['decode_responses'] = True 245 | 246 | self.dbintf = DBInterface(**kwargs) 247 | self.use_unix_socket_path = True if use_unix_socket_path and os.getuid() == 0 else False 248 | 249 | """If the user don't give the namespace as input, it refers to the local namespace 250 | where this application is run. (It could be a network namespace or linux host namesapce) 251 | """ 252 | self.namespace = namespace 253 | 254 | for db_name in self.get_db_list(): 255 | # set a database name as a constant value attribute. 
256 | setattr(self, db_name, db_name) 257 | 258 | def connect(self, db_name, retry_on=True): 259 | if self.use_unix_socket_path: 260 | self.dbintf.redis_kwargs["unix_socket_path"] = self.get_db_socket(db_name) 261 | self.dbintf.redis_kwargs["host"] = None 262 | self.dbintf.redis_kwargs["port"] = None 263 | else: 264 | self.dbintf.redis_kwargs["host"] = self.get_db_hostname(db_name) 265 | self.dbintf.redis_kwargs["port"] = self.get_db_port(db_name) 266 | self.dbintf.redis_kwargs["unix_socket_path"] = None 267 | db_id = self.get_dbid(db_name) 268 | self.dbintf.connect(db_id, db_name, retry_on) 269 | 270 | def close(self, db_name): 271 | self.dbintf.close(db_name) 272 | 273 | def get_db_list(self): 274 | return SonicDBConfig.get_dblist(self.namespace) 275 | 276 | def get_db_instance(self, db_name): 277 | return SonicDBConfig.get_instance(db_name, self.namespace) 278 | 279 | def get_db_socket(self, db_name): 280 | return SonicDBConfig.get_socket(db_name, self.namespace) 281 | 282 | def get_db_hostname(self, db_name): 283 | return SonicDBConfig.get_hostname(db_name, self.namespace) 284 | 285 | def get_db_port(self, db_name): 286 | return SonicDBConfig.get_port(db_name, self.namespace) 287 | 288 | def get_dbid(self, db_name): 289 | return SonicDBConfig.get_dbid(db_name, self.namespace) 290 | 291 | def get_db_separator(self, db_name): 292 | return SonicDBConfig.get_separator(db_name, self.namespace) 293 | 294 | def get_redis_client(self, db_name): 295 | return self.dbintf.get_redis_client(db_name) 296 | 297 | def publish(self, db_name, channel, message): 298 | return self.dbintf.publish(db_name, channel, message) 299 | 300 | def expire(self, db_name, key, timeout_sec): 301 | return self.dbintf.expire(db_name, key, timeout_sec) 302 | 303 | def exists(self, db_name, key): 304 | return self.dbintf.exists(db_name, key) 305 | 306 | def keys(self, db_name, pattern='*', *args, **kwargs): 307 | return self.dbintf.keys(db_name, pattern, *args, **kwargs) 308 | 309 | def get(self, db_name, _hash, key, *args, **kwargs): 310 | return self.dbintf.get(db_name, _hash, key, *args, **kwargs) 311 | 312 | def get_all(self, db_name, _hash, *args, **kwargs): 313 | return self.dbintf.get_all(db_name, _hash, *args, **kwargs) 314 | 315 | def set(self, db_name, _hash, key, val, *args, **kwargs): 316 | return self.dbintf.set(db_name, _hash, key, val, *args, **kwargs) 317 | 318 | def delete(self, db_name, key, *args, **kwargs): 319 | return self.dbintf.delete(db_name, key, *args, **kwargs) 320 | 321 | def delete_all_by_pattern(self, db_name, pattern, *args, **kwargs): 322 | self.dbintf.delete_all_by_pattern(db_name, pattern, *args, **kwargs) 323 | 324 | pass 325 | -------------------------------------------------------------------------------- /src/swsssdk/interface.py: -------------------------------------------------------------------------------- 1 | import time 2 | from functools import wraps 3 | 4 | import redis 5 | from redis import RedisError 6 | 7 | from . import logger 8 | from .exceptions import UnavailableDataError, MissingClientError 9 | 10 | BLOCKING_ATTEMPT_ERROR_THRESHOLD = 10 11 | BLOCKING_ATTEMPT_SUPPRESSION = BLOCKING_ATTEMPT_ERROR_THRESHOLD + 5 12 | 13 | 14 | def blockable(f): 15 | """ 16 | "blocking" decorator for Redis accessor methods. Wrapped functions that specify kwarg 'blocking' 17 | will wait for the specified accessor to return with data.:: 18 | 19 | class SonicV2Connector: 20 | @blockable 21 | def keys(self, db_name): 22 | # ... 23 | 24 | # call with: 25 | db = SonicV2Connector() 26 | # ... 
27 |         db.keys('DATABASE', blocking=True)
28 | 
29 |     """
30 | 
31 |     @wraps(f)
32 |     def wrapped(inst, db_name, *args, **kwargs):
33 | 
34 |         blocking = kwargs.pop('blocking', False)
35 |         attempts = 0
36 |         while True:
37 |             try:
38 |                 ret_data = f(inst, db_name, *args, **kwargs)
39 |                 inst._unsubscribe_keyspace_notification(db_name)
40 |                 return ret_data
41 |             except UnavailableDataError as e:
42 |                 if blocking:
43 |                     logger.warning(e.message)
44 |                     if db_name in inst.keyspace_notification_channels:
45 |                         result = inst._unavailable_data_handler(db_name, e.data)
46 |                         if result:
47 |                             continue  # received updates, try to read data again
48 |                         else:
49 |                             inst._unsubscribe_keyspace_notification(db_name)
50 |                             raise  # no updates were received; re-raise the exception
51 |                     else:  # Subscribe to updates and try it again (avoiding race condition)
52 |                         inst._subscribe_keyspace_notification(db_name)
53 |                 else:
54 |                     return None
55 |             except redis.exceptions.ResponseError:
56 |                 """
57 |                 A response error indicates that something is fundamentally wrong with the request itself.
58 |                 Retrying the request won't pass unless the schema itself changes. In this case, the error
59 |                 should be attributed to the application itself. Re-raise the error.
60 |                 """
61 |                 logger.exception("Bad DB request [{}:{}]{{ {} }}".format(db_name, f.__name__, str(args)))
62 |                 raise
63 |             except (redis.exceptions.RedisError, OSError):
64 |                 attempts += 1
65 |                 inst._connection_error_handler(db_name)
66 |                 msg = "DB access failure by [{}:{}]{{ {} }}".format(db_name, f.__name__, str(args))
67 |                 if BLOCKING_ATTEMPT_ERROR_THRESHOLD < attempts < BLOCKING_ATTEMPT_SUPPRESSION:
68 |                     # Repeated access failures imply the database itself is unhealthy.
69 |                     logger.exception(msg=msg)
70 |                 else:
71 |                     logger.warning(msg=msg)
72 | 
73 |     return wrapped
74 | 
75 | 
76 | class DBRegistry(dict):
77 |     def __getitem__(self, item):
78 |         if item not in self:
79 |             raise MissingClientError("No client connected for db_name '{}'".format(item))
80 |         return dict.__getitem__(self, item)
81 | 
82 | 
83 | class DBInterface(object):
84 |     REDIS_HOST = '127.0.0.1'
85 |     """
86 |     SONiC does not use a password-protected database. By default, Redis will only allow connections to unprotected
87 |     DBs over the loopback ip.
88 |     """
89 | 
90 |     REDIS_PORT = 6379
91 |     """
92 |     SONiC uses the default port.
93 |     """
94 | 
95 |     REDIS_UNIX_SOCKET_PATH = "/var/run/redis/redis.sock"
96 |     """
97 |     SONiC uses the default unix socket.
98 |     """
99 | 
100 |     CONNECT_RETRY_WAIT_TIME = 10
101 |     """
102 |     Wait period in seconds before attempting to reconnect to Redis.
103 |     """
104 | 
105 |     DATA_RETRIEVAL_WAIT_TIME = 3
106 |     """
107 |     Wait period in seconds before attempting to retrieve missing data.
108 |     """
109 | 
110 |     PUB_SUB_NOTIFICATION_TIMEOUT = 10.0  # seconds
111 |     """
112 |     Time to wait for any given message to arrive via pub-sub.
113 |     """
114 | 
115 |     PUB_SUB_MAXIMUM_DATA_WAIT = 60.0  # seconds
116 |     """
117 |     Maximum allowable time to wait on a specific pub-sub notification.
118 |     """
119 | 
120 |     KEYSPACE_PATTERN = '__key*__:*'
121 |     """
122 |     Pub-sub keyspace pattern.
123 |     """
124 | 
125 |     KEYSPACE_EVENTS = 'KEA'
126 |     """
127 |     In Redis, keyspace event notifications are disabled by default because the feature
128 |     uses some CPU power. Notifications are enabled using the notify-keyspace-events
129 |     option of redis.conf or via CONFIG SET.
130 |     In order to enable the feature a non-empty string is used, composed of multiple characters,
131 |     where every character has a special meaning according to the following table:
132 |     K - Keyspace events, published with __keyspace@<db>__ prefix.
133 |     E - Keyevent events, published with __keyevent@<db>__ prefix.
134 |     g - Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
135 |     $ - String commands
136 |     l - List commands
137 |     s - Set commands
138 |     h - Hash commands
139 |     z - Sorted set commands
140 |     x - Expired events (events generated every time a key expires)
141 |     e - Evicted events (events generated when a key is evicted for maxmemory)
142 |     A - Alias for g$lshzxe, so that the "AKE" string means all the events.
143 |     ACS Redis db mainly uses hashes, therefore h is selected.
144 |     """
145 | 
146 |     def __init__(self, **kwargs):
147 | 
148 |         super(DBInterface, self).__init__()
149 | 
150 |         # Store the arguments for the redis client
151 |         self.redis_kwargs = kwargs
152 |         if len(self.redis_kwargs) == 0:
153 |             self.redis_kwargs['unix_socket_path'] = self.REDIS_UNIX_SOCKET_PATH
154 | 
155 |         # For thread safety, as recommended by python-redis,
156 |         # create a separate client for each database
157 |         self.redis_clients = DBRegistry()
158 | 
159 |         # record the db_name to db_id mapping locally
160 |         self.redis_db_map = {}
161 | 
162 |         # Create a channel for receiving needed keyspace event
163 |         # notifications for each client
164 |         self.keyspace_notification_channels = DBRegistry()
165 | 
166 |     def connect(self, db_id, db_name, retry_on=True):
167 |         """
168 |         :param db_id: database id to connect to
169 |         :param db_name: database name to connect to
170 |         :param retry_on: if ``True`` -- will attempt to connect continuously.
171 |                          if ``False``, only one attempt will be made.
172 |         """
173 |         if retry_on:
174 |             self._persistent_connect(db_id, db_name)
175 |         else:
176 |             self._onetime_connect(db_id, db_name)
177 | 
178 |     def _onetime_connect(self, db_id, db_name):
179 |         """
180 |         Connect to database id.
181 |         """
182 |         if db_id is None:
183 |             raise ValueError("No database ID configured for '{}'".format(db_name))
184 | 
185 |         if db_name is None:
186 |             raise ValueError("No database name was provided")
187 | 
188 |         if db_name not in self.redis_clients.keys():
189 |             self.redis_db_map[db_name] = db_id
190 |             client = redis.StrictRedis(db=db_id, **self.redis_kwargs)
191 | 
192 |             # Enable the notification mechanism for keyspace events in Redis
193 |             client.config_set('notify-keyspace-events', self.KEYSPACE_EVENTS)
194 |             self.redis_clients[db_name] = client
195 | 
196 |     def _persistent_connect(self, db_id, db_name):
197 |         """
198 |         Keep reconnecting to database 'db_id' until success
199 |         """
200 |         while True:
201 |             try:
202 |                 self._onetime_connect(db_id, db_name)
203 |                 return
204 |             except RedisError:
205 |                 t_wait = self.CONNECT_RETRY_WAIT_TIME
206 |                 logger.warning("Connecting to DB '{} {}' failed, will retry in {}s".format(db_id, db_name, t_wait))
207 |                 self.close(db_name)
208 |                 time.sleep(t_wait)
209 | 
210 |     def close(self, db_name):
211 |         """
212 |         Close all client(s) / keyspace channels.
213 |         :param db_name: DB to disconnect from.
214 | """ 215 | if db_name in self.redis_clients: 216 | self.redis_clients[db_name].connection_pool.disconnect() 217 | del self.redis_clients[db_name] 218 | if db_name in self.keyspace_notification_channels: 219 | self.keyspace_notification_channels[db_name].close() 220 | del self.keyspace_notification_channels[db_name] 221 | 222 | def _subscribe_keyspace_notification(self, db_name): 223 | """ 224 | Subscribe the chosent client to keyspace event notifications 225 | """ 226 | logger.debug("Subscribe to keyspace notification") 227 | client = self.redis_clients[db_name] 228 | pubsub = client.pubsub() 229 | pubsub.psubscribe(self.KEYSPACE_PATTERN) 230 | self.keyspace_notification_channels[db_name] = pubsub 231 | 232 | def _unsubscribe_keyspace_notification(self, db_name): 233 | """ 234 | Unsubscribe the chosent client from keyspace event notifications 235 | """ 236 | if db_name in self.keyspace_notification_channels: 237 | logger.debug("Unsubscribe from keyspace notification") 238 | self.keyspace_notification_channels[db_name].close() 239 | del self.keyspace_notification_channels[db_name] 240 | 241 | def get_redis_client(self, db_name): 242 | """ 243 | :param db_name: Name of the DB to query 244 | :return: The Redis client instance. 245 | """ 246 | return self.redis_clients[db_name] 247 | 248 | def publish(self, db_name, channel, message): 249 | """ 250 | Publish message via the channel 251 | """ 252 | client = self.redis_clients[db_name] 253 | return client.publish(channel, message) 254 | 255 | def expire(self, db_name, key, timeout_sec): 256 | """ 257 | Set a timeout on a key 258 | """ 259 | client = self.redis_clients[db_name] 260 | return client.expire(key, timeout_sec) 261 | 262 | def exists(self, db_name, key): 263 | """ 264 | Check if a key exist in the db 265 | """ 266 | client = self.redis_clients[db_name] 267 | return client.exists(key) 268 | 269 | @blockable 270 | def keys(self, db_name, pattern='*'): 271 | """ 272 | Retrieve all the keys of DB %db_name 273 | """ 274 | client = self.redis_clients[db_name] 275 | keys = client.keys(pattern=pattern) 276 | if not keys: 277 | message = "DB '{}' is empty!".format(db_name) 278 | raise UnavailableDataError(message, b'hset') 279 | else: 280 | return keys 281 | 282 | @blockable 283 | def get(self, db_name, _hash, key): 284 | """ 285 | Retrieve the value of Key %key from Hashtable %hash 286 | in Database %db_name 287 | 288 | Parameter %blocking indicates whether to wait 289 | when the query fails 290 | """ 291 | client = self.redis_clients[db_name] 292 | val = client.hget(_hash, key) 293 | if not val: 294 | message = "Key '{}' field '{}' unavailable in database '{}'".format(_hash, key, db_name) 295 | raise UnavailableDataError(message, _hash) 296 | else: 297 | # redis only supports strings. if any item is set to string 'None', cast it back to the appropriate type. 298 | return None if val == b'None' else val 299 | 300 | @blockable 301 | def get_all(self, db_name, _hash): 302 | """ 303 | Get Hashtable %hash from DB %db_name 304 | 305 | Parameter %blocking indicates whether to wait 306 | if the hashtable has not been created yet 307 | """ 308 | client = self.redis_clients[db_name] 309 | table = client.hgetall(_hash) 310 | if not table: 311 | message = "Key '{}' unavailable in database '{}'".format(_hash, db_name) 312 | raise UnavailableDataError(message, _hash) 313 | else: 314 | # redis only supports strings. if any item is set to string 'None', cast it back to the appropriate type. 
315 |             return {k: None if v == b'None' else v for k, v in table.items()}
316 | 
317 |     @blockable
318 |     def set(self, db_name, _hash, key, val):
319 |         """
320 |         Add %(key, val) to Hashtable %hash in DB %db_name
321 |         Parameter %blocking indicates whether to retry in case of failure
322 |         """
323 |         client = self.redis_clients[db_name]
324 |         return client.hset(_hash, key, val)
325 | 
326 |     @blockable
327 |     def delete(self, db_name, key):
328 |         """
329 |         Delete %key from DB %db_name
330 |         Parameter %blocking indicates whether to retry in case of failure
331 |         """
332 |         client = self.redis_clients[db_name]
333 |         return client.delete(key)
334 | 
335 |     @blockable
336 |     def delete_all_by_pattern(self, db_name, pattern):
337 |         """
338 |         Delete all keys which match %pattern from DB %db_name
339 |         Parameter %blocking indicates whether to retry in case of failure
340 |         """
341 |         client = self.redis_clients[db_name]
342 |         keys = client.keys(pattern)
343 |         for key in keys:
344 |             client.delete(key)
345 | 
346 |     def _unavailable_data_handler(self, db_name, data):
347 |         """
348 |         When the queried config is not available in Redis -- wait until it is available.
349 |         Two timeouts are at work here:
350 |         1. Notification timeout - how long to wait before giving up on receiving any given pub-sub message.
351 |         2. Max data wait - swsssdk-specific: how long to wait for the data to populate (in absolute time).
352 |         """
353 |         start = time.time()
354 |         logger.debug("Listening on pubsub channel '{}'".format(db_name))
355 |         while time.time() - start < self.PUB_SUB_MAXIMUM_DATA_WAIT:
356 |             msg = self.keyspace_notification_channels[db_name].get_message(timeout=self.PUB_SUB_NOTIFICATION_TIMEOUT)
357 |             if msg is not None and msg.get('data') == data:
358 |                 logger.info("'{}' acquired via pub-sub from '{}'. Unblocking...".format(data, db_name))
359 |                 # Wait for a "settling" period before releasing the wait.
360 |                 time.sleep(self.DATA_RETRIEVAL_WAIT_TIME)
361 |                 return True
362 | 
363 |         logger.warning("No notification for '{}' from '{}' received before timeout.".format(data, db_name))
364 |         return False
365 | 
366 |     def _connection_error_handler(self, db_name):
367 |         """
368 |         In the event Redis is unavailable, close existing connections, and try again.
369 |         """
370 |         logger.warning('Could not connect to Redis -- waiting before trying again.')
371 |         self.close(db_name)
372 |         time.sleep(self.CONNECT_RETRY_WAIT_TIME)
373 |         self.connect(self.redis_db_map[db_name], db_name, True)
--------------------------------------------------------------------------------
/src/swsssdk/configdb.py:
--------------------------------------------------------------------------------
1 | """
2 | SONiC ConfigDB connection module
3 | 
4 | Example:
5 |     # Write to config DB
6 |     config_db = ConfigDBConnector()
7 |     config_db.connect()
8 |     config_db.mod_entry('BGP_NEIGHBOR', '10.0.0.1', {
9 |         'admin_status': state
10 |     })
11 | 
12 |     # Daemon to watch config change in certain table:
13 |     config_db = ConfigDBConnector()
14 |     handler = lambda table, key, data: print(key, data)
15 |     config_db.subscribe('BGP_NEIGHBOR', handler)
16 |     config_db.connect()
17 |     config_db.listen()
18 | 
19 | """
20 | import sys
21 | import time
22 | from .dbconnector import SonicV2Connector
23 | 
24 | PY3K = sys.version_info >= (3, 0)
25 | 
26 | class ConfigDBConnector(SonicV2Connector):
27 | 
28 |     INIT_INDICATOR = 'CONFIG_DB_INITIALIZED'
29 | 
30 |     def __init__(self, decode_responses=True, **kwargs):
31 |         # By default, connect to Redis through TCP, which does not require root.
32 |         if len(kwargs) == 0:
33 |             kwargs['host'] = '127.0.0.1'
34 | 
35 |         if PY3K:
36 |             if not decode_responses:
37 |                 raise NotImplementedError('ConfigDBConnector with decode_responses=False is not supported in python3')
38 |             kwargs['decode_responses'] = True
39 | 
40 |         """The ConfigDBConnector class will accept the parameter 'namespace' which is used to
41 |            load the database_config and connect to the redis DB instances in that namespace.
42 |            By default namespace is set to None, which means it connects to local redis DB instances.
43 | 
44 |            When connecting to a different namespace, set the use_unix_socket_path flag to True.
45 |            E.g. ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
46 | 
47 |            'namespace' is implicitly passed to the parent SonicV2Connector class.
48 |         """
49 |         super(ConfigDBConnector, self).__init__(**kwargs)
50 |         # Trick: to achieve static/instance method "overload", we must initialize the function in the ctor
51 |         # ref: https://stackoverflow.com/a/28766809/2514803
52 |         self.serialize_key = self._serialize_key
53 |         self.deserialize_key = self._deserialize_key
54 | 
55 |         self.TABLE_NAME_SEPARATOR = '|'
56 |         self.KEY_SEPARATOR = '|'
57 |         self.handlers = {}
58 | 
59 |     def __wait_for_db_init(self):
60 |         client = self.get_redis_client(self.db_name)
61 |         pubsub = client.pubsub()
62 |         initialized = client.get(ConfigDBConnector.INIT_INDICATOR)
63 |         if not initialized:
64 |             pattern = "__keyspace@{}__:{}".format(self.get_dbid(self.db_name), ConfigDBConnector.INIT_INDICATOR)
65 |             pubsub.psubscribe(pattern)
66 |             for item in pubsub.listen():
67 |                 if item['type'] == 'pmessage':
68 |                     key = item['channel'].split(':', 1)[1]
69 |                     if key == ConfigDBConnector.INIT_INDICATOR:
70 |                         initialized = client.get(self.INIT_INDICATOR)
71 |                         if initialized:
72 |                             break
73 |             pubsub.punsubscribe(pattern)
74 | 
75 | 
76 |     def db_connect(self, dbname, wait_for_init=False, retry_on=False):
77 |         self.db_name = dbname
78 |         self.KEY_SEPARATOR = self.TABLE_NAME_SEPARATOR = self.get_db_separator(self.db_name)
79 |         SonicV2Connector.connect(self, self.db_name, retry_on)
80 |         if wait_for_init:
81 |             self.__wait_for_db_init()
82 | 
83 |     def connect(self, wait_for_init=True, retry_on=False):
84 |         self.db_connect('CONFIG_DB', wait_for_init, retry_on)
85 | 
86 |     def subscribe(self, table, handler):
87 |         """Set a handler to handle config changes in a certain table.
88 |         Note that a single handler can be registered to different tables by
89 |         calling this function multiple times.
90 |         Args:
91 |             table: Table name.
92 |             handler: a handler function that has the signature handler(table_name, key, data)
93 |         """
94 |         self.handlers[table] = handler
95 | 
96 |     def unsubscribe(self, table):
97 |         """Remove the registered handler from a certain table.
98 |         Args:
99 |             table: Table name.
100 |         """
101 |         if table in self.handlers:
102 |             self.handlers.pop(table)
103 | 
104 |     def __fire(self, table, key, data):
105 |         if table in self.handlers:
106 |             handler = self.handlers[table]
107 |             handler(table, key, data)
108 | 
109 |     def listen(self):
110 |         """Start listening to Redis keyspace events and trigger the corresponding handlers when the content of a table changes.
111 | """ 112 | self.pubsub = self.get_redis_client(self.db_name).pubsub() 113 | self.pubsub.psubscribe("__keyspace@{}__:*".format(self.get_dbid(self.db_name))) 114 | for item in self.pubsub.listen(): 115 | if item['type'] == 'pmessage': 116 | key = item['channel'].split(':', 1)[1] 117 | try: 118 | (table, row) = key.split(self.TABLE_NAME_SEPARATOR, 1) 119 | if table in self.handlers: 120 | client = self.get_redis_client(self.db_name) 121 | data = self.raw_to_typed(client.hgetall(key)) 122 | self.__fire(table, row, data) 123 | except ValueError: 124 | pass #Ignore non table-formated redis entries 125 | 126 | def raw_to_typed(self, raw_data): 127 | if raw_data is None: 128 | return None 129 | typed_data = {} 130 | for raw_key in raw_data: 131 | key = raw_key 132 | 133 | # "NULL:NULL" is used as a placeholder for objects with no attributes 134 | if key == "NULL": 135 | pass 136 | # A column key with ending '@' is used to mark list-typed table items 137 | # TODO: Replace this with a schema-based typing mechanism. 138 | elif key.endswith("@"): 139 | value = raw_data[raw_key].split(',') 140 | typed_data[key[:-1]] = value 141 | else: 142 | typed_data[key] = raw_data[raw_key] 143 | return typed_data 144 | 145 | def typed_to_raw(self, typed_data): 146 | if typed_data is None: 147 | return None 148 | elif typed_data == {}: 149 | return { "NULL": "NULL" } 150 | raw_data = {} 151 | for key in typed_data: 152 | value = typed_data[key] 153 | if type(value) is list: 154 | raw_data[key+'@'] = ','.join(value) 155 | else: 156 | raw_data[key] = str(value) 157 | return raw_data 158 | 159 | # Note: we could not use a class variable for KEY_SEPARATOR, but original dependent code is using 160 | # these static functions. So we implement both static and instance functions with the same name. 161 | # The static function will behave according to ConfigDB separators. 162 | @staticmethod 163 | def serialize_key(key, separator='|'): 164 | if type(key) is tuple: 165 | return separator.join(key) 166 | else: 167 | return str(key) 168 | 169 | def _serialize_key(self, key): 170 | return ConfigDBConnector.serialize_key(key, self.KEY_SEPARATOR) 171 | 172 | @staticmethod 173 | def deserialize_key(key, separator='|'): 174 | tokens = key.split(separator) 175 | if len(tokens) > 1: 176 | return tuple(tokens) 177 | else: 178 | return key 179 | 180 | def _deserialize_key(self, key): 181 | return ConfigDBConnector.deserialize_key(key, self.KEY_SEPARATOR) 182 | 183 | def set_entry(self, table, key, data): 184 | """Write a table entry to config db. 185 | Remove extra fields in the db which are not in the data. 186 | Args: 187 | table: Table name. 188 | key: Key of table entry, or a tuple of keys if it is a multi-key table. 189 | data: Table row data in a form of dictionary {'column_key': 'value', ...}. 190 | Pass {} as data will create an entry with no column if not already existed. 191 | Pass None as data will delete the entry. 192 | """ 193 | key = self.serialize_key(key) 194 | client = self.get_redis_client(self.db_name) 195 | _hash = '{}{}{}'.format(table.upper(), self.TABLE_NAME_SEPARATOR, key) 196 | if data is None: 197 | client.delete(_hash) 198 | else: 199 | original = self.get_entry(table, key) 200 | client.hmset(_hash, self.typed_to_raw(data)) 201 | for k in [ k for k in original if k not in data ]: 202 | if type(original[k]) == list: 203 | k = k + '@' 204 | client.hdel(_hash, self.serialize_key(k)) 205 | 206 | def mod_entry(self, table, key, data): 207 | """Modify a table entry to config db. 
208 |         Args:
209 |             table: Table name.
210 |             key: Key of table entry, or a tuple of keys if it is a multi-key table.
211 |             data: Table row data in the form of a dictionary {'column_key': 'value', ...}.
212 |                   Passing {} as data will create an entry with no columns if it does not already exist.
213 |                   Passing None as data will delete the entry.
214 |         """
215 |         key = self.serialize_key(key)
216 |         client = self.get_redis_client(self.db_name)
217 |         _hash = '{}{}{}'.format(table.upper(), self.TABLE_NAME_SEPARATOR, key)
218 |         if data is None:
219 |             client.delete(_hash)
220 |         else:
221 |             client.hmset(_hash, self.typed_to_raw(data))
222 | 
223 |     def get_entry(self, table, key):
224 |         """Read a table entry from config db.
225 |         Args:
226 |             table: Table name.
227 |             key: Key of table entry, or a tuple of keys if it is a multi-key table.
228 |         Returns:
229 |             Table row data in the form of a dictionary {'column_key': 'value', ...}.
230 |             Empty dictionary if the table or the entry does not exist.
231 |         """
232 |         key = self.serialize_key(key)
233 |         client = self.get_redis_client(self.db_name)
234 |         _hash = '{}{}{}'.format(table.upper(), self.TABLE_NAME_SEPARATOR, key)
235 |         return self.raw_to_typed(client.hgetall(_hash))
236 | 
237 |     def get_keys(self, table, split=True):
238 |         """Read all keys of a table from config db.
239 |         Args:
240 |             table: Table name.
241 |             split: split off the first part (the table name) and return the second.
242 |                    Useful for keys with two parts <tablename>:<key>
243 |         Returns:
244 |             List of keys.
245 |         """
246 |         client = self.get_redis_client(self.db_name)
247 |         pattern = '{}{}*'.format(table.upper(), self.TABLE_NAME_SEPARATOR)
248 |         keys = client.keys(pattern)
249 |         data = []
250 |         for key in keys:
251 |             try:
252 |                 if split:
253 |                     (_, row) = key.split(self.TABLE_NAME_SEPARATOR, 1)
254 |                     data.append(self.deserialize_key(row))
255 |                 else:
256 |                     data.append(self.deserialize_key(key))
257 |             except ValueError:
258 |                 pass  # Ignore non table-formatted redis entries
259 |         return data
260 | 
261 |     def get_table(self, table):
262 |         """Read an entire table from config db.
263 |         Args:
264 |             table: Table name.
265 |         Returns:
266 |             Table data in a dictionary form of
267 |             { 'row_key': {'column_key': value, ...}, ...}
268 |             or { ('l1_key', 'l2_key', ...): {'column_key': value, ...}, ...} for a multi-key table.
269 |             Empty dictionary if the table does not exist.
270 |         """
271 |         client = self.get_redis_client(self.db_name)
272 |         pattern = '{}{}*'.format(table.upper(), self.TABLE_NAME_SEPARATOR)
273 |         keys = client.keys(pattern)
274 |         data = {}
275 |         for key in keys:
276 |             try:
277 |                 entry = self.raw_to_typed(client.hgetall(key))
278 |                 if entry is not None:
279 |                     (_, row) = key.split(self.TABLE_NAME_SEPARATOR, 1)
280 |                     data[self.deserialize_key(row)] = entry
281 |             except ValueError:
282 |                 pass  # Ignore non table-formatted redis entries
283 |         return data
284 | 
285 |     def delete_table(self, table):
286 |         """Delete an entire table from config db.
287 |         Args:
288 |             table: Table name.
289 |         """
290 |         client = self.get_redis_client(self.db_name)
291 |         pattern = '{}{}*'.format(table.upper(), self.TABLE_NAME_SEPARATOR)
292 |         keys = client.keys(pattern)
293 |         for key in keys:
294 |             client.delete(key)
295 | 
296 |     def mod_config(self, data):
297 |         """Write multiple tables into config db.
298 |         Extra entries/fields in the db which are not in the data are kept.
299 |         Args:
300 |             data: config data in a dictionary form
301 |             {
302 |                 'TABLE_NAME': { 'row_key': {'column_key': 'value', ...}, ...},
303 |                 'MULTI_KEY_TABLE_NAME': { ('l1_key', 'l2_key', ...): {'column_key': 'value', ...}, ...},
304 |                 ...
305 |             }
306 |         """
307 |         for table_name in data:
308 |             table_data = data[table_name]
309 |             if table_data is None:
310 |                 self.delete_table(table_name)
311 |                 continue
312 |             for key in table_data:
313 |                 self.mod_entry(table_name, key, table_data[key])
314 | 
315 |     def get_config(self):
316 |         """Read all config data.
317 |         Returns:
318 |             Config data in a dictionary form of
319 |             {
320 |                 'TABLE_NAME': { 'row_key': {'column_key': 'value', ...}, ...},
321 |                 'MULTI_KEY_TABLE_NAME': { ('l1_key', 'l2_key', ...): {'column_key': 'value', ...}, ...},
322 |                 ...
323 |             }
324 |         """
325 |         client = self.get_redis_client(self.db_name)
326 |         keys = client.keys('*')
327 |         data = {}
328 |         for key in keys:
329 |             try:
330 |                 (table_name, row) = key.split(self.TABLE_NAME_SEPARATOR, 1)
331 |                 entry = self.raw_to_typed(client.hgetall(key))
332 |                 if entry is not None:
333 |                     data.setdefault(table_name, {})[self.deserialize_key(row)] = entry
334 |             except ValueError:
335 |                 pass  # Ignore non table-formatted redis entries
336 |         return data
337 | 
338 | 
339 | class ConfigDBPipeConnector(ConfigDBConnector):
340 |     REDIS_SCAN_BATCH_SIZE = 30
341 | 
342 |     def __init__(self, **kwargs):
343 |         super(ConfigDBPipeConnector, self).__init__(**kwargs)
344 | 
345 |     def __delete_entries(self, client, pipe, pattern, cursor):
346 |         """Helper method to delete table entries from config db using a Redis pipeline,
347 |         with a batch size of REDIS_SCAN_BATCH_SIZE.
348 |         The caller should call pipeline execute once ready.
349 |         Args:
350 |             client: Redis client
351 |             pipe: Redis DB pipe
352 |             pattern: key pattern
353 |             cursor: position to start scanning from
354 | 
355 |         Returns:
356 |             cur: position of the next item to scan
357 |         """
358 |         cur, keys = client.scan(cursor=cursor, match=pattern, count=self.REDIS_SCAN_BATCH_SIZE)
359 |         for key in keys:
360 |             pipe.delete(key)
361 | 
362 |         return cur
363 | 
364 |     def __delete_table(self, client, pipe, table):
365 |         """Helper method to delete table entries from config db using a Redis pipeline.
366 |         The caller should call pipeline execute once ready.
367 |         Args:
368 |             client: Redis client
369 |             pipe: Redis DB pipe
370 |             table: Table name.
371 |         """
372 |         pattern = '{}{}*'.format(table.upper(), self.TABLE_NAME_SEPARATOR)
373 |         cur = self.__delete_entries(client, pipe, pattern, 0)
374 |         while cur != 0:
375 |             cur = self.__delete_entries(client, pipe, pattern, cur)
376 | 
377 |     def __mod_entry(self, pipe, table, key, data):
378 |         """Modify a table entry in config db.
379 |         Args:
380 |             pipe: Redis DB pipe
381 |             table: Table name.
382 |             key: Key of table entry, or a tuple of keys if it is a multi-key table.
383 |             data: Table row data in the form of a dictionary {'column_key': 'value', ...}.
384 |                   Passing {} as data will create an entry with no columns if it does not already exist.
385 |                   Passing None as data will delete the entry.
386 |         """
387 | 
388 |         key = self.serialize_key(key)
389 |         _hash = '{}{}{}'.format(table.upper(), self.TABLE_NAME_SEPARATOR, key)
390 |         if data is None:
391 |             pipe.delete(_hash)
392 |         else:
393 |             pipe.hmset(_hash, self.typed_to_raw(data))
394 | 
395 |     def mod_config(self, data):
396 |         """Write multiple tables into config db.
397 |         Extra entries/fields in the db which are not in the data are kept.
398 |         Args:
399 |             data: config data in a dictionary form
400 |             {
401 |                 'TABLE_NAME': { 'row_key': {'column_key': 'value', ...}, ...},
402 |                 'MULTI_KEY_TABLE_NAME': { ('l1_key', 'l2_key', ...): {'column_key': 'value', ...}, ...},
403 |                 ...
404 |             }
405 |         """
406 |         client = self.get_redis_client(self.db_name)
407 |         pipe = client.pipeline()
408 |         for table_name in data:
409 |             table_data = data[table_name]
410 |             if table_data is None:
411 |                 self.__delete_table(client, pipe, table_name)
412 |                 continue
413 |             for key in table_data:
414 |                 self.__mod_entry(pipe, table_name, key, table_data[key])
415 |         pipe.execute()
416 | 
417 |     def __get_config(self, client, pipe, data, cursor):
418 |         """Read config data in batches of size REDIS_SCAN_BATCH_SIZE using Redis pipelines.
419 |         Args:
420 |             client: Redis client
421 |             pipe: Redis DB pipe
422 |             data: config dictionary
423 |             cursor: position to start scanning from
424 | 
425 |         Returns:
426 |             cur: position of the next item to scan
427 |         """
428 |         cur, keys = client.scan(cursor=cursor, match='*', count=self.REDIS_SCAN_BATCH_SIZE)
429 |         keys = [key for key in keys if key != self.INIT_INDICATOR]
430 |         for key in keys:
431 |             pipe.hgetall(key)
432 |         records = pipe.execute()
433 | 
434 |         for index, key in enumerate(keys):
435 |             (table_name, row) = key.split(self.TABLE_NAME_SEPARATOR, 1)
436 |             entry = self.raw_to_typed(records[index])
437 |             if entry is not None:
438 |                 data.setdefault(table_name, {})[self.deserialize_key(row)] = entry
439 | 
440 |         return cur
441 | 
442 |     def get_config(self):
443 |         """Read all config data.
444 |         Returns:
445 |             Config data in a dictionary form of
446 |             {
447 |                 'TABLE_NAME': { 'row_key': {'column_key': 'value', ...}, ...},
448 |                 'MULTI_KEY_TABLE_NAME': { ('l1_key', 'l2_key', ...): {'column_key': 'value', ...}, ...},
449 |                 ...
450 |             }
451 |         """
452 |         client = self.get_redis_client(self.db_name)
453 |         pipe = client.pipeline()
454 |         data = {}
455 | 
456 |         cur = self.__get_config(client, pipe, data, 0)
457 |         while cur != 0:
458 |             cur = self.__get_config(client, pipe, data, cur)
459 | 
460 |         return data
461 | 
462 | 
--------------------------------------------------------------------------------
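
For reference, a minimal usage sketch of SonicV2Connector and the @blockable read path from dbconnector.py and interface.py above. This is illustrative, not part of the repo: it assumes a reachable local Redis instance with the standard SONiC database_config loaded; 'APPL_DB' comes from that config, and the PORT_TABLE key is an example.

    from swsssdk import SonicV2Connector

    db = SonicV2Connector(use_unix_socket_path=False)

    # Database names from database_config.json are set as attributes in
    # __init__, so db.APPL_DB == 'APPL_DB'.
    db.connect(db.APPL_DB, retry_on=True)

    # Plain read: returns None if the hash is absent (the @blockable wrapper
    # swallows UnavailableDataError when blocking is False).
    port = db.get_all(db.APPL_DB, 'PORT_TABLE:Ethernet0')

    # Blocking read: the wrapper subscribes to keyspace notifications and
    # retries; it re-raises UnavailableDataError if no matching notification
    # arrives within PUB_SUB_MAXIMUM_DATA_WAIT.
    port = db.get_all(db.APPL_DB, 'PORT_TABLE:Ethernet0', blocking=True)

    db.close(db.APPL_DB)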
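
The set_entry/mod_entry semantics and the list-value encoding in typed_to_raw/raw_to_typed can be summarized with a short sketch, assuming a connected CONFIG_DB; 'VLAN' and 'Vlan100' are example table and key names.

    from swsssdk import ConfigDBConnector

    config_db = ConfigDBConnector()
    config_db.connect()   # connects to CONFIG_DB and waits for CONFIG_DB_INITIALIZED

    # set_entry replaces the whole row: fields present in the db but absent
    # from data are removed via HDEL.
    config_db.set_entry('VLAN', 'Vlan100', {'vlanid': '100'})

    # mod_entry merges: existing fields not named in data are left in place.
    config_db.mod_entry('VLAN', 'Vlan100', {'admin_status': 'up'})

    # Lists round-trip through the '<field>@' convention:
    # stored as 'members@' -> 'Ethernet0,Ethernet4'.
    config_db.set_entry('VLAN', 'Vlan100',
                        {'vlanid': '100', 'members': ['Ethernet0', 'Ethernet4']})
    print(config_db.get_entry('VLAN', 'Vlan100'))
    # -> {'vlanid': '100', 'members': ['Ethernet0', 'Ethernet4']}

    # Passing None as data deletes the entry; a tuple key addresses a
    # multi-key table and is joined with the KEY_SEPARATOR.
    config_db.set_entry('VLAN', 'Vlan100', None)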
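
A namespace sketch, per the comments in ConfigDBConnector.__init__: a multi-ASIC namespace such as 'asic0' (as declared in test/config/database_global.json) is selected together with use_unix_socket_path=True. Note that SonicV2Connector only honors the unix socket when running as root (os.getuid() == 0), so this sketch assumes a privileged process.

    # Connect to the redis instances that database_global.json maps to 'asic0'.
    ns_config_db = ConfigDBConnector(use_unix_socket_path=True, namespace='asic0')
    ns_config_db.connect()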
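
Finally, ConfigDBPipeConnector keeps the same mod_config/get_config contract as its parent but batches the Redis round trips: keys are walked with SCAN in chunks of REDIS_SCAN_BATCH_SIZE and all writes or HGETALLs go through a pipeline. A sketch, again with illustrative table data:

    from swsssdk.configdb import ConfigDBPipeConnector

    config_db = ConfigDBPipeConnector()
    config_db.connect()

    # All writes are queued on one pipeline and flushed by a single execute();
    # a None table value deletes the whole table, a None row deletes that entry.
    config_db.mod_config({
        'VLAN': {
            'Vlan100': {'vlanid': '100'},
            'Vlan200': None,
        },
    })

    full_config = config_db.get_config()   # batched SCAN + pipelined HGETALL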