├── piaxe
│   ├── __init__.py
│   ├── boards
│   │   ├── __init__.py
│   │   ├── board.py
│   │   ├── zeroxaxe.py
│   │   ├── bitcrane.py
│   │   ├── piaxe.py
│   │   ├── flex4axe.py
│   │   ├── qaxe.py
│   │   └── coms_pb2.py
│   ├── ssd1306.py
│   ├── manager
│   │   ├── index1.html
│   │   └── index.html
│   ├── discord.py
│   ├── smartplug.py
│   ├── utils.py
│   ├── crc_functions.py
│   ├── rest.py
│   ├── influx.py
│   ├── bm1366.py
│   └── miner.py
├── shared
│   ├── __init__.py
│   ├── __pycache__
│   │   └── utils.cpython-310.pyc
│   └── shared.py
├── .gitignore
├── start_mainnet_publicpool_example.sh
├── docker
│   └── monitoring
│       ├── create_data_directories.sh
│       ├── assets
│       │   ├── dashboards
│       │   │   ├── dashboard.yaml
│       │   │   └── PiAxe Miner.json
│       │   └── datasources
│       │       └── influx.yml
│       ├── README.md
│       └── docker-compose.yml
├── requirements.txt
├── config.yml.example
├── README.md
├── cpu_miner
│   └── miner.py
├── pyminer.py
└── LICENSE.txt
/piaxe/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/shared/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/piaxe/boards/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.so
2 | docker/monitoring/data/*
3 | *.pyc
4 |
5 |
--------------------------------------------------------------------------------
/shared/__pycache__/utils.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shufps/piaxe-miner/HEAD/shared/__pycache__/utils.cpython-310.pyc
--------------------------------------------------------------------------------
/start_mainnet_publicpool_example.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | python3 ./pyminer.py -o stratum+tcp://public-pool.io:21496 -d -P -u bc1YOURBTCADDRESS.piaxe -p x
3 |
--------------------------------------------------------------------------------
/docker/monitoring/create_data_directories.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mkdir -p ./data/influxdb
4 | mkdir ./data/grafana
5 | chown 472:472 ./data/grafana -R
6 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | bech32==1.2.0
2 | influxdb_client==1.38.0
3 | pyserial==3.5b0
4 | pytz==2021.1
5 | PyYAML==6.0.1
6 | Requests==2.31.0
7 | rpi_hardware_pwm==0.1.4
8 | smbus==1.1.post2
9 | google==3.0.0
10 | protobuf==3.20.0
11 |
--------------------------------------------------------------------------------
/docker/monitoring/assets/dashboards/dashboard.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | providers:
4 | - name: 'Dashboards'
5 | orgId: 1
6 | folder: ''
7 | type: file
8 | disableDeletion: true
9 | allowUiUpdates: false
10 | options:
11 | path: /etc/grafana/provisioning/dashboards
12 |
--------------------------------------------------------------------------------
/docker/monitoring/assets/datasources/influx.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | datasources:
4 | - name: influxdb
5 | type: influxdb
6 | access: proxy
7 | url: http://influxdb:8086
8 | jsonData:
9 | version: Flux
10 | organization: piaxe
11 | defaultBucket: piaxe
12 | tlsSkipVerify: true
13 | secureJsonData:
14 | token: f37fh783hf8hq
15 | uid: f79a7dc2-e573-47cc-a345-892d52f5f3d5
16 |
--------------------------------------------------------------------------------
/piaxe/boards/board.py:
--------------------------------------------------------------------------------
1 | class Board:
2 | def set_fan_speed(self, channel, speed):
3 | raise NotImplementedError
4 |
5 | def read_temperature(self):
6 | raise NotImplementedError
7 |
8 | def set_led(self, state):
9 | raise NotImplementedError
10 |
11 | def reset_func(self, state):
12 | raise NotImplementedError
13 |
14 | def shutdown(self):
15 | raise NotImplementedError
16 |
17 | def serial_port(self):
18 | raise NotImplementedError
19 |
20 | def get_asic_frequency(self):
21 | return self.config['asic_frequency']
22 |
23 | def get_name(self):
24 | return self.config['name']
25 |
26 | def get_chip_count(self):
27 | return self.config['chips']
--------------------------------------------------------------------------------
/piaxe/boards/zeroxaxe.py:
--------------------------------------------------------------------------------
1 | import serial
2 | import threading
3 | import time
4 |
5 | from . import flex4axe
6 |
7 | try:
8 | from . import coms_pb2
9 | import binascii
10 | except:
11 | pass
12 |
13 | class ZeroxAxe(flex4axe.Flex4AxeHardware):
14 | def __init__(self, config):
15 | super().__init__(config)
16 |
17 | def read_temperature_and_voltage(self):
18 | data = super().read_temperature_and_voltage()
 19 |         # for a simpler board layout the voltage domains are reversed; the firmware
 20 |         # is kept identical to the flex4 to avoid maintaining a second firmware
21 | data['voltage'].reverse()
22 |
23 | for i in range(0, 4):
24 | # don't ask ... :weird-smiley-guy:
25 | data['voltage'][i] *= 1.02568560
26 |
27 | return data
--------------------------------------------------------------------------------
/docker/monitoring/README.md:
--------------------------------------------------------------------------------
 1 | # Run the setup
2 |
3 | ## Install Docker
4 | First you need to install Docker:
5 |
6 | ```bash
7 | # Install Docker
8 | sudo apt update && sudo apt install docker.io docker-compose -y
9 |
10 | # Start Docker
11 | sudo systemctl start docker && sudo systemctl enable docker
12 | ```
13 |
14 | ## Clone Repository
15 |
16 | Clone the repository and change into the monitoring directory.
17 |
18 | ```bash
19 | git clone https://github.com/shufps/piaxe-miner
20 | cd piaxe-miner/docker/monitoring
21 | ```
22 |
23 | ## Prepare and run Grafana and Influx
24 |
25 | Before the setup is run, the data directories need to be created:
26 |
27 | ```bash
28 | sudo ./create_data_directories.sh
29 | ```
30 |
31 | Afterwards start with:
32 | ```bash
33 | docker-compose up -d
34 | ```
35 |
36 | Then, Grafana should be available at `http://localhost:3000`.
37 |
38 | The default username and password are `admin` and `foobar`.
39 |
40 | To stop the monitoring use:
41 | ```bash
42 | docker-compose down
43 | ```
44 |
--------------------------------------------------------------------------------
/docker/monitoring/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.3'
2 |
3 | services:
4 | influxdb:
5 | image: influxdb:latest
6 | container_name: influxdb
7 | volumes:
8 | - ./data/influxdb:/var/lib/influxdb2
9 | ports:
10 | - "127.0.0.1:8086:8086/tcp"
11 | environment:
12 | - DOCKER_INFLUXDB_INIT_MODE=setup
13 | - DOCKER_INFLUXDB_INIT_USERNAME=influx
14 | - DOCKER_INFLUXDB_INIT_PASSWORD=f37fh783hf8hq
15 | - DOCKER_INFLUXDB_INIT_ORG=piaxe
16 | - DOCKER_INFLUXDB_INIT_BUCKET=piaxe
17 | - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=f37fh783hf8hq
18 | restart: unless-stopped
19 |
20 | grafana:
21 | image: grafana/grafana:latest
22 | container_name: grafana
23 | depends_on:
24 | - influxdb
25 | volumes:
26 | - ./data/grafana:/var/lib/grafana
27 | - ./assets/:/etc/grafana/provisioning/
28 | ports:
29 | - "3000:3000"
30 | environment:
31 | - GF_SECURITY_ADMIN_USER=admin
32 | - GF_SECURITY_ADMIN_PASSWORD=foobar
33 | restart: unless-stopped
34 |
35 |
--------------------------------------------------------------------------------
/piaxe/ssd1306.py:
--------------------------------------------------------------------------------
1 | try:
2 | import Adafruit_SSD1306
3 | from PIL import Image, ImageDraw, ImageFont
4 | except:
5 | pass
6 |
7 | RST = None
8 |
9 | class SSD1306:
10 | def __init__(self,stats):
11 | self.stats = stats
12 |
13 | def init(self):
14 | self.disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
15 | self.disp.begin()
16 | self.disp.clear()
17 | self.disp.display()
18 | self.width = self.disp.width
19 | self.height = self.disp.height
20 | self.image = Image.new('1', (self.width, self.height))
21 | self.draw = ImageDraw.Draw(self.image)
22 | self.font = ImageFont.load_default()
23 | self.draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)
24 |
25 | def update(self):
26 | # Format temperature and hash rate with two decimal places
27 | formatted_temp = "{:.2f}".format(self.stats.temp)
28 | formatted_hash_rate = "{:.2f}".format(self.stats.hashing_speed)
29 | self.draw.text((0, 0), "Temp: " + formatted_temp, font=self.font, fill=255)
30 | self.draw.text((0, 10), "HR: " + formatted_hash_rate + " GH", font=self.font, fill=255)
31 | self.disp.image(self.image)
32 | self.disp.display()
33 |
--------------------------------------------------------------------------------
/config.yml.example:
--------------------------------------------------------------------------------
1 | debug_bm1366: true
2 | verify_solo: false
3 | miner: qaxe+
4 |
5 | # maybe doesn't work with all pools
6 | suggest_difficulty: 2048
7 |
8 | piaxe:
9 | name: PiAxe
10 | chips: 1
11 | asic_frequency: 485
12 | sdn_pin: 11
13 | pgood_pin: 13
14 | nrst_pin: 15
15 | led_pin: 19
16 | pwm_hz: 1000
17 | pwm_duty_cycle: 80
18 | lm75_address: 0x48
19 | extranonce2_interval: 1.5
20 | serial_port: "/dev/ttyS0"
21 |
22 | bitcrane:
23 | name: BitCrane
24 | chips: 1000
25 | asic_frequency: 300
26 | extranonce2_interval: 1.5
27 | fan_speed: 0.25
28 |
29 | qaxe:
30 | name: QAxe
31 | chips: 4
32 | fan_speed_1: 1.0
33 | fan_speed_2: 1.0
34 | asic_frequency: 485
35 | extranonce2_interval: 1.9
36 | serial_port_asic: "/dev/ttyACM0"
37 | serial_port_ctrl: "/dev/ttyACM1"
38 |
39 | qaxe+:
40 | name: QAxe+
41 | chips: 4
42 | fan_speed_1: 1.0
43 | fan_speed_2: 1.0
44 | asic_frequency: 490
45 | extranonce2_interval: 1.5
46 | serial_port_asic: "/dev/ttyACM0"
47 | serial_port_ctrl: "/dev/ttyACM1"
48 |
49 | flex4axe:
50 | name: Flex4Axe
51 | chips: 16
52 | fan_speed_1: 1.0
53 | asic_frequency: 480
54 | extranonce2_interval: 1.9
55 | serial_port_asic: "/dev/ttyACM0"
56 | serial_port_ctrl: "/dev/ttyACM1"
57 |
58 | 0xaxe:
59 | name: 0xAxe
60 | chips: 16
61 | fan_speed_1: 1.0
62 | asic_frequency: 480
63 | extranonce2_interval: 1.9
64 | serial_port_asic: "/dev/ttyACM0"
65 | serial_port_ctrl: "/dev/ttyACM1"
66 |
67 | alerter:
68 | enabled: false
69 | type: discord-webhook
70 | # file: load from file
71 | url: file://webhook.url
72 | name: Piaxe1
73 | retrigger_time: 3600
74 |
75 |
76 | influx:
77 | enabled: true
78 | host: localhost
79 | port: 8086
80 | token: f37fh783hf8hq
81 | org: piaxe
82 | bucket: piaxe
83 | timezone: Europe/Berlin
84 |
85 | # for Refoss Tasmota
86 | smartplug:
87 | enabled: false
88 | # URL of smart plug data
89 | url: http://192.168.0.166/cm?cmnd=Status%208
90 |
91 | i2c_display:
92 | enabled: false
93 |
94 | rest_api:
95 | enabled: false
96 | host: 127.0.0.1
97 | port: 5000
98 |
99 |
--------------------------------------------------------------------------------
/piaxe/manager/index1.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | ASIC Frequency Adjuster
6 |
7 |
8 |
9 | ASIC Frequency Management
10 |
11 |
12 |
53 |
54 |
55 |
56 |
--------------------------------------------------------------------------------
/piaxe/discord.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import json
3 | import logging
4 | import time
5 |
6 | class Alerter:
7 | def __init__(self, config):
8 | self.config = config
9 | self.triggered_alerts = dict()
10 | self.name = self.config["name"]
11 | self.retrigger_time = self.config["retrigger_time"]
12 |
13 | def alert(self, key, msg):
14 | raise NotImplementedError
15 |
16 | def alert_if(self, key, msg, cond):
17 | if not cond:
18 | if key in self.triggered_alerts:
19 | self.alert(key, "recovered")
20 | del self.triggered_alerts[key]
21 | return
22 | else:
23 | # check if msg in triggered alerts map
24 | if key in self.triggered_alerts:
25 | trigger_time = self.triggered_alerts[key]
26 | # don't trigger too often
27 | if (time.time() - trigger_time) < self.retrigger_time:
28 | return
29 |
30 | if self.alert(key, msg):
31 | self.triggered_alerts[key] = time.time()
32 |
33 | class DiscordWebhookAlerter(Alerter):
34 | def __init__(self, config):
35 | super().__init__(config)
36 |
37 | # get webhook url
38 | self.url = self.config["url"]
39 |
40 | # if it starts with file:// load content from file
41 | if self.url.startswith('file://'):
42 | file_path = self.url[7:]
43 | try:
44 | with open(file_path, 'r') as file:
45 | self.url = file.read().strip()
46 | except FileNotFoundError:
47 | raise Exception(f"The file specified in the URL does not exist: {file_path}")
48 |
49 | def alert(self, key, msg):
50 | try:
51 | response = requests.post(self.url, data=json.dumps({"content": f"[{key}] {msg}", "username": self.name}), headers={'Content-Type': 'application/json'})
52 | if response.status_code != 204:
53 | raise Exception(response.status_code)
54 | #logging.debug(f"would alert {key} {msg}")
55 | return True
56 | except Exception as e:
57 | logging.error("alerter error: %s", e)
58 | return False
59 |
60 |
61 |
62 |
--------------------------------------------------------------------------------
/piaxe/smartplug.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import requests
3 | import threading
4 | import time
5 | from influxdb_client import Point, WritePrecision
6 |
7 | class Tasmota:
8 | def __init__(self, config):
9 | self.url = config['url']
10 | self.data_lock = threading.Lock()
11 | self.latest_data = None
12 | self.fetch_thread = threading.Thread(target=self._fetch_data_loop)
13 | self.shutdown_event = threading.Event()
14 |
15 | def _fetch_data_loop(self):
16 | """Thread loop for fetching data."""
17 | logging.info("smartplug monitoring started ...")
18 | while not self.shutdown_event.is_set():
19 | try:
20 | response = requests.get(self.url, timeout=5)
21 | response.raise_for_status() # will raise an exception for 4XX/5XX errors
22 | with self.data_lock:
23 | self.latest_data = response.json()
24 | except Exception as e:
25 | logging.error("Failed to fetch data from smart plug: %s", e)
26 | with self.data_lock:
27 | self.latest_data = None
28 | time.sleep(5)
29 |
30 | def start(self):
31 | """Start the fetch thread."""
32 | self.fetch_thread.start()
33 |
34 | def shutdown(self):
35 | """Shutdown fetch thread and wait for it to finish."""
36 | self.shutdown_event.set()
37 | self.fetch_thread.join()
38 |
39 | def add_smart_plug_energy_data(self, point):
40 | """Fetch latest data and add to point, if available."""
41 | with self.data_lock:
42 | if self.latest_data is None:
43 | return
44 |
45 | data = self.latest_data
46 | energy_data = data["StatusSNS"]["ENERGY"]
47 | point.field("temperature_sp", float(data["StatusSNS"]["ANALOG"]["Temperature"])) \
48 | .field("power", float(energy_data["Power"])) \
49 | .field("apparent_power", float(energy_data["ApparentPower"])) \
50 | .field("reactive_power", float(energy_data["ReactivePower"])) \
51 | .field("factor", float(energy_data["Factor"])) \
52 | .field("voltage", float(energy_data["Voltage"])) \
53 | .field("current", float(energy_data["Current"])) \
54 | .field("total_consumption", float(energy_data["Total"]))
--------------------------------------------------------------------------------
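
A minimal sketch of the Tasmota `Status 8` payload that `add_smart_plug_energy_data` above assumes, with illustrative values; assumes the repo root is on `PYTHONPATH` so the module imports as `piaxe.smartplug`:

```python
from influxdb_client import Point
from piaxe.smartplug import Tasmota

# illustrative "Status 8" payload containing exactly the fields the parser reads
sample = {
    "StatusSNS": {
        "ANALOG": {"Temperature": 33.1},
        "ENERGY": {
            "Power": 12.0, "ApparentPower": 14.0, "ReactivePower": 7.0,
            "Factor": 0.86, "Voltage": 231.0, "Current": 0.061, "Total": 4.321,
        },
    }
}

plug = Tasmota({"url": "http://192.168.0.166/cm?cmnd=Status%208"})
plug.latest_data = sample                 # normally filled by the fetch thread
point = Point("piaxe")                    # measurement name is arbitrary here
plug.add_smart_plug_energy_data(point)
print(point.to_line_protocol())
```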
/piaxe/boards/bitcrane.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 |
4 |
5 | try:
6 | import pyftdi.serialext
7 | from pyftdi.gpio import GpioSyncController
8 | from pyftdi.i2c import I2cController, I2cIOError
9 | except:
10 | pass
11 |
12 | from . import board
13 |
14 | class BitcraneHardware(board.Board):
15 | TMP75_ADDRESSES = [ 0x48, 0x4C ]
16 | EMC2305_ADDRESS = 0x4D
17 | FAN_PWM_REGISTERS = [0x30, 0x40, 0x50, 0x60, 0x70]
18 |
19 | def __init__(self, config):
20 | self.config = config
21 |
22 | i2c = I2cController()
23 | i2c.configure('ftdi://ftdi:4232/2',
24 | frequency=100000,
25 | clockstretching=False,
26 | debug=True)
27 | self.rst_plug_gpio = i2c.get_gpio()
28 | self.rst_plug_gpio.set_direction(0x30, 0x30)
29 | self.temp_sensors = []
30 | for address in BitcraneHardware.TMP75_ADDRESSES:
31 | self.temp_sensors.append(i2c.get_port(address))
32 |
33 | self.fan_controller = i2c.get_port(BitcraneHardware.EMC2305_ADDRESS)
34 |
35 | # Initialize serial communication
36 | self._serial_port = pyftdi.serialext.serial_for_url('ftdi://ftdi:4232/1',
37 | baudrate=115200,
38 | timeout=1)
39 |
40 | self.set_fan_speed(0, config['fan_speed'])
41 |
42 | def set_fan_speed(self, channel, percent):
43 | pwm_value = int(255 * percent)
44 | for fan_reg in BitcraneHardware.FAN_PWM_REGISTERS:
45 | self.fan_controller.write_to(fan_reg, [pwm_value])
46 | print(f"Set fan to {percent * 100}% speed.")
47 |
48 | def read_temperature_and_voltage(self):
49 | highest_temp = 0
50 | for sensor in self.temp_sensors:
51 | temp = sensor.read_from(0x00, 2)
52 | if highest_temp < temp[0]:
53 | highest_temp = temp[0]
54 |
55 | return {
56 | "temp": [highest_temp + 5, None, None, None],
57 | "voltage": [None, None, None, None],
58 | }
59 |
60 | def set_led(self, state):
61 | pass
62 |
63 | def reset_func(self, state):
64 | self.rst_plug_gpio.write(0x00)
65 | time.sleep(0.5)
66 | self.rst_plug_gpio.write(0x30)
67 | time.sleep(0.5)
68 |
69 | def shutdown(self):
70 | # disable buck converter
71 | logging.info("shutdown miner ...")
72 | self.reset_func(True)
73 |
74 | def serial_port(self):
75 | return self._serial_port
76 |
--------------------------------------------------------------------------------
/piaxe/utils.py:
--------------------------------------------------------------------------------
1 | # translated from: https://github.com/skot/ESP-Miner
2 |
3 | import sys
4 | import hashlib
5 | import struct
6 | import binascii
7 |
8 | def swab32(v):
9 | return ((v << 24) & 0xFF000000) | \
10 | ((v << 8) & 0x00FF0000) | \
11 | ((v >> 8) & 0x0000FF00) | \
12 | ((v >> 24) & 0x000000FF)
13 |
14 | def flip32bytes(src):
15 | # Ensure the input is a bytearray for mutability.
16 | if not isinstance(src, bytearray):
17 | raise TypeError("src must be a bytearray")
18 | # Ensure the input length is a multiple of 4.
19 | if len(src) % 4 != 0:
20 | raise ValueError("src length must be a multiple of 4")
21 |
22 | dest = bytearray(len(src))
23 | for i in range(0, len(src), 4):
24 | # Interpret the next 4 bytes of `src` as a 32-bit integer in native endianness
 25 |         word, = struct.unpack('I', src[i:i+4])
 26 |         # Write the word back with its four bytes reversed
 27 |         struct.pack_into('I', dest, i, swab32(word))
 28 | 
 29 |     return dest
--------------------------------------------------------------------------------
/piaxe/manager/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | ASIC Frequency Adjuster
6 |
7 |
20 |
21 |
22 | ASIC Frequency Management
23 |
24 |
25 |
84 |
85 |
86 |
--------------------------------------------------------------------------------
/piaxe/boards/piaxe.py:
--------------------------------------------------------------------------------
1 | # piaxe
2 | import logging
3 | import serial
4 | import time
5 |
6 | try:
7 | import RPi.GPIO as GPIO
8 | from rpi_hardware_pwm import HardwarePWM
9 | import smbus
10 | except:
11 | pass
12 |
13 | from . import board
14 |
15 | class RPiHardware(board.Board):
16 | def __init__(self, config):
17 | # Setup GPIO
18 | GPIO.setmode(GPIO.BOARD) # Use Physical pin numbering
19 |
20 | # Load settings from config
21 | self.config = config
22 | self.sdn_pin = self.config['sdn_pin']
23 | self.pgood_pin = self.config['pgood_pin']
24 | self.nrst_pin = self.config['nrst_pin']
25 | self.led_pin = self.config['led_pin']
26 | self.lm75_address = self.config['lm75_address']
27 |
28 | # Initialize GPIO Pins
29 | GPIO.setup(self.sdn_pin, GPIO.OUT, initial=GPIO.LOW)
30 | GPIO.setup(self.pgood_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
31 | GPIO.setup(self.nrst_pin, GPIO.OUT, initial=GPIO.HIGH)
32 | GPIO.setup(self.led_pin, GPIO.OUT, initial=GPIO.LOW)
33 |
34 | # Create an SMBus instance
35 | self._bus = smbus.SMBus(1) # 1 indicates /dev/i2c-1
36 |
37 | pwm = HardwarePWM(pwm_channel=0, hz=self.config['pwm_hz'])
38 | pwm.start(self.config['pwm_duty_cycle'])
39 |
40 | # Initialize serial communication
41 | self._serial_port = serial.Serial(
42 | port=self.config['serial_port'], # For GPIO serial communication use /dev/ttyS0
43 | baudrate=115200, # Set baud rate to 115200
44 | bytesize=serial.EIGHTBITS, # Number of data bits
45 | parity=serial.PARITY_NONE, # No parity
46 | stopbits=serial.STOPBITS_ONE, # Number of stop bits
47 | timeout=1 # Set a read timeout
48 | )
49 |
50 | GPIO.output(self.sdn_pin, True)
51 |
52 | while (not self._is_power_good()):
53 | print("power not good ... waiting ...")
54 | time.sleep(1)
55 |
56 | def _is_power_good(self):
57 | return GPIO.input(self.pgood_pin)
58 |
59 | def set_fan_speed(self, channel, speed):
60 | pass
61 |
62 | def read_temperature_and_voltage(self):
63 | data = self._bus.read_i2c_block_data(self.lm75_address, 0, 2)
64 | # Convert the data to 12-bits
65 | temp = (data[0] << 4) | (data[1] >> 4)
66 | # Convert to a signed 12-bit value
67 | if temp > 2047:
68 | temp -= 4096
69 |
70 | # Convert to Celsius
71 | celsius = temp * 0.0625
72 | return {
73 | "temp": [celsius, None, None, None],
74 | "voltage": [None, None, None, None],
75 | }
76 |
77 | def set_led(self, state):
78 | GPIO.output(self.led_pin, True if state else False)
79 |
80 | def reset_func(self):
81 | GPIO.output(self.nrst_pin, True)
82 | time.sleep(0.5)
83 | GPIO.output(self.nrst_pin, False)
84 | time.sleep(0.5)
85 |
86 |
87 | def shutdown(self):
88 | # disable buck converter
89 | logging.info("shutdown miner ...")
90 | GPIO.output(self.sdn_pin, False)
91 | self.set_led(False)
92 |
93 | def serial_port(self):
94 | return self._serial_port
--------------------------------------------------------------------------------
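
A worked example of the LM75 conversion in `read_temperature_and_voltage` above, using illustrative register bytes:

```python
data = [0x1A, 0x80]                      # raw MSB/LSB as read over I2C (illustrative)
temp = (data[0] << 4) | (data[1] >> 4)   # 12-bit reading: 0x1A8 = 424
if temp > 2047:                          # sign-extend negative temperatures
    temp -= 4096
print(temp * 0.0625)                     # 424 * 0.0625 = 26.5 °C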
/README.md:
--------------------------------------------------------------------------------
1 | # Hardware
2 |
3 | PiAxe-Miner is the software needed to run the PiAxe and QAxe.
4 |
5 | The repository with design files, BOM, ... can be found [here](https://github.com/shufps/piaxe)!
6 |
7 |
8 | # Stratum Client Software
9 |
10 | Fork of: https://github.com/crypto-jeronimo/pyminer
11 | Changes:
12 |
13 | - Removed Scrypt hashing and added Miner class
14 | - Made it work with Python3
15 | - Added [PiAxe](https://github.com/shufps/piaxe) and [QAxe](https://github.com/shufps/qaxe) as miners
16 | - Added reconnect logic for broken connections
17 |
18 | Influx and Grafana
19 | ==================
20 |
21 | The repository contains a dockerized setup, running on the Pi, that shows some statistics:
22 |
23 |
24 |
25 |
26 |
27 | The "blocks found" counter is static of course ...
28 |
29 | PyMiner
30 | =======
31 |
32 | Currently supported algorithms:
33 | - `sha256d`: SHA256d
34 |
35 |
36 | Usage
37 | -----
38 | ```
39 | python pyminer.py [-h] [-o URL] [-u USERNAME] [-p PASSWORD]
40 | [-O USERNAME:PASSWORD] [-a ALGO] [-B] [-q]
41 | [-P] [-d] [-v]
42 |
43 | -o URL, --url= stratum mining server url
44 | -u USERNAME, --user= username for mining server
45 | -p PASSWORD, --pass= password for mining server
46 | -O USER:PASS, --userpass= username:password pair for mining server
47 |
48 | -B, --background run in the background as a daemon
49 |
50 | -q, --quiet suppress non-errors
51 | -P, --dump-protocol show all JSON-RPC chatter
52 | -d, --debug show extra debug information
53 |
54 | -h, --help show the help message and exit
55 | -v, --version show program's version number and exit
56 |
57 |
58 | Example:
59 | python pyminer.py -o stratum+tcp://foobar.com:3333 -u user -p passwd
60 | ```
61 |
62 | ---
63 |
64 | # Setup Instructions
65 |
66 | ## Requirements
67 | - Raspberry Pi 3 (Pi Zero doesn't run influx)
68 | - Python 3.x and pip
69 |
70 | ## Installation
71 |
72 | ```
73 | # install pip3
74 | sudo apt install python3-pip
75 |
76 | # clone repository
77 | git clone https://github.com/shufps/piaxe-miner
78 | cd piaxe-miner
79 |
80 | # and install requirements
81 | pip3 install -r requirements.txt --break-system-packages
82 |
83 | # copy example files
84 | cp config.yml.example config.yml
85 | cp start_mainnet_publicpool_example.sh start.sh
86 | ```
 87 | After copying the example files, edit them. The `config.yml` probably doesn't need changes if you are connecting a QAxe+.
 88 | 
 89 | In the new `start.sh`, insert your own `bc1...` address.
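
For reference, this is the mining command from `start_mainnet_publicpool_example.sh` in this repository; the `.piaxe` suffix after the address is typically used as the worker name on the pool:

```bash
python3 ./pyminer.py -o stratum+tcp://public-pool.io:21496 -d -P -u bc1YOURBTCADDRESS.piaxe -p x
```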
90 |
91 | ### PiAxe
 92 | Depending on your device, set the `miner` option to
 93 | `piaxe` or `qaxe`.
 94 | 
 95 | For the `PiAxe`, make sure the correct serial port is configured:
96 | ```
97 | serial_port: "/dev/ttyS0"
98 | ```
99 |
100 | ### If running on Pi Zero (1 or 2)
101 | Disable influx or point it to an externally managed InfluxDB; with the most recent changes, the Pi Zero can no longer run Grafana.
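
A minimal sketch of the relevant `config.yml` section, based on `config.yml.example` (`influx.local` is a placeholder hostname):

```yaml
influx:
  enabled: false         # disable entirely on a Pi Zero, or ...
  # host: influx.local   # ... keep it enabled and point it at an external instance
  # port: 8086
```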
102 |
103 |
104 | ## Start the miner
105 |
106 | Adapt `start_mainnet_publicpool_example.sh` (or your `start.sh` copy) to your needs.
107 |
108 |
109 | ### For more detailed logging
110 | Activate `debug_bm1366` in `config.yml` to get more detailed output in the shell.
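
The flag as it appears in `config.yml.example`:

```yaml
debug_bm1366: true
```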
111 |
--------------------------------------------------------------------------------
/piaxe/boards/flex4axe.py:
--------------------------------------------------------------------------------
1 | import serial
2 | import threading
3 | import time
4 | import binascii
5 |
6 | from . import qaxe
7 |
8 | try:
9 | from . import coms_pb2
10 | except:
11 | pass
12 |
13 | class Flex4AxeHardware(qaxe.QaxeHardware):
14 | def __init__(self, config):
15 | # Load settings from config
16 | self.config = config
17 |
18 |         self.state_power = 0
19 | self.pwm1 = self.config.get('fan_speed_1', 100)
20 |
21 | self.reqid = 0
22 | self.serial_port_ctrl_lock = threading.Lock()
23 |
24 | # Initialize serial communication
25 | self._serial_port_asic = serial.Serial(
26 | port=self.config['serial_port_asic'], # For GPIO serial communication use /dev/ttyS0
27 | baudrate=115200, # Set baud rate to 115200
28 | bytesize=serial.EIGHTBITS, # Number of data bits
29 | parity=serial.PARITY_NONE, # No parity
30 | stopbits=serial.STOPBITS_ONE, # Number of stop bits
31 | timeout=1 # Set a read timeout
32 | )
33 |
34 | # Initialize serial communication
35 | self._serial_port_ctrl = serial.Serial(
36 | port=self.config['serial_port_ctrl'], # For GPIO serial communication use /dev/ttyS0
37 | baudrate=115200, # Set baud rate to 115200
38 | bytesize=serial.EIGHTBITS, # Number of data bits
39 | parity=serial.PARITY_NONE, # No parity
40 | stopbits=serial.STOPBITS_ONE, # Number of stop bits
41 | timeout=1 # Set a read timeout
42 | )
43 |
44 | self.set_fan_speed(0, self.pwm1)
45 |
46 | self._switch_power(False)
47 | time.sleep(1)
48 | self._switch_power(True)
49 | time.sleep(1)
50 |
51 |
52 | def read_temperature_and_voltage(self):
53 | with self.serial_port_ctrl_lock:
54 | resp = self._request(2, None)
55 | if resp is None or resp.error != 0:
56 | raise Exception("failed reading status!")
57 |
58 | status = coms_pb2.QState()
59 | status.ParseFromString(resp.data[1:])
60 |
61 | return {
62 | "temp": [status.temp1 * 0.0625, status.temp2 * 0.0625, status.temp3 * 0.0625, status.temp4 * 0.0625],
63 | "voltage": [status.domain1 * 0.95, status.domain2 * 0.95, status.domain3 * 0.95, status.domain4 * 0.95],
64 | }
65 |
66 | def _set_state(self):
67 | with self.serial_port_ctrl_lock:
68 | qcontrol = coms_pb2.QControl()
69 | qcontrol.pwm1 = int(min(100, self.pwm1 * 100.0))
70 | if self._request(1, qcontrol).error != 0:
71 | raise Exception("couldn't switch power!")
72 |
73 | def _switch_power(self, state):
74 | if state:
75 | self.power_on()
76 | else:
77 | self.shutdown()
78 |
79 | time.sleep(1)
80 |
81 | def reset_func(self):
82 | # reset generated by the stm32 on power on
83 | pass
84 |
85 | def power_on(self):
86 | with self.serial_port_ctrl_lock:
87 | if self._request(5, None).error != 0:
88 | raise Exception("error powering on qaxe!")
89 | time.sleep(5)
90 |
91 | def shutdown(self):
92 | with self.serial_port_ctrl_lock:
93 | if self._request(4, None).error != 0:
94 | raise Exception("error shutting down qaxe!")
95 | time.sleep(5)
--------------------------------------------------------------------------------
/piaxe/crc_functions.py:
--------------------------------------------------------------------------------
1 | # translated from: https://github.com/skot/ESP-Miner
2 |
3 | CRC5_MASK = 0x1F
4 |
5 | def crc5(data):
6 | length = len(data)
7 |
8 | crc = CRC5_MASK
9 | crcin = [1, 1, 1, 1, 1]
10 | crcout = [1, 1, 1, 1, 1]
11 |
12 | length *= 8 # Convert length from bytes to bits
13 | bit_pos = 7
14 | byte_index = 0
15 |
16 | for i in range(length):
17 | din = (data[byte_index] & (1 << bit_pos)) != 0
18 | crcout[0] = crcin[4] ^ din
19 | crcout[1] = crcin[0]
20 | crcout[2] = crcin[1] ^ crcin[4] ^ din
21 | crcout[3] = crcin[2]
22 | crcout[4] = crcin[3]
23 | if bit_pos == 0:
24 | bit_pos = 7
25 | byte_index += 1
26 | else:
27 | bit_pos -= 1
28 | crcin[:] = crcout[:]
29 |
30 | crc = 0
31 | for bit in range(5):
32 | crc |= crcin[bit] << bit
33 |
34 | return crc
35 |
36 | # CRC-16/CCITT Lookup Table
37 | crc16_table = [
38 | 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
39 | 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
40 | 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
41 | 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
42 | 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
43 | 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
44 | 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
45 | 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
46 | 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
47 | 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
48 | 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
49 | 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
50 | 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
51 | 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
52 | 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
53 | 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
54 | 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
55 | 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
56 | 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
57 | 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
58 | 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
59 | 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
60 | 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
61 | 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
62 | 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
63 | 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
64 | 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
65 | 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
66 | 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
67 | 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
68 | 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
69 | 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
70 | ]
71 |
72 | def crc16(buffer):
73 | crc = 0
74 | for byte in buffer:
75 | crc = (crc << 8) ^ crc16_table[((crc >> 8) ^ byte) & 0xFF]
76 | return crc & 0xFFFF # Ensure CRC is within 16-bit range
77 |
78 | def crc16_false(buffer):
79 | crc = 0xffff
80 | for byte in buffer:
81 | crc = (crc << 8) ^ crc16_table[((crc >> 8) ^ byte) & 0xFF]
82 | return crc & 0xFFFF # Ensure CRC is within 16-bit range
83 |
--------------------------------------------------------------------------------
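
A quick sanity sketch for `crc_functions.py` above, using the standard CRC check string; the expected values rest on my assumption that `crc16` matches CRC-16/XMODEM and `crc16_false` matches CRC-16/CCITT-FALSE (same table and polynomial 0x1021, init 0x0000 vs. 0xFFFF):

```python
from piaxe.crc_functions import crc5, crc16, crc16_false

data = b"123456789"
assert crc16(data) == 0x31C3        # CRC-16/XMODEM check value
assert crc16_false(data) == 0x29B1  # CRC-16/CCITT-FALSE check value

# crc5 implements the 5-bit CRC (poly 0x05, init 0x1F) used in BM13xx-style frames
print(hex(crc5(bytes([0x55, 0xAA]))))
```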
/cpu_miner/miner.py:
--------------------------------------------------------------------------------
1 | import threading
2 | import struct
3 | import binascii
4 | from shared import shared
5 | import time
6 | import logging
7 |
8 | class Job(shared.Job):
9 | def __init__(
10 | self,
11 | job_id,
12 | prevhash,
13 | coinb1,
14 | coinb2,
15 | merkle_branches,
16 | version,
17 | nbits,
18 | ntime,
19 | extranonce1,
20 | extranonce2_size,
21 | max_nonce=0x7fffffff,
22 | ):
23 | super().__init__(job_id, prevhash, coinb1, coinb2, merkle_branches, version, nbits, ntime, extranonce1, extranonce2_size, max_nonce)
24 |
25 | # Hash metrics (start time, delta time, total hashes)
26 | self._dt = 0.0
27 | self._hash_count = 0
28 |
29 | class CPUMiner:
30 | def __init__(self):
31 | self.current_job = None
32 | self.job_thread = None
33 | self.job_lock = threading.Lock()
34 | self.stop_event = threading.Event()
35 |
36 | self.set_difficulty(64)
37 |
38 | def init(self):
39 | pass
40 |
41 | def hashrate(self):
 42 |         '''The current hashrate, or the whole-job hashrate once stopped.'''
 43 |         job = self.current_job
 44 |         if job is None or job._dt == 0:
 45 |             return 0.0
 46 |         return job._hash_count / job._dt
46 |
47 | def _set_target(self, target):
48 | self._target = '%064x' % target
49 |
50 | def set_difficulty(self, difficulty):
51 | self._difficulty = difficulty
52 | self._set_target(shared.calculate_target(difficulty))
53 |
54 | def set_submit_callback(self, cb):
55 | self.submit_cb = cb
56 |
57 |
58 | def _mine(self):
59 | t0 = time.time()
60 |
61 | job = self.current_job
62 |
63 | for rolled_version in range(12345, 2**16):
64 | version_hex = shared.int_to_hex32(int(job._version, 16) | (rolled_version << 13))
65 |
66 | header_prefix_bin = shared.swap_endian_word(version_hex)
67 | header_prefix_bin += shared.swap_endian_words(job._prevhash)
68 | header_prefix_bin += shared.swap_endian_words(job._merkle_root)
69 | header_prefix_bin += shared.swap_endian_word(job._ntime)
70 | header_prefix_bin += shared.swap_endian_word(job._nbits)
71 | for nonce in range(0, job._max_nonce):
72 | # This job has been asked to stop
73 | if self.stop_event.is_set():
74 | job._dt += (time.time() - t0)
75 | return
76 |
77 | # Proof-of-work attempt
78 | nonce_bin = shared.int_to_bytes32(nonce)
79 | pow = shared.bytes_to_hex(shared.sha256d(header_prefix_bin + nonce_bin)[::-1])
80 | # Did we reach or exceed our target?
81 | result = None
82 | if pow <= self._target: # or pow < job._target:
83 | result = dict(
84 | job_id = job._job_id,
85 | extranonce2 = job._extranonce2,
86 | ntime = str(job._ntime), # Convert to str from json unicode
87 | nonce = shared.bytes_to_hex(nonce_bin[::-1]),
88 | version = shared.int_to_hex32(rolled_version << 13),
89 | )
90 | job._dt += (time.time() - t0)
91 | if not shared.verify_work(self._difficulty, job, result):
92 | logging.error("invalid result!")
93 | self.submit_cb(result)
94 |
95 | t0 = time.time()
96 | job._hash_count += 1
97 |
98 | def start_job(self, job):
99 | self.stop_event.set()
100 | if self.job_thread:
101 | self.job_thread.join()
102 |
103 | self.stop_event.clear()
104 | self.current_job = job
105 | self.job_thread = threading.Thread(target=self._mine)
106 | self.job_thread.start()
107 |
108 | def stop(self):
109 | self.stop_event.set()
110 | self.job_thread.join()
111 |
112 |
113 |
--------------------------------------------------------------------------------
/piaxe/rest.py:
--------------------------------------------------------------------------------
1 | try:
2 | from flask import Flask, request, jsonify, send_from_directory
3 | except:
4 | pass
5 | import threading
6 | import logging
7 | import random
8 |
9 | class RestAPI:
10 | def __init__(self, config, miner, stats):
11 | self.miner = miner
12 | self.hardware = miner.hardware
13 | self.asics = miner.asics
14 | self.app = Flask(__name__)
15 | self.cm = self.asics.clock_manager
16 | self.config = config
17 | self.stats = stats
18 |
19 | # Define routes
20 | self.app.add_url_rule('/clocks', 'get', self.get_clocks, methods=['GET'])
 21 |         self.app.add_url_rule('/clock/<int:id>', 'set', self.set_clock, methods=['POST'])
 22 |         self.app.add_url_rule('/stats', 'get_stats', self.get_stats, methods=['GET'])
 23 |         self.app.add_url_rule('/pwm/<int:id>/set', 'set_pwm', self.set_pwm, methods=['POST'])  # accepts a variable PWM channel id
24 | self.app.add_url_rule('/influx/stats', 'get_influx_stats', self.get_influx_stats, methods=['GET'])
25 |
26 |
27 | # Route to serve the index.html
28 | self.app.add_url_rule('/', 'root', self.root)
29 |
30 | def root(self):
31 | # Serve index.html
32 | return send_from_directory("./manager", 'index.html')
33 |
34 | def get_clocks(self):
35 | clocks = self.cm.get_clock(-1)
36 | return jsonify(clocks)
37 |
38 | def get_stats(self):
39 | # Dummy data for example purposes:
40 | stats = {
41 | "hashrates": [random.randint(50, 100) for _ in range(16)], # Random hash rates for 16 ASICs
42 | "voltages": [random.uniform(1.0, 1.5) for _ in range(4)] # Random voltages for 4 domains
43 | }
44 | return jsonify(stats)
45 |
46 | def set_clock(self, id):
47 | if id < 0 or id >= self.cm.num_asics:
48 | return jsonify({"error": "Invalid ASIC ID"}), 400
 49 |         new_frequency = request.json.get('frequency')
 50 |         if new_frequency is None or not (50.0 <= float(new_frequency) <= 550.0):
 51 |             return jsonify({"error": f"Invalid frequency {new_frequency}"}), 400
 52 |         try:
 53 |             self.cm.set_clock(id, float(new_frequency))
54 | except Exception as e:
55 | logging.error(e)
56 | return jsonify({"error": f"Error setting clock to {new_frequency}"}), 400
57 | return jsonify({"success": True, "frequency": new_frequency})
58 |
59 | def set_pwm(self, id):
60 | pwm_value = float(request.json.get('pwm_value'))
61 | if 0.0 <= pwm_value <= 1.0:
62 | try:
63 | self.hardware.set_fan_speed(id-1, pwm_value)
64 | return jsonify({"success": True, "pwm_value": pwm_value, "channel_id": id})
65 | except Exception as e:
66 | logging.error(e)
67 | return jsonify({"error": f"Error setting PWM value for channel {id}"}), 400
68 | else:
69 | return jsonify({"error": f"Invalid PWM value for channel {id}. Must be between 0.0 and 1.0"}), 400
70 |
71 | # InfluxDB Stats Endpoint
72 | def get_influx_stats(self):
73 | with self.stats.lock:
74 | stats = {
75 | "temperature": self.stats.temp,
76 | "temperature2": self.stats.temp2,
77 | "temperature3": self.stats.temp3,
78 | "temperature4": self.stats.temp4,
79 | "vdomain1": self.stats.vdomain1,
80 | "vdomain2": self.stats.vdomain2,
81 | "vdomain3": self.stats.vdomain3,
82 | "vdomain4": self.stats.vdomain4,
83 | "hashing_speed": self.stats.hashing_speed,
84 | "invalid_shares": self.stats.invalid_shares,
85 | "valid_shares": self.stats.valid_shares,
86 | "uptime": self.stats.uptime,
87 | "best_difficulty": self.stats.best_difficulty,
88 | "total_best_difficulty": self.stats.total_best_difficulty,
89 | "pool_errors": self.stats.pool_errors,
90 | "accepted": self.stats.accepted,
91 | "not_accepted": self.stats.not_accepted,
92 | "total_uptime": self.stats.total_uptime,
93 | "total_blocks_found": self.stats.total_blocks_found,
94 | "blocks_found": self.stats.blocks_found,
95 | "difficulty": self.stats.difficulty,
96 | "duplicate_hashes": self.stats.duplicate_hashes
97 | }
98 | return jsonify(stats)
99 |
100 | def run(self):
101 | host = self.config.get("host", "127.0.0.1")
102 | port = int(self.config.get("port", "5000"))
103 | def run_app():
104 | self.app.run(host=host, port=port, debug=True, use_reloader=False)
105 |
106 | self.server_thread = threading.Thread(target=run_app)
107 | self.server_thread.start()
108 |
--------------------------------------------------------------------------------
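
Hypothetical local calls against the REST API above, assuming `rest_api` is enabled with the defaults from `config.yml.example` (`127.0.0.1:5000`); note that `set_pwm` maps channel id 1 to hardware channel 0:

```python
import requests

base = "http://127.0.0.1:5000"
print(requests.get(f"{base}/stats").json())

r = requests.post(f"{base}/clock/0", json={"frequency": 485.0})
print(r.json())  # {"success": true, "frequency": 485.0} on success

r = requests.post(f"{base}/pwm/1/set", json={"pwm_value": 0.5})
print(r.json())
```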
/piaxe/boards/qaxe.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import threading
3 | import serial
4 | import time
5 | import binascii
6 |
7 | try:
8 | from . import coms_pb2
9 | except:
10 | pass
11 |
12 | from . import board
13 |
14 | class QaxeHardware(board.Board):
15 | def __init__(self, config):
16 | # Load settings from config
17 | self.config = config
18 |
 19 |         self.state_power = 0
20 | self.pwm1 = self.config.get('fan_speed_1', 100)
21 | self.pwm2 = self.config.get('fan_speed_2', 0)
22 |
23 | self.reqid = 0
24 | self.serial_port_ctrl_lock = threading.Lock()
25 |
26 | # Initialize serial communication
27 | self._serial_port_asic = serial.Serial(
28 | port=self.config['serial_port_asic'], # For GPIO serial communication use /dev/ttyS0
29 | baudrate=115200, # Set baud rate to 115200
30 | bytesize=serial.EIGHTBITS, # Number of data bits
31 | parity=serial.PARITY_NONE, # No parity
32 | stopbits=serial.STOPBITS_ONE, # Number of stop bits
33 | timeout=1 # Set a read timeout
34 | )
35 |
36 | # Initialize serial communication
37 | self._serial_port_ctrl = serial.Serial(
38 | port=self.config['serial_port_ctrl'], # For GPIO serial communication use /dev/ttyS0
39 | baudrate=115200, # Set baud rate to 115200
40 | bytesize=serial.EIGHTBITS, # Number of data bits
41 | parity=serial.PARITY_NONE, # No parity
42 | stopbits=serial.STOPBITS_ONE, # Number of stop bits
43 | timeout=1 # Set a read timeout
44 | )
45 |
46 | self._switch_power(True)
47 |
48 | def _is_power_good(self):
49 | return True
50 |
51 | def set_fan_speed(self, channel, speed):
52 | if channel == 0:
53 | self.pwm1 = speed
54 | elif channel == 1:
55 | self.pwm2 = speed
56 | self._set_state()
57 |
58 | def set_led(self, state):
59 | pass
60 |
61 | def _request(self, op, params):
62 | request = coms_pb2.QRequest()
63 | request.id = self.reqid # Set a unique ID for the request
64 | request.op = op
65 |
66 | if params is not None:
67 | request.data = params.SerializeToString()
68 | else:
69 | request.data = b'0x00'
70 | request.data = bytes([len(request.data)]) + request.data
71 |
72 | serialized_request = request.SerializeToString()
73 | serialized_request = bytes([len(serialized_request)]) + serialized_request
74 |
75 | logging.debug("-> %s", binascii.hexlify(serialized_request).decode('utf8'))
76 |
77 | self._serial_port_ctrl.write(serialized_request)
78 |
79 | response_len = self._serial_port_ctrl.read()
80 | logging.debug(f"rx len: {response_len}")
81 | if len(response_len) == 1 and response_len[0] == 0:
82 | self.reqid += 1
83 | return coms_pb2.QResponse()
84 |
85 | response_data = self._serial_port_ctrl.read(response_len[0])
86 |
87 | logging.debug("<- %s", binascii.hexlify(response_data).decode('utf8'))
88 |
89 | response = coms_pb2.QResponse()
90 | response.ParseFromString(response_data)
91 |
92 | if response.id != self.reqid:
93 | logging.error(f"request and response IDs mismatch! {response.id} vs {self.reqid}")
94 |
95 | self.reqid += 1
96 | return response
97 |
98 | def read_temperature_and_voltage(self):
99 | with self.serial_port_ctrl_lock:
100 | resp = self._request(2, None)
101 | if resp is None or resp.error != 0:
102 | raise Exception("failed reading status!")
103 |
104 | status = coms_pb2.QState()
105 | status.ParseFromString(resp.data[1:])
106 |
107 | return {
108 | "temp": [status.temp1 * 0.0625, status.temp2 * 0.0625, None, None],
109 | "voltage": [None, None, None, None],
110 | }
111 |
112 | def _set_state(self):
113 | with self.serial_port_ctrl_lock:
114 | qcontrol = coms_pb2.QControl()
115 | qcontrol.state_1v2 = self.state
116 | qcontrol.pwm1 = int(min(100, self.pwm1 * 100.0))
117 | qcontrol.pwm2 = int(min(100, self.pwm2 * 100.0))
118 | if self._request(1, qcontrol).error != 0:
119 | raise Exception("couldn't switch power!")
120 |
121 | def _switch_power(self, state):
122 | self.state = 0 if not state else 1
123 | self._set_state()
124 |
125 | time.sleep(5)
126 |
127 | def reset_func(self):
128 | with self.serial_port_ctrl_lock:
129 | # cmd reset
130 | if self._request(3, None).error != 0:
131 | raise Exception("error resetting qaxe!")
132 | time.sleep(5)
133 |
134 | def shutdown(self):
135 | # disable buck converter
136 | logging.info("shutdown miner ...")
137 | with self.serial_port_ctrl_lock:
138 | # cmd shutdown
139 | if self._request(4, None).error != 0:
140 |                 raise Exception("error shutting down qaxe!")
141 |
142 | def serial_port(self):
143 | return self._serial_port_asic
144 |
--------------------------------------------------------------------------------
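
A minimal sketch of the ctrl-port framing, as I read `_request` above: the serialized `QRequest` goes on the wire with a one-byte length prefix, and its `data` field carries its own one-byte length prefix. Assumes the repo root is on `PYTHONPATH`:

```python
from piaxe.boards import coms_pb2

req = coms_pb2.QRequest()
req.id = 1
req.op = 2                                    # op 2 reads the QState status
req.data = b'0x00'                            # placeholder payload, as in _request
req.data = bytes([len(req.data)]) + req.data  # inner length prefix

payload = req.SerializeToString()
frame = bytes([len(payload)]) + payload       # outer length prefix on the wire
print(frame.hex())
```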
/piaxe/influx.py:
--------------------------------------------------------------------------------
1 | from influxdb_client import Point
2 | from influxdb_client.client.write_api import SYNCHRONOUS
3 | from influxdb_client import InfluxDBClient, Point, WriteOptions, WritePrecision
4 | from datetime import datetime
5 | import pytz
6 | import logging
7 | import threading
8 | import json
9 | import time
10 |
11 | class Stats:
12 | def __init__(self):
13 | self.temp = 25.0
14 | self.temp2 = 25.0
15 | self.temp3 = 25.0
16 | self.temp4 = 25.0
17 | self.vdomain1 = 1200
18 | self.vdomain2 = 1200
19 | self.vdomain3 = 1200
20 | self.vdomain4 = 1200
21 | self.hashing_speed = 0.0
22 | self.invalid_shares = 0
23 | self.valid_shares = 0
24 | self.difficulty = 512
25 | self.best_difficulty = 0.0
26 | self.pool_errors = 0
27 | self.accepted = 0
28 | self.not_accepted = 0
29 | self.total_uptime = 0
30 | self.total_best_difficulty = 0.0
31 | self.uptime = 0
32 | self.blocks_found = 0
33 | self.total_blocks_found = 0
34 | self.duplicate_hashes = 0
35 | self.asic_temp1 = None
36 | self.asic_temp2 = None
37 | self.asic_temp3 = None
38 | self.asic_temp4 = None
39 |
40 | self.lock = threading.Lock()
41 |
42 | def import_dict(self, data):
43 | self.total_uptime = data.get('total_uptime', self.total_uptime)
44 | self.total_best_difficulty = data.get('total_best_difficulty', self.total_best_difficulty)
45 | self.total_blocks_found = data.get('total_blocks_found', self.total_blocks_found)
46 |
47 | logging.info("loaded total uptime: %s seconds", self.total_uptime)
 48 |         logging.info("loaded total best difficulty: %.3f", self.total_best_difficulty)
49 | logging.info("loaded total blocks found: %d", self.total_blocks_found)
50 |
51 | class Influx:
52 | def __init__(self, config, stats, stats_name):
53 | # InfluxDB settings (replace with your own settings)
54 | self.host = config['host']
55 | self.port = config['port']
56 | self.token = config['token']
57 | self.org = config['org']
58 | self.bucket = config['bucket']
59 | self.stats_name = stats_name
60 | self.client = None
61 | self.tz = pytz.timezone(config['timezone'])
62 | self.stats = stats
63 | self.stop_event = threading.Event()
64 | self.callbacks = []
65 | self.connect()
66 |
67 | def add_stats_callback(self, callback):
68 | """Registers a callback function."""
69 | self.callbacks.append(callback)
70 |
71 | def start(self):
72 | self.submit_thread = threading.Thread(target=self._submit_thread)
73 | self.submit_thread.start()
74 |
75 | def shutdown(self):
76 | self.stop_event.set()
77 | self.submit_thread.join()
78 |
79 | def _submit_thread(self):
80 | while not self.stop_event.is_set():
81 | if not self.client:
82 | time.sleep(10)
83 | continue
84 |
85 | with self.stats.lock:
86 | point = Point(f"{ self.stats_name }").time(datetime.now(self.tz), WritePrecision.NS) \
87 | .field("temperature", float(self.stats.temp or 0.0)) \
88 | .field("temperature2", float(self.stats.temp2 or 0.0)) \
89 | .field("temperature3", float(self.stats.temp3 or 0.0)) \
90 | .field("temperature4", float(self.stats.temp4 or 0.0)) \
91 | .field("vdomain1", float(self.stats.vdomain1 or 0.0)) \
92 | .field("vdomain2", float(self.stats.vdomain2 or 0.0)) \
93 | .field("vdomain3", float(self.stats.vdomain3 or 0.0)) \
94 | .field("vdomain4", float(self.stats.vdomain4 or 0.0)) \
95 | .field("hashing_speed", float(self.stats.hashing_speed)) \
96 | .field("invalid_shares", int(self.stats.invalid_shares)) \
97 | .field("valid_shares", int(self.stats.valid_shares)) \
98 | .field("uptime", int(self.stats.uptime)) \
99 | .field("best_difficulty", float(self.stats.best_difficulty)) \
100 | .field("total_best_difficulty", float(self.stats.total_best_difficulty)) \
101 | .field("pool_errors", int(self.stats.pool_errors)) \
102 | .field("accepted", int(self.stats.accepted)) \
103 | .field("not_accepted", int(self.stats.not_accepted)) \
104 | .field("total_uptime", int(self.stats.total_uptime)) \
105 | .field("total_blocks_found", int(self.stats.total_blocks_found)) \
106 | .field("blocks_found", int(self.stats.blocks_found)) \
107 | .field("difficulty", int(self.stats.difficulty)) \
108 | .field("duplicate_hashes", int(self.stats.duplicate_hashes)) \
109 | .field("asic_temp1", int(self.stats.asic_temp1 or 0)) \
110 | .field("asic_temp2", int(self.stats.asic_temp2 or 0)) \
111 | .field("asic_temp3", int(self.stats.asic_temp3 or 0)) \
112 | .field("asic_temp4", int(self.stats.asic_temp4 or 0))
113 |
114 | for callback in self.callbacks:
115 | callback(point)
116 |
117 | try:
118 | write_api = self.client.write_api(write_options=SYNCHRONOUS)
119 | write_api.write(bucket=self.bucket, org=self.org, record=point)
120 | logging.debug("influx data written: %s", point.to_line_protocol())
121 | except Exception as e:
122 | logging.error("writing to influx failed: %s", e)
123 |
124 | time.sleep(15)
125 |
126 | def connect(self):
127 | # Connect to InfluxDB
128 | try:
129 | self.client = InfluxDBClient(url=f"http://{self.host}:{self.port}", token=self.token, org=self.org)
130 | except Exception as e:
131 | logging.error("connecting influx failed: %s", e)
132 |
133 | def bucket_exists(self, bucket_name):
134 | # List all buckets
135 | buckets = self.client.buckets_api().find_buckets().buckets
136 |
137 | # Check if the specified bucket is in the list
138 | for bucket in buckets:
139 | if bucket.name == bucket_name:
140 | return True
141 | return False
142 |
143 | def load_last_values(self):
144 | if not self.bucket_exists(self.bucket):
145 | logging.debug(f"Bucket {self.bucket} does not exist. Nothing imported.")
146 | return
147 |
148 | # Create a query to fetch the latest records
149 | query = f'''
150 | from(bucket: "{ self.bucket }")
151 | |> range(start: -1y)
152 | |> filter(fn: (r) => r["_measurement"] == "{ self.stats_name }")
153 | |> last()
154 | '''
155 |
156 | # Execute the query
157 | # better to let it raise an exception instead of resetting the total counters
158 | tables = self.client.query_api().query(query, org=self.org)
159 |
160 |
161 | # Process the results
162 | last_data = dict()
163 | for table in tables:
164 | for record in table.records:
165 | last_data[record.get_field()] = record.get_value()
166 |
167 | logging.debug("loaded values:\n"+json.dumps(last_data, indent=4))
168 |
169 | self.stats.import_dict(last_data)
170 |
171 |
172 | def close(self):
173 | # Close the connection
174 | if self.client:
175 | self.client.close()
176 |
--------------------------------------------------------------------------------
/piaxe/boards/coms_pb2.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by the protocol buffer compiler. DO NOT EDIT!
3 | # source: coms.proto
4 |
5 | from google.protobuf import descriptor as _descriptor
6 | from google.protobuf import message as _message
7 | from google.protobuf import reflection as _reflection
8 | from google.protobuf import symbol_database as _symbol_database
9 | # @@protoc_insertion_point(imports)
10 |
11 | _sym_db = _symbol_database.Default()
12 |
13 |
14 |
15 |
16 | DESCRIPTOR = _descriptor.FileDescriptor(
17 | name='coms.proto',
18 | package='',
19 | syntax='proto3',
20 | serialized_options=None,
21 | create_key=_descriptor._internal_create_key,
22 | serialized_pb=b'\n\ncoms.proto\"0\n\x08QRequest\x12\n\n\x02id\x18\x01 \x01(\x05\x12\n\n\x02op\x18\x02 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"4\n\tQResponse\x12\n\n\x02id\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"c\n\x08QControl\x12\x11\n\tstate_1v2\x18\x01 \x01(\x05\x12\x0c\n\x04pwm1\x18\x02 \x01(\x05\x12\x0c\n\x04pwm2\x18\x03 \x01(\x05\x12\x0c\n\x04pwm3\x18\x04 \x01(\x05\x12\x0c\n\x04pwm4\x18\x05 \x01(\x05\x12\x0c\n\x04led2\x18\x06 \x01(\x05\"\xb2\x01\n\x06QState\x12\x11\n\tpgood_1v2\x18\x01 \x01(\x05\x12\r\n\x05temp1\x18\x02 \x01(\x05\x12\r\n\x05temp2\x18\x03 \x01(\x05\x12\r\n\x05temp3\x18\x04 \x01(\x05\x12\r\n\x05temp4\x18\x05 \x01(\x05\x12\x0f\n\x07\x64omain1\x18\x06 \x01(\x05\x12\x0f\n\x07\x64omain2\x18\x07 \x01(\x05\x12\x0f\n\x07\x64omain3\x18\x08 \x01(\x05\x12\x0f\n\x07\x64omain4\x18\t \x01(\x05\x12\x15\n\rpower_enabled\x18\n \x01(\x05\x62\x06proto3'
23 | )
24 |
25 |
26 |
27 |
28 | _QREQUEST = _descriptor.Descriptor(
29 | name='QRequest',
30 | full_name='QRequest',
31 | filename=None,
32 | file=DESCRIPTOR,
33 | containing_type=None,
34 | create_key=_descriptor._internal_create_key,
35 | fields=[
36 | _descriptor.FieldDescriptor(
37 | name='id', full_name='QRequest.id', index=0,
38 | number=1, type=5, cpp_type=1, label=1,
39 | has_default_value=False, default_value=0,
40 | message_type=None, enum_type=None, containing_type=None,
41 | is_extension=False, extension_scope=None,
42 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
43 | _descriptor.FieldDescriptor(
44 | name='op', full_name='QRequest.op', index=1,
45 | number=2, type=5, cpp_type=1, label=1,
46 | has_default_value=False, default_value=0,
47 | message_type=None, enum_type=None, containing_type=None,
48 | is_extension=False, extension_scope=None,
49 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
50 | _descriptor.FieldDescriptor(
51 | name='data', full_name='QRequest.data', index=2,
52 | number=3, type=12, cpp_type=9, label=1,
53 | has_default_value=False, default_value=b"",
54 | message_type=None, enum_type=None, containing_type=None,
55 | is_extension=False, extension_scope=None,
56 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
57 | ],
58 | extensions=[
59 | ],
60 | nested_types=[],
61 | enum_types=[
62 | ],
63 | serialized_options=None,
64 | is_extendable=False,
65 | syntax='proto3',
66 | extension_ranges=[],
67 | oneofs=[
68 | ],
69 | serialized_start=14,
70 | serialized_end=62,
71 | )
72 |
73 |
74 | _QRESPONSE = _descriptor.Descriptor(
75 | name='QResponse',
76 | full_name='QResponse',
77 | filename=None,
78 | file=DESCRIPTOR,
79 | containing_type=None,
80 | create_key=_descriptor._internal_create_key,
81 | fields=[
82 | _descriptor.FieldDescriptor(
83 | name='id', full_name='QResponse.id', index=0,
84 | number=1, type=5, cpp_type=1, label=1,
85 | has_default_value=False, default_value=0,
86 | message_type=None, enum_type=None, containing_type=None,
87 | is_extension=False, extension_scope=None,
88 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
89 | _descriptor.FieldDescriptor(
90 | name='error', full_name='QResponse.error', index=1,
91 | number=2, type=5, cpp_type=1, label=1,
92 | has_default_value=False, default_value=0,
93 | message_type=None, enum_type=None, containing_type=None,
94 | is_extension=False, extension_scope=None,
95 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
96 | _descriptor.FieldDescriptor(
97 | name='data', full_name='QResponse.data', index=2,
98 | number=3, type=12, cpp_type=9, label=1,
99 | has_default_value=False, default_value=b"",
100 | message_type=None, enum_type=None, containing_type=None,
101 | is_extension=False, extension_scope=None,
102 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
103 | ],
104 | extensions=[
105 | ],
106 | nested_types=[],
107 | enum_types=[
108 | ],
109 | serialized_options=None,
110 | is_extendable=False,
111 | syntax='proto3',
112 | extension_ranges=[],
113 | oneofs=[
114 | ],
115 | serialized_start=64,
116 | serialized_end=116,
117 | )
118 |
119 |
120 | _QCONTROL = _descriptor.Descriptor(
121 | name='QControl',
122 | full_name='QControl',
123 | filename=None,
124 | file=DESCRIPTOR,
125 | containing_type=None,
126 | create_key=_descriptor._internal_create_key,
127 | fields=[
128 | _descriptor.FieldDescriptor(
129 | name='state_1v2', full_name='QControl.state_1v2', index=0,
130 | number=1, type=5, cpp_type=1, label=1,
131 | has_default_value=False, default_value=0,
132 | message_type=None, enum_type=None, containing_type=None,
133 | is_extension=False, extension_scope=None,
134 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
135 | _descriptor.FieldDescriptor(
136 | name='pwm1', full_name='QControl.pwm1', index=1,
137 | number=2, type=5, cpp_type=1, label=1,
138 | has_default_value=False, default_value=0,
139 | message_type=None, enum_type=None, containing_type=None,
140 | is_extension=False, extension_scope=None,
141 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
142 | _descriptor.FieldDescriptor(
143 | name='pwm2', full_name='QControl.pwm2', index=2,
144 | number=3, type=5, cpp_type=1, label=1,
145 | has_default_value=False, default_value=0,
146 | message_type=None, enum_type=None, containing_type=None,
147 | is_extension=False, extension_scope=None,
148 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
149 | _descriptor.FieldDescriptor(
150 | name='pwm3', full_name='QControl.pwm3', index=3,
151 | number=4, type=5, cpp_type=1, label=1,
152 | has_default_value=False, default_value=0,
153 | message_type=None, enum_type=None, containing_type=None,
154 | is_extension=False, extension_scope=None,
155 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
156 | _descriptor.FieldDescriptor(
157 | name='pwm4', full_name='QControl.pwm4', index=4,
158 | number=5, type=5, cpp_type=1, label=1,
159 | has_default_value=False, default_value=0,
160 | message_type=None, enum_type=None, containing_type=None,
161 | is_extension=False, extension_scope=None,
162 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
163 | _descriptor.FieldDescriptor(
164 | name='led2', full_name='QControl.led2', index=5,
165 | number=6, type=5, cpp_type=1, label=1,
166 | has_default_value=False, default_value=0,
167 | message_type=None, enum_type=None, containing_type=None,
168 | is_extension=False, extension_scope=None,
169 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
170 | ],
171 | extensions=[
172 | ],
173 | nested_types=[],
174 | enum_types=[
175 | ],
176 | serialized_options=None,
177 | is_extendable=False,
178 | syntax='proto3',
179 | extension_ranges=[],
180 | oneofs=[
181 | ],
182 | serialized_start=118,
183 | serialized_end=217,
184 | )
185 |
186 |
187 | _QSTATE = _descriptor.Descriptor(
188 | name='QState',
189 | full_name='QState',
190 | filename=None,
191 | file=DESCRIPTOR,
192 | containing_type=None,
193 | create_key=_descriptor._internal_create_key,
194 | fields=[
195 | _descriptor.FieldDescriptor(
196 | name='pgood_1v2', full_name='QState.pgood_1v2', index=0,
197 | number=1, type=5, cpp_type=1, label=1,
198 | has_default_value=False, default_value=0,
199 | message_type=None, enum_type=None, containing_type=None,
200 | is_extension=False, extension_scope=None,
201 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
202 | _descriptor.FieldDescriptor(
203 | name='temp1', full_name='QState.temp1', index=1,
204 | number=2, type=5, cpp_type=1, label=1,
205 | has_default_value=False, default_value=0,
206 | message_type=None, enum_type=None, containing_type=None,
207 | is_extension=False, extension_scope=None,
208 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
209 | _descriptor.FieldDescriptor(
210 | name='temp2', full_name='QState.temp2', index=2,
211 | number=3, type=5, cpp_type=1, label=1,
212 | has_default_value=False, default_value=0,
213 | message_type=None, enum_type=None, containing_type=None,
214 | is_extension=False, extension_scope=None,
215 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
216 | _descriptor.FieldDescriptor(
217 | name='temp3', full_name='QState.temp3', index=3,
218 | number=4, type=5, cpp_type=1, label=1,
219 | has_default_value=False, default_value=0,
220 | message_type=None, enum_type=None, containing_type=None,
221 | is_extension=False, extension_scope=None,
222 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
223 | _descriptor.FieldDescriptor(
224 | name='temp4', full_name='QState.temp4', index=4,
225 | number=5, type=5, cpp_type=1, label=1,
226 | has_default_value=False, default_value=0,
227 | message_type=None, enum_type=None, containing_type=None,
228 | is_extension=False, extension_scope=None,
229 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
230 | _descriptor.FieldDescriptor(
231 | name='domain1', full_name='QState.domain1', index=5,
232 | number=6, type=5, cpp_type=1, label=1,
233 | has_default_value=False, default_value=0,
234 | message_type=None, enum_type=None, containing_type=None,
235 | is_extension=False, extension_scope=None,
236 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
237 | _descriptor.FieldDescriptor(
238 | name='domain2', full_name='QState.domain2', index=6,
239 | number=7, type=5, cpp_type=1, label=1,
240 | has_default_value=False, default_value=0,
241 | message_type=None, enum_type=None, containing_type=None,
242 | is_extension=False, extension_scope=None,
243 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
244 | _descriptor.FieldDescriptor(
245 | name='domain3', full_name='QState.domain3', index=7,
246 | number=8, type=5, cpp_type=1, label=1,
247 | has_default_value=False, default_value=0,
248 | message_type=None, enum_type=None, containing_type=None,
249 | is_extension=False, extension_scope=None,
250 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
251 | _descriptor.FieldDescriptor(
252 | name='domain4', full_name='QState.domain4', index=8,
253 | number=9, type=5, cpp_type=1, label=1,
254 | has_default_value=False, default_value=0,
255 | message_type=None, enum_type=None, containing_type=None,
256 | is_extension=False, extension_scope=None,
257 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
258 | _descriptor.FieldDescriptor(
259 | name='power_enabled', full_name='QState.power_enabled', index=9,
260 | number=10, type=5, cpp_type=1, label=1,
261 | has_default_value=False, default_value=0,
262 | message_type=None, enum_type=None, containing_type=None,
263 | is_extension=False, extension_scope=None,
264 | serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
265 | ],
266 | extensions=[
267 | ],
268 | nested_types=[],
269 | enum_types=[
270 | ],
271 | serialized_options=None,
272 | is_extendable=False,
273 | syntax='proto3',
274 | extension_ranges=[],
275 | oneofs=[
276 | ],
277 | serialized_start=220,
278 | serialized_end=398,
279 | )
280 |
281 | DESCRIPTOR.message_types_by_name['QRequest'] = _QREQUEST
282 | DESCRIPTOR.message_types_by_name['QResponse'] = _QRESPONSE
283 | DESCRIPTOR.message_types_by_name['QControl'] = _QCONTROL
284 | DESCRIPTOR.message_types_by_name['QState'] = _QSTATE
285 | _sym_db.RegisterFileDescriptor(DESCRIPTOR)
286 |
287 | QRequest = _reflection.GeneratedProtocolMessageType('QRequest', (_message.Message,), {
288 | 'DESCRIPTOR' : _QREQUEST,
289 | '__module__' : 'coms_pb2'
290 | # @@protoc_insertion_point(class_scope:QRequest)
291 | })
292 | _sym_db.RegisterMessage(QRequest)
293 |
294 | QResponse = _reflection.GeneratedProtocolMessageType('QResponse', (_message.Message,), {
295 | 'DESCRIPTOR' : _QRESPONSE,
296 | '__module__' : 'coms_pb2'
297 | # @@protoc_insertion_point(class_scope:QResponse)
298 | })
299 | _sym_db.RegisterMessage(QResponse)
300 |
301 | QControl = _reflection.GeneratedProtocolMessageType('QControl', (_message.Message,), {
302 | 'DESCRIPTOR' : _QCONTROL,
303 | '__module__' : 'coms_pb2'
304 | # @@protoc_insertion_point(class_scope:QControl)
305 | })
306 | _sym_db.RegisterMessage(QControl)
307 |
308 | QState = _reflection.GeneratedProtocolMessageType('QState', (_message.Message,), {
309 | 'DESCRIPTOR' : _QSTATE,
310 | '__module__' : 'coms_pb2'
311 | # @@protoc_insertion_point(class_scope:QState)
312 | })
313 | _sym_db.RegisterMessage(QState)
314 |
315 |
316 | # @@protoc_insertion_point(module_scope)
317 |
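# Illustrative round trip with the generated messages (field names taken from
# the descriptors above; `reply_bytes` is a placeholder for data read from the
# board's transport):
#
#   req = QRequest(id=1, op=2, data=b"\x00")
#   wire = req.SerializeToString()
#   rsp = QResponse()
#   rsp.ParseFromString(reply_bytes)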
--------------------------------------------------------------------------------
/shared/shared.py:
--------------------------------------------------------------------------------
1 | import struct
2 | import hashlib
3 | import logging
4 | import random
5 | import binascii
6 | import json
7 | from enum import Enum
8 | import bech32
9 |
10 |
11 | def swap_endian_word(hex_word):
12 | '''Swaps the endianness of a hexadecimal string of a word and converts to a binary string.'''
13 |
14 | message = binascii.unhexlify(hex_word)
15 | if len(message) != 4: raise ValueError('Must be 4-byte word')
16 | return message[::-1]
17 |
18 | def swap_endian_words(hex_words):
19 | '''Swaps the endianness of a hexadecimal string of words and keeps as binary data.'''
20 | message = binascii.unhexlify(hex_words)
21 | if len(message) % 4 != 0:
22 | raise ValueError('Must be 4-byte word aligned')
23 | return b''.join([message[4 * i: 4 * i + 4][::-1] for i in range(len(message) // 4)])
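# Example: swap_endian_words("aabbccdd11223344") == bytes.fromhex("ddccbbaa44332211")
# (each 4-byte word is reversed independently)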
24 |
25 |
26 | def sha256d(message):
27 | '''Double SHA256 Hashing function.'''
28 |
29 | return hashlib.sha256(hashlib.sha256(message).digest()).digest()
30 |
31 | def count_leading_zeros(hex_string):
32 | # Convert the hexadecimal string to a binary string
33 | binary_string = bin(int(hex_string, 16))[2:].zfill(len(hex_string) * 4)
34 |
35 | # Count the leading zeros
36 | count = 0
37 | for char in binary_string:
38 | if char == '0':
39 | count += 1
40 | else:
41 | break
42 |
43 | return count
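# Example: count_leading_zeros("0f") == 4, since "0f" expands to binary 00001111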
44 |
45 | def swap_endianness_32bit(byte_array):
46 | # Ensure the byte array length is a multiple of 4 (32 bits)
47 | if len(byte_array) % 4 != 0:
48 | raise ValueError("Byte array length must be a multiple of 4.")
49 |
50 | swapped_array = bytearray()
51 |
52 | # Process each 32-bit chunk
53 | for i in range(0, len(byte_array), 4):
54 | # Unpack the 32-bit word in little-endian format
55 | word, = struct.unpack('<I', byte_array[i:i + 4])
56 |
57 | # Pack the word back in big-endian format to produce the swapped bytes
58 | swapped_array.extend(struct.pack('>I', word))
59 |
60 | return swapped_array
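# Example (given the little-endian unpack / big-endian repack above):
#   swap_endianness_32bit(bytes.fromhex("0102030405060708")) == bytearray.fromhex("0403020108070605")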
61 |
62 | def reverse_bytes(data):
63 | data = bytearray(data)
64 | len_data = len(data)
65 | for i in range(len_data // 2):
66 | # Swap bytes
67 | temp = data[i]
68 | data[i] = data[len_data - 1 - i]
69 | data[len_data - 1 - i] = temp
70 | return bytes(data)
71 |
72 | def hex_to_be(hex):
73 | bin_be = swap_endian_words(hex)
74 | bin_be = reverse_bytes(bin_be)
75 | return bin_be
76 |
77 | class Job:
78 | def __init__(
79 | self,
80 | job_id,
81 | prevhash,
82 | coinb1,
83 | coinb2,
84 | merkle_branches,
85 | version,
86 | nbits,
87 | ntime,
88 | extranonce1,
89 | extranonce2_size,
90 | max_nonce=0x7fffffff,
91 | ):
92 | # Job parts from the mining.notify command
93 | self._job_id = job_id
94 | self._prevhash = prevhash
95 | self._coinb1 = coinb1
96 | self._coinb2 = coinb2
97 | self._merkle_branches = [ b for b in merkle_branches ]
98 | self._version = version
99 | self._nbits = nbits
100 | self._ntime = ntime
101 |
102 | self._max_nonce = max_nonce
103 |
104 | # Job information needed to mine from mining.subscribe
105 | self._extranonce1 = extranonce1
106 | self._extranonce2_size = extranonce2_size
107 |
108 | # choose random extranonce
109 | extranonce2 = random.randint(0, 2**31-1)
110 | self.set_extranonce2(extranonce2)
111 |
112 | def _limit_extranonce2(self, extranonce2):
113 | # Convert extranonce2 to hex
114 | hex_extranonce2 = int_to_hex32(extranonce2)
115 |
116 | # Ensure the hex string length is twice extranonce2_size
117 | hex_extranonce2 = hex_extranonce2[:2 * self._extranonce2_size].zfill(2 * self._extranonce2_size)
118 |
119 | return hex_extranonce2
120 |
121 | def set_extranonce2(self, extranonce2):
122 | self._extranonce2 = self._limit_extranonce2(extranonce2)
123 |
124 | coinbase_bin = binascii.unhexlify(self._coinb1) + binascii.unhexlify(self._extranonce1) + binascii.unhexlify(self._extranonce2) + binascii.unhexlify(self._coinb2)
125 | coinbase_hash_bin = sha256d(coinbase_bin)
126 |
127 | # save coinbase_hex for verification
128 | self.coinbase_hex = binascii.hexlify(coinbase_bin).decode('utf-8')
129 |
130 | self._merkle_root_bin = coinbase_hash_bin
131 | for branch in self._merkle_branches:
132 | self._merkle_root_bin = sha256d(self._merkle_root_bin + binascii.unhexlify(branch))
133 |
134 | self._merkle_root = binascii.hexlify(swap_endian_words(binascii.hexlify(self._merkle_root_bin).decode('utf8'))).decode('utf8')
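# The merkle root is built the way a pool expects: double-SHA256 the assembled
# coinbase, fold in each mining.notify branch with sha256d, then word-swap the
# digest into the byte order used by the block header.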
135 |
136 |
137 |
138 | def to_dict(self):
139 | # Convert object to a dictionary
140 | return {
141 | "job_id": self._job_id,
142 | "prevhash": self._prevhash,
143 | "coinb1": self._coinb1,
144 | "coinb2": self._coinb2,
145 | "merkle_branches": self._merkle_branches,
146 | "version": self._version,
147 | "nbits": self._nbits,
148 | "ntime": self._ntime,
149 | "extranonce1": self._extranonce1,
150 | "extranonce2_size": self._extranonce2_size,
151 | # You might need to convert binary data to a string format
152 | "extranonce2": self._extranonce2,
153 | "merkle_root": self._merkle_root
154 | }
155 |
156 | @classmethod
157 | def from_dict(cls, data):
158 | # Create a new instance from a dictionary
159 | return cls(
160 | data["job_id"],
161 | data["prevhash"],
162 | data["coinb1"],
163 | data["coinb2"],
164 | data["merkle_branches"],
165 | data["version"],
166 | data["nbits"],
167 | data["ntime"],
168 | data["extranonce1"],
169 | data["extranonce2_size"],
170 | )
171 |
172 | def to_json(self):
173 | # Serialize to JSON
174 | return json.dumps(self.to_dict(), indent=4)
175 |
176 | @classmethod
177 | def from_json(cls, json_str):
178 | # Deserialize from JSON
179 | data = json.loads(json_str)
180 | return cls.from_dict(data)
181 |
182 | def deserialize_coinbase(self):
183 | hex_string = self.coinbase_hex
184 | # Helper function to read variable integer (CompactSize)
185 | def read_varint(hex_data):
186 | size = int(hex_data[:2], 16)
187 | if size < 0xfd:
188 | return size, hex_data[2:]
189 | if size == 0xfd:
190 | return int(hex_data[2:6], 16), hex_data[6:]
191 | if size == 0xfe:
192 | return int(hex_data[2:10], 16), hex_data[10:]
193 | if size == 0xff:
194 | return int(hex_data[2:18], 16), hex_data[18:]
195 |
196 | def decode_script_number(buffer, max_length=4, minimal=True):
197 | length = len(buffer)
198 | if length == 0:
199 | return 0
200 | if length > max_length:
201 | raise TypeError('Script number overflow')
202 | if minimal:
203 | if (buffer[-1] & 0x7f) == 0:
204 | if length <= 1 or (buffer[-2] & 0x80) == 0:
205 | raise ValueError('Non-minimally encoded script number')
206 |
207 | # 32-bit / 24-bit / 16-bit / 8-bit
208 | result = 0
209 | for i in range(length):
210 | result |= buffer[i] << (8 * i)
211 |
212 | if buffer[-1] & 0x80:
213 | return -(result & ~(0x80 << (8 * (length - 1))))
214 | return result
215 |
216 | # Helper function to convert little endian hex to int
217 | def le_hex_to_int(hex_data):
218 | return int.from_bytes(bytes.fromhex(hex_data), 'little')
219 |
220 | # Cursor to keep track of position
221 | cursor = 0
222 |
223 | # Deserialize the transaction
224 | tx = {}
225 |
226 | # Version
227 | tx['version'] = le_hex_to_int(hex_string[cursor:cursor + 8])
228 | cursor += 8
229 |
230 | # Input Count
231 | input_count, hex_string = read_varint(hex_string[cursor:])
232 | cursor = 0 # reset cursor as hex_string is now shorter
233 | tx['input_count'] = input_count
234 |
235 | # Inputs
236 | tx['inputs'] = []
237 | for _ in range(input_count):
238 | input = {}
239 |
240 | # Previous Output Hash
241 | input['previous_output_hash'] = hex_string[cursor:cursor + 64]
242 | cursor += 64
243 |
244 | # Previous Output Index
245 | input['previous_output_index'] = hex_string[cursor:cursor + 8]
246 | cursor += 8
247 |
248 | # Coinbase Data Size
249 | coinbase_size, hex_string = read_varint(hex_string[cursor:])
250 | cursor = 0 # reset cursor as hex_string is now shorter
251 | input['coinbase_size'] = coinbase_size
252 |
253 | # Coinbase Data
254 | input['coinbase_data'] = hex_string[cursor:cursor + coinbase_size * 2]
255 | cursor += coinbase_size * 2
256 |
257 | # extract blocknumber
258 | if tx['version'] == 2:
259 | coinbase_data_bytes = binascii.unhexlify(input['coinbase_data'])
260 | height_num_bytes = coinbase_data_bytes[0]
261 | tx['height'] = decode_script_number(coinbase_data_bytes[1:1+height_num_bytes])
262 | else:
263 | tx['height'] = None
264 |
265 | # Sequence
266 | input['sequence'] = hex_string[cursor:cursor + 8]
267 | cursor += 8
268 |
269 | tx['inputs'].append(input)
270 |
271 | # Output Count
272 | output_count, hex_string = read_varint(hex_string[cursor:])
273 | cursor = 0 # reset cursor as hex_string is now shorter
274 | tx['output_count'] = output_count
275 |
276 | # Outputs
277 | tx['outputs'] = []
278 | for _ in range(output_count):
279 | output = {}
280 |
281 | # Value
282 | output['value'] = le_hex_to_int(hex_string[cursor:cursor + 16])
283 | cursor += 16
284 |
285 | # Script Length
286 | script_length, hex_string = read_varint(hex_string[cursor:])
287 | cursor = 0 # reset cursor as hex_string is now shorter
288 | output['script_length'] = script_length
289 |
290 | # Script
291 | output['script'] = hex_string[cursor:cursor + script_length * 2]
292 | cursor += script_length * 2
293 |
294 | tx['outputs'].append(output)
295 |
296 | # Locktime
297 | tx['locktime'] = le_hex_to_int(hex_string[cursor:cursor + 8])
298 |
299 | return tx
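# Illustrative use (a sketch with names from this module, not a separate API):
#   tx = Job.from_dict(job_dict).deserialize_coinbase()
#   print(tx['height'], tx['output_count'])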
300 |
301 |
302 | class BitcoinNetwork(Enum):
303 | MAINNET = 1
304 | TESTNET = 2
305 | REGTEST = 3
306 | UNKNOWN = 4
307 |
308 | def detect_btc_network(address):
309 | if address.startswith("1") or address.startswith("3") or address.startswith("bc1"):
310 | return BitcoinNetwork.MAINNET
311 | elif address.startswith("m") or address.startswith("n") or address.startswith("2") or address.startswith("tb1"):
312 | return BitcoinNetwork.TESTNET
313 | elif address.startswith("bcrt1"):
314 | return BitcoinNetwork.REGTEST
315 | else:
316 | return BitcoinNetwork.UNKNOWN
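# Example: detect_btc_network("bc1q...") -> MAINNET, "tb1q..." -> TESTNET,
# "bcrt1q..." -> REGTEST (prefix checks only, not full address validation)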
317 |
318 | def int_to_hex32(v):
319 | return f"{v:08x}"
320 |
321 | def int_to_hex256(v):
322 | return f"{v:064x}"
323 |
324 | def int_to_hex16(v):
325 | return f"{v:04x}"
326 |
327 | def int_to_bytes32(i):
328 | return struct.pack('<I', i)
349 | def swap16(num):  # helper name assumed; the original header line was lost in the dump
350 | return ((num >> 8) | (num << 8)) & 0xFFFF
351 |
352 | def calculate_target(difficulty):
353 | if difficulty < 0:
354 | raise Exception('Difficulty must be non-negative')
355 |
356 | # Compute target
357 | if difficulty == 0:
358 | target = 2 ** 256 - 1
359 | else:
360 | target = min(int((0xffff0000 * 2 ** (256 - 64) + 1) / difficulty - 1 + 0.5), 2 ** 256 - 1)
361 |
362 | return target
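# Example: calculate_target(1) is (up to float rounding) the classic diff-1
# target 0x00000000ffff0000...0, i.e. 32 leading zero bits.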
363 |
364 | def calculate_difficulty_from_hash(hash_hex):
365 | # Convert hash from hex to integer
366 | hash_int = int(hash_hex, 16)
367 |
368 | # Difficulty 1 Target
369 | diff1_target = 0xffff0000 * 2 ** (256 - 64)
370 |
371 | # Calculate difficulty
372 | difficulty = diff1_target / hash_int
373 |
374 | return difficulty
375 |
376 | def nbits_to_target(nbits):
377 | nbits = int(nbits, 16)
378 |
379 | # Split nbits into the exponent and coefficient
380 | exponent = nbits >> 24
381 | coefficient = nbits & 0xffffff
382 |
383 | # Convert to 256-bit target
384 | target = coefficient << (8 * (exponent - 3))
385 |
386 | # Format target as a 64-character hexadecimal string
387 | target_hex = format(target, '064x')
388 |
389 | leading_zeros = count_leading_zeros(target_hex)
390 |
391 | return target_hex, leading_zeros
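# Example: nbits "1924f3f1" -> exponent 0x19 (25), coefficient 0x24f3f1,
# target = 0x24f3f1 << (8 * 22): "24f3f1" followed by 44 hex zeros,
# left-padded to 64 characters.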
392 |
393 | def verify_work(difficulty, job, result):
394 | # print(job.to_json())
395 | # print(json.dumps(result, indent=4))
396 |
397 | if job._job_id != result['job_id']:
398 | raise Exception("job_ids mismatch")
399 |
400 | header = swap_endian_word(int_to_hex32(int(job._version, 16) ^ int(result['version'], 16)))
401 | header += swap_endian_words(job._prevhash)
402 | header += swap_endian_words(job._merkle_root)
403 | header += swap_endian_words(result['ntime'])
404 | header += swap_endian_words(job._nbits)
405 | header += swap_endian_words(result['nonce'])
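# The assembled header is the standard 80-byte block header:
# version(4) + prevhash(32) + merkle_root(32) + ntime(4) + nbits(4) + nonce(4)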
406 | logging.debug("header: %s", bytearray(header).hex())
407 |
408 | target = int_to_hex256(calculate_target(difficulty))
409 |
410 | # Hash the header twice using SHA-256.
411 | hash_buffer = hashlib.sha256(header).digest()
412 | hash_result = hashlib.sha256(hash_buffer).digest()
413 | hash_str = bytearray(hash_result).hex()
414 | hash_be = swap_endianness_32bit(hex_to_be(hash_str))
415 | hash_str = bytearray(hash_be).hex()
416 | leading_zeros = count_leading_zeros(hash_str)
417 |
418 | return hash_str < target, hash_str, leading_zeros
419 |
420 | def get_network_target(difficulty):
421 | target = int_to_hex256(calculate_target(difficulty))
422 | leading_zeros = count_leading_zeros(target)
423 | return target, leading_zeros
424 |
425 |
426 | def decode_bech32(address):
427 | hrp = address.split('1')[0]
428 |
429 | # Decoding the Bech32 address to get the witness version and witness program
430 | hrp, decoded_data = bech32.decode(hrp, address)
431 | if decoded_data is None:
432 | raise ValueError("Invalid Bech32 address")
433 |
434 | return bytes(decoded_data)
435 |
436 | def get_scriptpubkey_from_bech32(address):
437 | # Decoding the Bech32 address
438 | decoded_data = decode_bech32(address)
439 | # The first byte is the witness version
440 | witness_version = decoded_data[0]
441 | # The rest is the witness program (hash)
442 | witness_program = decoded_data[1:]
443 | # Constructing the scriptPubKey
444 | return witness_version, witness_program
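# For a P2WPKH address the witness version is 0 and the witness program is the
# 20-byte HASH160 of the public key; the scriptPubKey is OP_0 <program>.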
445 |
446 | def verify_solo(btc_address, coinb):
447 | if coinb['output_count'] < 1:
448 | raise Exception("no outputs found")
449 |
450 | witness_version, witness_program = get_scriptpubkey_from_bech32(btc_address)
451 | scriptpubkey = binascii.hexlify(witness_program).decode('utf-8')
452 |
453 | value_total = 0
454 | value_our = 0
455 | for i, output in enumerate(coinb['outputs']):
456 | if i == 0:
457 | #print(output['script'])
458 | if scriptpubkey not in output['script']:
459 | raise Exception("script pubkey of our address not found")
460 | value_our += output['value']
461 |
462 | value_total += output['value']
463 |
464 | if value_our != value_total:
465 | raise Exception("not getting all rewards! {} vs {}".format(value_our, value_total))
466 |
467 | return True, value_our, value_total
468 |
469 |
470 |
471 | if __name__ == '__main__':
472 | logging.basicConfig(level=logging.DEBUG,
473 | format='%(asctime)s - %(levelname)s - %(message)s')
474 | difficulty = 0.0032
475 | job_dict = {
476 | "job_id": "10",
477 | "prevhash": "b82cc386d81b16238daa4906ae4fc0599d9d145347bacdac0000007b00000000",
478 | "coinb1": "02000000010000000000000000000000000000000000000000000000000000000000000000ffffffff1903cac3265c7075626c69632d706f6f6c5c",
479 | "coinb2": "ffffffff02bdcd1200000000001976a914fbff95b4e35aca918d26e157392ea1643a2dc28388ac0000000000000000266a24aa21a9edac9132f342173ab4e3cfe34f393b1ce7d46226c100426d02667fc7d89dc7942f00000000",
480 | "merkle_branches": [
481 | "2c4b311ff57d11518cab724b93286f33dd441391e2b63d2a19c901200390ce91",
482 | "1265661d1c0e2839b78e2d65eaadf04941b7fffd27722f4059bdd3c617dca326",
483 | "7956bf0ecaf8a0a797e1a9517a535f9b1f076ca0e4b5db460a0bef4c0c105125",
484 | "ea2569f34f3189ca7f4c6f4c1b856551e8a94bae47ee6fdeb6eae2c144fd333a"
485 | ],
486 | "version": "20000000",
487 | "nbits": "1924f3f1",
488 | "ntime": "6562e8e6",
489 | "extranonce1": "44f454dd",
490 | "extranonce2_size": 4,
491 | "extranonce2": "0x6eaaf700",
492 | "merkle_root": "f7614f139a8c70b1ed6bc55e29a242418f22ba99d2efdf901366fc4c4c5a358c"
493 | }
494 | result = {
495 | "job_id": "10",
496 | "extranonce2": "0x6eaaf700",
497 | "ntime": "6562e8e6",
498 | "nonce": "018ced64",
499 | "version": "6072000"
500 | }
501 | job = Job.from_dict(job_dict)
502 |
503 | print(verify_work(difficulty, job, result))
504 |
--------------------------------------------------------------------------------
/piaxe/bm1366.py:
--------------------------------------------------------------------------------
1 | # translated from: https://github.com/skot/ESP-Miner
2 | import struct
3 | import serial
4 |
5 | import time
6 | import math
7 | import logging
8 | import json
9 | from .crc_functions import crc5, crc16_false
10 | from . import utils
11 | import binascii
12 |
13 | TYPE_JOB = 0x20
14 | TYPE_CMD = 0x40
15 |
16 | JOB_PACKET = 0
17 | CMD_PACKET = 1
18 |
19 | GROUP_SINGLE = 0x00
20 | GROUP_ALL = 0x10
21 |
22 | CMD_JOB = 0x01
23 |
24 | CMD_SETADDRESS = 0x00
25 | CMD_WRITE = 0x01
26 | CMD_READ = 0x02
27 | CMD_INACTIVE = 0x03
28 |
29 | RESPONSE_CMD = 0x00
30 | RESPONSE_JOB = 0x80
31 |
32 | SLEEP_TIME = 20
33 | FREQ_MULT = 25.0
34 |
35 | CLOCK_ORDER_CONTROL_0 = 0x80
36 | CLOCK_ORDER_CONTROL_1 = 0x84
37 | ORDERED_CLOCK_ENABLE = 0x20
38 | CORE_REGISTER_CONTROL = 0x3C
39 | PLL3_PARAMETER = 0x68
40 | FAST_UART_CONFIGURATION = 0x28
41 | TICKET_MASK = 0x14
42 | MISC_CONTROL = 0x18
43 |
44 |
45 | class AsicResult:
46 | # Define the struct format corresponding to the C structure.
47 | # < for little-endian, B for uint8_t, I for uint32_t, H for uint16_t
48 | _struct_format = '<2BIBBHB'
49 |
50 | def __init__(self):
51 | self.preamble = [0x00, 0x00]
52 | self.nonce = 0
53 | self.midstate_num = 0
54 | self.job_id = 0
55 | self.version = 0
56 | self.crc = 0
57 |
58 | @classmethod
59 | def from_bytes(cls, data):
60 | # Unpack the data using the struct format.
61 | unpacked_data = struct.unpack(cls._struct_format, data)
62 |
63 | # Create an instance of the AsicResult class.
64 | result = cls()
65 |
66 | # Assign the unpacked data to the class fields.
67 | result.preamble = list(unpacked_data[0:2])
68 | result.nonce = unpacked_data[2]
69 | result.midstate_num = unpacked_data[3]
70 | result.job_id = unpacked_data[4]
71 | result.version = unpacked_data[5]
72 | result.crc = unpacked_data[6]
73 |
74 | return result
75 |
76 | def print(self):
77 | print("AsicResult:")
78 | print(f" preamble: {self.preamble}")
79 | print(f" nonce: {self.nonce:08x}")
80 | print(f" midstate_num: {self.midstate_num}")
81 | print(f" job_id: {self.job_id:02x}")
82 | print(f" version: {self.version:04x}")
83 | print(f" crc: {self.crc:02x}")
84 |
85 | class WorkRequest:
86 | def __init__(self):
87 | self.time = None
88 | self.id = int(0)
89 | self.starting_nonce = int(0)
90 | self.nbits = int(0)
91 | self.ntime = int(0)
92 | self.merkle_root = bytearray([])
93 | self.prev_block_hash = bytearray([])
94 | self.version = int(0)
95 |
96 | def create_work(self, id, starting_nonce, nbits, ntime, merkle_root, prev_block_hash, version):
97 | self.time = time.time()
98 | self.id = id
99 | self.starting_nonce = starting_nonce
100 | self.nbits = nbits
101 | self.ntime = ntime
102 | self.merkle_root = merkle_root
103 | self.prev_block_hash = prev_block_hash
104 | self.version = version
105 |
106 | def print(self):
107 | print("WorkRequest:")
108 | print(f" id: {self.id:02x}")
109 | print(f" starting_nonce: {self.starting_nonce:08x}")
110 | print(f" nbits: {self.nbits:08x}")
111 | print(f" ntime: {self.ntime:08x}")
112 | print(f" merkle_root: {self.merkle_root.hex()}")
113 | print(f" prev_block_hash: {self.prev_block_hash.hex()}")
114 | print(f" version: {self.version:08x}")
115 |
116 |
117 |
118 | class TaskResult:
119 | def __init__(self, job_id, nonce, rolled_version):
120 | self.job_id = job_id
121 | self.nonce = nonce
122 | self.rolled_version = rolled_version
123 |
124 | class ClockManager:
125 | def __init__(self, bm1366, clocks, num_asics=1):
126 | self.bm1366 = bm1366
127 | self.num_asics = num_asics
128 | if isinstance(clocks, list):
129 | self.clocks = clocks
130 | else:
131 | self.clocks = [clocks for i in range(0, self.num_asics)]
132 |
133 | def set_clock(self, id, clock):
134 | logging.info(f"setting clock of {id} to {clock}")
135 | try:
136 | self.bm1366.send_hash_frequency2(id, clock)
137 | # all
138 | if id == -1:
139 | self.clocks = [clock for i in range(0, self.num_asics)]
140 | else:
141 | self.clocks[id] = clock
142 | except Exception as e:
143 | raise
144 |
145 | def get_clock(self, id):
146 | if id == -1:
147 | return self.clocks
148 | else:
149 | return self.clocks[id]
150 |
151 | def do_frequency_ramp_up(self, frequency):
152 | start = current = 56.25
153 | step = 6.25
154 | target = frequency
155 |
156 | self.set_clock(-1, start)
157 | while current < target:
158 | next_step = min(step, target-current)
159 | current += next_step
160 | self.set_clock(-1, current)
161 | time.sleep(0.100)
162 |
163 | class BM1366:
164 | def __init__(self):
165 | self.chip_id_response="aa5513660000"
166 |
167 | def ll_init(self, _serial_tx_func, _serial_rx_func, _reset_func):
168 | self.serial_tx_func = _serial_tx_func
169 | self.serial_rx_func = _serial_rx_func
170 | self.reset_func = _reset_func
171 |
172 |
173 | def send(self, header, data):
174 | packet_type = JOB_PACKET if header & TYPE_JOB else CMD_PACKET
175 | data_len = len(data)
176 | total_length = data_len + 6 if packet_type == JOB_PACKET else data_len + 5
177 |
178 | # Create a buffer
179 | buf = bytearray(total_length)
180 |
181 | # Add the preamble
182 | buf[0] = 0x55
183 | buf[1] = 0xAA
184 |
185 | # Add the header field
186 | buf[2] = header
187 |
188 | # Add the length field
189 | buf[3] = data_len + 4 if packet_type == JOB_PACKET else data_len + 3
190 |
191 | # Add the data
192 | buf[4:data_len+4] = data
193 |
194 | # Add the correct CRC type
195 | if packet_type == JOB_PACKET:
196 | crc16_total = crc16_false(buf[2:data_len+4])
197 | buf[4 + data_len] = (crc16_total >> 8) & 0xFF
198 | buf[5 + data_len] = crc16_total & 0xFF
199 | else:
200 | buf[4 + data_len] = crc5(buf[2:data_len+4])
201 |
202 | self.serial_tx_func(buf)
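# Frame layout produced above: 0x55 0xAA | header | length | payload,
# followed by a 2-byte CRC16 for job packets or a 1-byte CRC5 for commands.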
203 |
204 | def send_simple(self, data):
205 | self.serial_tx_func(data)
206 |
207 | def send_chain_inactive(self):
208 | self.send(TYPE_CMD | GROUP_ALL | CMD_INACTIVE, [0x00, 0x00])
209 |
210 | def set_chip_address(self, chipAddr):
211 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_SETADDRESS, [chipAddr, 0x00])
212 |
213 | def send_hash_frequency2(self, id, target_freq, max_diff = 0.001):
214 | freqbuf = bytearray([0x00, 0x08, 0x40, 0xA0, 0x02, 0x41]) # freqbuf - pll0_parameter
215 | postdiv_min = 255
216 | postdiv2_min = 255
217 | best = None
218 |
219 | for refdiv in range(2, 0, -1):
220 | for postdiv1 in range(7, 0, -1):
221 | for postdiv2 in range(7, 0, -1):
222 | fb_divider = round(target_freq / 25.0 * (refdiv * postdiv2 * postdiv1))
223 | newf = 25.0 * fb_divider / (refdiv * postdiv2 * postdiv1)
224 | if \
225 | 0xa0 <= fb_divider <= 0xef and \
226 | abs(target_freq - newf) < max_diff and \
227 | postdiv1 >= postdiv2 and \
228 | postdiv1 * postdiv2 < postdiv_min and \
229 | postdiv2 <= postdiv2_min:
230 |
231 | postdiv2_min = postdiv2
232 | postdiv_min = postdiv1 * postdiv2
233 | best = (refdiv, fb_divider, postdiv1, postdiv2, newf)
234 |
235 | if not best:
236 | raise Exception(f"didn't find PLL settings for target frequency {target_freq:.2f}")
237 |
238 | freqbuf[2] = 0x50 if best[1] * 25 / best[0] >= 2400 else 0x40
239 | freqbuf[3] = best[1]
240 | freqbuf[4] = best[0]
241 | freqbuf[5] = ((best[2] - 1) & 0xf) << 4 | (best[3] - 1) & 0xf
242 |
243 | if id != -1:
244 | freqbuf[0] = id*2
245 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, freqbuf)
246 | else:
247 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, freqbuf)
248 |
249 | logging.info(f"Setting Frequency to {target_freq:.2f}MHz ({best[4]:.2f})")
250 |
251 | return freqbuf
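# Example: for the 56.25 MHz ramp-up start one valid solution is refdiv=2,
# fb_divider=189, postdiv1=7, postdiv2=6: 25.0 * 189 / (2 * 7 * 6) == 56.25.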
252 |
253 |
254 |
255 | def count_asic_chips(self):
256 | self.send(TYPE_CMD | GROUP_ALL | CMD_READ, [0x00, 0x00])
257 |
258 | chip_counter = 0
259 | while True:
260 | data = self.serial_rx_func(11, 5000)
261 |
262 | if data is None:
263 | break
264 |
265 | # only count chip id responses
266 | if self.chip_id_response not in binascii.hexlify(data).decode('utf8'):
267 | continue
268 |
269 | chip_counter += 1
270 |
271 | self.send(TYPE_CMD | GROUP_ALL | CMD_INACTIVE, [0x00, 0x00])
272 |
273 | return chip_counter
274 |
275 |
276 | def send_init(self, frequency, expected, chips_enabled = None):
277 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xA4, 0x90, 0x00, 0xFF, 0xFF])
278 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xA4, 0x90, 0x00, 0xFF, 0xFF])
279 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xA4, 0x90, 0x00, 0xFF, 0xFF])
280 |
281 | chip_counter = self.count_asic_chips()
282 |
283 | if chip_counter != expected:
284 | raise Exception(f"chips mismatch. expected: {expected}, actual: {chip_counter}")
285 |
286 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xa8, 0x00, 0x07, 0x00, 0x00])
287 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x18, 0xff, 0x0f, 0xc1, 0x00])
288 |
289 | for id in range(0, chip_counter):
290 | self.set_chip_address(id * 2)
291 |
292 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x3C, 0x80, 0x00, 0x85, 0x40])
293 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x3C, 0x80, 0x00, 0x80, 0x20])
294 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x14, 0x00, 0x00, 0x00, 0xFF])
295 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x54, 0x00, 0x00, 0x00, 0x03])
296 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x58, 0x02, 0x11, 0x11, 0x11])
297 |
298 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [0x00, 0x2c, 0x00, 0x7c, 0x00, 0x03])
299 |
300 | # change baud
301 | #self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x28, 0x11, 0x30, 0x02, 0x00])
302 |
303 | for id in range(0, chip_counter):
304 | if chips_enabled is not None and id not in chips_enabled:
305 | continue
306 |
307 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [id*2, 0xA8, 0x00, 0x07, 0x01, 0xF0])
308 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [id*2, 0x18, 0xF0, 0x00, 0xC1, 0x00])
309 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [id*2, 0x3C, 0x80, 0x00, 0x85, 0x40])
310 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [id*2, 0x3C, 0x80, 0x00, 0x80, 0x20])
311 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [id*2, 0x3C, 0x80, 0x00, 0x82, 0xAA])
312 | time.sleep(0.500)
313 |
314 | self.clock_manager = ClockManager(self, frequency, chip_counter)
315 | self.clock_manager.do_frequency_ramp_up(frequency)
316 |
317 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x10, 0x00, 0x00, 0x15, 0x1c])
318 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xA4, 0x90, 0x00, 0xFF, 0xFF])
319 |
320 | return chip_counter
321 |
322 |
323 | def request_chip_id(self):
324 | self.send_simple([0x55, 0xAA, 0x52, 0x05, 0x00, 0x00, 0x0A]) # chipid
325 |
326 |
327 | def send_read_address(self):
328 | self.send(TYPE_CMD | GROUP_ALL | CMD_READ, [0x00, 0x00])
329 |
330 | def reset(self):
331 | self.reset_func()
332 |
333 | def init(self, frequency, expected, chips_enabled = None):
334 | logging.info("Initializing BM1366")
335 |
336 | self.reset()
337 |
338 | return self.send_init(frequency, expected, chips_enabled)
339 |
340 | # Baud formula = 25M/((denominator+1)*8)
341 | # The denominator is 5 bits found in the misc_control (bits 9-13)
342 | def set_default_baud(self):
343 | # default divider of 26 (11010) for 115,749
344 | baudrate = [0x00, MISC_CONTROL, 0x00, 0x00, 0b01111010, 0b00110001]
345 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, baudrate)
346 | return 115749
347 |
348 | def set_max_baud(self):
349 | # Log the setting of max baud (you would need to have a logging mechanism in place)
350 | logging.info("Setting max baud of 1000000")
351 |
352 | # divider of 0 for 3,125,000
353 | init8 = [0x55, 0xAA, 0x51, 0x09, 0x00, 0x28, 0x11, 0x30, 0x02, 0x00, 0x03]
354 | self.send_simple(init8)
355 | return 1000000
356 |
357 | def _largest_power_of_two(self, n):
358 | # Finds the largest power of 2 less than or equal to n
359 | p = 1
360 | while p * 2 <= n:
361 | p *= 2
362 | return p
363 |
364 | def _reverse_bits(self, byte):
365 | # Reverses the bits in a byte
366 | return int('{:08b}'.format(byte)[::-1], 2)
367 |
368 | def set_job_difficulty_mask(self, difficulty):
369 | # Default mask of 256 diff
370 | job_difficulty_mask = [0x00, TICKET_MASK, 0b00000000, 0b00000000, 0b00000000, 0b11111111]
371 |
372 | # The mask must be a power of 2 so there are no holes
373 | # Correct: {0b00000000, 0b00000000, 0b11111111, 0b11111111}
374 | # Incorrect: {0b00000000, 0b00000000, 0b11100111, 0b11111111}
375 | # (difficulty - 1) if it is a pow 2 then step down to second largest for more hashrate sampling
376 | difficulty = self._largest_power_of_two(difficulty) - 1
377 |
378 | # convert difficulty into char array
379 | # Ex: 256 = {0b00000000, 0b00000000, 0b00000000, 0b11111111}, {0x00, 0x00, 0x00, 0xff}
380 | # Ex: 512 = {0b00000000, 0b00000000, 0b00000001, 0b11111111}, {0x00, 0x00, 0x01, 0xff}
381 | for i in range(4):
382 | value = (difficulty >> (8 * i)) & 0xFF
383 | # The char is read in backwards to the register so we need to reverse them
384 | # So a mask of 512 looks like 0b00000000 00000000 00000001 1111111
385 | # and not 0b00000000 00000000 10000000 1111111
386 | job_difficulty_mask[5 - i] = self._reverse_bits(value)
387 |
388 | # Log the setting of job ASIC mask (replace with your logging method)
389 | logging.info("Setting job ASIC mask to %d", difficulty)
390 |
391 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, job_difficulty_mask)
392 |
393 | def send_work(self, t: WorkRequest):
394 | job_packet_format = '<BBIII32s32sI'  # layout assumed from WorkRequest; the rest of send_work was lost in the dump
444 |
445 | def get_job_id(self, job_id):
446 | # job-IDs: 00, 18, 30, 48, 60, 78, 10, 28, 40, 58, 70, 08, 20, 38, 50, 68
447 | return (job_id * 24) & 0x7f
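# Example: get_job_id(1) == 0x18 and get_job_id(6) == 0x10, the 7th entry in
# the sequence above (144 & 0x7f wraps back into 7 bits).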
448 |
449 | def clear_serial_buffer(self):
450 | while True:
451 | data = self.serial_rx_func(11, 5000)
452 | if data is None:
453 | return
454 |
455 | def send_init(self, frequency, expected, chips_enabled = None):
456 | self.clear_serial_buffer()
457 |
458 | # enable and set version rolling mask to 0xFFFF
459 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xA4, 0x90, 0x00, 0xFF, 0xFF])
460 | # enable and set version rolling mask to 0xFFFF again
461 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xA4, 0x90, 0x00, 0xFF, 0xFF])
462 | # enable and set version rolling mask to 0xFFFF again
463 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xA4, 0x90, 0x00, 0xFF, 0xFF])
464 | # enable and set version rolling mask to 0xFFFF again
465 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xA4, 0x90, 0x00, 0xFF, 0xFF])
466 |
467 | chip_counter = self.count_asic_chips()
468 |
469 | if chip_counter != expected:
470 | raise Exception(f"chips mismatch. expected: {expected}, actual: {chip_counter}")
471 |
472 | # enable and set version rolling mask to 0xFFFF again
473 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xA4, 0x90, 0x00, 0xFF, 0xFF])
474 | # Reg_A8
475 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xa8, 0x00, 0x07, 0x00, 0x00])
476 | # Misc Control
477 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x18, 0xff, 0x0f, 0xc1, 0x00])
478 |
479 | for id in range(0, chip_counter):
480 | self.set_chip_address(id * 2)
481 |
482 | # Core Register Control
483 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x3C, 0x80, 0x00, 0x8b, 0x00])
484 | # Core Register Control
485 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x3C, 0x80, 0x00, 0x80, 0x18])
486 | # set ticket mask
487 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x14, 0x00, 0x00, 0x00, 0xFF])
488 | # Analog Mux Control
489 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x54, 0x00, 0x00, 0x00, 0x03])
490 | # Set the IO Driver Strength
491 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x58, 0x02, 0x11, 0x11, 0x11])
492 |
493 | for id in range(0, chip_counter):
494 | if chips_enabled is not None and id not in chips_enabled:
495 | continue
496 |
497 | # Reg_A8
498 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [id*2, 0xA8, 0x00, 0x07, 0x01, 0xF0])
499 | # Misc Control
500 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [id*2, 0x18, 0xF0, 0x00, 0xC1, 0x00])
501 | # Core Register Control
502 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [id*2, 0x3C, 0x80, 0x00, 0x8b, 0x00])
503 | # Core Register Control
504 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [id*2, 0x3C, 0x80, 0x00, 0x80, 0x18])
505 | # Core Register Control
506 | self.send(TYPE_CMD | GROUP_SINGLE | CMD_WRITE, [id*2, 0x3C, 0x80, 0x00, 0x82, 0xAA])
507 | time.sleep(0.500)
508 |
509 | self.clock_manager = ClockManager(self, frequency, chip_counter)
510 | self.clock_manager.do_frequency_ramp_up(frequency)
511 |
512 | # change baud
513 | #self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x28, 0x11, 0x30, 0x02, 0x00])
514 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0x10, 0x00, 0x00, 0x15, 0xa4])
515 | self.send(TYPE_CMD | GROUP_ALL | CMD_WRITE, [0x00, 0xA4, 0x90, 0x00, 0xFF, 0xFF])
516 |
517 | return chip_counter
518 |
519 |
520 | def request_temps(self):
521 | self.send_simple([0x55, 0xAA, 0x51, 0x09, 0x00, 0xB0, 0x80, 0x00, 0x00, 0x00, 0x0F])
522 | self.send_simple([0x55, 0xAA, 0x51, 0x09, 0x00, 0xB0, 0x00, 0x02, 0x00, 0x00, 0x1F])
523 | self.send_simple([0x55, 0xAA, 0x51, 0x09, 0x00, 0xB0, 0x01, 0x02, 0x00, 0x00, 0x16])
524 | self.send_simple([0x55, 0xAA, 0x51, 0x09, 0x00, 0xB0, 0x10, 0x02, 0x00, 0x00, 0x1B])
525 | self.send_simple([0x55, 0xAA, 0x52, 0x05, 0x00, 0xB4, 0x1B])
526 |
527 | def try_get_temp_from_response(self, response : AsicResult):
528 | # temp response has this pattern
529 | # aa55 8000080c 00 b4 0000 1a
530 | if response.nonce & 0x0000ffff == 0x00000080 and response.job_id == 0xb4:
531 | value = (response.nonce & 0xff000000) >> 24 | (response.nonce & 0x00ff0000) >> 8
532 | id = response.midstate_num // 2
533 |
534 | return (value, id)
535 |
536 | return (None, None)
537 |
--------------------------------------------------------------------------------
/pyminer.py:
--------------------------------------------------------------------------------
1 | # The MIT License (MIT)
2 | #
3 | # Copyright (c) 2014 Richard Moore
4 | #
5 | # Permission is hereby granted, free of charge, to any person obtaining a copy
6 | # of this software and associated documentation files (the "Software"), to deal
7 | # in the Software without restriction, including without limitation the rights
8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | # copies of the Software, and to permit persons to whom the Software is
10 | # furnished to do so, subject to the following conditions:
11 | #
12 | # The above copyright notice and this permission notice shall be included in
13 | # all copies or substantial portions of the Software.
14 | #
15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | # THE SOFTWARE.
22 |
23 | import json, socket, sys, threading, time
24 | from urllib import parse as urlparse
25 | from shared import shared
26 | #from cpu_miner import miner
27 | from piaxe import miner
28 | #import cpu_miner
29 | import logging
30 | import piaxe
31 | import signal
32 | import os
33 | import datetime
34 | import yaml
35 |
36 | # Subscription state
37 | class Subscription(object):
38 | '''Encapsulates the Subscription state from the JSON-RPC server'''
39 |
40 | _max_nonce = 0x7fffffff
41 |
42 | # Subclasses should override this
43 | def ProofOfWork(header):
44 | raise Exception('Do not use the Subscription class directly, subclass it')
45 |
46 | class StateException(Exception): pass
47 |
48 | def __init__(self):
49 | self._id = None
50 | self._difficulty = None
51 | self._extranonce1 = None
52 | self._extranonce2_size = None
53 | self._worker_name = None
54 |
55 | self._mining_thread = None
56 |
57 | # Accessors
58 | id = property(lambda s: s._id)
59 | worker_name = property(lambda s: s._worker_name)
60 |
61 | difficulty = property(lambda s: s._difficulty)
62 |
63 | extranonce1 = property(lambda s: s._extranonce1)
64 | extranonce2_size = property(lambda s: s._extranonce2_size)
65 |
66 |
67 | def set_worker_name(self, worker_name):
68 | if self._worker_name:
69 | raise self.StateException('Already authenticated as %r (requesting %r)' % (self._worker_name, worker_name))
70 |
71 | self._worker_name = worker_name
72 |
73 |
74 | def set_subscription(self, subscription_id, extranonce1, extranonce2_size):
75 | if self._id is not None:
76 | raise self.StateException('Already subscribed')
77 |
78 | self._id = subscription_id
79 | self._extranonce1 = extranonce1
80 | self._extranonce2_size = extranonce2_size
81 |
82 |
83 | def create_job(self, job_id, prevhash, coinb1, coinb2, merkle_branches, version, nbits, ntime):
84 | '''Creates a new Job object populated with all the goodness it needs to mine.'''
85 |
86 | if self._id is None:
87 | raise self.StateException('Not subscribed')
88 |
89 | return piaxe.miner.Job(
90 | job_id=job_id,
91 | prevhash=prevhash,
92 | coinb1=coinb1,
93 | coinb2=coinb2,
94 | merkle_branches=merkle_branches,
95 | version=version,
96 | nbits=nbits,
97 | ntime=ntime,
98 | extranonce1=self._extranonce1,
99 | extranonce2_size=self.extranonce2_size,
100 | max_nonce=self._max_nonce,
101 | )
102 |
103 | def __str__(self):
104 | return '<Subscription id=%s, extranonce1=%s, extranonce2_size=%s, difficulty=%s, worker_name=%s>' % (self.id, self.extranonce1, self.extranonce2_size, self.difficulty, self.worker_name)
105 |
106 | class SubscriptionSHA256D(Subscription):
107 | '''Subscription for Double-SHA256-based coins, like Bitcoin.'''
108 |
109 | ProofOfWork = shared.sha256d
110 |
111 | class SimpleJsonRpcClient(object):
112 | '''Simple JSON-RPC client.
113 |
114 | To use this class:
115 | 1) Create a sub-class
116 | 2) Override handle_reply(self, request, reply)
117 | 3) Call connect(socket)
118 |
119 | Use self.send(method, params) to send JSON-RPC commands to the server.
120 |
121 | A new thread is created for listening to the connection; so calls to handle_reply
122 | are synchronized. It is safe to call send from within handle_reply.
123 | '''
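# Minimal sketch of the pattern described above (class name hypothetical):
#
#   class EchoClient(SimpleJsonRpcClient):
#       def handle_reply(self, request, reply):
#           logging.info('reply %s to request %s', reply, request)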
124 |
125 | class ClientException(Exception): pass
126 |
127 | class RequestReplyException(Exception):
128 | def __init__(self, message, reply, request = None):
129 | Exception.__init__(self, message)
130 | self._reply = reply
131 | self._request = request
132 |
133 | request = property(lambda s: s._request)
134 | reply = property(lambda s: s._reply)
135 |
136 | class RequestReplyWarning(RequestReplyException):
137 | '''Sub-classes can raise this to inform the user of JSON-RPC server issues.'''
138 | pass
139 |
140 | def __init__(self):
141 | self._socket = None
142 | self._lock = threading.RLock()
143 | self._rpc_thread = None
144 | self._message_id = 1
145 | self._requests = dict()
146 | self.error_event = threading.Event()
147 |
148 | def stop(self):
149 | self.error_event.set()
150 |
151 | try:
152 | if self._socket:
153 | self._socket.shutdown(socket.SHUT_RDWR)
154 | self._socket.close()
155 | except OSError as e:
156 | print(f"Error when closing socket: {e}")
157 |
158 | if self._rpc_thread:
159 | logging.debug("joining rpc_thread")
160 | self._rpc_thread.join()
161 | logging.debug("joining done")
162 | self._rpc_thread = None
163 |
164 | def _handle_incoming_rpc(self):
165 | data = ""
166 | while not self.error_event.is_set():
167 | try:
168 | # Get the next line if we have one, otherwise, read and block
169 | if '\n' in data:
170 | (line, data) = data.split('\n', 1)
171 | else:
172 | try:
173 | chunk = self._socket.recv(1024)
174 | if not chunk:
175 | raise Exception("tcp connection closed ...")
176 | chunk = chunk.decode('utf-8')
177 | data += chunk
178 | continue
179 | except socket.timeout:
180 | # socket timeouts are expected here; just keep polling
181 | continue
182 |
183 | if log_protocol:
184 | logging.debug('JSON-RPC Server > ' + line)
185 |
186 | # Parse the JSON
187 | try:
188 | reply = json.loads(line)
189 | except Exception as e:
190 | logging.error("JSON-RPC Error: Failed to parse JSON %r (skipping)" % line)
191 | continue
192 |
193 | try:
194 | request = None
195 | with self._lock:
196 | if 'id' in reply and reply['id'] in self._requests:
197 | request = self._requests[reply['id']]
198 | self.handle_reply(request = request, reply = reply)
199 | except self.RequestReplyWarning as e:
200 | output = str(e)
201 | if e.request:
202 | try:
203 | output += '\n ' + e.request
204 | except TypeError:
205 | output += '\n ' + str(e.request)
206 | output += '\n ' + str(e.reply)
207 | logging.error(output)
208 | except Exception as e:
209 | logging.error('Exception in RPC thread: %s' % str(e))
210 | self.error_event.set()
211 | logging.error("error flag set ... ending handle_incoming_rpc thread")
212 |
213 |
214 | def handle_reply(self, request, reply):
215 | # Override this method in sub-classes to handle a message from the server
216 | raise self.RequestReplyWarning('Override this method')
217 |
218 | def _send_message(self, message):
219 | ''' Internal method to send a message '''
220 | try:
221 | self._socket.send((message + '\n').encode('utf-8'))
222 | logging.debug("send successful")
223 | except Exception as e:
224 | logging.error("send failed: %s", e)
225 | self.error_event.set()
226 |
227 |
228 | def send(self, method, params, timeout=10):
229 | '''Sends a message to the JSON-RPC server with a timeout'''
230 |
231 | if not self._socket:
232 | raise self.ClientException('Not connected')
233 |
234 | request = dict(id=self._message_id, method=method, params=params)
235 | message = json.dumps(request)
236 |
237 | with self._lock:
238 | self._requests[self._message_id] = request
239 | self._message_id += 1
240 |
241 | # Create a thread to send the message
242 | sender_thread = threading.Thread(target=self._send_message, args=(message,))
243 | sender_thread.start()
244 | sender_thread.join(timeout)
245 |
246 | if sender_thread.is_alive():
247 | # Handle the timeout situation
248 | logging.error("Timeout occurred in send method")
249 | self.error_event.set()
250 | return False
251 |
252 | # If here, the send operation completed within the timeout
253 | if log_protocol:
254 | logging.debug('JSON-RPC Server < ' + message)
255 |
256 | return True
257 |
258 |
259 |
260 | def mining_submit(self, result):
261 | params = [ self._subscription.worker_name ] + [ result[k] for k in ('job_id', 'extranonce2', 'ntime', 'nonce', 'version') ]
262 | try:
263 | ret = self.send(method = 'mining.submit', params = params)
264 | if not ret:
265 | raise Exception("mining.submit failed")
266 | return True
267 | except Exception as e:
268 | logging.error("mining.submit exception: %s", e)
269 | return False
270 |
271 | def connect(self, socket):
272 | '''Connects to a remote JSON-RPC server'''
273 |
274 | if self._rpc_thread:
275 | raise self.ClientException('Already connected')
276 |
277 | self._socket = socket
278 | # submit sometimes would hang forever
279 | self._socket.settimeout(10)
280 |
281 | self._rpc_thread = threading.Thread(target = self._handle_incoming_rpc)
282 | self._rpc_thread.daemon = True
283 | self._rpc_thread.start()
284 |
285 |
286 | # Miner client
287 | class Miner(SimpleJsonRpcClient):
288 | '''Simple mining client'''
289 |
290 | class MinerWarning(SimpleJsonRpcClient.RequestReplyWarning):
291 | def __init__(self, message, reply, request = None):
292 | SimpleJsonRpcClient.RequestReplyWarning.__init__(self, 'Mining State Error: ' + message, reply, request)
293 |
294 | class MinerAuthenticationException(SimpleJsonRpcClient.RequestReplyException): pass
295 |
296 | def __init__(self, url, username, password, miner, suggest_difficulty):
297 | SimpleJsonRpcClient.__init__(self)
298 |
299 | self._url = url
300 | self._username = username
301 | self._password = password
302 | self._suggest_difficulty = suggest_difficulty
303 |
304 | self._subscription = SubscriptionSHA256D()
305 |
306 | self._job = None
307 |
308 | self._miner = miner
309 | self._miner.set_submit_callback(self.mining_submit)
310 |
311 | self._accepted_shares = 0
312 |
313 | # Accessors
314 | url = property(lambda s: s._url)
315 | username = property(lambda s: s._username)
316 | password = property(lambda s: s._password)
317 |
318 |
319 | # Overridden from SimpleJsonRpcClient
320 | def handle_reply(self, request, reply):
321 |
322 | # New work, stop what we were doing before, and start on this.
323 | if reply.get('method') == 'mining.notify':
324 | if 'params' not in reply or len(reply['params']) != 9:
325 | raise self.MinerWarning('Malformed mining.notify message', reply)
326 |
327 | (job_id, prevhash, coinb1, coinb2, merkle_branches, version, nbits, ntime, clean_jobs) = reply['params']
328 |
329 | # Create the new job
330 | self._job = self._subscription.create_job(
331 | job_id = job_id,
332 | prevhash = prevhash,
333 | coinb1 = coinb1,
334 | coinb2 = coinb2,
335 | merkle_branches = merkle_branches,
336 | version = version,
337 | nbits = nbits,
338 | ntime = ntime
339 | )
340 | if clean_jobs:
341 | self._miner.clean_jobs()
342 |
343 | self._miner.start_job(self._job)
344 |
345 | logging.debug('New job: job_id=%s' % job_id)
346 |
347 | # The server wants us to change our difficulty (on all *future* work)
348 | elif reply.get('method') == 'mining.set_difficulty':
349 | if 'params' not in reply or len(reply['params']) != 1:
350 | raise self.MinerWarning('Malformed mining.set_difficulty message', reply)
351 |
352 | (difficulty, ) = reply['params']
353 | self._miner.set_difficulty(int(difficulty))
354 |
355 | logging.debug('Change difficulty: difficulty=%s' % difficulty)
356 |
357 | # This is a reply to...
358 | elif request:
359 |
360 | # ...subscribe; set-up the work and request authorization
361 | if request.get('method') == 'mining.subscribe':
362 | if 'result' not in reply or len(reply['result']) != 3:
363 | raise self.MinerWarning('Reply to mining.subscribe is malformed', reply, request)
364 |
365 | (tmp, extranonce1, extranonce2_size) = reply['result']
366 |
367 | if not isinstance(tmp, list) or len(tmp) < 1 or not isinstance(tmp[0], list) or not len(tmp[0]) == 2:
368 | raise self.MinerWarning('Reply to mining.subscribe is malformed', reply, request)
369 |
370 | notify_subscription_id = None
371 | for subscription in tmp:
372 | if subscription[0] == "mining.notify":
373 | notify_subscription_id = subscription[1]
374 |
376 | if notify_subscription_id is None:
376 | raise self.MinerWarning('Reply to mining.subscribe is malformed', reply, request)
377 |
378 | self._subscription.set_subscription(notify_subscription_id, extranonce1, extranonce2_size)
379 |
380 | logging.debug('Subscribed: subscription_id=%s' % notify_subscription_id)
381 |
382 | # Request authentication
383 | self.send(method = 'mining.authorize', params = [ self.username, self.password ])
384 |
385 | # ...authorize; if we failed to authorize, quit
386 | elif request.get('method') == 'mining.authorize':
387 | if 'result' not in reply or not reply['result']:
388 | raise self.MinerAuthenticationException('Failed to authenticate worker', reply, request)
389 |
390 | worker_name = request['params'][0]
391 | self._subscription.set_worker_name(worker_name)
392 |
393 | logging.debug('Authorized: worker_name=%s' % worker_name)
394 |
395 | # suggest_difficulty after authorize
396 | if self._suggest_difficulty:
397 | try:
398 | self.send(method = 'mining.suggest_difficulty', params = [self._suggest_difficulty])
399 | except Exception:
400 | logging.warning("suggest_difficulty failed, maybe not supported by the stratum server")
401 |
402 | # ...submit; complain if the server didn't accept our submission
403 | elif request.get('method') == 'mining.submit':
404 | if 'result' not in reply or not reply['result']:
405 | logging.info('Share - Invalid')
406 | self._miner.not_accepted_callback()
407 | raise self.MinerWarning('Failed to accept submit', reply, request)
408 |
409 | self._accepted_shares += 1
410 | self._miner.accepted_callback()
411 | logging.info('Accepted shares: %d' % self._accepted_shares)
412 |
413 | # ??? *shrug*
414 | else:
415 | raise self.MinerWarning('Unhandled message', reply, request)
416 |
417 | # ??? *double shrug*
418 | else:
419 | raise self.MinerWarning('Bad message state', reply)
420 |
421 | def serve(self):
422 | '''Begins the miner. This method does not return.'''
423 |
424 | # Figure out the hostname and port
425 | url = urlparse.urlparse(self.url)
426 | hostname = url.hostname or ''
427 | port = url.port or 9333
428 |
429 | logging.info('Starting server on %s:%d' % (hostname, port))
430 | # clear error if there was any
431 | self.error_event.clear()
432 |
433 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
434 | sock.connect((hostname, port))
435 | self.connect(sock)
436 |
437 | self.send(method = 'mining.subscribe', params = [ f"{self._miner.get_user_agent()}" ])
438 |
439 | def shutdown(self):
440 | self._miner.shutdown()
441 |
442 |
443 |
444 | def setup_logging(log_level, log_filename):
445 | # Create a logger
446 | logger = logging.getLogger()
447 | logger.setLevel(log_level)
448 |
449 | # Create a formatter
450 | formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
451 |
452 | # Create a handler for logging to the console
453 | console_handler = logging.StreamHandler()
454 | console_handler.setFormatter(formatter)
455 | logger.addHandler(console_handler)
456 |
457 | # If a log filename is provided, also log to a file
458 | if log_filename:
459 | file_handler = logging.FileHandler(log_filename, mode='w')
460 | file_handler.setFormatter(formatter)
461 | logger.addHandler(file_handler)
462 |
463 |
464 | # make it accessible to the sigint handler
465 | pyminer = None
466 |
467 | def sigint_handler(signal_received, frame):
468 | print('SIGINT (Ctrl+C) captured, exiting gracefully')
469 | if pyminer is not None:
470 | pyminer.shutdown()
471 | os._exit(0)
472 |
473 | def handle_exception(exc_type, exc_value, exc_traceback):
474 | """
475 | Custom exception handler.
476 | Logs the exception information before the program exits.
477 | """
478 | if issubclass(exc_type, KeyboardInterrupt):
479 | # For user interrupts, log at a lower level and do not include traceback
480 | logging.warning("Application interrupted by user.")
481 | else:
482 | # Log all other exceptions with traceback
483 | logging.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
484 |
485 |
486 | # CLI for cpu mining
487 | if __name__ == '__main__':
488 | import argparse
489 |
490 | # Parse the command line
491 | parser = argparse.ArgumentParser(description="PyMiner is a Stratum CPU mining client. "
492 | "If you like this piece of software, please "
493 | "consider supporting its future development via "
494 | "donating to one of the addresses indicated in the "
495 | "README.md file")
496 |
497 | parser.add_argument('-o', '--url', help = 'stratum mining server url (eg: stratum+tcp://foobar.com:3333)')
498 | parser.add_argument('-u', '--user', dest = 'username', default = '', help = 'username for mining server', metavar = "USERNAME")
499 | parser.add_argument('-p', '--pass', dest = 'password', default = '', help = 'password for mining server', metavar = "PASSWORD")
500 |
501 | parser.add_argument('-O', '--userpass', help = 'username:password pair for mining server', metavar = "USERNAME:PASSWORD")
502 |
503 | parser.add_argument('-B', '--background', action ='store_true', help = 'run in the background as a daemon')
504 |
505 | parser.add_argument('-q', '--quiet', action ='store_true', help = 'suppress non-errors')
506 | parser.add_argument('-P', '--dump-protocol', dest = 'protocol', action ='store_true', help = 'show all JSON-RPC chatter')
507 | parser.add_argument('-d', '--debug', action ='store_true', help = 'show extra debug information')
508 |
509 | parser.add_argument('-l', '--log-file', dest = 'logfile', default='', help = 'log to file')
510 |
511 | parser.add_argument('-c', '--config', dest = 'config', default='config.yml', help="use configfile")
512 |
513 | options = parser.parse_args(sys.argv[1:])
514 |
515 | message = None
516 |
517 | # Get the username/password
518 | username = options.username
519 | password = options.password
520 |
521 | if options.userpass:
522 | if username or password:
523 | message = 'May not use -O/-userpass in conjunction with -u/--user or -p/--pass'
524 | else:
525 | try:
526 | (username, password) = options.userpass.split(':')
527 | except Exception as e:
528 | message = 'Could not parse username:password for -O/--userpass'
529 |
530 | # Was there an issue? Show the help screen and exit.
531 | if message:
532 | parser.print_help()
533 | print()
534 | print(message)
535 | sys.exit(1)
536 |
537 | log_level = logging.INFO
538 |
539 | if options.debug:
540 | log_level = logging.DEBUG
541 |
542 | setup_logging(log_level, options.logfile)
543 |
544 | # write all exceptions to log file
545 | sys.excepthook = handle_exception
546 |
547 | log_protocol = options.protocol
548 |
549 | # They want a daemon, give them a daemon
550 | if options.background:
551 | import os
552 | if os.fork() or os.fork(): sys.exit()  # daemonize via double fork; both parents exit
553 |
554 | signal.signal(signal.SIGINT, sigint_handler)
555 |
556 | username_parts = username.split(".")  # use the resolved username so -O/--userpass works too
557 | address = username_parts[0]
558 |
559 | network = shared.detect_btc_network(address)
560 | if network == shared.BitcoinNetwork.UNKNOWN:
561 | logging.error("unknown address type: %s", address)
562 |
563 | # Load configuration from YAML
564 | with open(options.config, 'r') as file:
565 | config = yaml.safe_load(file)
566 |
567 | suggest_difficulty = config.get('suggest_difficulty', None)
568 |
569 | piaxeMiner = miner.BM1366Miner(config, address, network)
570 | piaxeMiner.init()
571 |
572 | # Heigh-ho, heigh-ho, it's off to work we go...
573 |
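# Reconnect loop: any exception in serve() sets error_event, the client
# is stopped, and a fresh connection is attempted after 5 seconds.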
574 | while True:
575 | try:
576 | pyminer = Miner(options.url, username, password, piaxeMiner, suggest_difficulty)
577 | pyminer.serve()
578 | except Exception as e:
579 | logging.error("exception in serve ... restarting client: %s", e)
580 | pyminer.error_event.set()
581 |
582 | logging.debug("waiting for error")
583 | pyminer.error_event.wait()
584 | logging.debug("error received")
585 | pyminer.stop()
586 | time.sleep(5)
587 |
588 |
589 |
--------------------------------------------------------------------------------
/piaxe/miner.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | import serial
4 | import time
5 | import logging
6 | import random
7 | import copy
8 | import os
9 | import math
10 | import yaml
11 | import json
12 |
13 | import threading
14 | from shared import shared
15 |
16 | from . import ssd1306
17 | from . import bm1366
18 | from . import influx
19 | from . import discord
20 | from . import rest
21 | from . import smartplug
22 |
23 | from .boards import piaxe
24 | from .boards import qaxe
25 | from .boards import bitcrane
26 | from .boards import flex4axe
27 | from .boards import zeroxaxe
28 |
29 | try:
30 | from .ssd1306 import SSD1306
31 | except Exception:
32 | pass  # the OLED display is optional
33 |
34 | class Job(shared.Job):
35 | def __init__(
36 | self,
37 | job_id,
38 | prevhash,
39 | coinb1,
40 | coinb2,
41 | merkle_branches,
42 | version,
43 | nbits,
44 | ntime,
45 | extranonce1,
46 | extranonce2_size,
47 | max_nonce=0x7fffffff,
48 | ):
49 | super().__init__(job_id, prevhash, coinb1, coinb2, merkle_branches, version, nbits, ntime, extranonce1, extranonce2_size, max_nonce)
50 |
51 |
52 | class BM1366Miner:
53 | def __init__(self, config, address, network):
54 | self.config = config
55 |
56 | self.current_job = None
57 | self.current_work = None
58 | self.serial_port = None
59 |
60 | self._read_index = 0
61 | self._write_index = 0
62 | self._buffer = bytearray([0] * 64)
63 |
64 | self._internal_id = 0
65 | self._latest_work_id = 0
66 | self._jobs = dict()
67 | self.last_response = time.time()
68 |
69 | self.tracker_send = list()
70 | self.tracker_received = list()
71 |
72 | self.job_thread = None
73 | self.receive_thread = None
74 | self.temp_thread = None
75 | self.display_thread = None
76 | self.job_lock = threading.Lock()
77 | self.serial_lock = threading.Lock()
78 | self.stop_event = threading.Event()
79 | self.new_job_event = threading.Event()
80 | self.led_thread = None
81 | self.led_event = threading.Event()
82 | self.network = network
83 | self.address = address
84 |
85 | self.last_job_time = time.time()
86 | self.last_response = time.time()
87 |
88 | self.found_hashes = dict()
89 | self.found_timestamps = list()
90 |
91 | self.shares = list()
92 | self.stats = influx.Stats()
93 |
94 | self.display = SSD1306(self.stats)
95 |
96 | self.miner = self.config['miner']
97 | self.verify_solo = self.config.get('verify_solo', False)
98 | self.debug_bm1366 = self.config.get("debug_bm1366", False)
99 |
100 | def shutdown(self):
101 | # signal the threads to end
102 | self.stop_event.set()
103 |
104 | # stop influx
105 | if self.influx:
106 | self.influx.shutdown()
107 |
108 | # stop smartplug
109 | if self.smartplug:
110 | self.smartplug.shutdown()
111 |
112 | # join all threads
113 | for t in [self.job_thread, self.receive_thread, self.temp_thread, self.display_thread, self.led_thread, self.uptime_counter_thread, self.alerter_thread]:
114 | if t is not None:
115 | t.join(5)
116 |
117 | self.hardware.shutdown()
118 |
119 | def get_name(self):
120 | return self.hardware.get_name()
121 |
122 | def get_user_agent(self):
123 | return f"{self.get_name()}/0.1"
124 |
125 | def init(self):
126 | if self.miner == 'bitcrane':
127 | self.hardware = bitcrane.BitcraneHardware(self.config[self.miner])
128 | self.asics = bm1366.BM1366()
129 | elif self.miner == 'piaxe':
130 | self.hardware = piaxe.RPiHardware(self.config[self.miner])
131 | self.asics = bm1366.BM1366()
132 | elif self.miner == "qaxe":
133 | self.hardware = qaxe.QaxeHardware(self.config[self.miner])
134 | self.asics = bm1366.BM1366()
135 | elif self.miner == "qaxe+":
136 | self.hardware = qaxe.QaxeHardware(self.config[self.miner])
137 | self.asics = bm1366.BM1368()
138 | elif self.miner == "flex4axe":
139 | self.hardware = flex4axe.Flex4AxeHardware(self.config[self.miner])
140 | self.asics = bm1366.BM1366()
141 | elif self.miner == "0xaxe":
142 | self.hardware = zeroxaxe.ZeroxAxe(self.config[self.miner])
143 | self.asics = bm1366.BM1366()
144 | else:
145 | raise Exception(f"unknown miner: {self.miner}")
146 |
147 | self.serial_port = self.hardware.serial_port()
148 |
149 | # set the hardware dependent functions for serial and reset
150 | self.asics.ll_init(self._serial_tx_func, self._serial_rx_func,
151 | self.hardware.reset_func)
152 |
153 |
154 | # default is: enable all chips
155 | chips_enabled = self.config[self.miner].get('chips_enabled', None)
156 |
157 |
158 | max_retries = 5 # Maximum number of attempts
159 |
160 | # currently the qaxe+ needs this loop :see-no-evil:
161 | for attempt in range(max_retries):
162 | try:
163 | chip_counter = self.asics.init(self.hardware.get_asic_frequency(), self.hardware.get_chip_count(), chips_enabled)
164 | logging.info("Initialization successful.")
165 | break
166 | except Exception as e:
167 | logging.error("Attempt %d: Not enough chips found: %s", attempt + 1, e)
168 |
169 | # only retry on 1368s
170 | if not isinstance(self.asics, bm1366.BM1368):
171 | raise
172 |
173 | if attempt < max_retries - 1:
174 | time.sleep(1) # Wait before the next attempt
175 | else:
176 | logging.error("Max retries reached. Initialization failed.")
177 | raise
178 |
179 | logging.info(f"{chip_counter} chips were found!")
180 |
181 | self.set_difficulty(512)
182 | self.extranonce2_interval = self.config[self.miner]["extranonce2_interval"]
183 |
184 | self.temp_thread = threading.Thread(target=self._monitor_temperature)
185 | self.temp_thread.start()
186 |
187 | self.receive_thread = threading.Thread(target=self._receive_thread)
188 | self.receive_thread.start()
189 |
190 | self.job_thread = threading.Thread(target=self._job_thread)
191 | self.job_thread.start()
192 |
193 | self.uptime_counter_thread = threading.Thread(target=self._uptime_counter_thread)
194 | self.uptime_counter_thread.start()
195 |
196 | self.led_thread = threading.Thread(target=self._led_thread)
197 | self.led_thread.start()
198 |
199 | influx_config = self.config.get('influx', None)
200 | self.influx = None
201 | if influx_config is not None and influx_config.get('enabled', False):
202 | stats_name = "mainnet_stats"
203 | if self.network == shared.BitcoinNetwork.TESTNET:
204 | stats_name = "testnet_stats"
205 | elif self.network == shared.BitcoinNetwork.REGTEST:
206 | stats_name = "regtest_stats"
207 |
208 | self.influx = influx.Influx(influx_config, self.stats, stats_name)
209 | try:
210 | self.influx.load_last_values()
211 | except Exception as e:
212 | logging.error("we really don't want to start without previous influx values: %s", e)
213 | self.hardware.shutdown()
214 | os._exit(0)
215 |
216 | # start writing thread after values were loaded
217 | self.influx.start()
218 |
219 | smartplug_config = self.config.get('smartplug', None)
220 | self.smartplug = None
221 | if smartplug_config is not None and smartplug_config.get('enabled', False):
222 | if not self.influx:
223 | logging.error("influx not enabled, skipping smartplug module")
224 | else:
225 | self.smartplug = smartplug.Tasmota(smartplug_config)
226 | self.influx.add_stats_callback(self.smartplug.add_smart_plug_energy_data)
227 | self.smartplug.start()
228 |
229 | alerter_config = self.config.get("alerter", None)
230 | self.alerter_thread = None
231 | if alerter_config is not None and alerter_config.get("enabled", False):
232 | if alerter_config["type"] == "discord-webhook":
233 | self.alerter = discord.DiscordWebhookAlerter(alerter_config)
234 | self.alerter_thread = threading.Thread(target=self._alerter_thread)
235 | self.alerter_thread.start()
236 | else:
237 | raise Exception(f"unknown alerter: {alerter_config['type']}")
238 |
239 | i2c_config = self.config.get("i2c_display", None)
240 | if i2c_config is not None and i2c_config.get("enabled", False):
241 | self.display_thread = threading.Thread(target=self._display_update)
242 | self.display_thread.start()
243 |
244 | rest_config = self.config.get("rest_api", None)
245 | if rest_config is not None and rest_config.get("enabled", False):
246 | self.rest_api = rest.RestAPI(rest_config, self, self.stats)
247 | self.rest_api.run()
248 |
249 |
250 | def _uptime_counter_thread(self):
251 | logging.info("uptime counter thread started ...")
252 | while not self.stop_event.is_set():
253 | with self.stats.lock:
254 | self.stats.total_uptime += 1
255 | self.stats.uptime += 1
256 | time.sleep(1)
257 |
258 | logging.info("uptime counter thread ended ...")
259 |
260 | def _alerter_thread(self):
261 | logging.info("Alerter thread started ...")
262 | self.alerter.alert("MINER", "started")
263 | while not self.stop_event.is_set():
264 | self.alerter.alert_if("NO_JOB", "no new job for more than 5 minutes!", (time.time() - self.last_job_time) > 5*60)
265 | self.alerter.alert_if("NO_RESPONSE", "no ASIC response for more than 5 minutes!", (time.time() - self.last_response) > 5*60)
266 | time.sleep(1)
267 |
268 | self.alerter.alert("MINER", "shutdown")
269 | logging.info("Alerter thread ended ...")
270 |
271 | def _display_update(self):
272 | logging.info("display update ...")
273 | self.display.init()
274 | while not self.stop_event.is_set():
275 | self.display.update()
276 | time.sleep(2)
277 | logging.info("display update ended ...")
278 |
279 | def _led_thread(self):
280 | logging.info("LED thread started ...")
281 | led_state = True
282 | while not self.stop_event.is_set():
283 | # if for more than 5 minutes no new job is received
284 | # we flash the light faster
285 | if time.time() - self.last_job_time > 5*60 or \
286 | time.time() - self.last_response > 5*60:
287 | led_state = not led_state
288 | self.hardware.set_led(led_state)
289 | time.sleep(0.25)
290 | continue
291 |
292 | # this gets triggered in 2s intervals
293 | # .wait() doesn't work reliably because it happens
294 | # that the submit method hangs forever and the
295 | # event wouldn't be fired then
296 | if self.led_event.is_set():
297 | self.led_event.clear()
298 | led_state = not led_state
299 | self.hardware.set_led(led_state)
300 | continue
301 |
302 | time.sleep(0.25)
303 |
304 | logging.info("LED thread ended ...")
305 |
306 | def _monitor_temperature(self):
307 | while not self.stop_event.is_set():
308 |
309 | temp = self.hardware.read_temperature_and_voltage()
310 |
311 | # trigger measurement of metrics
312 | if isinstance(self.asics, bm1366.BM1368):
313 | self.asics.request_temps()
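# the BM1368 replies asynchronously; the readings are picked up in
# _receive_thread via try_get_temp_from_response()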
314 |
315 | with self.stats.lock:
316 | self.stats.temp = temp["temp"][0]
317 | self.stats.temp2 = temp["temp"][1]
318 | self.stats.temp3 = temp["temp"][2]
319 | self.stats.temp4 = temp["temp"][3]
320 | self.stats.vdomain1 = temp["voltage"][0]
321 | self.stats.vdomain2 = temp["voltage"][1]
322 | self.stats.vdomain3 = temp["voltage"][2]
323 | self.stats.vdomain4 = temp["voltage"][3]
324 |
325 | # inject asic temps into the temp dict for display
326 | temp['asic_temp'] = [
327 | self.stats.asic_temp1,
328 | self.stats.asic_temp2,
329 | self.stats.asic_temp3,
330 | self.stats.asic_temp4
331 | ]
332 |
333 | logging.info("temperature and voltage: %s", str(temp))
334 |
335 |
336 |
337 | for i in range(0, 4):
338 | if temp["temp"][i] is not None and temp["temp"][i] > 70.0:
339 | logging.error("too hot, shutting down ...")
340 | self.hardware.shutdown()
341 | os._exit(1)
342 |
343 | time.sleep(1.5)
344 |
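# serial write helper: write() may accept fewer bytes than requested,
# so the buffer is pushed in a loop until everything has been sent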
345 | def _serial_tx_func(self, data):
346 | with self.serial_lock:
347 | total_sent = 0
348 | while total_sent < len(data):
349 | sent = self.serial_port.write(data[total_sent:])
350 | if sent == 0:
351 | raise RuntimeError("Serial connection broken")
352 | total_sent += sent
353 | if self.debug_bm1366:
354 | logging.debug("-> %s", bytearray(data).hex())
355 |
356 | def _serial_rx_func(self, size, timeout_ms):
357 | self.serial_port.timeout = timeout_ms / 1000.0
358 |
359 | data = self.serial_port.read(size)
360 | bytes_read = len(data)
361 |
362 | if self.debug_bm1366 and bytes_read > 0:
363 | logging.debug("serial_rx: %d", bytes_read)
364 | logging.debug("<- %s", data.hex())
365 |
366 | return data if bytes_read > 0 else None
367 |
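# Hashes found in the last 10 minutes are remembered to detect
# duplicate results; found_timestamps keeps insertion order so expired
# entries can be popped from the front of the list.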
368 | def cleanup_duplicate_finds(self):
369 | current_time = time.time()
370 |
371 | # clean up dict, delete old hashes, counts elements to pop from the list
372 | remove_first_n=0
373 | for timestamp, hash_key in self.found_timestamps:
374 | if current_time - timestamp > 600:
375 | #logging.debug(f"removing {hash_key} from found_hashes dict")
376 | if hash_key in self.found_hashes:
377 | del self.found_hashes[hash_key]
378 | else:
379 | pass
380 | #logging.debug(f"{hash_key} not in dict")
381 | remove_first_n += 1
382 | else:
383 | break
384 |
385 | # pop elements
386 | #logging.debug(f"removing first {remove_first_n} element(s) of found_timestamps list")
387 | for i in range(0, remove_first_n):
388 | self.found_timestamps.pop(0)
389 |
390 |
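# Estimates the hash rate from recent shares: a share at pool
# difficulty D represents on average D * 2**32 hashes. Worked example
# (illustrative numbers): 10 shares at difficulty 512 within the
# window are 10 * 512 * 2**32 ≈ 2.2e13 hashes; spread over the 600 s
# window that is ≈ 3.7e10 H/s ≈ 36.6 GH/s.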
391 | def hash_rate(self, time_period=600):
392 | current_time = time.time()
393 | total_work = 0
394 |
395 | #min_timestamp = current_time
396 | #max_timestamp = 0
397 | for shares, difficulty, timestamp in self.shares:
398 | # Consider shares only in the last 10 minutes
399 | if current_time - timestamp <= time_period:
400 | total_work += shares * (difficulty << 32)
401 | #min_timestamp = min(min_timestamp, timestamp)
402 | #max_timestamp = max(max_timestamp, timestamp)
403 |
404 | #if min_timestamp > max_timestamp:
405 | # raise Exception("timestamp range calculation failed")
406 |
407 | #if min_timestamp == max_timestamp:
408 | # return 0.0
409 |
410 | # Hash rate in H/s (Hashes per second)
411 | #hash_rate_hps = total_work / (max_timestamp - min_timestamp)
412 | hash_rate_hps = total_work / time_period
413 |
414 | # Convert hash rate to GH/s
415 | hash_rate_ghps = hash_rate_hps / 1e9
416 | logging.debug("\033[32mhash rate: %f GH/s\033[0m", hash_rate_ghps)
417 | return hash_rate_ghps
418 |
419 | def _set_target(self, target):
420 | self._target = '%064x' % target
421 |
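# set_difficulty clamps to at least 256 and derives the share target
# from the difficulty (via shared.calculate_target); note that the
# ASIC itself only supports power-of-two difficulty masks (see the
# pool-target handling in _receive_thread).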
422 | def set_difficulty(self, difficulty):
423 | # restrict to min 256
424 | difficulty = max(difficulty, 256)
425 |
426 | self._difficulty = difficulty
427 | self._set_target(shared.calculate_target(difficulty))
428 | self.asics.set_job_difficulty_mask(difficulty)
429 |
430 | with self.stats.lock:
431 | self.stats.difficulty = difficulty
432 |
433 | def set_submit_callback(self, cb):
434 | self.submit_cb = cb
435 |
436 | def accepted_callback(self):
437 | with self.stats.lock:
438 | self.stats.accepted += 1
439 |
440 | def not_accepted_callback(self):
441 | with self.stats.lock:
442 | self.stats.not_accepted += 1
443 |
444 | def _receive_thread(self):
445 | logging.info('receiving thread started ...')
446 | mask_nonce = 0x00000000
447 | mask_version = 0x00000000
448 |
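# ASIC responses are 11-byte frames starting with the 0xaa 0x55
# preamble; incoming bytes are collected in a 64-byte ring buffer and
# a frame is parsed as soon as 11 bytes are available at a preamble.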
449 | while not self.stop_event.is_set():
450 | byte = self._serial_rx_func(11, 100)
451 |
452 | if not byte:
453 | continue
454 |
455 | for i in range(0, len(byte)):
456 | self._buffer[self._write_index % 64] = byte[i]
457 | self._write_index += 1
458 |
459 | if self._write_index - self._read_index >= 11 and self._buffer[self._read_index % 64] == 0xaa and self._buffer[(self._read_index + 1) % 64] == 0x55:
460 | data = bytearray([0] * 11)
461 | for i in range(0, 11):
462 | data[i] = self._buffer[self._read_index % 64]
463 | self._read_index += 1
464 |
465 | #if self.debug_bm1366:
466 | # logging.debug("<- %s", bytes(data).hex())
467 |
468 | asic_result = bm1366.AsicResult().from_bytes(bytes(data))
469 | if not asic_result or not asic_result.nonce:
470 | continue
471 |
472 | # temperature response
473 | (temp_value, temp_id) = self.asics.try_get_temp_from_response(asic_result)
474 | if temp_value:
475 | logging.debug(f"temp for chip {temp_id}: {temp_value}")
476 |
477 | attribute_name = f"asic_temp{temp_id+1}"
478 | with self.stats.lock:
479 | setattr(self.stats, attribute_name, temp_value * 0.171342 - 299.5144)  # empirical scaling of the raw reading to °C
480 |
481 | continue
482 |
483 | with self.job_lock:
484 | self.last_response = time.time()
485 | result_job_id = self.asics.get_job_id_from_result(asic_result.job_id)
486 | logging.debug("work received %02x", result_job_id)
487 |
488 | if result_job_id not in self._jobs:
489 | logging.debug("internal jobid %d not found", result_job_id)
490 | continue
491 |
492 | saved_job = self._jobs[result_job_id]
493 | job = saved_job['job']
494 | work = saved_job['work']
495 | difficulty = saved_job['difficulty']
496 |
497 | if result_job_id != work.id:
498 | logging.error("mismatch ids")
499 | continue
500 |
501 | result = dict(
502 | job_id = job._job_id,
503 | extranonce2 = job._extranonce2, #shared.int_to_hex32(job._extranonce2),
504 | ntime = job._ntime,
505 | nonce = shared.int_to_hex32(asic_result.nonce),
506 | version = shared.int_to_hex32(shared.reverse_uint16(asic_result.version) << 13),
507 | )
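# the version field is the 16-bit rolled value returned by the ASIC,
# shifted into the block-header version bits 13..28 (BIP 320-style
# version rolling, as far as the bit positions suggest)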
508 |
509 |
510 | is_valid, hash, zeros = shared.verify_work(difficulty, job, result)
511 | network_target, network_zeros = shared.nbits_to_target(job._nbits)
512 | pool_target, pool_zeros = shared.get_network_target(difficulty)
513 |
514 | logging.debug("network-target: %s (%d)", network_target, network_zeros)
515 | logging.debug("pool-target: %s (%d)", pool_target, pool_zeros)
516 | logging.debug("found hash: %s (%d)", hash, zeros)
517 |
518 | # detect duplicates
519 | duplicate = hash in self.found_hashes
520 |
521 | self.cleanup_duplicate_finds()
522 |
523 | # save hash in dict
524 | self.found_hashes[hash] = True
525 | self.found_timestamps.append((time.time(), hash))
526 |
527 | # some debug info
528 | #logging.debug(f"{len(self.found_hashes)} in found_hashes dict, {len(self.found_timestamps)} in found_timestamps list")
529 |
530 | if duplicate:
531 | logging.warning("found duplicate hash!")
532 |
533 | if hash < network_target:
534 | logging.info("!!! it seems we found a block !!!")
535 |
536 | # the hash isn't completely wrong but isn't lower than the target:
537 | # the ASIC uses power-of-two targets but the pool might not (eg ckpool)
538 | # we should just pretend it didn't happen and not count it^^
539 | if not is_valid and zeros >= pool_zeros:
540 | logging.info("ignoring hash because higher than pool target")
541 | continue
542 |
543 |
544 | if is_valid:
545 | mask_nonce |= asic_result.nonce
546 | mask_version |= asic_result.version << 13
547 |
548 | logging.debug(f"mask_nonce: %s (%08x)", shared.int_to_bin32(mask_nonce, 4), mask_nonce)
549 | logging.debug(f"mask_version: %s (%08x)", shared.int_to_bin32(mask_version, 4), mask_version)
550 | x_nonce = (asic_result.nonce & 0x0000fc00) >> 10
551 | logging.debug(f"result from asic {x_nonce}")
552 |
553 | with self.stats.lock:
554 | if hash < network_target:
555 | self.stats.blocks_found += 1
556 | self.stats.total_blocks_found += 1
557 |
558 | if duplicate:
559 | self.stats.duplicate_hashes += 1
560 |
561 | self.stats.invalid_shares += 1 if not is_valid else 0
562 | self.stats.valid_shares += 1 if is_valid else 0
563 |
564 | # don't add to shares if it's invalid or it's a duplicate
565 | if is_valid and not duplicate:
566 | self.shares.append((1, difficulty, time.time()))
567 |
568 | self.stats.hashing_speed = self.hash_rate()
569 | hash_difficulty = shared.calculate_difficulty_from_hash(hash)
570 | self.stats.best_difficulty = max(self.stats.best_difficulty, hash_difficulty)
571 | self.stats.total_best_difficulty = max(self.stats.total_best_difficulty, hash_difficulty)
572 |
573 | # restart miner with new extranonce2
574 | #self.new_job_event.set() TODO
575 |
576 | # submit result without lock on the job!
577 | # we don't submit invalid hashes or duplicates
578 | if not is_valid or duplicate:
579 | # if its invalid it would be rejected
580 | # we don't try it but we can count it to not_accepted
581 | self.not_accepted_callback()
582 | logging.error("invalid or duplicate result!")
583 | continue
584 |
585 |
586 | logging.info("valid result")
587 | if not self.submit_cb:
588 | logging.error("no submit callback set")
589 | elif not self.submit_cb(result):
590 | self.stats.pool_errors += 1
591 |
592 | logging.info('receiving thread ended ...')
593 |
594 |
595 |
596 | def _job_thread(self):
597 | logging.info("job thread started ...")
598 | current_time = time.time()
599 | while not self.stop_event.is_set():
600 | self.new_job_event.wait(self.extranonce2_interval)
601 | self.new_job_event.clear()
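# either a new job arrived or extranonce2_interval expired; in both
# cases fresh work with a new random extranonce2 is built and sent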
602 |
603 | with self.job_lock:
604 | if not self.current_job:
605 | logging.info("no job ...")
606 | time.sleep(1)
607 | continue
608 |
609 | extranonce2 = random.randint(0, 2**31-1)
610 | logging.debug("new extranonce2 %08x", extranonce2)
611 | self.current_job.set_extranonce2(extranonce2)
612 |
613 | self._internal_id += 1
614 | self._latest_work_id = self.asics.get_job_id(self._internal_id)
615 |
616 | work = bm1366.WorkRequest()
617 | logging.debug("new work %02x", self._latest_work_id)
618 | work.create_work(
619 | self._latest_work_id,
620 | 0x00000000,
621 | shared.hex_to_int(self.current_job._nbits),
622 | shared.hex_to_int(self.current_job._ntime),
623 | shared.reverse_bytes(shared.hex_to_bytes(self.current_job._merkle_root)),
624 | shared.reverse_bytes(shared.hex_to_bytes(self.current_job._prevhash)),
625 | shared.hex_to_int(self.current_job._version)
626 | )
627 | self.current_work = work
628 |
629 | # make deepcopies
630 | self._jobs[self._latest_work_id] = {
631 | 'job': copy.deepcopy(self.current_job),
632 | 'work': copy.deepcopy(self.current_work),
633 | 'difficulty': self._difficulty
634 | }
635 |
636 | self.led_event.set()
637 |
638 | self.asics.send_work(work)
639 |
640 | logging.info("job thread ended ...")
641 |
642 | def clean_jobs(self):
643 | with self.job_lock:
644 | logging.info("cleaning jobs ...")
645 | self._jobs = dict()
646 | self.current_job = None
647 |
648 | def start_job(self, job):
649 | logging.info("starting new job %s", job._job_id)
650 |
651 | self.last_job_time = time.time()
652 | with self.job_lock:
653 | self.current_job = job
654 |
655 | if self.verify_solo:
656 | try:
657 | # only decode when verify_solo is enabled
658 | coinb = job.deserialize_coinbase()
659 | if coinb['height'] is not None:
660 | logging.debug("mining for block %d", coinb['height'])
661 |
662 | is_solo, value_our, value_total = shared.verify_solo(self.address, coinb)
663 | logging.debug("solo mining verification passed! reward: %d", value_our)
664 | except Exception as e:
665 | logging.error("verify_solo error: %s", e)
666 | else:
667 | logging.debug("solo mining not verified!")
668 |
669 | #logging.debug(json.dumps(job.deserialize_coinbase(), indent=4))
670 |
671 |
672 | self.new_job_event.set()
673 |
--------------------------------------------------------------------------------
/docker/monitoring/assets/dashboards/PiAxe Miner.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": {
7 | "type": "grafana",
8 | "uid": "-- Grafana --"
9 | },
10 | "enable": true,
11 | "hide": true,
12 | "iconColor": "rgba(0, 211, 255, 1)",
13 | "name": "Annotations & Alerts",
14 | "type": "dashboard"
15 | }
16 | ]
17 | },
18 | "editable": true,
19 | "fiscalYearStartMonth": 0,
20 | "graphTooltip": 0,
21 | "id": 1,
22 | "links": [],
23 | "liveNow": false,
24 | "panels": [
25 | {
26 | "datasource": {
27 | "type": "influxdb",
28 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
29 | },
30 | "fieldConfig": {
31 | "defaults": {
32 | "color": {
33 | "mode": "continuous-RdYlGr"
34 | },
35 | "mappings": [],
36 | "max": 600,
37 | "min": 0,
38 | "noValue": "-",
39 | "thresholds": {
40 | "mode": "absolute",
41 | "steps": [
42 | {
43 | "color": "red",
44 | "value": null
45 | },
46 | {
47 | "color": "green",
48 | "value": 15
49 | }
50 | ]
51 | },
52 | "unit": "GHs"
53 | },
54 | "overrides": []
55 | },
56 | "gridPos": {
57 | "h": 8,
58 | "w": 5,
59 | "x": 0,
60 | "y": 0
61 | },
62 | "id": 6,
63 | "options": {
64 | "minVizHeight": 75,
65 | "minVizWidth": 75,
66 | "orientation": "auto",
67 | "reduceOptions": {
68 | "calcs": [
69 | "lastNotNull"
70 | ],
71 | "fields": "",
72 | "values": false
73 | },
74 | "showThresholdLabels": false,
75 | "showThresholdMarkers": true
76 | },
77 | "pluginVersion": "10.2.2",
78 | "targets": [
79 | {
80 | "datasource": {
81 | "type": "influxdb",
82 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
83 | },
84 | "query": "from(bucket: \"piaxe\")\n |> range(start: -5m)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"hashing_speed\")\n |> last() \n",
85 | "refId": "A"
86 | }
87 | ],
88 | "title": "Hash Rate GH/s",
89 | "type": "gauge"
90 | },
91 | {
92 | "datasource": {
93 | "type": "influxdb",
94 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
95 | },
96 | "fieldConfig": {
97 | "defaults": {
98 | "color": {
99 | "mode": "thresholds"
100 | },
101 | "mappings": [],
102 | "noValue": "-",
103 | "thresholds": {
104 | "mode": "absolute",
105 | "steps": [
106 | {
107 | "color": "green",
108 | "value": null
109 | },
110 | {
111 | "color": "red",
112 | "value": 80
113 | }
114 | ]
115 | }
116 | },
117 | "overrides": []
118 | },
119 | "gridPos": {
120 | "h": 2,
121 | "w": 3,
122 | "x": 5,
123 | "y": 0
124 | },
125 | "id": 15,
126 | "options": {
127 | "colorMode": "none",
128 | "graphMode": "none",
129 | "justifyMode": "auto",
130 | "orientation": "horizontal",
131 | "reduceOptions": {
132 | "calcs": [
133 | "lastNotNull"
134 | ],
135 | "fields": "/^Value$/",
136 | "values": false
137 | },
138 | "text": {
139 | "titleSize": 30,
140 | "valueSize": 30
141 | },
142 | "textMode": "value",
143 | "wideLayout": true
144 | },
145 | "pluginVersion": "10.2.2",
146 | "targets": [
147 | {
148 | "datasource": {
149 | "type": "influxdb",
150 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
151 | },
152 | "hide": false,
153 | "query": "from(bucket: \"piaxe\")\n |> range(start: -1y)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"total_uptime\")\n |> last()\n |> map(fn: (r) => {\n uptime_seconds = r._value\n days = uptime_seconds / 86400\n hours = (uptime_seconds % 86400) / 3600\n minutes = (uptime_seconds % 3600) / 60\n uptime_formatted = \"${days}d ${hours}h ${minutes}m\"\n return ({_time: r._time, _value: uptime_formatted})\n })",
154 | "refId": "A"
155 | }
156 | ],
157 | "title": "Total Uptime",
158 | "type": "stat"
159 | },
160 | {
161 | "datasource": {
162 | "type": "influxdb",
163 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
164 | },
165 | "fieldConfig": {
166 | "defaults": {
167 | "color": {
168 | "mode": "thresholds"
169 | },
170 | "mappings": [],
171 | "noValue": "No active session",
172 | "thresholds": {
173 | "mode": "absolute",
174 | "steps": [
175 | {
176 | "color": "green",
177 | "value": null
178 | },
179 | {
180 | "color": "red",
181 | "value": 80
182 | }
183 | ]
184 | }
185 | },
186 | "overrides": [
187 | {
188 | "matcher": {
189 | "id": "byName",
190 | "options": "accepted"
191 | },
192 | "properties": [
193 | {
194 | "id": "displayName",
195 | "value": "Accepted"
196 | }
197 | ]
198 | },
199 | {
200 | "matcher": {
201 | "id": "byName",
202 | "options": "not_accepted"
203 | },
204 | "properties": [
205 | {
206 | "id": "displayName",
207 | "value": "Not Accepted"
208 | }
209 | ]
210 | },
211 | {
212 | "matcher": {
213 | "id": "byName",
214 | "options": "pool_errors ${network}_stats"
215 | },
216 | "properties": [
217 | {
218 | "id": "displayName",
219 | "value": "Pool Errors"
220 | }
221 | ]
222 | },
223 | {
224 | "matcher": {
225 | "id": "byName",
226 | "options": "pool_errors"
227 | },
228 | "properties": [
229 | {
230 | "id": "displayName",
231 | "value": "Pool Errors"
232 | }
233 | ]
234 | }
235 | ]
236 | },
237 | "gridPos": {
238 | "h": 8,
239 | "w": 3,
240 | "x": 8,
241 | "y": 0
242 | },
243 | "id": 13,
244 | "options": {
245 | "colorMode": "none",
246 | "graphMode": "none",
247 | "justifyMode": "auto",
248 | "orientation": "horizontal",
249 | "reduceOptions": {
250 | "calcs": [
251 | "lastNotNull"
252 | ],
253 | "fields": "/.*/",
254 | "values": false
255 | },
256 | "text": {
257 | "titleSize": 15,
258 | "valueSize": 15
259 | },
260 | "textMode": "auto",
261 | "wideLayout": true
262 | },
263 | "pluginVersion": "10.2.2",
264 | "targets": [
265 | {
266 | "datasource": {
267 | "type": "influxdb",
268 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
269 | },
270 | "hide": false,
271 | "query": "from(bucket: \"piaxe\")\n |> range(start: -5m)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> last() \n |> map(fn: (r) => {\n uptime_seconds = r._value\n days = uptime_seconds / 86400\n hours = (uptime_seconds % 86400) / 3600\n minutes = (uptime_seconds % 3600) / 60\n uptime_formatted = \"${days}d ${hours}h ${minutes}m\"\n return ({_field: \"Uptime\", _time: r._time, _value: uptime_formatted})\n })\n",
272 | "refId": "D"
273 | },
274 | {
275 | "datasource": {
276 | "type": "influxdb",
277 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
278 | },
279 | "hide": false,
280 | "query": "import \"math\"\nfrom(bucket: \"piaxe\")\n |> range(start: -5m)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"best_difficulty\")\n |> last() \n |> map(fn: (r) => {\n suffix = if r._value >= 1000000000000.0 then \"T\"\n else if r._value >= 1000000000.0 then \"G\"\n else if r._value >= 1000000.0 then \"M\"\n else if r._value >= 1000.0 then \"k\"\n else \"\" \n scaled = if r._value >= 1000000000000.0 then r._value / 1000000000000.0\n else if r._value >= 1000000000.0 then r._value / 1000000000.0\n else if r._value >= 1000000.0 then r._value / 1000000.0\n else if r._value >= 1000.0 then r._value / 1000.0\n else r._value\n \n number = math.trunc(x: scaled * 100.0) / 100.0\n return ({\n _time: r._time, \n _field: \"Best Difficulty\",\n _value: string(v: number) + suffix\n })\n })\n",
281 | "refId": "E"
282 | },
283 | {
284 | "datasource": {
285 | "type": "influxdb",
286 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
287 | },
288 | "query": "from(bucket: \"piaxe\")\n |> range(start: -5m)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"accepted\")\n |> last() \n ",
289 | "refId": "A"
290 | },
291 | {
292 | "datasource": {
293 | "type": "influxdb",
294 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
295 | },
296 | "hide": false,
297 | "query": "from(bucket: \"piaxe\")\n |> range(start: -5m)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"not_accepted\")\n |> last() \n ",
298 | "refId": "B"
299 | },
300 | {
301 | "datasource": {
302 | "type": "influxdb",
303 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
304 | },
305 | "hide": false,
306 | "query": "from(bucket: \"piaxe\")\n |> range(start: -5m)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"pool_errors\")\n |> last() \n \n\n\n ",
307 | "refId": "C"
308 | }
309 | ],
310 | "title": "Session ",
311 | "transformations": [
312 | {
313 | "id": "joinByField",
314 | "options": {}
315 | },
316 | {
317 | "id": "organize",
318 | "options": {
319 | "excludeByName": {
320 | "Time": true
321 | },
322 | "indexByName": {},
323 | "renameByName": {}
324 | }
325 | }
326 | ],
327 | "type": "stat"
328 | },
329 | {
330 | "datasource": {
331 | "type": "influxdb",
332 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
333 | },
334 | "description": "",
335 | "fieldConfig": {
336 | "defaults": {
337 | "color": {
338 | "mode": "continuous-GrYlRd"
339 | },
340 | "mappings": [],
341 | "max": 75,
342 | "min": 0,
343 | "noValue": "-",
344 | "thresholds": {
345 | "mode": "absolute",
346 | "steps": [
347 | {
348 | "color": "green",
349 | "value": null
350 | },
351 | {
352 | "color": "orange",
353 | "value": 55
354 | },
355 | {
356 | "color": "red",
357 | "value": 65
358 | }
359 | ]
360 | },
361 | "unit": "celsius"
362 | },
363 | "overrides": []
364 | },
365 | "gridPos": {
366 | "h": 8,
367 | "w": 5,
368 | "x": 11,
369 | "y": 0
370 | },
371 | "id": 9,
372 | "options": {
373 | "minVizHeight": 75,
374 | "minVizWidth": 75,
375 | "orientation": "auto",
376 | "reduceOptions": {
377 | "calcs": [
378 | "lastNotNull"
379 | ],
380 | "fields": "",
381 | "values": false
382 | },
383 | "showThresholdLabels": false,
384 | "showThresholdMarkers": true
385 | },
386 | "pluginVersion": "10.2.2",
387 | "targets": [
388 | {
389 | "datasource": {
390 | "type": "influxdb",
391 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
392 | },
393 | "query": "from(bucket: \"piaxe\")\n |> range(start: -5m) // Adjust the time range as needed\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"temperature\")\n |> last() \n ",
394 | "refId": "A"
395 | }
396 | ],
397 | "title": "Temperature",
398 | "type": "gauge"
399 | },
400 | {
401 | "datasource": {
402 | "type": "influxdb",
403 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
404 | },
405 | "fieldConfig": {
406 | "defaults": {
407 | "color": {
408 | "fixedColor": "green",
409 | "mode": "fixed"
410 | },
411 | "decimals": 0,
412 | "mappings": [],
413 | "noValue": "-",
414 | "thresholds": {
415 | "mode": "absolute",
416 | "steps": [
417 | {
418 | "color": "green",
419 | "value": null
420 | },
421 | {
422 | "color": "red",
423 | "value": 80
424 | }
425 | ]
426 | }
427 | },
428 | "overrides": []
429 | },
430 | "gridPos": {
431 | "h": 2,
432 | "w": 3,
433 | "x": 5,
434 | "y": 2
435 | },
436 | "id": 12,
437 | "options": {
438 | "colorMode": "none",
439 | "graphMode": "none",
440 | "justifyMode": "auto",
441 | "orientation": "auto",
442 | "reduceOptions": {
443 | "calcs": [
444 | "lastNotNull"
445 | ],
446 | "fields": "/^Value$/",
447 | "values": false
448 | },
449 | "text": {
450 | "titleSize": 30,
451 | "valueSize": 30
452 | },
453 | "textMode": "auto",
454 | "wideLayout": true
455 | },
456 | "pluginVersion": "10.2.2",
457 | "targets": [
458 | {
459 | "datasource": {
460 | "type": "influxdb",
461 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
462 | },
463 | "query": "import \"math\"\nfrom(bucket: \"piaxe\")\n |> range(start: -1y)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"total_best_difficulty\")\n |> last() \n |> map(fn: (r) => {\n suffix = if r._value >= 1000000000000.0 then \"T\"\n else if r._value >= 1000000000.0 then \"G\"\n else if r._value >= 1000000.0 then \"M\"\n else if r._value >= 1000.0 then \"k\"\n else \"\" \n scaled = if r._value >= 1000000000000.0 then r._value / 1000000000000.0\n else if r._value >= 1000000000.0 then r._value / 1000000000.0\n else if r._value >= 1000000.0 then r._value / 1000000.0\n else if r._value >= 1000.0 then r._value / 1000.0\n else r._value\n \n number = math.trunc(x: scaled * 100.0) / 100.0\n return ({\n _time: r._time, \n _value: string(v: number) + suffix\n })\n })\n",
464 | "refId": "A"
465 | }
466 | ],
467 | "title": "Total Best Difficulty",
468 | "type": "stat"
469 | },
470 | {
471 | "datasource": {
472 | "type": "influxdb",
473 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
474 | },
475 | "description": "",
476 | "fieldConfig": {
477 | "defaults": {
478 | "color": {
479 | "mode": "thresholds"
480 | },
481 | "mappings": [],
482 | "noValue": "-",
483 | "thresholds": {
484 | "mode": "absolute",
485 | "steps": [
486 | {
487 | "color": "green",
488 | "value": null
489 | },
490 | {
491 | "color": "red",
492 | "value": 80
493 | }
494 | ]
495 | }
496 | },
497 | "overrides": []
498 | },
499 | "gridPos": {
500 | "h": 4,
501 | "w": 3,
502 | "x": 5,
503 | "y": 4
504 | },
505 | "id": 16,
506 | "options": {
507 | "colorMode": "value",
508 | "graphMode": "none",
509 | "justifyMode": "auto",
510 | "orientation": "auto",
511 | "reduceOptions": {
512 | "calcs": [
513 | "lastNotNull"
514 | ],
515 | "fields": "/^total_blocks_found$/",
516 | "values": false
517 | },
518 | "textMode": "auto",
519 | "wideLayout": true
520 | },
521 | "pluginVersion": "10.2.2",
522 | "targets": [
523 | {
524 | "datasource": {
525 | "type": "influxdb",
526 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
527 | },
528 | "query": "from(bucket: \"piaxe\")\n |> range(start: -1y)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"total_blocks_found\")\n |> last() \n |> map(fn: (r) => {\n emoji = if r._value == 0 then \"🥹\" else \"🥳\"\n return ({_field: r._field, _time: r._time, _value: string(v: r._value) + emoji})\n })\n",
529 | "refId": "A"
530 | }
531 | ],
532 | "title": "Total Blocks Found",
533 | "type": "stat"
534 | },
535 | {
536 | "datasource": {
537 | "type": "influxdb",
538 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
539 | },
540 | "fieldConfig": {
541 | "defaults": {
542 | "color": {
543 | "mode": "palette-classic"
544 | },
545 | "custom": {
546 | "axisBorderShow": false,
547 | "axisCenteredZero": false,
548 | "axisColorMode": "text",
549 | "axisLabel": "",
550 | "axisPlacement": "auto",
551 | "barAlignment": 0,
552 | "drawStyle": "line",
553 | "fillOpacity": 0,
554 | "gradientMode": "none",
555 | "hideFrom": {
556 | "legend": false,
557 | "tooltip": false,
558 | "viz": false
559 | },
560 | "insertNulls": false,
561 | "lineInterpolation": "smooth",
562 | "lineStyle": {
563 | "fill": "solid"
564 | },
565 | "lineWidth": 1,
566 | "pointSize": 5,
567 | "scaleDistribution": {
568 | "type": "linear"
569 | },
570 | "showPoints": "never",
571 | "spanNulls": true,
572 | "stacking": {
573 | "group": "A",
574 | "mode": "none"
575 | },
576 | "thresholdsStyle": {
577 | "mode": "off"
578 | }
579 | },
580 | "mappings": [],
581 | "min": 0,
582 | "thresholds": {
583 | "mode": "absolute",
584 | "steps": [
585 | {
586 | "color": "green",
587 | "value": null
588 | },
589 | {
590 | "color": "red",
591 | "value": 80
592 | }
593 | ]
594 | }
595 | },
596 | "overrides": []
597 | },
598 | "gridPos": {
599 | "h": 7,
600 | "w": 16,
601 | "x": 0,
602 | "y": 8
603 | },
604 | "id": 2,
605 | "options": {
606 | "legend": {
607 | "calcs": [],
608 | "displayMode": "list",
609 | "placement": "bottom",
610 | "showLegend": true
611 | },
612 | "tooltip": {
613 | "mode": "single",
614 | "sort": "none"
615 | }
616 | },
617 | "targets": [
618 | {
619 | "datasource": {
620 | "type": "influxdb",
621 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
622 | },
623 | "query": "from(bucket: \"piaxe\")\n |> range(start: v.timeRangeStart)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"hashing_speed\")\n |> aggregateWindow(every: 5s, fn: mean) \n",
624 | "refId": "A"
625 | },
626 | {
627 | "datasource": {
628 | "type": "influxdb",
629 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
630 | },
631 | "hide": false,
632 | "query": "from(bucket: \"piaxe\")\n |> range(start: v.timeRangeStart)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"hashing_speed\")\n |> aggregateWindow(every: 25m, fn: mean) \n",
633 | "refId": "B"
634 | }
635 | ],
636 | "title": "Hash Rate GH/s",
637 | "type": "timeseries"
638 | },
639 | {
640 | "datasource": {
641 | "type": "influxdb",
642 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
643 | },
644 | "fieldConfig": {
645 | "defaults": {
646 | "color": {
647 | "mode": "palette-classic"
648 | },
649 | "custom": {
650 | "axisBorderShow": false,
651 | "axisCenteredZero": false,
652 | "axisColorMode": "text",
653 | "axisLabel": "",
654 | "axisPlacement": "auto",
655 | "barAlignment": 0,
656 | "drawStyle": "line",
657 | "fillOpacity": 0,
658 | "gradientMode": "none",
659 | "hideFrom": {
660 | "legend": false,
661 | "tooltip": false,
662 | "viz": false
663 | },
664 | "insertNulls": false,
665 | "lineInterpolation": "linear",
666 | "lineStyle": {
667 | "fill": "solid"
668 | },
669 | "lineWidth": 1,
670 | "pointSize": 5,
671 | "scaleDistribution": {
672 | "type": "linear"
673 | },
674 | "showPoints": "never",
675 | "spanNulls": false,
676 | "stacking": {
677 | "group": "A",
678 | "mode": "none"
679 | },
680 | "thresholdsStyle": {
681 | "mode": "line"
682 | }
683 | },
684 | "mappings": [],
685 | "max": 75,
686 | "min": 25,
687 | "thresholds": {
688 | "mode": "absolute",
689 | "steps": [
690 | {
691 | "color": "green",
692 | "value": null
693 | },
694 | {
695 | "color": "#ffee0033",
696 | "value": 55
697 | },
698 | {
699 | "color": "#ff000033",
700 | "value": 65
701 | }
702 | ]
703 | }
704 | },
705 | "overrides": []
706 | },
707 | "gridPos": {
708 | "h": 6,
709 | "w": 8,
710 | "x": 0,
711 | "y": 15
712 | },
713 | "id": 1,
714 | "options": {
715 | "legend": {
716 | "calcs": [],
717 | "displayMode": "list",
718 | "placement": "bottom",
719 | "showLegend": true
720 | },
721 | "tooltip": {
722 | "mode": "single",
723 | "sort": "none"
724 | }
725 | },
726 | "targets": [
727 | {
728 | "datasource": {
729 | "type": "influxdb",
730 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
731 | },
732 | "query": "from(bucket: \"piaxe\")\n |> range(start: v.timeRangeStart)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"temperature\")\n|> aggregateWindow(every: 1m, fn: mean) ",
733 | "refId": "A"
734 | }
735 | ],
736 | "title": "Temperature",
737 | "type": "timeseries"
738 | },
739 | {
740 | "datasource": {
741 | "type": "influxdb",
742 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
743 | },
744 | "fieldConfig": {
745 | "defaults": {
746 | "color": {
747 | "mode": "palette-classic"
748 | },
749 | "custom": {
750 | "axisBorderShow": false,
751 | "axisCenteredZero": false,
752 | "axisColorMode": "text",
753 | "axisLabel": "",
754 | "axisPlacement": "auto",
755 | "barAlignment": 0,
756 | "drawStyle": "bars",
757 | "fillOpacity": 0,
758 | "gradientMode": "none",
759 | "hideFrom": {
760 | "legend": false,
761 | "tooltip": false,
762 | "viz": false
763 | },
764 | "insertNulls": false,
765 | "lineInterpolation": "linear",
766 | "lineWidth": 1,
767 | "pointSize": 5,
768 | "scaleDistribution": {
769 | "type": "linear"
770 | },
771 | "showPoints": "auto",
772 | "spanNulls": false,
773 | "stacking": {
774 | "group": "A",
775 | "mode": "none"
776 | },
777 | "thresholdsStyle": {
778 | "mode": "off"
779 | }
780 | },
781 | "mappings": [],
782 | "min": 0,
783 | "thresholds": {
784 | "mode": "absolute",
785 | "steps": [
786 | {
787 | "color": "green",
788 | "value": null
789 | },
790 | {
791 | "color": "red",
792 | "value": 80
793 | }
794 | ]
795 | }
796 | },
797 | "overrides": []
798 | },
799 | "gridPos": {
800 | "h": 6,
801 | "w": 8,
802 | "x": 8,
803 | "y": 15
804 | },
805 | "id": 14,
806 | "options": {
807 | "legend": {
808 | "calcs": [],
809 | "displayMode": "list",
810 | "placement": "bottom",
811 | "showLegend": true
812 | },
813 | "tooltip": {
814 | "mode": "single",
815 | "sort": "none"
816 | }
817 | },
818 | "targets": [
819 | {
820 | "datasource": {
821 | "type": "influxdb",
822 | "uid": "f79a7dc2-e573-47cc-a345-892d52f5f3d5"
823 | },
824 | "query": "from(bucket: \"piaxe\")\n |> range(start: v.timeRangeStart)\n |> filter(fn: (r) => r[\"_measurement\"] == \"${network}_stats\")\n |> filter(fn: (r) => r[\"_field\"] == \"pool_errors\")\n |> difference()\n ",
825 | "refId": "A"
826 | }
827 | ],
828 | "title": "Pool Errors",
829 | "type": "timeseries"
830 | }
831 | ],
832 | "refresh": "5s",
833 | "schemaVersion": 38,
834 | "tags": [],
835 | "templating": {
836 | "list": [
837 | {
838 | "current": {
839 | "selected": false,
840 | "text": "mainnet",
841 | "value": "mainnet"
842 | },
843 | "hide": 0,
844 | "includeAll": false,
845 | "multi": false,
846 | "name": "network",
847 | "options": [
848 | {
849 | "selected": true,
850 | "text": "mainnet",
851 | "value": "mainnet"
852 | },
853 | {
854 | "selected": false,
855 | "text": "testnet",
856 | "value": "testnet"
857 | },
858 | {
859 | "selected": false,
860 | "text": "regtest",
861 | "value": "regtest"
862 | }
863 | ],
864 | "query": "mainnet,testnet,regtest",
865 | "queryValue": "",
866 | "skipUrlSync": false,
867 | "type": "custom"
868 | }
869 | ]
870 | },
871 | "time": {
872 | "from": "now-6h",
873 | "to": "now"
874 | },
875 | "timepicker": {},
876 | "timezone": "",
877 | "title": "PiAxe Miner",
878 | "uid": "dd418553-e880-45b3-87af-f6fdee7959c1",
879 | "version": 79,
880 | "weekStart": ""
881 | }
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year> <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program> Copyright (C) <year> <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------