├── __init__.py
├── agents
│   ├── __init__.py
│   ├── utils.py
│   ├── models.py
│   └── policies.py
├── envs
│   ├── __init__.py
│   ├── functions.py
│   ├── real_net_env.py
│   └── env.py
├── evaluate.sh
├── README.md
├── weights
│   ├── checkpoint
│   ├── checkpoint-500316.index
│   ├── checkpoint-500316.meta
│   └── checkpoint-500316.data-00000-of-00001
├── train.sh
├── real_net
│   └── ma2c
│       ├── price_data
│       │   ├── reverse_line.py
│       │   ├── EUR_USD_2021.csv
│       │   └── EUR_USD.csv
│       └── data
│           └── config_ma2c_real.ini
├── config_ma2c_real.ini
├── get_bars.py
├── main.py
└── infer.py

/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/agents/__init__.py:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/envs/__init__.py:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/evaluate.sh:
--------------------------------------------------------------------------------
1 | python3 main.py --base-dir real_net evaluate --agents ma2c --evaluation-policy-type deterministic
2 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # mt5_AI_trading_bot
2 | Applies an LSTM neural network and reinforcement learning to trade Forex on MetaTrader 5 (MT5)
3 | 
--------------------------------------------------------------------------------
/weights/checkpoint:
--------------------------------------------------------------------------------
1 | model_checkpoint_path: "checkpoint-500316"
2 | all_model_checkpoint_paths: "checkpoint-500316"
3 | 
--------------------------------------------------------------------------------
/train.sh:
--------------------------------------------------------------------------------
1 | python3 main.py --base-dir real_net/ma2c train --config-dir config/config_ma2c_real.ini --test-mode no_test
2 | 
--------------------------------------------------------------------------------
/weights/checkpoint-500316.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nguyenviettuan96/mt5_AI_trading_bot/HEAD/weights/checkpoint-500316.index
--------------------------------------------------------------------------------
/weights/checkpoint-500316.meta:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nguyenviettuan96/mt5_AI_trading_bot/HEAD/weights/checkpoint-500316.meta
--------------------------------------------------------------------------------
/weights/checkpoint-500316.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nguyenviettuan96/mt5_AI_trading_bot/HEAD/weights/checkpoint-500316.data-00000-of-00001
--------------------------------------------------------------------------------
/real_net/ma2c/price_data/reverse_line.py:
--------------------------------------------------------------------------------
1 | import csv
2 | 
3 | with open("EURUSD_threemonths.csv") as fr, open("reverse_EURUSD_threemonths.csv", "w", newline="") as fw:
4 | cr = csv.reader(fr, delimiter=",")
5 | cw = csv.writer(fw, delimiter=",")
6 | cw.writerow(next(cr)) # write title as-is
7 | 
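# The raw export read above ("EURUSD_threemonths.csv" is assumed here to be a
# newest-first price download) is reversed so its rows come out oldest-first,
# i.e. the chronological order that getStockDataVec()/getState() in
# envs/functions.py walk through as time steps.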
cw.writerows(reversed(list(cr))) 8 | -------------------------------------------------------------------------------- /config_ma2c_real.ini: -------------------------------------------------------------------------------- 1 | [MODEL_CONFIG] 2 | rmsp_alpha = 0.99 3 | rmsp_epsilon = 1e-5 4 | max_grad_norm = 40 5 | gamma = 0.99 6 | lr_init = 5e-4 7 | lr_decay = constant 8 | entropy_coef_init = 0.01 9 | entropy_coef_min = 0.01 10 | entropy_decay = constant 11 | entropy_ratio = 0.5 12 | value_coef = 0.5 13 | num_fw = 128 14 | num_ft = 32 15 | num_lstm = 64 16 | num_fp = 64 17 | batch_size = 10 18 | reward_norm = 1.0 19 | reward_clip = 2.0 20 | 21 | [TRAIN_CONFIG] 22 | total_step = 2e6 23 | test_interval = 2e4 24 | log_interval = 1e4 25 | 26 | [ENV_CONFIG] 27 | clip_wave = 2.0 28 | clip_wait = 2.0 29 | ; agent is greedy, iqll, iqld, ia2c, ma2c, a2c. 30 | agent = ma2c 31 | ; coop discount is used to discount the neighbors' impact 32 | coop_gamma = 0.9 33 | data_path = ./real_net/data/ 34 | price_data = ./real_net/price_data/ 35 | key = EURUSD_lastmonth 36 | window_size = 10 37 | balance = 995 38 | ; the normailization is based on typical values in sim 39 | norm_wave = 5.0 40 | norm_wait = 30.0 41 | coef_wait = 0 42 | ; objective is chosen from queue, wait, hybrid 43 | objective = queue 44 | scenario = real_net 45 | seed = 42 46 | test_seeds = 10000,20000,30000 47 | yellow_interval_sec = 2 -------------------------------------------------------------------------------- /real_net/ma2c/data/config_ma2c_real.ini: -------------------------------------------------------------------------------- 1 | [MODEL_CONFIG] 2 | rmsp_alpha = 0.99 3 | rmsp_epsilon = 1e-5 4 | max_grad_norm = 40 5 | gamma = 0.99 6 | lr_init = 5e-4 7 | lr_decay = constant 8 | entropy_coef_init = 0.01 9 | entropy_coef_min = 0.01 10 | entropy_decay = constant 11 | entropy_ratio = 0.5 12 | value_coef = 0.5 13 | num_fw = 128 14 | num_ft = 32 15 | num_lstm = 64 16 | num_fp = 64 17 | batch_size = 10 18 | reward_norm = 1.0 19 | reward_clip = 2.0 20 | 21 | [TRAIN_CONFIG] 22 | total_step = 2e6 23 | test_interval = 2e4 24 | log_interval = 1e4 25 | 26 | [ENV_CONFIG] 27 | clip_wave = 2.0 28 | clip_wait = 2.0 29 | ; agent is greedy, iqll, iqld, ia2c, ma2c, a2c. 
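; NOTE: main.py and infer.py import only the MA2C model, so ma2c is the only
; agent value actually exercised in this repo; the other names in the comment
; above are inherited from the upstream deeprl_signal_control project this
; code is adapted from.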
30 | agent = ma2c 31 | ; coop discount is used to discount the neighbors' impact 32 | coop_gamma = 0.9 33 | data_path = ./real_net/data/ 34 | price_data = ./real_net/price_data/ 35 | ; key = ^GSPC_2011 36 | ; key = ^GSPC 37 | key = EURUSD_lastmonth 38 | window_size = 10 39 | balance = 10000 40 | ; the normailization is based on typical values in sim 41 | norm_wave = 5.0 42 | norm_wait = 30.0 43 | coef_wait = 0 44 | ; objective is chosen from queue, wait, hybrid 45 | objective = queue 46 | scenario = real_net 47 | seed = 42 48 | test_seeds = 10000,20000,30000 49 | yellow_interval_sec = 2 50 | -------------------------------------------------------------------------------- /envs/functions.py: -------------------------------------------------------------------------------- 1 | from inspect import indentsize 2 | import numpy as np 3 | import math 4 | 5 | # prints formatted price 6 | 7 | 8 | def formatPrice(n): 9 | return ("-$" if n < 0 else "$") + "{0:.2f}".format(abs(n)) 10 | 11 | 12 | def formatPercent(n): 13 | return "{0:.2f}".format(n*100) + "%" 14 | 15 | # returns the vector containing stock data from a fixed file 16 | 17 | 18 | def getStockDataVec(data_dir, key, index=1000): 19 | vec = [] 20 | lines = open(data_dir + key + ".csv", "r").read().splitlines() 21 | 22 | for line in lines[1:]: 23 | l1 = line.split(",")[4] # [1:7] --> not use by no have " " 24 | # vec.append(float(line.split(",")[1])*volume) 25 | vec.append(float(l1)*index) 26 | # list1 = [float(x) for x in l1] 27 | # print('num', float(l1)*volume) 28 | 29 | return vec 30 | 31 | # returns the sigmoid 32 | 33 | 34 | def sigmoid(x): 35 | return 1 / (1 + math.exp(-x)) 36 | 37 | # returns an an n-day state representation ending at time t 38 | 39 | 40 | def getState(data, t, n): 41 | d = t - n + 1 42 | block = data[d:t + 1] if d >= 0 else -d * \ 43 | [data[0]] + data[0:t + 1] # pad with t0 44 | res = [] 45 | for i in range(n - 1): 46 | res.append(sigmoid(block[i + 1] - block[i])) 47 | 48 | return np.array(res) # [res] 49 | -------------------------------------------------------------------------------- /get_bars.py: -------------------------------------------------------------------------------- 1 | import pytz 2 | from datetime import datetime 3 | import MetaTrader5 as mt5 4 | import pandas as pd 5 | pd.set_option('display.max_columns', 500) # number of columns to be displayed 6 | pd.set_option('display.width', 1500) # max table width to display 7 | # import pytz module for working with time zone 8 | 9 | # establish connection to MetaTrader 5 terminal 10 | mt5.initialize() 11 | # file name to export to csv 12 | file_name = 'EURUSD_H4_20220103_20220203.csv' 13 | # set time zone to UTC 14 | timezone = pytz.timezone("Etc/GMT-2") 15 | # create 'datetime' objects in UTC time zone to avoid the implementation of a local time zone offset 16 | utc_from = datetime(2022, 1, 3, tzinfo=timezone) 17 | utc_to = datetime(2022, 2, 3, tzinfo=timezone) 18 | # get bars from EURUSD H4 within the interval of 2021.05.03 00:00 - 2022.01.03 00:00 in GMT-2 time zone 19 | rates = mt5.copy_rates_range("EURUSD", mt5.TIMEFRAME_H4, utc_from, utc_to) 20 | 21 | # shut down connection to the MetaTrader 5 terminal 22 | mt5.shutdown() 23 | 24 | # display each element of obtained data in a new line 25 | print("Display obtained data 'as is'") 26 | counter = 0 27 | for rate in rates: 28 | counter += 1 29 | if counter <= 10: 30 | print(rate) 31 | 32 | # create DataFrame out of the obtained data 33 | rates_frame = pd.DataFrame(rates) 34 | # convert time in seconds into 
the 'datetime' format 35 | rates_frame['time'] = pd.to_datetime(rates_frame['time'], unit='s') 36 | 37 | # display data 38 | print("\nDisplay dataframe with data") 39 | print(rates_frame.head(10)) 40 | rates_frame.to_csv(file_name, encoding='utf-8', index=False) 41 | -------------------------------------------------------------------------------- /envs/real_net_env.py: -------------------------------------------------------------------------------- 1 | """ 2 | Particular class of real traffic network 3 | @author: Tianshu Chun 4 | """ 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | import os 8 | import seaborn as sns 9 | from envs.env import PhaseMap, PhaseSet, TrafficSimulator 10 | # from real_net.data.build_file import gen_rou_file 11 | 12 | import sys 13 | import os 14 | sys.path.append(os.path.abspath( 15 | "/home/smartcube/tuannguyen/deeprl_signal_control/envs/")) 16 | sys.path.append(os.path.abspath( 17 | "/home/smartcube/tuannguyen/deeprl_signal_control/real_net/data")) 18 | 19 | sns.set_color_codes() 20 | 21 | STATE_NAMES = ['wave'] 22 | # node: (phase key, neighbor list) 23 | NODES = {'long': ('3.0', []), 24 | 'short': ('3.1', [])} 25 | 26 | PHASES = { 27 | '3.0': ['hold', 'buy', 'sell'], 28 | '3.1': ['hold', 'sell', 'buy'] 29 | } 30 | 31 | 32 | class RealNetPhase(PhaseMap): 33 | def __init__(self): 34 | self.phases = {} 35 | for key, val in PHASES.items(): 36 | self.phases[key] = PhaseSet(val) 37 | 38 | 39 | class RealNetController: 40 | def __init__(self, node_names, nodes): 41 | self.name = 'greedy' 42 | self.node_names = node_names 43 | self.nodes = nodes 44 | 45 | def forward(self, obs): 46 | actions = [] 47 | for ob, node_name in zip(obs, self.node_names): 48 | actions.append(self.greedy(ob, node_name)) 49 | return actions 50 | 51 | def greedy(self, ob, node_name): 52 | # get the action space 53 | phases = PHASES[NODES[node_name][0]] 54 | flows = [] 55 | node = self.nodes[node_name] 56 | # get the green waves 57 | for phase in phases: 58 | wave = 0 59 | visited_ilds = set() 60 | for i, signal in enumerate(phase): 61 | if signal == 'G': 62 | # find controlled lane 63 | lane = node.lanes_in[i] 64 | # ild = 'ild:' + lane 65 | ild = lane 66 | # if it has not been counted, add the wave 67 | if ild not in visited_ilds: 68 | j = node.ilds_in.index(ild) 69 | wave += ob[j] 70 | visited_ilds.add(ild) 71 | flows.append(wave) 72 | return np.argmax(np.array(flows)) 73 | 74 | 75 | class RealNetEnv(TrafficSimulator): 76 | def __init__(self, config, port=0, output_path='', is_record=False, record_stat=False): 77 | super().__init__(config, output_path, is_record, record_stat, port=port) 78 | 79 | def _get_node_phase_id(self, node_name): 80 | return self.phase_node_map[node_name] 81 | 82 | def _init_neighbor_map(self): 83 | return dict([(key, val[1]) for key, val in NODES.items()]) 84 | 85 | def _init_map(self): 86 | self.neighbor_map = self._init_neighbor_map() 87 | self.phase_map = RealNetPhase() 88 | self.phase_node_map = dict([(key, val[0]) 89 | for key, val in NODES.items()]) 90 | self.state_names = STATE_NAMES 91 | 92 | # def _init_sim_config(self, seed): 93 | # # comment out to call build_file.py 94 | # return gen_rou_file(self.data_path, 95 | # self.flow_rate, 96 | # seed=seed, 97 | # thread=self.sim_thread) 98 | 99 | def plot_stat(self, rewards): 100 | self.state_stat['reward'] = rewards 101 | for name, data in self.state_stat.items(): 102 | fig = plt.figure(figsize=(8, 6)) 103 | plot_cdf(data) 104 | plt.ylabel(name) 105 | fig.savefig(self.output_path + self.name + '_' 
+ name + '.png') 106 | 107 | 108 | def plot_cdf(X, c='b', label=None): 109 | sorted_data = np.sort(X) 110 | yvals = np.arange(len(sorted_data))/float(len(sorted_data)-1) 111 | # print('sorted_data', sorted_data) 112 | plt.plot(sorted_data, yvals, color=c, label=label) 113 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Main function for training and evaluating agents in traffic envs 3 | @author: Tianshu Chu 4 | run command: 5 | 1. Train: python main.py --base-dir real_net/ma2c train --config-dir config/config_ma2c_real.ini --test-mode no_test 6 | 2. Visualize: python main.py --base-dir real_net evaluate --agents ma2c 7 | """ 8 | 9 | import argparse 10 | import configparser 11 | import logging 12 | import tensorflow.compat.v1 as tf 13 | import threading 14 | from envs.real_net_env import RealNetEnv, RealNetController 15 | from agents.models import MA2C 16 | from utils import (Counter, Trainer, Tester, Evaluator, 17 | check_dir, copy_file, find_file, 18 | init_dir, init_log, init_test_flag) 19 | 20 | 21 | def parse_args(): 22 | default_base_dir = '/Users/tchu/Documents/rl_test/signal_control_results/eval_sep2019/large_grid' 23 | default_config_dir = './config/config_test_large.ini' 24 | parser = argparse.ArgumentParser() 25 | parser.add_argument('--base-dir', type=str, required=False, 26 | default=default_base_dir, help="experiment base dir") 27 | subparsers = parser.add_subparsers(dest='option', help="train or evaluate") 28 | sp = subparsers.add_parser( 29 | 'train', help='train a single agent under base dir') 30 | sp.add_argument('--test-mode', type=str, required=False, 31 | default='no_test', 32 | help="test mode during training", 33 | choices=['no_test', 'in_train_test', 'after_train_test', 'all_test']) 34 | sp.add_argument('--config-dir', type=str, required=False, 35 | default=default_config_dir, help="experiment config path") 36 | sp = subparsers.add_parser( 37 | 'evaluate', help="evaluate and compare agents under base dir") 38 | sp.add_argument('--agents', type=str, required=False, 39 | default='naive', help="agent folder names for evaluation, split by ,") 40 | sp.add_argument('--evaluation-policy-type', type=str, required=False, default='default', 41 | help="inference policy type in evaluation: default, stochastic, or deterministic") 42 | args = parser.parse_args() 43 | if not args.option: 44 | parser.print_help() 45 | exit(1) 46 | return args 47 | 48 | 49 | def init_env(config, port=1, naive_policy=False): 50 | if not naive_policy: 51 | return RealNetEnv(config, port=port) 52 | else: 53 | env = RealNetEnv(config, port=port) 54 | policy = RealNetController(env.node_names, env.nodes) 55 | return env, policy 56 | 57 | 58 | def train(args): 59 | base_dir = args.base_dir 60 | dirs = init_dir(base_dir) 61 | init_log(dirs['log']) 62 | config_dir = args.config_dir 63 | copy_file(config_dir, dirs['data']) 64 | config = configparser.ConfigParser() 65 | config.read(config_dir) 66 | in_test, post_test = init_test_flag(args.test_mode) 67 | 68 | # init env 69 | env = init_env(config['ENV_CONFIG']) 70 | logging.info('Training: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' % 71 | (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls)) 72 | 73 | # init step counter 74 | total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step')) 75 | test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval')) 76 | log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval')) 77 
| global_counter = Counter(total_step, test_step, log_step) 78 | 79 | # init centralized or multi agent 80 | seed = config.getint('ENV_CONFIG', 'seed') 81 | model = MA2C(env.n_s_ls, env.n_a_ls, env.n_w_ls, env.n_f_ls, total_step, 82 | config['MODEL_CONFIG'], seed=seed) 83 | 84 | # disable multi-threading for safe SUMO implementation 85 | summary_writer = tf.summary.FileWriter(dirs['log']) 86 | trainer = Trainer(env, model, global_counter, 87 | summary_writer, in_test, output_path=dirs['data']) 88 | trainer.run() 89 | # post-training test 90 | if post_test: 91 | tester = Tester(env, model, global_counter, 92 | summary_writer, dirs['data']) 93 | tester.run_offline(dirs['data']) 94 | 95 | # save model 96 | final_step = global_counter.cur_step 97 | logging.info('Training: save final model at step %d ...' % final_step) 98 | model.save(dirs['model'], final_step) 99 | 100 | 101 | def evaluate_fn(agent_dir, output_dir, port, policy_type): 102 | agent = agent_dir.split('/')[-1] 103 | if not check_dir(agent_dir): 104 | logging.error('Evaluation: %s does not exist!' % agent) 105 | return 106 | # load config file for env 107 | config_dir = find_file(agent_dir + '/data/') 108 | if not config_dir: 109 | return 110 | config = configparser.ConfigParser() 111 | config.read(config_dir) 112 | 113 | # init env 114 | env = init_env(config['ENV_CONFIG'], port) 115 | logging.info('Evaluation: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' % 116 | (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls)) 117 | 118 | # load model for agent 119 | # init centralized or multi agent 120 | model = MA2C(env.n_s_ls, env.n_a_ls, env.n_w_ls, 121 | env.n_f_ls, 0, config['MODEL_CONFIG']) 122 | if not model.load(agent_dir + '/model/'): 123 | return 124 | print('agent', agent) 125 | print('env.agent', env.agent) 126 | env.agent = agent 127 | # collect evaluation data 128 | evaluator = Evaluator(env, model, output_dir, policy_type=policy_type) 129 | evaluator.run() 130 | 131 | 132 | def evaluate(args): 133 | base_dir = args.base_dir 134 | dirs = init_dir(base_dir, pathes=['eva_data', 'eva_log']) 135 | init_log(dirs['eva_log']) 136 | agents = args.agents.split(',') 137 | print('agents', agents) 138 | # enforce the same evaluation seeds across agents 139 | policy_type = args.evaluation_policy_type 140 | logging.info('Evaluation: policy type: %s' % 141 | (policy_type)) 142 | 143 | threads = [] 144 | for i, agent in enumerate(agents): 145 | print('agent', agent) 146 | agent_dir = base_dir + '/' + agent 147 | thread = threading.Thread(target=evaluate_fn, 148 | args=(agent_dir, dirs['eva_data'], i, policy_type)) 149 | thread.start() 150 | threads.append(thread) 151 | for thread in threads: 152 | thread.join() 153 | 154 | 155 | if __name__ == '__main__': 156 | args = parse_args() 157 | if args.option == 'train': 158 | train(args) 159 | else: 160 | evaluate(args) 161 | -------------------------------------------------------------------------------- /infer.py: -------------------------------------------------------------------------------- 1 | import MetaTrader5 as mt5 2 | import pandas as pd 3 | import numpy as np 4 | import math 5 | import argparse 6 | import configparser 7 | from envs.real_net_env import RealNetEnv, RealNetController 8 | from envs.functions import getState, formatPrice 9 | from agents.models import MA2C 10 | from utils import Predictor 11 | 12 | 13 | SYMBOL = "EURUSD" 14 | DEVIATION = 20 15 | TIMEFRAME = mt5.TIMEFRAME_H4 16 | VOLUME = 0.03 17 | PERIOD = 11 18 | 19 | 20 | def parse_args(): 21 | parser = argparse.ArgumentParser() 
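# NOTE on the arguments defined below: --config-dir defaults to the root
# config_ma2c_real.ini, and --policy-type mirrors --evaluation-policy-type in
# main.py. --position-type is declared with type=dict, which argparse cannot
# construct from a command-line string, so in practice only its default
# {'long': 1, 'short': -1} is usable.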
22 | parser.add_argument('--config-dir', type=str, required=False, 23 | default='config_ma2c_real.ini', help="inference config dir") 24 | parser.add_argument('--port', type=int, required=False, 25 | default=0, help="running port") 26 | parser.add_argument('--policy-type', type=str, required=False, default='default', 27 | help="inference policy type in evaluation: default, stochastic, or deterministic") 28 | parser.add_argument('--position-type', type=dict, required=False, 29 | default={'long': 1, 'short': -1}, help="types of position") 30 | args = parser.parse_args() 31 | return args 32 | 33 | 34 | def market_order(symbol, volume, order_type): 35 | tick = mt5.symbol_info_tick(symbol) 36 | 37 | order_dict = {'long': 0, 'short': 1} 38 | price_dict = {'long': tick.ask, 'short': tick.bid} 39 | 40 | request = { 41 | "action": mt5.TRADE_ACTION_DEAL, 42 | "symbol": symbol, 43 | "volume": volume, 44 | "type": order_dict[order_type], 45 | "price": price_dict[order_type], 46 | "deviation": DEVIATION, 47 | "magic": 100, 48 | "comment": "python market order", 49 | "type_time": mt5.ORDER_TIME_GTC, 50 | "type_filling": mt5.ORDER_FILLING_IOC, 51 | } 52 | 53 | order_result = mt5.order_send(request) 54 | print(order_result) 55 | 56 | return order_result 57 | 58 | 59 | # function to close an order base don ticket id 60 | def close_order(ticket): 61 | positions = mt5.positions_get() 62 | 63 | for pos in positions: 64 | tick = mt5.symbol_info_tick(pos.symbol) 65 | # 0 represents buy, 1 represents sell - inverting order_type to close the position 66 | type_dict = {0: 1, 1: 0} 67 | price_dict = {0: tick.ask, 1: tick.bid} 68 | 69 | if pos.ticket == ticket: 70 | request = { 71 | "action": mt5.TRADE_ACTION_DEAL, 72 | "position": pos.ticket, 73 | "symbol": pos.symbol, 74 | "volume": pos.volume, 75 | "type": type_dict[pos.type], 76 | "price": price_dict[pos.type], 77 | "deviation": DEVIATION, 78 | "magic": 100, 79 | "comment": "python close order", 80 | "type_time": mt5.ORDER_TIME_GTC, 81 | "type_filling": mt5.ORDER_FILLING_IOC, 82 | } 83 | order_result = mt5.order_send(request) 84 | print(order_result) 85 | 86 | return order_result 87 | 88 | return 'Ticket does not exist' 89 | 90 | 91 | def sigmoid(x): 92 | return 1 / (1 + math.exp(-x)) 93 | 94 | 95 | def _norm_clip_state(x, norm, clip=-1): 96 | x = x / norm 97 | return x if clip < 0 else np.clip(x, 0, clip) 98 | 99 | 100 | def getState(symbol, timeframe, period, index=1000): 101 | bars = mt5.copy_rates_from_pos(symbol, timeframe, 1, period) 102 | bars_df = pd.DataFrame(bars) 103 | vec = bars_df.close.tolist() 104 | vec = [x*index for x in vec] 105 | res = [] 106 | for i in range(period - 1): 107 | res.append(sigmoid(vec[i + 1] - vec[i])) 108 | 109 | return np.array(res) 110 | 111 | 112 | def init_env(config, port=1, naive_policy=False): 113 | if not naive_policy: 114 | return RealNetEnv(config, port=port) 115 | else: 116 | env = RealNetEnv(config, port=port) 117 | policy = RealNetController(env.node_names, env.nodes) 118 | return env, policy 119 | 120 | 121 | def data_preprocessing(cur_state, norm, clip, agents): 122 | # hard code the state ordering as wave, wait, fp 123 | state = [] 124 | # measure the most recent state 125 | norm_cur_state = _norm_clip_state(cur_state, norm, clip) 126 | # get the appropriate state vectors 127 | for _ in agents: 128 | # wave is required in state 129 | cur_state = [norm_cur_state] 130 | state.append(np.concatenate(cur_state)) 131 | 132 | return state 133 | 134 | 135 | def main(args): 136 | config_dir = args.config_dir 137 | port = 
args.port 138 | policy_type = args.policy_type 139 | agent_type = args.position_type 140 | 141 | # initialize start value 142 | inventory = {} 143 | open_ticket = {} 144 | for agent in [*agent_type]: 145 | inventory[agent] = [] 146 | open_ticket[agent] = [] 147 | total_profit = 0 148 | balance_list = [] 149 | pre_state = np.array([]) 150 | 151 | # load config file for env 152 | config = configparser.ConfigParser() 153 | config.read(config_dir) 154 | cur_balance = config['ENV_CONFIG'].getint('balance') 155 | norm = config['ENV_CONFIG'].getfloat('norm_wave') 156 | clip = config['ENV_CONFIG'].getfloat('clip_wave') 157 | 158 | # init env 159 | env = init_env(config['ENV_CONFIG'], port) 160 | # load model for agent 161 | # init centralized or multi agent 162 | model = MA2C(env.n_s_ls, env.n_a_ls, env.n_w_ls, 163 | env.n_f_ls, 0, config['MODEL_CONFIG']) 164 | model.load('weights/') 165 | model.reset() 166 | # collect evaluation data 167 | predictor = Predictor(env, model, policy_type=policy_type) 168 | # init mt5 169 | mt5.initialize() 170 | 171 | while True: 172 | cur_state = getState(symbol=SYMBOL, timeframe=TIMEFRAME, 173 | period=PERIOD) 174 | if not np.array_equal(cur_state, pre_state): 175 | state = data_preprocessing(cur_state, norm, clip, [*agent_type]) 176 | action = predictor.run(state) 177 | print('---ACTION--- :', action) 178 | tick = mt5.symbol_info_tick(SYMBOL) 179 | price_dict = {'long': tick.ask, 'short': tick.bid} 180 | 181 | for agent, a in zip([*agent_type], list(action)): 182 | if a == 1: 183 | market_order(SYMBOL, VOLUME, agent) 184 | inventory[agent].append(price_dict[agent]) 185 | open_ticket[agent].append(mt5.positions_get()[-1].ticket) 186 | 187 | elif a == 2 and len(inventory[agent]) > 0: 188 | close_order(open_ticket[agent].pop(0)) 189 | order_price = inventory[agent].pop(0) 190 | profit = (price_dict[agent] - order_price) * \ 191 | agent_type[agent] * VOLUME * 100000 192 | total_profit += profit 193 | cur_balance += profit 194 | balance_list.append(round(cur_balance, 2)) 195 | 196 | print("--------------------------------") 197 | print("Total Profit: " + formatPrice(total_profit)) 198 | print('Curent Balance: ' + formatPrice(cur_balance)) 199 | print('Balance List', balance_list) 200 | print("--------------------------------") 201 | 202 | pre_state = cur_state 203 | 204 | 205 | if __name__ == '__main__': 206 | args = parse_args() 207 | main(args) 208 | -------------------------------------------------------------------------------- /agents/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | import tensorflow as tf 4 | 5 | """ 6 | initializers 7 | """ 8 | DEFAULT_SCALE = np.sqrt(2) 9 | DEFAULT_MODE = 'fan_in' 10 | 11 | 12 | def ortho_init(scale=DEFAULT_SCALE, mode=None): 13 | def _ortho_init(shape, dtype, partition_info=None): 14 | # lasagne ortho init for tf 15 | shape = tuple(shape) 16 | if len(shape) == 2: # fc: in, out 17 | flat_shape = shape 18 | elif (len(shape) == 3) or (len(shape) == 4): # 1d/2dcnn: (in_h), in_w, in_c, out 19 | flat_shape = (np.prod(shape[:-1]), shape[-1]) 20 | a = np.random.standard_normal(flat_shape) 21 | u, _, v = np.linalg.svd(a, full_matrices=False) 22 | q = u if u.shape == flat_shape else v # pick the one with the correct shape 23 | q = q.reshape(shape) 24 | return (scale * q).astype(np.float32) 25 | return _ortho_init 26 | 27 | 28 | def norm_init(scale=DEFAULT_SCALE, mode=DEFAULT_MODE): 29 | def _norm_init(shape, dtype, partition_info=None): 30 | shape = 
tuple(shape) 31 | if len(shape) == 2: 32 | n_in = shape[0] 33 | elif (len(shape) == 3) or (len(shape) == 4): 34 | n_in = np.prod(shape[:-1]) 35 | a = np.random.standard_normal(shape) 36 | if mode == 'fan_in': 37 | n = n_in 38 | elif mode == 'fan_out': 39 | n = shape[-1] 40 | elif mode == 'fan_avg': 41 | n = 0.5 * (n_in + shape[-1]) 42 | return (scale * a / np.sqrt(n)).astype(np.float32) 43 | 44 | 45 | DEFAULT_METHOD = ortho_init 46 | """ 47 | layers 48 | """ 49 | 50 | 51 | def conv(x, scope, n_out, f_size, stride=1, pad='VALID', f_size_w=None, act=tf.nn.relu, 52 | conv_dim=1, init_scale=DEFAULT_SCALE, init_mode=None, init_method=DEFAULT_METHOD): 53 | with tf.variable_scope(scope): 54 | b = tf.get_variable( 55 | "b", [n_out], initializer=tf.constant_initializer(0.0)) 56 | if conv_dim == 1: 57 | n_c = x.shape[2].value 58 | w = tf.get_variable("w", [f_size, n_c, n_out], 59 | initializer=init_method(init_scale, init_mode)) 60 | z = tf.nn.conv1d(x, w, stride=stride, padding=pad) + b 61 | elif conv_dim == 2: 62 | n_c = x.shape[3].value 63 | if f_size_w is None: 64 | f_size_w = f_size 65 | w = tf.get_variable("w", [f_size, f_size_w, n_c, n_out], 66 | initializer=init_method(init_scale, init_mode)) 67 | z = tf.nn.conv2d( 68 | x, w, strides=[1, stride, stride, 1], padding=pad) + b 69 | return act(z) 70 | 71 | 72 | def fc(x, scope, n_out, act=tf.nn.relu, init_scale=DEFAULT_SCALE, 73 | init_mode=DEFAULT_MODE, init_method=DEFAULT_METHOD): 74 | with tf.compat.v1.variable_scope(scope): 75 | n_in = x.shape[1].value 76 | w = tf.compat.v1.get_variable("w", [n_in, n_out], 77 | initializer=init_method(init_scale, init_mode)) 78 | b = tf.compat.v1.get_variable( 79 | "b", [n_out], initializer=tf.constant_initializer(0.0)) 80 | z = tf.matmul(x, w) + b 81 | return act(z) 82 | 83 | 84 | def batch_to_seq(x): 85 | n_step = x.shape[0].value 86 | if len(x.shape) == 1: 87 | x = tf.expand_dims(x, -1) 88 | return tf.split(axis=0, num_or_size_splits=n_step, value=x) 89 | 90 | 91 | def seq_to_batch(x): 92 | return tf.concat(axis=0, values=x) 93 | 94 | 95 | def lstm(xs, dones, s, scope, init_scale=DEFAULT_SCALE, init_mode=DEFAULT_MODE, 96 | init_method=DEFAULT_METHOD): 97 | xs = batch_to_seq(xs) 98 | # need dones to reset states 99 | dones = batch_to_seq(dones) 100 | n_in = xs[0].shape[1].value 101 | n_out = s.shape[0] // 2 102 | with tf.compat.v1.variable_scope(scope): 103 | wx = tf.compat.v1.get_variable("wx", [n_in, n_out*4], 104 | initializer=init_method(init_scale, init_mode)) 105 | wh = tf.compat.v1.get_variable("wh", [n_out, n_out*4], 106 | initializer=init_method(init_scale, init_mode)) 107 | b = tf.compat.v1.get_variable( 108 | "b", [n_out*4], initializer=tf.constant_initializer(0.0)) 109 | s = tf.expand_dims(s, 0) 110 | c, h = tf.split(axis=1, num_or_size_splits=2, value=s) 111 | for ind, (x, done) in enumerate(zip(xs, dones)): 112 | c = c * (1-done) 113 | h = h * (1-done) 114 | z = tf.matmul(x, wx) + tf.matmul(h, wh) + b 115 | i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z) 116 | i = tf.nn.sigmoid(i) 117 | f = tf.nn.sigmoid(f) 118 | o = tf.nn.sigmoid(o) 119 | u = tf.tanh(u) 120 | c = f*c + i*u 121 | h = o*tf.tanh(c) 122 | xs[ind] = h 123 | s = tf.concat(axis=1, values=[c, h]) 124 | return seq_to_batch(xs), tf.squeeze(s) 125 | 126 | 127 | def test_layers(): 128 | print(tf.__version__) 129 | tf.reset_default_graph() 130 | sess = tf.Session() 131 | n_step = 5 132 | fc_x = tf.placeholder(tf.float32, [None, 10]) 133 | lstm_x = tf.placeholder(tf.float32, [n_step, 2]) 134 | lstm_done = 
tf.placeholder(tf.float32, [n_step]) 135 | lstm_s = tf.placeholder(tf.float32, [20]) 136 | conv1_x = tf.placeholder(tf.float32, [None, 8, 1]) 137 | conv2_x = tf.placeholder(tf.float32, [None, 8, 8, 1]) 138 | fc_out = fc(fc_x, 'fc', 10) 139 | lstm_out, lstm_ns = lstm(lstm_x, lstm_done, lstm_s, 'lstm') 140 | conv1_out = conv(conv1_x, 'conv1', 10, 4, conv_dim=1) 141 | conv2_out = conv(conv2_x, 'conv2', 10, 4, conv_dim=2) 142 | sess.run(tf.global_variables_initializer()) 143 | inputs = {'fc': {fc_x: np.random.randn(n_step, 10)}, 144 | 'lstm_done': {lstm_x: np.zeros((n_step, 2)), 145 | lstm_done: np.ones(n_step), 146 | lstm_s: np.random.randn(20)}, 147 | 'lstm': {lstm_x: np.random.randn(n_step, 2), 148 | lstm_done: np.zeros(n_step), 149 | lstm_s: np.random.randn(20)}, 150 | 'conv1': {conv1_x: np.random.randn(n_step, 8, 1)}, 151 | 'conv2': {conv2_x: np.random.randn(n_step, 8, 8, 1)}} 152 | outputs = {'fc': [fc_out], 'lstm_done': [lstm_out, lstm_ns], 153 | 'conv1': [conv1_out], 'conv2': [conv2_out], 154 | 'lstm': [lstm_out, lstm_ns]} 155 | for scope in ['fc', 'lstm', 'conv1', 'conv2']: 156 | print(scope) 157 | wts = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope) 158 | for wt in wts: 159 | wt_val = wt.eval(sess) 160 | print(wt_val.shape) 161 | print(np.mean(wt_val), np.std(wt_val), 162 | np.min(wt_val), np.max(wt_val)) 163 | print('=====================================') 164 | for x_name in inputs: 165 | print(x_name) 166 | out = sess.run(outputs[x_name], inputs[x_name]) 167 | if x_name.startswith('lstm'): 168 | print(out[0]) 169 | print(out[1]) 170 | else: 171 | print(out[0].shape) 172 | 173 | 174 | """ 175 | buffers 176 | """ 177 | 178 | 179 | class TransBuffer: 180 | def reset(self): 181 | self.buffer = [] 182 | 183 | @property 184 | def size(self): 185 | return len(self.buffer) 186 | 187 | def add_transition(self, ob, a, r, *_args, **_kwargs): 188 | raise NotImplementedError() 189 | 190 | def sample_transition(self, *_args, **_kwargs): 191 | raise NotImplementedError() 192 | 193 | 194 | class OnPolicyBuffer(TransBuffer): 195 | def __init__(self, gamma): 196 | self.gamma = gamma 197 | self.reset() 198 | 199 | def reset(self, done=False): 200 | # the done before each step is required 201 | self.obs = [] 202 | self.acts = [] 203 | self.rs = [] 204 | self.vs = [] 205 | self.dones = [done] 206 | 207 | def add_transition(self, ob, a, r, v, done): 208 | self.obs.append(ob) 209 | self.acts.append(a) 210 | self.rs.append(r) 211 | self.vs.append(v) 212 | self.dones.append(done) 213 | 214 | def _add_R_Adv(self, R): 215 | Rs = [] 216 | Advs = [] 217 | # use post-step dones here 218 | for r, v, done in zip(self.rs[::-1], self.vs[::-1], self.dones[:0:-1]): 219 | R = r + self.gamma * R * (1.-done) 220 | Adv = R - v 221 | Rs.append(R) 222 | Advs.append(Adv) 223 | Rs.reverse() 224 | Advs.reverse() 225 | self.Rs = Rs 226 | self.Advs = Advs 227 | 228 | def sample_transition(self, R, discrete=True): 229 | self._add_R_Adv(R) 230 | obs = np.array(self.obs, dtype=np.float32) 231 | if discrete: 232 | acts = np.array(self.acts, dtype=np.int32) 233 | else: 234 | acts = np.array(self.acts, dtype=np.float32) 235 | Rs = np.array(self.Rs, dtype=np.float32) 236 | Advs = np.array(self.Advs, dtype=np.float32) 237 | # use pre-step dones here 238 | dones = np.array(self.dones[:-1], dtype=np.bool) 239 | self.reset(self.dones[-1]) 240 | return obs, acts, dones, Rs, Advs 241 | 242 | 243 | class ReplayBuffer(TransBuffer): 244 | def __init__(self, buffer_size, batch_size): 245 | self.buffer_size = buffer_size 246 | 
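# cum_size (initialized just below) counts every transition ever added; once it
# reaches buffer_size, add_transition() overwrites the oldest slot at index
# cum_size % buffer_size, so the buffer acts as a fixed-size circular store.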
self.batch_size = batch_size 247 | self.cum_size = 0 248 | self.buffer = [] 249 | 250 | def add_transition(self, ob, a, r, next_ob, done): 251 | experience = (ob, a, r, next_ob, done) 252 | if self.cum_size < self.buffer_size: 253 | self.buffer.append(experience) 254 | else: 255 | ind = int(self.cum_size % self.buffer_size) 256 | self.buffer[ind] = experience 257 | self.cum_size += 1 258 | 259 | def reset(self): 260 | self.buffer = [] 261 | self.cum_size = 0 262 | 263 | def sample_transition(self): 264 | # Randomly sample batch_size examples 265 | minibatch = random.sample(self.buffer, self.batch_size) 266 | state_batch = np.asarray([data[0] for data in minibatch]) 267 | action_batch = np.asarray([data[1] for data in minibatch]) 268 | next_state_batch = np.asarray([data[3] for data in minibatch]) 269 | reward_batch = np.asarray([data[2] for data in minibatch]) 270 | done_batch = np.asarray([data[4] for data in minibatch]) 271 | return state_batch, action_batch, next_state_batch, reward_batch, done_batch 272 | 273 | @property 274 | def size(self): 275 | return min(self.buffer_size, self.cum_size) 276 | 277 | 278 | """ 279 | util functions 280 | """ 281 | 282 | 283 | class Scheduler: 284 | def __init__(self, val_init, val_min=0, total_step=0, decay='linear'): 285 | self.val = val_init 286 | self.N = float(total_step) 287 | self.val_min = val_min 288 | self.decay = decay 289 | self.n = 0 290 | 291 | def get(self, n_step): 292 | self.n += n_step 293 | if self.decay == 'linear': 294 | return max(self.val_min, self.val * (1 - self.n / self.N)) 295 | else: 296 | return self.val 297 | 298 | 299 | if __name__ == '__main__': 300 | test_layers() 301 | -------------------------------------------------------------------------------- /real_net/ma2c/price_data/EUR_USD_2021.csv: -------------------------------------------------------------------------------- 1 | "Ngày","Lần cuối","Mở","Cao","Thấp","% Thay đổi" 2 | 18/01/2021,1.2076,1.2085,1.2091,1.2054,-0.02% 3 | 19/01/2021,1.2128,1.2078,1.2146,1.2074,0.43% 4 | 20/01/2021,1.2104,1.2127,1.2158,1.2076,-0.20% 5 | 21/01/2021,1.2162,1.2106,1.2175,1.2103,0.48% 6 | 22/01/2021,1.2167,1.2164,1.2191,1.2151,0.04% 7 | 25/01/2021,1.2137,1.2168,1.2184,1.2115,-0.25% 8 | 26/01/2021,1.2160,1.2138,1.2177,1.2107,0.19% 9 | 27/01/2021,1.2108,1.2160,1.2170,1.2057,-0.43% 10 | 28/01/2021,1.2121,1.2109,1.2142,1.2079,0.11% 11 | 29/01/2021,1.2136,1.2121,1.2156,1.2093,0.12% 12 | 01/02/2021,1.2059,1.2140,1.2146,1.2054,-0.63% 13 | 02/02/2021,1.2042,1.2059,1.2089,1.2011,-0.14% 14 | 03/02/2021,1.2034,1.2044,1.2050,1.2003,-0.07% 15 | 04/02/2021,1.1962,1.2033,1.2043,1.1957,-0.60% 16 | 05/02/2021,1.2042,1.1964,1.2050,1.1952,0.67% 17 | 08/02/2021,1.2048,1.2042,1.2067,1.2019,0.05% 18 | 09/02/2021,1.2117,1.2048,1.2122,1.2045,0.57% 19 | 10/02/2021,1.2116,1.2119,1.2145,1.2108,-0.01% 20 | 11/02/2021,1.2128,1.2118,1.2150,1.2112,0.10% 21 | 12/02/2021,1.2118,1.2130,1.2135,1.2081,-0.08% 22 | 15/02/2021,1.2127,1.2123,1.2146,1.2116,0.07% 23 | 16/02/2021,1.2104,1.2130,1.2170,1.2095,-0.19% 24 | 17/02/2021,1.2036,1.2107,1.2111,1.2023,-0.56% 25 | 18/02/2021,1.2088,1.2037,1.2096,1.2035,0.43% 26 | 19/02/2021,1.2117,1.2091,1.2145,1.2082,0.24% 27 | 22/02/2021,1.2154,1.2117,1.2170,1.2091,0.31% 28 | 23/02/2021,1.2149,1.2155,1.2181,1.2135,-0.04% 29 | 24/02/2021,1.2164,1.2150,1.2176,1.2109,0.12% 30 | 25/02/2021,1.2176,1.2163,1.2243,1.2155,0.10% 31 | 26/02/2021,1.2074,1.2176,1.2185,1.2062,-0.84% 32 | 01/03/2021,1.2047,1.2079,1.2102,1.2027,-0.22% 33 | 02/03/2021,1.2089,1.2049,1.2095,1.1991,0.35% 34 | 
03/03/2021,1.2062,1.2091,1.2113,1.2042,-0.22% 35 | 04/03/2021,1.1966,1.2063,1.2068,1.1961,-0.80% 36 | 05/03/2021,1.1917,1.1968,1.1978,1.1894,-0.41% 37 | 08/03/2021,1.1843,1.1922,1.1933,1.1844,-0.62% 38 | 09/03/2021,1.1898,1.1847,1.1917,1.1835,0.46% 39 | 10/03/2021,1.1925,1.1900,1.1931,1.1868,0.23% 40 | 11/03/2021,1.1984,1.1928,1.1990,1.1916,0.49% 41 | 12/03/2021,1.1952,1.1983,1.1990,1.1910,-0.27% 42 | 15/03/2021,1.1928,1.1952,1.1968,1.1911,-0.20% 43 | 16/03/2021,1.1900,1.1929,1.1953,1.1882,-0.23% 44 | 17/03/2021,1.1978,1.1906,1.1986,1.1885,0.66% 45 | 18/03/2021,1.1915,1.1979,1.1990,1.1906,-0.53% 46 | 19/03/2021,1.1903,1.1915,1.1939,1.1874,-0.10% 47 | 22/03/2021,1.1931,1.1899,1.1947,1.1871,0.24% 48 | 23/03/2021,1.1849,1.1935,1.1941,1.1841,-0.69% 49 | 24/03/2021,1.1812,1.1851,1.1854,1.1809,-0.31% 50 | 25/03/2021,1.1764,1.1813,1.1829,1.1761,-0.41% 51 | 26/03/2021,1.1796,1.1763,1.1805,1.1763,0.27% 52 | 29/03/2021,1.1762,1.1795,1.1797,1.1760,-0.29% 53 | 30/03/2021,1.1714,1.1764,1.1774,1.1711,-0.41% 54 | 31/03/2021,1.1728,1.1716,1.1760,1.1703,0.12% 55 | 01/04/2021,1.1775,1.1731,1.1781,1.1712,0.40% 56 | 02/04/2021,1.1762,1.1777,1.1788,1.1748,-0.11% 57 | 05/04/2021,1.1811,1.1761,1.1820,1.1738,0.42% 58 | 06/04/2021,1.1875,1.1814,1.1878,1.1795,0.54% 59 | 07/04/2021,1.1870,1.1875,1.1916,1.1861,-0.04% 60 | 08/04/2021,1.1912,1.1867,1.1928,1.1861,0.35% 61 | 09/04/2021,1.1896,1.1913,1.1921,1.1867,-0.13% 62 | 12/04/2021,1.1909,1.1891,1.1920,1.1870,0.11% 63 | 13/04/2021,1.1946,1.1911,1.1956,1.1878,0.31% 64 | 14/04/2021,1.1978,1.1948,1.1988,1.1947,0.27% 65 | 15/04/2021,1.1965,1.1980,1.1995,1.1955,-0.11% 66 | 16/04/2021,1.1983,1.1967,1.1996,1.1950,0.15% 67 | 19/04/2021,1.2033,1.1984,1.2049,1.1942,0.42% 68 | 20/04/2021,1.2033,1.2036,1.2081,1.2022,0.00% 69 | 21/04/2021,1.2033,1.2037,1.2045,1.1998,0.00% 70 | 22/04/2021,1.2015,1.2035,1.2070,1.1993,-0.15% 71 | 23/04/2021,1.2099,1.2015,1.2100,1.2012,0.70% 72 | 26/04/2021,1.2083,1.2093,1.2118,1.2061,-0.13% 73 | 27/04/2021,1.2090,1.2086,1.2093,1.2056,0.06% 74 | 28/04/2021,1.2123,1.2091,1.2135,1.2055,0.27% 75 | 29/04/2021,1.2118,1.2126,1.2150,1.2102,-0.04% 76 | 30/04/2021,1.2018,1.2116,1.2127,1.2016,-0.83% 77 | 03/05/2021,1.2061,1.2031,1.2076,1.2012,0.36% 78 | 04/05/2021,1.2013,1.2064,1.2066,1.1998,-0.40% 79 | 05/05/2021,1.2005,1.2013,1.2027,1.1985,-0.07% 80 | 06/05/2021,1.2064,1.2005,1.2072,1.1992,0.49% 81 | 07/05/2021,1.2163,1.2066,1.2172,1.2053,0.82% 82 | 10/05/2021,1.2129,1.2161,1.2178,1.2127,-0.28% 83 | 11/05/2021,1.2146,1.2129,1.2182,1.2122,0.14% 84 | 12/05/2021,1.2069,1.2148,1.2153,1.2065,-0.63% 85 | 13/05/2021,1.2078,1.2073,1.2108,1.2050,0.07% 86 | 14/05/2021,1.2140,1.2080,1.2150,1.2070,0.51% 87 | 17/05/2021,1.2151,1.2140,1.2170,1.2126,0.09% 88 | 18/05/2021,1.2220,1.2153,1.2234,1.2151,0.57% 89 | 19/05/2021,1.2172,1.2222,1.2246,1.2160,-0.39% 90 | 20/05/2021,1.2226,1.2171,1.2230,1.2169,0.44% 91 | 21/05/2021,1.2179,1.2228,1.2241,1.2161,-0.38% 92 | 24/05/2021,1.2215,1.2178,1.2230,1.2172,0.30% 93 | 25/05/2021,1.2250,1.2217,1.2267,1.2211,0.29% 94 | 26/05/2021,1.2190,1.2251,1.2263,1.2182,-0.49% 95 | 27/05/2021,1.2192,1.2192,1.2216,1.2175,0.02% 96 | 28/05/2021,1.2189,1.2195,1.2205,1.2132,-0.02% 97 | 31/05/2021,1.2225,1.2192,1.2233,1.2183,0.30% 98 | 01/06/2021,1.2211,1.2227,1.2255,1.2211,-0.11% 99 | 02/06/2021,1.2209,1.2214,1.2227,1.2163,-0.02% 100 | 03/06/2021,1.2124,1.2210,1.2217,1.2118,-0.70% 101 | 04/06/2021,1.2165,1.2129,1.2186,1.2104,0.34% 102 | 07/06/2021,1.2189,1.2168,1.2203,1.2145,0.20% 103 | 08/06/2021,1.2171,1.2190,1.2196,1.2163,-0.15% 104 | 
09/06/2021,1.2178,1.2173,1.2219,1.2170,0.06% 105 | 10/06/2021,1.2169,1.2181,1.2195,1.2142,-0.07% 106 | 11/06/2021,1.2106,1.2170,1.2196,1.2092,-0.52% 107 | 14/06/2021,1.2118,1.2110,1.2131,1.2093,0.10% 108 | 15/06/2021,1.2124,1.2120,1.2149,1.2100,0.05% 109 | 16/06/2021,1.1994,1.2127,1.2135,1.1993,-1.07% 110 | 17/06/2021,1.1906,1.1996,1.2007,1.1891,-0.73% 111 | 18/06/2021,1.1860,1.1908,1.1926,1.1846,-0.39% 112 | 21/06/2021,1.1915,1.1868,1.1922,1.1847,0.46% 113 | 22/06/2021,1.1938,1.1918,1.1954,1.1881,0.19% 114 | 23/06/2021,1.1925,1.1939,1.1970,1.1911,-0.11% 115 | 24/06/2021,1.1930,1.1927,1.1957,1.1918,0.04% 116 | 25/06/2021,1.1933,1.1932,1.1976,1.1925,0.03% 117 | 28/06/2021,1.1924,1.1935,1.1946,1.1902,-0.08% 118 | 29/06/2021,1.1895,1.1926,1.1932,1.1877,-0.24% 119 | 30/06/2021,1.1855,1.1897,1.1910,1.1845,-0.34% 120 | 01/07/2021,1.1848,1.1859,1.1885,1.1837,-0.06% 121 | 02/07/2021,1.1864,1.1849,1.1875,1.1806,0.14% 122 | 05/07/2021,1.1861,1.1862,1.1882,1.1851,-0.03% 123 | 06/07/2021,1.1822,1.1863,1.1896,1.1806,-0.33% 124 | 07/07/2021,1.1789,1.1823,1.1839,1.1782,-0.28% 125 | 08/07/2021,1.1842,1.1792,1.1868,1.1783,0.45% 126 | 09/07/2021,1.1873,1.1844,1.1882,1.1825,0.26% 127 | 12/07/2021,1.1859,1.1872,1.1881,1.1836,-0.12% 128 | 13/07/2021,1.1774,1.1862,1.1876,1.1772,-0.72% 129 | 14/07/2021,1.1835,1.1776,1.1839,1.1772,0.52% 130 | 15/07/2021,1.1812,1.1837,1.1852,1.1796,-0.19% 131 | 16/07/2021,1.1805,1.1814,1.1823,1.1791,-0.06% 132 | 19/07/2021,1.1798,1.1808,1.1825,1.1763,-0.06% 133 | 20/07/2021,1.1779,1.1801,1.1805,1.1755,-0.16% 134 | 21/07/2021,1.1792,1.1782,1.1805,1.1752,0.11% 135 | 22/07/2021,1.1770,1.1793,1.1831,1.1757,-0.19% 136 | 23/07/2021,1.1771,1.1769,1.1788,1.1753,0.01% 137 | 26/07/2021,1.1799,1.1778,1.1818,1.1763,0.24% 138 | 27/07/2021,1.1814,1.1804,1.1841,1.1769,0.13% 139 | 28/07/2021,1.1842,1.1817,1.1851,1.1773,0.24% 140 | 29/07/2021,1.1886,1.1845,1.1894,1.1840,0.37% 141 | 30/07/2021,1.1870,1.1888,1.1910,1.1851,-0.13% 142 | 02/08/2021,1.1867,1.1864,1.1897,1.1858,-0.03% 143 | 03/08/2021,1.1860,1.1868,1.1894,1.1853,-0.06% 144 | 04/08/2021,1.1835,1.1864,1.1901,1.1832,-0.21% 145 | 05/08/2021,1.1832,1.1837,1.1858,1.1827,-0.03% 146 | 06/08/2021,1.1760,1.1834,1.1837,1.1753,-0.61% 147 | 09/08/2021,1.1737,1.1762,1.1769,1.1734,-0.20% 148 | 10/08/2021,1.1718,1.1738,1.1744,1.1710,-0.16% 149 | 11/08/2021,1.1737,1.1720,1.1753,1.1705,0.16% 150 | 12/08/2021,1.1727,1.1738,1.1749,1.1724,-0.09% 151 | 13/08/2021,1.1791,1.1730,1.1805,1.1727,0.55% 152 | 16/08/2021,1.1777,1.1794,1.1802,1.1767,-0.12% 153 | 17/08/2021,1.1708,1.1776,1.1786,1.1708,-0.59% 154 | 18/08/2021,1.1710,1.1710,1.1743,1.1694,0.02% 155 | 19/08/2021,1.1674,1.1711,1.1717,1.1666,-0.31% 156 | 20/08/2021,1.1696,1.1675,1.1705,1.1664,0.19% 157 | 23/08/2021,1.1743,1.1694,1.1751,1.1691,0.40% 158 | 24/08/2021,1.1753,1.1746,1.1766,1.1726,0.09% 159 | 25/08/2021,1.1770,1.1754,1.1775,1.1725,0.14% 160 | 26/08/2021,1.1750,1.1772,1.1781,1.1746,-0.17% 161 | 27/08/2021,1.1793,1.1752,1.1803,1.1734,0.37% 162 | 30/08/2021,1.1795,1.1793,1.1810,1.1782,0.02% 163 | 31/08/2021,1.1807,1.1797,1.1848,1.1795,0.10% 164 | 01/09/2021,1.1837,1.1810,1.1858,1.1793,0.25% 165 | 02/09/2021,1.1873,1.1839,1.1877,1.1834,0.30% 166 | 03/09/2021,1.1882,1.1875,1.1910,1.1866,0.08% 167 | 06/09/2021,1.1868,1.1879,1.1888,1.1855,-0.12% 168 | 07/09/2021,1.1839,1.1870,1.1885,1.1837,-0.24% 169 | 08/09/2021,1.1813,1.1840,1.1852,1.1802,-0.22% 170 | 09/09/2021,1.1825,1.1817,1.1842,1.1804,0.10% 171 | 10/09/2021,1.1814,1.1826,1.1852,1.1809,-0.09% 172 | 13/09/2021,1.1808,1.1813,1.1818,1.1769,-0.05% 173 
| 14/09/2021,1.1802,1.1812,1.1846,1.1799,-0.05% 174 | 15/09/2021,1.1815,1.1806,1.1833,1.1798,0.11% 175 | 16/09/2021,1.1764,1.1816,1.1822,1.1751,-0.43% 176 | 17/09/2021,1.1725,1.1762,1.1789,1.1724,-0.33% 177 | 20/09/2021,1.1725,1.1732,1.1737,1.1700,0.00% 178 | 21/09/2021,1.1723,1.1726,1.1751,1.1715,-0.02% 179 | 22/09/2021,1.1686,1.1726,1.1757,1.1683,-0.32% 180 | 23/09/2021,1.1736,1.1686,1.1751,1.1683,0.43% 181 | 24/09/2021,1.1714,1.1738,1.1748,1.1700,-0.19% 182 | 27/09/2021,1.1694,1.1724,1.1728,1.1684,-0.17% 183 | 28/09/2021,1.1681,1.1696,1.1704,1.1667,-0.11% 184 | 29/09/2021,1.1595,1.1683,1.1691,1.1589,-0.74% 185 | 30/09/2021,1.1581,1.1596,1.1611,1.1562,-0.12% 186 | 01/10/2021,1.1594,1.1582,1.1608,1.1563,0.11% 187 | 04/10/2021,1.1621,1.1593,1.1641,1.1587,0.23% 188 | 05/10/2021,1.1596,1.1623,1.1623,1.1580,-0.22% 189 | 06/10/2021,1.1556,1.1603,1.1605,1.1529,-0.34% 190 | 07/10/2021,1.1550,1.1556,1.1573,1.1547,-0.05% 191 | 08/10/2021,1.1567,1.1551,1.1586,1.1540,0.15% 192 | 11/10/2021,1.1551,1.1571,1.1588,1.1549,-0.14% 193 | 12/10/2021,1.1527,1.1555,1.1571,1.1524,-0.21% 194 | 13/10/2021,1.1592,1.1528,1.1598,1.1527,0.56% 195 | 14/10/2021,1.1594,1.1593,1.1625,1.1583,0.02% 196 | 15/10/2021,1.1600,1.1599,1.1621,1.1588,0.05% 197 | 18/10/2021,1.1609,1.1597,1.1624,1.1571,0.08% 198 | 19/10/2021,1.1632,1.1611,1.1670,1.1608,0.20% 199 | 20/10/2021,1.1649,1.1634,1.1659,1.1616,0.15% 200 | 21/10/2021,1.1621,1.1650,1.1668,1.1619,-0.24% 201 | 22/10/2021,1.1647,1.1623,1.1656,1.1621,0.22% 202 | 25/10/2021,1.1606,1.1644,1.1666,1.1590,-0.35% 203 | 26/10/2021,1.1595,1.1607,1.1626,1.1585,-0.09% 204 | 27/10/2021,1.1604,1.1597,1.1626,1.1585,0.08% 205 | 28/10/2021,1.1679,1.1605,1.1693,1.1581,0.65% 206 | 29/10/2021,1.1561,1.1680,1.1690,1.1535,-1.01% 207 | 01/11/2021,1.1606,1.1559,1.1610,1.1545,0.39% 208 | 02/11/2021,1.1577,1.1605,1.1615,1.1574,-0.25% 209 | 03/11/2021,1.1610,1.1579,1.1617,1.1561,0.29% 210 | 04/11/2021,1.1552,1.1611,1.1617,1.1528,-0.50% 211 | 05/11/2021,1.1566,1.1553,1.1574,1.1513,0.12% 212 | 08/11/2021,1.1586,1.1555,1.1595,1.1550,0.17% 213 | 09/11/2021,1.1591,1.1588,1.1609,1.1569,0.04% 214 | 10/11/2021,1.1478,1.1593,1.1596,1.1475,-0.97% 215 | 11/11/2021,1.1450,1.1482,1.1488,1.1443,-0.24% 216 | 12/11/2021,1.1450,1.1454,1.1463,1.1432,0.00% 217 | 15/11/2021,1.1367,1.1452,1.1465,1.1356,-0.72% 218 | 16/11/2021,1.1319,1.1368,1.1386,1.1309,-0.42% 219 | 17/11/2021,1.1319,1.1321,1.1333,1.1264,0.00% 220 | 18/11/2021,1.1369,1.1319,1.1375,1.1314,0.44% 221 | 19/11/2021,1.1289,1.1371,1.1374,1.1250,-0.70% 222 | 22/11/2021,1.1234,1.1292,1.1293,1.1230,-0.49% 223 | 23/11/2021,1.1246,1.1237,1.1276,1.1226,0.11% 224 | 24/11/2021,1.1197,1.1248,1.1256,1.1185,-0.44% 225 | 25/11/2021,1.1206,1.1198,1.1231,1.1198,0.08% 226 | 26/11/2021,1.1317,1.1209,1.1331,1.1205,0.99% 227 | 29/11/2021,1.1291,1.1309,1.1316,1.1258,-0.23% 228 | 30/11/2021,1.1336,1.1291,1.1384,1.1236,0.40% 229 | 01/12/2021,1.1319,1.1339,1.1360,1.1302,-0.15% 230 | 02/12/2021,1.1299,1.1320,1.1349,1.1294,-0.18% 231 | 03/12/2021,1.1313,1.1301,1.1334,1.1265,0.12% 232 | 06/12/2021,1.1285,1.1321,1.1321,1.1266,-0.25% 233 | 07/12/2021,1.1263,1.1287,1.1301,1.1228,-0.19% 234 | 08/12/2021,1.1342,1.1267,1.1356,1.1264,0.70% 235 | 09/12/2021,1.1292,1.1342,1.1348,1.1278,-0.44% 236 | 10/12/2021,1.1311,1.1293,1.1325,1.1264,0.17% 237 | 13/12/2021,1.1283,1.1316,1.1321,1.1259,-0.25% 238 | 14/12/2021,1.1257,1.1285,1.1327,1.1253,-0.23% 239 | 15/12/2021,1.1285,1.1259,1.1300,1.1221,0.25% 240 | 16/12/2021,1.1328,1.1286,1.1361,1.1280,0.38% 241 | 17/12/2021,1.1239,1.1330,1.1350,1.1235,-0.79% 
242 | 20/12/2021,1.1275,1.1242,1.1305,1.1234,0.32% 243 | 21/12/2021,1.1283,1.1277,1.1303,1.1260,0.07% 244 | 22/12/2021,1.1324,1.1279,1.1343,1.1264,0.36% 245 | 23/12/2021,1.1328,1.1325,1.1343,1.1289,0.04% 246 | 24/12/2021,1.1317,1.1330,1.1344,1.1303,-0.10% 247 | 27/12/2021,1.1325,1.1318,1.1335,1.1302,0.07% 248 | 28/12/2021,1.1309,1.1329,1.1335,1.1289,-0.14% 249 | 29/12/2021,1.1348,1.1310,1.1370,1.1273,0.34% 250 | 30/12/2021,1.1323,1.1349,1.1360,1.1298,-0.22% 251 | 31/12/2021,1.1368,1.1324,1.1387,1.1302,0.40% 252 | 03/01/2022,1.1294,1.1365,1.1381,1.1279,-0.65% 253 | 04/01/2022,1.1285,1.1298,1.1323,1.1272,-0.08% 254 | 05/01/2022,1.1313,1.1287,1.1347,1.1277,0.25% 255 | 06/01/2022,1.1291,1.1314,1.1332,1.1285,-0.19% 256 | 07/01/2022,1.1359,1.1293,1.1365,1.1289,0.60% 257 | 10/01/2022,1.1324,1.1353,1.1362,1.1285,-0.31% 258 | 11/01/2022,1.1364,1.1325,1.1375,1.1313,0.35% 259 | 12/01/2022,1.1442,1.1366,1.1453,1.1354,0.69% 260 | 13/01/2022,1.1453,1.1443,1.1482,1.1435,0.10% 261 | 14/01/2022,1.1414,1.1454,1.1484,1.1398,-0.34% 262 | 17/01/2022,1.1407,1.1414,1.1436,1.1392,-0.06% 263 | 18/01/2022,1.1378,1.1410,1.1422,1.1363,-0.26% 264 | -------------------------------------------------------------------------------- /envs/env.py: -------------------------------------------------------------------------------- 1 | """ 2 | Traffic network simulator w/ defined sumo files 3 | @author: Tianshu Chu 4 | """ 5 | from envs.functions import getState, getStockDataVec, formatPrice, formatPercent 6 | import xml.etree.cElementTree as ET 7 | import subprocess 8 | import pandas as pd 9 | import numpy as np 10 | import logging 11 | import sys 12 | import os 13 | os.environ['SUMO_HOME'] = "/usr/share/sumo" 14 | sys.path.append(os.path.join(os.environ.get("SUMO_HOME"), 'tools')) 15 | 16 | 17 | DEFAULT_PORT = 8000 18 | SEC_IN_MS = 1000 19 | 20 | # hard code real-net reward norm 21 | REALNET_REWARD_NORM = 20 22 | 23 | 24 | class PhaseSet: 25 | def __init__(self, phases): 26 | self.num_phase = len(phases) 27 | self.num_lane = len(phases[0]) 28 | self.phases = phases 29 | # self._init_phase_set() 30 | 31 | @staticmethod 32 | def _get_phase_lanes(phase, signal='r'): 33 | phase_lanes = [] 34 | for i, l in enumerate(phase): 35 | if l == signal: 36 | phase_lanes.append(i) 37 | return phase_lanes 38 | 39 | def _init_phase_set(self): 40 | self.red_lanes = [] 41 | # self.green_lanes = [] 42 | for phase in self.phases: 43 | self.red_lanes.append(self._get_phase_lanes(phase)) 44 | # self.green_lanes.append(self._get_phase_lanes(phase, signal='G')) 45 | 46 | 47 | class PhaseMap: 48 | def __init__(self): 49 | self.phases = {} 50 | 51 | def get_phase(self, phase_id, action): 52 | # phase_type is either green or yellow 53 | return self.phases[phase_id].phases[int(action)] 54 | 55 | def get_phase_num(self, phase_id): 56 | return self.phases[phase_id].num_phase 57 | 58 | def get_lane_num(self, phase_id): 59 | # the lane number is link number 60 | return self.phases[phase_id].num_lane 61 | 62 | def get_red_lanes(self, phase_id, action): 63 | # the lane number is link number 64 | return self.phases[phase_id].red_lanes[int(action)] 65 | 66 | 67 | class Node: 68 | def __init__(self, name, neighbor=[], control=False): 69 | self.control = control # disabled 70 | self.lanes_in = [] 71 | self.ilds_in = [] # for state 72 | self.fingerprint = [] # local policy 73 | self.name = name 74 | self.neighbor = neighbor 75 | self.num_state = 0 # wave and wait should have the same dim 76 | self.num_fingerprint = 0 77 | self.wave_state = [] # local state 78 | 
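# In this trading adaptation each Node is a position direction ('long' or
# 'short', see NODES in envs/real_net_env.py) rather than a traffic
# intersection; wave_state ends up holding the normalized sigmoid
# price-difference window, while the traffic-specific fields (lanes_in,
# ilds_in, the wait state) appear to be leftovers from the original
# signal-control code.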
self.wait_state = [] # local state 79 | self.phase_id = -1 80 | self.n_a = 0 81 | # self.prev_action = -1 82 | 83 | 84 | class TrafficSimulator: 85 | def __init__(self, config, output_path, is_record, record_stats, port=0): 86 | self.name = config.get('scenario') 87 | self.control_interval_sec = config.getint('control_interval_sec') 88 | self.yellow_interval_sec = config.getint('yellow_interval_sec') 89 | self.port = DEFAULT_PORT + port 90 | self.sim_thread = port 91 | self.obj = config.get('objective') 92 | self.data_path = config.get('data_path') 93 | self.price_data = config.get('price_data') 94 | self.key = config.get('key') 95 | self.window_size = config.getint('window_size') 96 | self.balance = config.getint('balance') 97 | self.agent = config.get('agent') 98 | self.coop_gamma = config.getfloat('coop_gamma') 99 | self.cur_episode = 0 100 | self.norms = {'wave': config.getfloat('norm_wave'), 101 | 'wait': config.getfloat('norm_wait')} 102 | self.clips = {'wave': config.getfloat('clip_wave'), 103 | 'wait': config.getfloat('clip_wait')} 104 | self.coef_wait = config.getfloat('coef_wait') 105 | self.train_mode = True 106 | test_seeds = config.get('test_seeds').split(',') 107 | test_seeds = [int(s) for s in test_seeds] 108 | 109 | self.data = getStockDataVec(self.price_data, self.key) 110 | self.l = len(self.data) - 1 111 | self.t = 0 112 | self.n = self.window_size + 1 113 | self.inventory = {} 114 | self.agent_type = {'long': 1, 'short': -1} 115 | self.total_profit = 0 116 | # self.drawdown = [] 117 | self.balance_list = [] 118 | self.cur_balance = self.balance 119 | 120 | self._init_map() 121 | self.init_data(is_record, record_stats, output_path) 122 | self.init_test_seeds(test_seeds) 123 | self._init_nodes() 124 | 125 | def _get_node_phase_id(self, node_name): 126 | # needs to be overwriteen 127 | raise NotImplementedError() 128 | 129 | def _get_state(self): 130 | # hard code the state ordering as wave, wait, fp 131 | state = [] 132 | # measure the most recent state 133 | self._measure_state_step() 134 | 135 | # get the appropriate state vectors 136 | for node_name in self.node_names: 137 | # node_name 10026 138 | node = self.nodes[node_name] 139 | # wave is required in state 140 | cur_state = [node.wave_state] 141 | # include wave states of neighbors 142 | for nnode_name in node.neighbor: 143 | # discount the neigboring states 144 | cur_state.append( 145 | self.nodes[nnode_name].wave_state * self.coop_gamma) 146 | # include wait state 147 | if 'wait' in self.state_names: 148 | cur_state.append(node.wait_state) 149 | # include fingerprints of neighbors 150 | for nnode_name in node.neighbor: 151 | cur_state.append(self.nodes[nnode_name].fingerprint) 152 | state.append(np.concatenate(cur_state)) 153 | 154 | return state 155 | 156 | def _init_nodes(self): 157 | nodes = {} 158 | trafficlight_id = ['long', 'short'] 159 | for node_name in trafficlight_id: 160 | if node_name in self.neighbor_map: 161 | neighbor = self.neighbor_map[node_name] 162 | else: 163 | logging.info('node %s can not be found!' 
% node_name) 164 | neighbor = [] 165 | nodes[node_name] = Node(node_name, 166 | neighbor=neighbor, 167 | control=True) 168 | # Init inventory 169 | self.inventory[node_name] = [] 170 | 171 | self.nodes = nodes 172 | self.node_names = sorted(list(nodes.keys())) 173 | s = 'Env: init %d node information:\n' % len(self.node_names) 174 | for node in self.nodes.values(): 175 | s += node.name + ':\n' 176 | s += '\tneigbor: %r\n' % node.neighbor 177 | # s += '\tlanes_in: %r\n' % node.lanes_in 178 | s += '\tilds_in: %r\n' % node.ilds_in 179 | # s += '\tedges_in: %r\n' % node.edges_in 180 | logging.info(s) 181 | self._init_action_space() 182 | self._init_state_space() 183 | 184 | def _init_action_space(self): 185 | # for local and neighbor coop level 186 | self.n_a_ls = [] 187 | for node_name in self.node_names: 188 | node = self.nodes[node_name] 189 | # phase_id 3.0 190 | phase_id = self._get_node_phase_id(node_name) 191 | node.phase_id = phase_id 192 | node.n_a = self.phase_map.get_phase_num(phase_id) 193 | self.n_a_ls.append(node.n_a) 194 | # for global coop level 195 | self.n_a = np.prod(np.array(self.n_a_ls)) 196 | 197 | def _init_map(self): 198 | # needs to be overwriteen 199 | self.neighbor_map = None 200 | self.phase_map = None 201 | self.state_names = None 202 | raise NotImplementedError() 203 | 204 | def _init_policy(self): 205 | policy = [] 206 | for node_name in self.node_names: 207 | phase_num = self.nodes[node_name].n_a 208 | p = 1. / phase_num 209 | policy.append(np.array([p] * phase_num)) 210 | return policy 211 | 212 | def _init_state_space(self): 213 | self._reset_state() 214 | self.n_s_ls = [] 215 | self.n_w_ls = [] 216 | self.n_f_ls = [] 217 | for node_name in self.node_names: 218 | node = self.nodes[node_name] 219 | # num_wave = node.num_state 220 | num_wave = self.window_size 221 | num_fingerprint = 0 222 | for nnode_name in node.neighbor: 223 | if self.agent not in ['a2c', 'greedy']: 224 | # all marl agents have neighborhood communication 225 | num_wave += self.nodes[nnode_name].num_state 226 | if self.agent == 'ma2c': 227 | # only ma2c uses neighbor's policy 228 | num_fingerprint += self.nodes[nnode_name].num_fingerprint 229 | num_wait = 0 if 'wait' not in self.state_names else node.num_state 230 | self.n_s_ls.append(num_wave + num_wait + num_fingerprint) 231 | self.n_f_ls.append(num_fingerprint) 232 | self.n_w_ls.append(num_wait) 233 | self.n_s = np.sum(np.array(self.n_s_ls)) 234 | 235 | def _measure_reward_step(self, action): 236 | rewards = [] 237 | for node_name, a in zip(self.node_names, list(action)): 238 | reward = 0 239 | if a == 1: # buy 240 | self.inventory[node_name].append(self.data[self.t]) 241 | # print("Buy: " + formatPrice(self.data[self.t])) 242 | 243 | elif a == 2 and len(self.inventory[node_name]) > 0: # sell 244 | order_price = self.inventory[node_name].pop(0) 245 | profit = (self.data[self.t] - order_price) * \ 246 | self.agent_type[node_name] 247 | reward = max(profit, 0) 248 | # if node_name == 'short': 249 | self.total_profit += profit 250 | 251 | # if profit < 0: 252 | # self.drawdown.append(-profit/self.cur_balance) 253 | self.cur_balance += profit 254 | self.balance_list.append(round(self.cur_balance, 2)) 255 | # print("Sell: " + formatPrice(self.data[self.t]) + " | Profit: " + formatPrice( 256 | # self.data[self.t] - bought_price)) 257 | rewards.append(reward) 258 | 259 | return np.array(rewards) 260 | 261 | def _measure_state_step(self): 262 | for node_name in self.node_names: 263 | node = self.nodes[node_name] 264 | for state_name in 
self.state_names: 265 | cur_state = getState(self.data, self.t, self.n) 266 | if self.record_stats: 267 | self.state_stat[state_name] += list(cur_state) 268 | # normalization 269 | norm_cur_state = self._norm_clip_state(cur_state, 270 | self.norms[state_name], 271 | self.clips[state_name]) 272 | node.wave_state = norm_cur_state 273 | 274 | def _measure_traffic_step(self): 275 | cur_traffic = {'episode': self.cur_episode, 276 | 'time_sec': self.t 277 | } 278 | 279 | self.traffic_data.append(cur_traffic) 280 | 281 | @staticmethod 282 | def _norm_clip_state(x, norm, clip=-1): 283 | x = x / norm 284 | return x if clip < 0 else np.clip(x, 0, clip) 285 | 286 | def _reset_state(self): 287 | for node_name in self.node_names: 288 | node = self.nodes[node_name] 289 | # prev action for yellow phase before each switch 290 | node.prev_action = 0 291 | # fingerprint is previous policy[:-1] 292 | node.num_fingerprint = node.n_a - 1 293 | # node.num_state = self._get_node_state_num(node) 294 | 295 | def _simulate(self): 296 | self.t += 1 297 | if self.is_record: 298 | self._measure_traffic_step() 299 | 300 | def _transfer_action(self, action): 301 | '''Transfer global action to a list of local actions''' 302 | phase_nums = [] 303 | for node in self.control_node_names: 304 | phase_nums.append(self.nodes[node].phase_num) 305 | action_ls = [] 306 | for i in range(len(phase_nums) - 1): 307 | action, cur_action = divmod(action, phase_nums[i]) 308 | action_ls.append(cur_action) 309 | action_ls.append(action) 310 | return action_ls 311 | 312 | def _update_waits(self, action): 313 | for node_name, a in zip(self.node_names, action): 314 | red_lanes = set() 315 | node = self.nodes[node_name] 316 | for i in self.phase_map.get_red_lanes(node.phase_id, a): 317 | red_lanes.add(node.lanes_in[i]) 318 | for i in range(len(node.waits)): 319 | lane = node.ilds_in[i] 320 | if lane in red_lanes: 321 | node.waits[i] += self.control_interval_sec 322 | else: 323 | node.waits[i] = 0 324 | 325 | def init_data(self, is_record, record_stats, output_path): 326 | self.is_record = is_record 327 | self.record_stats = record_stats 328 | self.output_path = output_path 329 | if self.is_record: 330 | self.traffic_data = [] 331 | self.control_data = [] 332 | self.trip_data = [] 333 | if self.record_stats: 334 | self.state_stat = {} 335 | for state_name in self.state_names: 336 | self.state_stat[state_name] = [] 337 | 338 | def init_test_seeds(self, test_seeds): 339 | self.test_num = len(test_seeds) 340 | self.test_seeds = test_seeds 341 | 342 | def output_data(self): 343 | if not self.is_record: 344 | logging.error('Env: no record to output!') 345 | control_data = pd.DataFrame(self.control_data) 346 | control_data.to_csv(self.output_path + 347 | ('%s_%s_control.csv' % (self.name, self.agent))) 348 | traffic_data = pd.DataFrame(self.traffic_data) 349 | traffic_data.to_csv(self.output_path + 350 | ('%s_%s_traffic.csv' % (self.name, self.agent))) 351 | trip_data = pd.DataFrame(self.trip_data) 352 | trip_data.to_csv(self.output_path + ('%s_%s_trip.csv' % 353 | (self.name, self.agent))) 354 | 355 | def reset(self): 356 | self._reset_state() 357 | self.t = 0 358 | self.cur_episode += 1 359 | # initialize fingerprint 360 | self.update_fingerprint(self._init_policy()) 361 | # next environment random condition should be different 362 | self.inventory = dict.fromkeys(self.inventory, []) 363 | self.total_profit = 0 364 | # self.drawdown = [] 365 | self.balance_list = [] 366 | self.cur_balance = self.balance 367 | return self._get_state() 368 | 369 | 
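    # Note on the trading step below: each element of `action` is a local
    # action for one agent ('long' or 'short'); a == 1 opens a position at the
    # current price, a == 2 closes the oldest open position (FIFO), anything
    # else holds. The per-agent reward from _measure_reward_step() is the
    # realized profit clipped at zero, with profit signed by the agent's
    # direction (+1 for 'long', -1 for 'short').
    # Illustrative arithmetic (made-up prices): if 'long' buys at 1.1200 and
    # later sells at 1.1250, profit = (1.1250 - 1.1200) * 1 = 0.0050 and
    # reward = max(0.0050, 0) = 0.0050.
    # In train mode the rewards are then mixed with the neighbors' rewards
    # discounted by coop_gamma; the episode ends once the price series is
    # exhausted (self.t == self.l - 1).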
    def step(self, action):
370 |         state = self._get_state()
371 |         reward = self._measure_reward_step(action)
372 |         global_reward = np.sum(reward)  # for fair comparison
373 |         # New t for new price
374 |         self.t += 1
375 |         # self._simulate()
376 |         done = True if self.t == self.l - 1 else False
377 | 
378 |         if done:
379 |             print("--------------------------------")
380 |             print("Total Profit: " + formatPrice(self.total_profit))
381 |             # print('Max Drawdown: ' + formatPercent(max(self.drawdown)))
382 |             print('Balance List', self.balance_list)
383 |             print('Final Balance: ' + formatPrice(self.cur_balance))
384 |             print("--------------------------------")
385 | 
386 |         if self.is_record:
387 |             action_r = ','.join(['%d' % a for a in action])
388 |             cur_control = {'episode': self.cur_episode,
389 |                            'time_sec': self.t,
390 |                            'total_profit': formatPrice(self.total_profit),
391 |                            'action': action_r,
392 |                            'reward': global_reward}
393 |             self.control_data.append(cur_control)
394 | 
395 |         # use local rewards in test
396 |         if not self.train_mode:
397 |             return state, reward, done, global_reward
398 | 
399 |         # discounted global reward for ma2c
400 |         new_reward = []
401 |         for node_name, r in zip(self.node_names, reward):
402 |             cur_reward = r
403 |             for nnode_name in self.nodes[node_name].neighbor:
404 |                 i = self.node_names.index(nnode_name)
405 |                 cur_reward += self.coop_gamma * reward[i]
406 |             if self.name != 'real_net':
407 |                 new_reward.append(cur_reward)
408 |             else:
409 |                 n_node = 1 + len(self.nodes[node_name].neighbor)
410 |                 new_reward.append(
411 |                     cur_reward / (n_node * REALNET_REWARD_NORM))
412 |         reward = np.array(new_reward)
413 |         return state, reward, done, global_reward
414 | 
415 |     def update_fingerprint(self, policy):
416 |         for node_name, pi in zip(self.node_names, policy):
417 |             self.nodes[node_name].fingerprint = np.array(pi)[:-1]
418 | 
--------------------------------------------------------------------------------
/agents/models.py:
--------------------------------------------------------------------------------
1 | """
2 | A2C, IA2C, MA2C models
3 | @author: Tianshu Chu
4 | """
5 | 
6 | import os
7 | from agents.utils import *
8 | from agents.policies import *
9 | import logging
10 | import multiprocessing as mp
11 | import numpy as np
12 | import tensorflow.compat.v1 as tf
13 | 
14 | 
15 | class A2C:
16 |     def __init__(self, n_s, n_a, total_step, model_config, seed=0, n_f=None):
17 |         # load parameters
18 |         self.name = 'a2c'
19 |         self.n_agent = 1
20 |         # init reward norm/clip
21 |         self.reward_clip = model_config.getfloat('reward_clip')
22 |         self.reward_norm = model_config.getfloat('reward_norm')
23 |         self.n_s = n_s
24 |         self.n_a = n_a
25 |         self.n_step = model_config.getint('batch_size')
26 |         # init tf
27 |         tf.reset_default_graph()
28 |         tf.set_random_seed(seed)
29 |         config = tf.ConfigProto(allow_soft_placement=True)
30 |         self.sess = tf.Session(config=config)
31 |         self.policy = self._init_policy(n_s, n_a, 0, n_f, model_config)  # n_w=0: no separate wait input in the single-agent case
32 |         self.saver = tf.train.Saver(max_to_keep=5)
33 |         if total_step:
34 |             # training
35 |             self.total_step = total_step
36 |             self._init_scheduler(model_config)
37 |             self._init_train(model_config)
38 |         self.sess.run(tf.global_variables_initializer())
39 | 
40 |     def _init_policy(self, n_s, n_a, n_w, n_f, model_config, agent_name=None):
41 |         n_fw = model_config.getint('num_fw')
42 |         n_ft = model_config.getint('num_ft')
43 |         n_lstm = model_config.getint('num_lstm')
44 |         n_fp = model_config.getint('num_fp')
45 |         policy = FPLstmACPolicy(n_s, n_a, n_w, n_f, self.n_step, n_fc_wave=n_fw,
46 |                                 n_fc_wait=n_ft,
n_fc_fp=n_fp, n_lstm=n_lstm, name=agent_name) 47 | return policy 48 | 49 | def _init_scheduler(self, model_config): 50 | lr_init = model_config.getfloat('lr_init') 51 | lr_decay = model_config.get('lr_decay') 52 | beta_init = model_config.getfloat('entropy_coef_init') 53 | beta_decay = model_config.get('entropy_decay') 54 | if lr_decay == 'constant': 55 | self.lr_scheduler = Scheduler(lr_init, decay=lr_decay) 56 | else: 57 | lr_min = model_config.getfloat('LR_MIN') 58 | self.lr_scheduler = Scheduler( 59 | lr_init, lr_min, self.total_step, decay=lr_decay) 60 | if beta_decay == 'constant': 61 | self.beta_scheduler = Scheduler(beta_init, decay=beta_decay) 62 | else: 63 | beta_min = model_config.getfloat('ENTROPY_COEF_MIN') 64 | beta_ratio = model_config.getfloat('ENTROPY_RATIO') 65 | self.beta_scheduler = Scheduler(beta_init, beta_min, self.total_step * beta_ratio, 66 | decay=beta_decay) 67 | 68 | def _init_train(self, model_config): 69 | # init loss 70 | v_coef = model_config.getfloat('value_coef') 71 | max_grad_norm = model_config.getfloat('max_grad_norm') 72 | alpha = model_config.getfloat('rmsp_alpha') 73 | epsilon = model_config.getfloat('rmsp_epsilon') 74 | self.policy.prepare_loss(v_coef, max_grad_norm, alpha, epsilon) 75 | 76 | # init replay buffer 77 | gamma = model_config.getfloat('gamma') 78 | self.trans_buffer = OnPolicyBuffer(gamma) 79 | 80 | def save(self, model_dir, global_step): 81 | self.saver.save(self.sess, model_dir + 'checkpoint', 82 | global_step=global_step) 83 | 84 | def load(self, model_dir, checkpoint=None): 85 | save_file = None 86 | save_step = 0 87 | if os.path.exists(model_dir): 88 | if checkpoint is None: 89 | for file in os.listdir(model_dir): 90 | if file.startswith('checkpoint'): 91 | prefix = file.split('.')[0] 92 | tokens = prefix.split('-') 93 | if len(tokens) != 2: 94 | continue 95 | cur_step = int(tokens[1]) 96 | if cur_step > save_step: 97 | save_file = prefix 98 | save_step = cur_step 99 | else: 100 | save_file = 'checkpoint-' + str(int(checkpoint)) 101 | if save_file is not None: 102 | self.saver.restore(self.sess, model_dir + save_file) 103 | logging.info('Checkpoint loaded: %s' % save_file) 104 | return True 105 | logging.error('Can not find old checkpoint for %s' % model_dir) 106 | return False 107 | 108 | def reset(self): 109 | self.policy._reset() 110 | 111 | def backward(self, R, summary_writer=None, global_step=None): 112 | cur_lr = self.lr_scheduler.get(self.n_step) 113 | cur_beta = self.beta_scheduler.get(self.n_step) 114 | obs, acts, dones, Rs, Advs = self.trans_buffer.sample_transition(R) 115 | self.policy.backward(self.sess, obs, acts, dones, Rs, Advs, cur_lr, cur_beta, 116 | summary_writer=summary_writer, global_step=global_step) 117 | 118 | def forward(self, ob, done, out_type='pv'): 119 | return self.policy.forward(self.sess, ob, done, out_type) 120 | 121 | def add_transition(self, ob, action, reward, value, done): 122 | # Hard code the reward norm for negative reward only 123 | if (self.reward_norm): 124 | reward /= self.reward_norm 125 | if self.reward_clip: 126 | reward = np.clip(reward, -self.reward_clip, self.reward_clip) 127 | self.trans_buffer.add_transition(ob, action, reward, value, done) 128 | 129 | 130 | class IA2C(A2C): 131 | def __init__(self, n_s_ls, n_a_ls, n_w_ls, total_step, 132 | model_config, seed=0): 133 | self.name = 'ia2c' 134 | self.agents = [] 135 | self.n_agent = len(n_s_ls) 136 | self.reward_clip = model_config.getfloat('reward_clip') 137 | self.reward_norm = model_config.getfloat('reward_norm') 138 | 
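        # Per-agent dimensions: n_s_ls / n_a_ls / n_w_ls hold one entry per
        # agent, and each agent below gets its own recurrent policy built via
        # _init_policy() with a distinct agent_name ('0a', '1a', ...) so the
        # TF variable scopes do not collide.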
        self.n_s_ls = n_s_ls
139 |         self.n_a_ls = n_a_ls
140 |         self.n_w_ls = n_w_ls
141 |         self.n_step = model_config.getint('batch_size')
142 |         # init tf
143 |         tf.reset_default_graph()
144 |         tf.set_random_seed(seed)
145 |         config = tf.ConfigProto(allow_soft_placement=True)
146 |         self.sess = tf.Session(config=config)
147 |         self.policy_ls = []
148 |         for i, (n_s, n_w, n_a) in enumerate(zip(self.n_s_ls, self.n_w_ls, self.n_a_ls)):
149 |             # agent_name is needed to differentiate multi-agents
150 |             self.policy_ls.append(self._init_policy(n_s - n_w, n_a, n_w, 0, model_config,
151 |                                                     agent_name='{:d}a'.format(i)))
152 |         self.saver = tf.train.Saver(max_to_keep=5)
153 |         if total_step:
154 |             # training
155 |             self.total_step = total_step
156 |             self._init_scheduler(model_config)
157 |             self._init_train(model_config)
158 |         self.sess.run(tf.global_variables_initializer())
159 | 
160 |     def _init_train(self, model_config):
161 |         # init loss
162 |         v_coef = model_config.getfloat('value_coef')
163 |         max_grad_norm = model_config.getfloat('max_grad_norm')
164 |         alpha = model_config.getfloat('rmsp_alpha')
165 |         epsilon = model_config.getfloat('rmsp_epsilon')
166 |         gamma = model_config.getfloat('gamma')
167 |         self.trans_buffer_ls = []
168 |         for i in range(self.n_agent):
169 |             self.policy_ls[i].prepare_loss(
170 |                 v_coef, max_grad_norm, alpha, epsilon)
171 |             self.trans_buffer_ls.append(OnPolicyBuffer(gamma))
172 | 
173 |     def backward(self, R_ls, summary_writer=None, global_step=None):
174 |         cur_lr = self.lr_scheduler.get(self.n_step)
175 |         cur_beta = self.beta_scheduler.get(self.n_step)
176 |         for i in range(self.n_agent):
177 |             obs, acts, dones, Rs, Advs = self.trans_buffer_ls[i].sample_transition(
178 |                 R_ls[i])
179 |             # Check if len(mini_batch) = batch_size or not
180 |             if len(obs) == self.n_step:
181 |                 if i == 0:
182 |                     self.policy_ls[i].backward(self.sess, obs, acts, dones, Rs, Advs, cur_lr, cur_beta,
183 |                                                summary_writer=summary_writer, global_step=global_step)
184 |                 else:
185 |                     self.policy_ls[i].backward(
186 |                         self.sess, obs, acts, dones, Rs, Advs, cur_lr, cur_beta)
187 | 
188 |     def forward(self, obs, done, out_type='pv'):
189 |         if len(out_type) == 1:
190 |             out = []
191 |         elif len(out_type) == 2:
192 |             out1, out2 = [], []
193 |         for i in range(self.n_agent):
194 |             cur_out = self.policy_ls[i].forward(
195 |                 self.sess, obs[i], done, out_type)
196 |             if len(out_type) == 1:
197 |                 out.append(cur_out)
198 |             else:
199 |                 out1.append(cur_out[0])
200 |                 out2.append(cur_out[1])
201 |         if len(out_type) == 1:
202 |             return out
203 |         else:
204 |             return out1, out2
205 | 
206 |     def backward_mp(self, R_ls, summary_writer=None, global_step=None):
207 |         cur_lr = self.lr_scheduler.get(self.n_step)
208 |         cur_beta = self.beta_scheduler.get(self.n_step)
209 | 
210 |         def worker(i):
211 |             obs, acts, dones, Rs, Advs = self.trans_buffer_ls[i].sample_transition(
212 |                 R_ls[i])
213 |             self.policy_ls[i].backward(self.sess, obs, acts, dones, Rs, Advs, cur_lr, cur_beta,
214 |                                        summary_writer=summary_writer, global_step=global_step)
215 |         mps = []
216 |         for i in range(self.n_agent):
217 |             p = mp.Process(target=worker, args=(i,))  # args must be a tuple
218 |             p.start()
219 |             mps.append(p)
220 |         for p in mps:
221 |             p.join()
222 | 
223 |     def reset(self):
224 |         for policy in self.policy_ls:
225 |             policy._reset()
226 | 
227 |     def add_transition(self, obs, actions, rewards, values, done):
228 |         if (self.reward_norm):
229 |             rewards = rewards / self.reward_norm
230 |         if self.reward_clip:
231 |             rewards = np.clip(rewards, -self.reward_clip, self.reward_clip)
232 |         for i in range(self.n_agent):
233 | 
self.trans_buffer_ls[i].add_transition(obs[i], actions[i], 234 | rewards[i], values[i], done) 235 | 236 | 237 | class MA2C(IA2C): 238 | def __init__(self, n_s_ls, n_a_ls, n_w_ls, n_f_ls, total_step, 239 | model_config, seed=0): 240 | self.name = 'ma2c' 241 | self.agents = [] 242 | self.n_agent = len(n_s_ls) 243 | self.reward_clip = model_config.getfloat('reward_clip') 244 | self.reward_norm = model_config.getfloat('reward_norm') 245 | self.n_s_ls = n_s_ls 246 | self.n_a_ls = n_a_ls 247 | self.n_f_ls = n_f_ls 248 | self.n_w_ls = n_w_ls 249 | self.n_step = model_config.getint('batch_size') 250 | 251 | tf.reset_default_graph() 252 | tf.set_random_seed(seed) 253 | config = tf.ConfigProto(allow_soft_placement=True) 254 | self.sess = tf.Session(config=config) 255 | self.policy_ls = [] 256 | for i, (n_s, n_a, n_w, n_f) in enumerate(zip(self.n_s_ls, self.n_a_ls, self.n_w_ls, self.n_f_ls)): 257 | # agent_name is needed to differentiate multi-agents 258 | self.policy_ls.append(self._init_policy(n_s - n_f - n_w, n_a, n_w, n_f, model_config, 259 | agent_name='{:d}a'.format(i))) 260 | self.saver = tf.train.Saver(max_to_keep=5) 261 | if total_step: 262 | # training 263 | self.total_step = total_step 264 | self._init_scheduler(model_config) 265 | self._init_train(model_config) 266 | self.sess.run(tf.global_variables_initializer()) 267 | 268 | 269 | class IQL(A2C): 270 | def __init__(self, n_s_ls, n_a_ls, n_w_ls, total_step, model_config, seed=0, model_type='dqn'): 271 | self.name = 'iql' 272 | self.model_type = model_type 273 | self.agents = [] 274 | self.n_agent = len(n_s_ls) 275 | self.reward_clip = model_config.getfloat('reward_clip') 276 | self.reward_norm = model_config.getfloat('reward_norm') 277 | self.n_s_ls = n_s_ls 278 | self.n_a_ls = n_a_ls 279 | self.n_w_ls = n_w_ls 280 | self.n_step = model_config.getint('batch_size') 281 | # init tf 282 | tf.reset_default_graph() 283 | tf.set_random_seed(seed) 284 | config = tf.ConfigProto(allow_soft_placement=True) 285 | self.sess = tf.Session(config=config) 286 | self.policy_ls = [] 287 | for i, (n_s, n_a, n_w) in enumerate(zip(self.n_s_ls, self.n_a_ls, self.n_w_ls)): 288 | # agent_name is needed to differentiate multi-agents 289 | self.policy_ls.append(self._init_policy(n_s, n_a, n_w, model_config, 290 | agent_name='{:d}a'.format(i))) 291 | self.saver = tf.train.Saver(max_to_keep=5) 292 | if total_step: 293 | # training 294 | self.total_step = total_step 295 | self._init_scheduler(model_config) 296 | self._init_train(model_config) 297 | self.cur_step = 0 298 | self.sess.run(tf.global_variables_initializer()) 299 | 300 | def _init_policy(self, n_s, n_a, n_w, model_config, agent_name=None): 301 | if self.model_type == 'dqn': 302 | n_h = model_config.getint('num_h') 303 | n_fc = model_config.getint('num_fc') 304 | policy = DeepQPolicy(n_s - n_w, n_a, n_w, self.n_step, n_fc0=n_fc, n_fc=n_h, 305 | name=agent_name) 306 | else: 307 | policy = LRQPolicy(n_s, n_a, self.n_step, name=agent_name) 308 | return policy 309 | 310 | def _init_scheduler(self, model_config): 311 | lr_init = model_config.getfloat('lr_init') 312 | lr_decay = model_config.get('lr_decay') 313 | eps_init = model_config.getfloat('epsilon_init') 314 | eps_decay = model_config.get('epsilon_decay') 315 | if lr_decay == 'constant': 316 | self.lr_scheduler = Scheduler(lr_init, decay=lr_decay) 317 | else: 318 | lr_min = model_config.getfloat('lr_min') 319 | self.lr_scheduler = Scheduler( 320 | lr_init, lr_min, self.total_step, decay=lr_decay) 321 | if eps_decay == 'constant': 322 | self.eps_scheduler 
= Scheduler(eps_init, decay=eps_decay) 323 | else: 324 | eps_min = model_config.getfloat('epsilon_min') 325 | eps_ratio = model_config.getfloat('epsilon_ratio') 326 | self.eps_scheduler = Scheduler(eps_init, eps_min, self.total_step * eps_ratio, 327 | decay=eps_decay) 328 | 329 | def _init_train(self, model_config): 330 | # init loss 331 | max_grad_norm = model_config.getfloat('max_grad_norm') 332 | gamma = model_config.getfloat('gamma') 333 | buffer_size = model_config.getfloat('buffer_size') 334 | self.trans_buffer_ls = [] 335 | for i in range(self.n_agent): 336 | self.policy_ls[i].prepare_loss(max_grad_norm, gamma) 337 | self.trans_buffer_ls.append(ReplayBuffer(buffer_size, self.n_step)) 338 | 339 | def backward(self, summary_writer=None, global_step=None): 340 | cur_lr = self.lr_scheduler.get(self.n_step) 341 | if self.trans_buffer_ls[0].size < self.trans_buffer_ls[0].batch_size: 342 | return 343 | for i in range(self.n_agent): 344 | for k in range(10): 345 | obs, acts, next_obs, rs, dones = self.trans_buffer_ls[i].sample_transition( 346 | ) 347 | if i == 0: 348 | self.policy_ls[i].backward(self.sess, obs, acts, next_obs, dones, rs, cur_lr, 349 | summary_writer=summary_writer, 350 | global_step=global_step + k) 351 | else: 352 | self.policy_ls[i].backward( 353 | self.sess, obs, acts, next_obs, dones, rs, cur_lr) 354 | 355 | def forward(self, obs, mode='act', stochastic=False): 356 | if mode == 'explore': 357 | eps = self.eps_scheduler.get(1) 358 | action = [] 359 | qs_ls = [] 360 | for i in range(self.n_agent): 361 | qs = self.policy_ls[i].forward(self.sess, obs[i]) 362 | if (mode == 'explore') and (np.random.random() < eps): 363 | action.append(np.random.randint(self.n_a_ls[i])) 364 | else: 365 | if not stochastic: 366 | action.append(np.argmax(qs)) 367 | else: 368 | qs = qs / np.sum(qs) 369 | action.append(np.random.choice(np.arange(len(qs)), p=qs)) 370 | qs_ls.append(qs) 371 | return action, qs_ls 372 | 373 | def reset(self): 374 | # do nothing 375 | return 376 | 377 | def add_transition(self, obs, actions, rewards, next_obs, done): 378 | if (self.reward_norm): 379 | rewards = rewards / self.reward_norm 380 | if self.reward_clip: 381 | rewards = np.clip(rewards, -self.reward_clip, self.reward_clip) 382 | for i in range(self.n_agent): 383 | 384 | self.trans_buffer_ls[i].add_transition(obs[i], actions[i], 385 | rewards[i], next_obs[i], done) 386 | -------------------------------------------------------------------------------- /agents/policies.py: -------------------------------------------------------------------------------- 1 | from agents.utils import * 2 | import numpy as np 3 | # import tensorflow 4 | # import tensorflow.compat.v1 as tf 5 | # tf.disable_v2_behavior() 6 | import tensorflow as tf 7 | tf.compat.v1.disable_v2_behavior() 8 | 9 | 10 | class ACPolicy: 11 | def __init__(self, n_a, n_s, n_step, policy_name, agent_name): 12 | self.name = policy_name 13 | if agent_name is not None: 14 | # for multi-agent system 15 | self.name += '_' + str(agent_name) 16 | self.n_a = n_a 17 | self.n_s = n_s 18 | self.n_step = n_step 19 | 20 | def forward(self, ob, *_args, **_kwargs): 21 | raise NotImplementedError() 22 | 23 | def _build_out_net(self, h, out_type): 24 | if out_type == 'pi': 25 | pi = fc(h, out_type, self.n_a, act=tf.nn.softmax) 26 | return tf.squeeze(pi) 27 | else: 28 | v = fc(h, out_type, 1, act=lambda x: x) 29 | return tf.squeeze(v) 30 | 31 | def _get_forward_outs(self, out_type): 32 | outs = [] 33 | if 'p' in out_type: 34 | outs.append(self.pi) 35 | if 'v' in out_type: 36 
| outs.append(self.v) 37 | return outs 38 | 39 | def _return_forward_outs(self, out_values): 40 | if len(out_values) == 1: 41 | return out_values[0] 42 | return out_values 43 | 44 | def prepare_loss(self, v_coef, max_grad_norm, alpha, epsilon): 45 | self.A = tf.compat.v1.placeholder(tf.int32, [self.n_step]) 46 | self.ADV = tf.compat.v1.placeholder(tf.float32, [self.n_step]) 47 | self.R = tf.compat.v1.placeholder(tf.float32, [self.n_step]) 48 | self.entropy_coef = tf.compat.v1.placeholder(tf.float32, []) 49 | A_sparse = tf.one_hot(self.A, self.n_a) 50 | log_pi = tf.compat.v1.log(tf.clip_by_value(self.pi, 1e-10, 1.0)) 51 | entropy = -tf.reduce_sum(self.pi * log_pi, axis=1) 52 | entropy_loss = -tf.reduce_mean(entropy) * self.entropy_coef 53 | policy_loss = - \ 54 | tf.reduce_mean(tf.reduce_sum(log_pi * A_sparse, axis=1) * self.ADV) 55 | value_loss = tf.reduce_mean(tf.square(self.R - self.v)) * 0.5 * v_coef 56 | self.loss = policy_loss + value_loss + entropy_loss 57 | 58 | wts = tf.compat.v1.trainable_variables(scope=self.name) 59 | grads = tf.compat.v1.gradients(self.loss, wts) 60 | if max_grad_norm > 0: 61 | grads, self.grad_norm = tf.clip_by_global_norm( 62 | grads, max_grad_norm) 63 | self.lr = tf.compat.v1.placeholder(tf.float32, []) 64 | self.optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=self.lr, decay=alpha, 65 | epsilon=epsilon) 66 | self._train = self.optimizer.apply_gradients(list(zip(grads, wts))) 67 | # monitor training 68 | if self.name.endswith('_0a'): 69 | summaries = [] 70 | # summaries.append(tf.summary.scalar('loss/%s_entropy_loss' % self.name, entropy_loss)) 71 | summaries.append(tf.compat.v1.summary.scalar( 72 | 'loss/%s_policy_loss' % self.name, policy_loss)) 73 | summaries.append(tf.compat.v1.summary.scalar( 74 | 'loss/%s_value_loss' % self.name, value_loss)) 75 | summaries.append(tf.compat.v1.summary.scalar( 76 | 'loss/%s_total_loss' % self.name, self.loss)) 77 | # summaries.append(tf.summary.scalar('train/%s_lr' % self.name, self.lr)) 78 | # summaries.append(tf.summary.scalar('train/%s_entropy_beta' % self.name, self.entropy_coef)) 79 | summaries.append(tf.compat.v1.summary.scalar( 80 | 'train/%s_gradnorm' % self.name, self.grad_norm)) 81 | self.summary = tf.compat.v1.summary.merge(summaries) 82 | 83 | 84 | class LstmACPolicy(ACPolicy): 85 | def __init__(self, n_s, n_a, n_w, n_step, n_fc_wave=128, n_fc_wait=32, n_lstm=64, name=None): 86 | super().__init__(n_a, n_s, n_step, 'lstm', name) 87 | self.n_lstm = n_lstm 88 | self.n_fc_wait = n_fc_wait 89 | self.n_fc_wave = n_fc_wave 90 | self.n_w = n_w 91 | self.ob_fw = tf.compat.v1.placeholder( 92 | tf.float32, [1, n_s + n_w]) # forward 1-step 93 | self.done_fw = tf.compat.v1.placeholder(tf.float32, [1]) 94 | self.ob_bw = tf.compat.v1.placeholder( 95 | tf.float32, [n_step, n_s + n_w]) # backward n-step 96 | self.done_bw = tf.compat.v1.placeholder(tf.float32, [n_step]) 97 | self.states = tf.compat.v1.placeholder(tf.float32, [2, n_lstm * 2]) 98 | with tf.variable_scope(self.name): 99 | # pi and v use separate nets 100 | self.pi_fw, pi_state = self._build_net('forward', 'pi') 101 | self.v_fw, v_state = self._build_net('forward', 'v') 102 | pi_state = tf.expand_dims(pi_state, 0) 103 | v_state = tf.expand_dims(v_state, 0) 104 | self.new_states = tf.concat([pi_state, v_state], 0) 105 | with tf.variable_scope(self.name, reuse=True): 106 | self.pi, _ = self._build_net('backward', 'pi') 107 | self.v, _ = self._build_net('backward', 'v') 108 | self._reset() 109 | 110 | def _build_net(self, in_type, out_type): 111 | if 
in_type == 'forward': 112 | ob = self.ob_fw 113 | done = self.done_fw 114 | else: 115 | ob = self.ob_bw 116 | done = self.done_bw 117 | if out_type == 'pi': 118 | states = self.states[0] 119 | else: 120 | states = self.states[1] 121 | if self.n_w == 0: 122 | h = fc(ob, out_type + '_fcw', self.n_fc_wave) 123 | else: 124 | h0 = fc(ob[:, :self.n_s], out_type + '_fcw', self.n_fc_wave) 125 | h1 = fc(ob[:, self.n_s:], out_type + '_fct', self.n_fc_wait) 126 | h = tf.concat([h0, h1], 1) 127 | h, new_states = lstm(h, done, states, out_type + '_lstm') 128 | out_val = self._build_out_net(h, out_type) 129 | return out_val, new_states 130 | 131 | def _reset(self): 132 | # forget the cumulative states every cum_step 133 | self.states_fw = np.zeros((2, self.n_lstm * 2), dtype=np.float32) 134 | self.states_bw = np.zeros((2, self.n_lstm * 2), dtype=np.float32) 135 | 136 | def forward(self, sess, ob, done, out_type='pv'): 137 | outs = self._get_forward_outs(out_type) 138 | # update state only when p is called 139 | if 'p' in out_type: 140 | outs.append(self.new_states) 141 | out_values = sess.run(outs, {self.ob_fw: np.array([ob]), 142 | self.done_fw: np.array([done]), 143 | self.states: self.states_fw}) 144 | if 'p' in out_type: 145 | self.states_fw = out_values[-1] 146 | out_values = out_values[:-1] 147 | return self._return_forward_outs(out_values) 148 | 149 | def backward(self, sess, obs, acts, dones, Rs, Advs, cur_lr, cur_beta, 150 | summary_writer=None, global_step=None): 151 | if summary_writer is None: 152 | ops = self._train 153 | else: 154 | ops = [self.summary, self._train] 155 | outs = sess.run(ops, 156 | {self.ob_bw: obs, 157 | self.done_bw: dones, 158 | self.states: self.states_bw, 159 | self.A: acts, 160 | self.ADV: Advs, 161 | self.R: Rs, 162 | self.lr: cur_lr, 163 | self.entropy_coef: cur_beta}) 164 | self.states_bw = np.copy(self.states_fw) 165 | if summary_writer is not None: 166 | summary_writer.add_summary(outs[0], global_step=global_step) 167 | 168 | def _get_forward_outs(self, out_type): 169 | outs = [] 170 | if 'p' in out_type: 171 | outs.append(self.pi_fw) 172 | if 'v' in out_type: 173 | outs.append(self.v_fw) 174 | return outs 175 | 176 | 177 | class FPLstmACPolicy(LstmACPolicy): 178 | def __init__(self, n_s, n_a, n_w, n_f, n_step, n_fc_wave=128, n_fc_wait=32, n_fc_fp=32, n_lstm=64, name=None): 179 | ACPolicy.__init__(self, n_a, n_s, n_step, 'fplstm', name) 180 | self.n_lstm = n_lstm 181 | self.n_fc_wave = n_fc_wave 182 | self.n_fc_wait = n_fc_wait 183 | self.n_fc_fp = n_fc_fp 184 | self.n_w = n_w 185 | self.ob_fw = tf.compat.v1.placeholder( 186 | tf.float32, [1, n_s + n_w + n_f]) # forward 1-step 187 | self.done_fw = tf.compat.v1.placeholder(tf.float32, [1]) 188 | self.ob_bw = tf.compat.v1.placeholder( 189 | tf.float32, [n_step, n_s + n_w + n_f]) # backward n-step 190 | self.done_bw = tf.compat.v1.placeholder(tf.float32, [n_step]) 191 | self.states = tf.compat.v1.placeholder(tf.float32, [2, n_lstm * 2]) 192 | with tf.compat.v1.variable_scope(self.name): 193 | # pi and v use separate nets 194 | self.pi_fw, pi_state = self._build_net('forward', 'pi') 195 | self.v_fw, v_state = self._build_net('forward', 'v') 196 | pi_state = tf.expand_dims(pi_state, 0) 197 | v_state = tf.expand_dims(v_state, 0) 198 | self.new_states = tf.concat([pi_state, v_state], 0) 199 | with tf.compat.v1.variable_scope(self.name, reuse=True): 200 | self.pi, _ = self._build_net('backward', 'pi') 201 | self.v, _ = self._build_net('backward', 'v') 202 | self._reset() 203 | 204 | def _build_net(self, in_type, 
out_type):
205 |         if in_type == 'forward':
206 |             ob = self.ob_fw
207 |             done = self.done_fw
208 |         else:
209 |             ob = self.ob_bw
210 |             done = self.done_bw
211 |         if out_type == 'pi':
212 |             states = self.states[0]
213 |         else:
214 |             states = self.states[1]
215 |         h0 = fc(ob[:, :self.n_s], out_type + '_fcw', self.n_fc_wave)
216 |         h1 = fc(ob[:, (self.n_s + self.n_w):], out_type + '_fcf', self.n_fc_fp)
217 |         if self.n_w == 0:
218 |             h = tf.concat([h0, h1], 1)
219 |         else:
220 |             h2 = fc(ob[:, self.n_s: (self.n_s + self.n_w)],
221 |                     out_type + '_fct', self.n_fc_wait)
222 |             h = tf.concat([h0, h1, h2], 1)
223 |         h, new_states = lstm(h, done, states, out_type + '_lstm')
224 |         out_val = self._build_out_net(h, out_type)
225 |         return out_val, new_states
226 | 
227 | 
228 | class FcACPolicy(ACPolicy):
229 |     def __init__(self, n_s, n_a, n_w, n_step, n_fc_wave=128, n_fc_wait=32, n_lstm=64, name=None):
230 |         super().__init__(n_a, n_s, n_step, 'fc', name)
231 |         self.n_fc_wave = n_fc_wave
232 |         self.n_fc_wait = n_fc_wait
233 |         self.n_fc = n_lstm
234 |         self.n_w = n_w
235 |         self.obs = tf.placeholder(tf.float32, [None, n_s + n_w])
236 |         with tf.variable_scope(self.name):
237 |             # pi and v use separate nets
238 |             self.pi = self._build_net('pi')
239 |             self.v = self._build_net('v')
240 | 
241 |     def _build_net(self, out_type):
242 |         if self.n_w == 0:
243 |             h = fc(self.obs, out_type + '_fcw', self.n_fc_wave)
244 |         else:
245 |             h0 = fc(self.obs[:, :self.n_s], out_type + '_fcw', self.n_fc_wave)
246 |             h1 = fc(self.obs[:, self.n_s:], out_type + '_fct', self.n_fc_wait)
247 |             h = tf.concat([h0, h1], 1)
248 |         h = fc(h, out_type + '_fc', self.n_fc)
249 |         return self._build_out_net(h, out_type)
250 | 
251 |     def forward(self, sess, ob, done, out_type='pv'):
252 |         outs = self._get_forward_outs(out_type)
253 |         out_values = sess.run(outs, {self.obs: np.array([ob])})
254 |         return self._return_forward_outs(out_values)
255 | 
256 |     def backward(self, sess, obs, acts, dones, Rs, Advs, cur_lr, cur_beta,
257 |                  summary_writer=None, global_step=None):
258 |         if summary_writer is None:
259 |             ops = self._train
260 |         else:
261 |             ops = [self.summary, self._train]
262 |         outs = sess.run(ops,
263 |                         {self.obs: obs,
264 |                          self.A: acts,
265 |                          self.ADV: Advs,
266 |                          self.R: Rs,
267 |                          self.lr: cur_lr,
268 |                          self.entropy_coef: cur_beta})
269 |         if summary_writer is not None:
270 |             summary_writer.add_summary(outs[0], global_step=global_step)
271 | 
272 | 
273 | class FPFcACPolicy(FcACPolicy):
274 |     def __init__(self, n_s, n_a, n_w, n_f, n_step, n_fc_wave=128, n_fc_wait=32, n_fc_fp=32, n_lstm=64, name=None):
275 |         ACPolicy.__init__(self, n_a, n_s, n_step, 'fpfc', name)
276 |         self.n_fc_wave = n_fc_wave
277 |         self.n_fc_wait = n_fc_wait
278 |         self.n_fc_fp = n_fc_fp
279 |         self.n_fc = n_lstm
280 |         self.n_w = n_w
281 |         self.obs = tf.placeholder(tf.float32, [None, n_s + n_w + n_f])
282 |         with tf.variable_scope(self.name):
283 |             # pi and v use separate nets
284 |             self.pi = self._build_net('pi')
285 |             self.v = self._build_net('v')
286 | 
287 |     def _build_net(self, out_type):
288 |         h0 = fc(self.obs[:, :self.n_s], out_type + '_fcw', self.n_fc_wave)
289 |         h1 = fc(self.obs[:, (self.n_s + self.n_w):], out_type + '_fcf', self.n_fc_fp)
290 |         if self.n_w == 0:
291 |             h = tf.concat([h0, h1], 1)
292 |         else:
293 |             h2 = fc(self.obs[:, self.n_s: (self.n_s + self.n_w)],
294 |                     out_type + '_fct', self.n_fc_wait)
295 |             h = tf.concat([h0, h1, h2], 1)
296 |         h = fc(h, out_type + '_fc', self.n_fc)
297 |         return self._build_out_net(h, out_type)
298 | 
299 | 
300 | class QPolicy:
301 |     def __init__(self, n_a, n_s, n_step, 
policy_name, agent_name): 302 | self.name = policy_name 303 | if agent_name is not None: 304 | # for multi-agent system 305 | self.name += '_' + str(agent_name) 306 | self.n_a = n_a 307 | self.n_s = n_s 308 | self.n_step = n_step 309 | 310 | def forward(self, ob, *_args, **_kwargs): 311 | raise NotImplementedError() 312 | 313 | def _build_fc_net(self, h, n_fc_ls): 314 | for i, n_fc in enumerate(n_fc_ls): 315 | h = fc(h, 'q_fc_%d' % i, n_fc) 316 | q = fc(h, 'q', self.n_a, act=lambda x: x) 317 | return tf.squeeze(q) 318 | 319 | def _build_net(self): 320 | raise NotImplementedError() 321 | 322 | def prepare_loss(self, max_grad_norm, gamma): 323 | self.A = tf.placeholder(tf.int32, [self.n_step]) 324 | self.S1 = tf.placeholder( 325 | tf.float32, [self.n_step, self.n_s + self.n_w]) 326 | self.R = tf.placeholder(tf.float32, [self.n_step]) 327 | self.DONE = tf.placeholder(tf.bool, [self.n_step]) 328 | A_sparse = tf.one_hot(self.A, self.n_a) 329 | 330 | # backward 331 | with tf.variable_scope(self.name + '_q', reuse=True): 332 | q0s = self._build_net(self.S) 333 | q0 = tf.reduce_sum(q0s * A_sparse, axis=1) 334 | with tf.variable_scope(self.name + '_q', reuse=True): 335 | q1s = self._build_net(self.S1) 336 | q1 = tf.reduce_max(q1s, axis=1) 337 | tq = tf.stop_gradient(tf.where(self.DONE, self.R, self.R + gamma * q1)) 338 | self.loss = tf.reduce_mean(tf.square(q0 - tq)) 339 | 340 | wts = tf.trainable_variables(scope=self.name) 341 | grads = tf.gradients(self.loss, wts) 342 | if max_grad_norm > 0: 343 | grads, self.grad_norm = tf.clip_by_global_norm( 344 | grads, max_grad_norm) 345 | self.lr = tf.placeholder(tf.float32, []) 346 | self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr) 347 | self._train = self.optimizer.apply_gradients(list(zip(grads, wts))) 348 | # monitor training 349 | if self.name.endswith('_0a'): 350 | summaries = [] 351 | summaries.append(tf.summary.scalar( 352 | 'train/%s_loss' % self.name, self.loss)) 353 | summaries.append(tf.summary.scalar('train/%s_q' % 354 | self.name, tf.reduce_mean(q0))) 355 | summaries.append(tf.summary.scalar('train/%s_tq' % 356 | self.name, tf.reduce_mean(tq))) 357 | summaries.append(tf.summary.scalar( 358 | 'train/%s_gradnorm' % self.name, self.grad_norm)) 359 | self.summary = tf.summary.merge(summaries) 360 | 361 | 362 | class DeepQPolicy(QPolicy): 363 | def __init__(self, n_s, n_a, n_w, n_step, n_fc0=128, n_fc=64, name=None): 364 | super().__init__(n_a, n_s, n_step, 'dqn', name) 365 | self.n_fc = n_fc 366 | self.n_fc0 = n_fc0 367 | self.n_w = n_w 368 | self.S = tf.placeholder(tf.float32, [None, n_s + n_w]) 369 | with tf.variable_scope(self.name + '_q'): 370 | self.qvalues = self._build_net(self.S) 371 | 372 | def _build_net(self, S): 373 | if self.n_w == 0: 374 | h = fc(S, 'q_fcw', self.n_fc0) 375 | else: 376 | h0 = fc(S[:, :self.n_s], 'q_fcw', self.n_fc0) 377 | h1 = fc(S[:, self.n_s:], 'q_fct', self.n_fc0 / 4) 378 | h = tf.concat([h0, h1], 1) 379 | return self._build_fc_net(h, [self.n_fc]) 380 | 381 | def forward(self, sess, ob): 382 | return sess.run(self.qvalues, {self.S: np.array([ob])}) 383 | 384 | def backward(self, sess, obs, acts, next_obs, dones, rs, cur_lr, 385 | summary_writer=None, global_step=None): 386 | if summary_writer is None: 387 | ops = self._train 388 | else: 389 | ops = [self.summary, self._train] 390 | outs = sess.run(ops, 391 | {self.S: obs, 392 | self.A: acts, 393 | self.S1: next_obs, 394 | self.DONE: dones, 395 | self.R: rs, 396 | self.lr: cur_lr}) 397 | if summary_writer is not None: 398 | 
summary_writer.add_summary(outs[0], global_step=global_step) 399 | 400 | 401 | class LRQPolicy(DeepQPolicy): 402 | def __init__(self, n_s, n_a, n_step, name=None): 403 | QPolicy.__init__(self, n_a, n_s, n_step, 'lr', name) 404 | self.S = tf.compat.v1.placeholder(tf.float32, [None, n_s]) 405 | self.n_w = 0 406 | with tf.compat.v1.variable_scope(self.name + '_q'): 407 | self.qvalues = self._build_net(self.S) 408 | 409 | def _build_net(self, S): 410 | return self._build_fc_net(S, []) 411 | -------------------------------------------------------------------------------- /real_net/ma2c/price_data/EUR_USD.csv: -------------------------------------------------------------------------------- 1 | "Ngày","Lần cuối","Mở","Cao","Thấp","% Thay đổi" 2 | 18/01/2018,1.2239,1.2186,1.2267,1.2165,0.43% 3 | 19/01/2018,1.2222,1.2236,1.2296,1.2219,-0.14% 4 | 22/01/2018,1.2262,1.2268,1.2276,1.2214,0.33% 5 | 23/01/2018,1.2299,1.2263,1.2308,1.2223,0.30% 6 | 24/01/2018,1.2408,1.2299,1.2417,1.2293,0.89% 7 | 25/01/2018,1.2396,1.2408,1.2537,1.2364,-0.10% 8 | 26/01/2018,1.2421,1.2395,1.2495,1.2371,0.20% 9 | 29/01/2018,1.2383,1.2427,1.2436,1.2336,-0.31% 10 | 30/01/2018,1.2402,1.2383,1.2455,1.2335,0.15% 11 | 31/01/2018,1.2421,1.2401,1.2476,1.2386,0.15% 12 | 01/02/2018,1.2510,1.2414,1.2524,1.2386,0.72% 13 | 02/02/2018,1.2462,1.2508,1.2519,1.2409,-0.38% 14 | 05/02/2018,1.2368,1.2455,1.2475,1.2362,-0.75% 15 | 06/02/2018,1.2377,1.2367,1.2434,1.2314,0.07% 16 | 07/02/2018,1.2264,1.2378,1.2407,1.2247,-0.91% 17 | 08/02/2018,1.2247,1.2264,1.2297,1.2212,-0.14% 18 | 09/02/2018,1.2235,1.2245,1.2289,1.2206,-0.10% 19 | 12/02/2018,1.2292,1.2253,1.2298,1.2234,0.47% 20 | 13/02/2018,1.2352,1.2293,1.2373,1.2285,0.49% 21 | 14/02/2018,1.2450,1.2352,1.2467,1.2276,0.79% 22 | 15/02/2018,1.2506,1.2449,1.2511,1.2447,0.45% 23 | 16/02/2018,1.2406,1.2506,1.2557,1.2394,-0.80% 24 | 19/02/2018,1.2408,1.2409,1.2435,1.2368,0.02% 25 | 20/02/2018,1.2338,1.2409,1.2414,1.2320,-0.56% 26 | 21/02/2018,1.2284,1.2338,1.2361,1.2281,-0.44% 27 | 22/02/2018,1.2331,1.2283,1.2354,1.2260,0.38% 28 | 23/02/2018,1.2293,1.2329,1.2337,1.2279,-0.31% 29 | 26/02/2018,1.2318,1.2299,1.2356,1.2278,0.20% 30 | 27/02/2018,1.2232,1.2317,1.2348,1.2221,-0.70% 31 | 28/02/2018,1.2194,1.2232,1.2243,1.2188,-0.31% 32 | 01/03/2018,1.2268,1.2193,1.2272,1.2153,0.61% 33 | 02/03/2018,1.2317,1.2266,1.2336,1.2250,0.40% 34 | 05/03/2018,1.2336,1.2323,1.2366,1.2268,0.15% 35 | 06/03/2018,1.2404,1.2336,1.2422,1.2328,0.55% 36 | 07/03/2018,1.2413,1.2404,1.2445,1.2385,0.07% 37 | 08/03/2018,1.2312,1.2411,1.2448,1.2298,-0.81% 38 | 09/03/2018,1.2307,1.2312,1.2336,1.2273,-0.04% 39 | 12/03/2018,1.2334,1.2311,1.2347,1.2291,0.22% 40 | 13/03/2018,1.2390,1.2334,1.2408,1.2314,0.45% 41 | 14/03/2018,1.2367,1.2390,1.2414,1.2347,-0.19% 42 | 15/03/2018,1.2305,1.2366,1.2385,1.2299,-0.50% 43 | 16/03/2018,1.2289,1.2305,1.2338,1.2261,-0.13% 44 | 19/03/2018,1.2335,1.2284,1.2361,1.2258,0.37% 45 | 20/03/2018,1.2242,1.2335,1.2356,1.2239,-0.75% 46 | 21/03/2018,1.2338,1.2242,1.2351,1.2241,0.78% 47 | 22/03/2018,1.2302,1.2337,1.2389,1.2285,-0.29% 48 | 23/03/2018,1.2353,1.2302,1.2375,1.2301,0.41% 49 | 26/03/2018,1.2443,1.2348,1.2463,1.2341,0.73% 50 | 27/03/2018,1.2403,1.2444,1.2478,1.2373,-0.32% 51 | 28/03/2018,1.2308,1.2403,1.2424,1.2300,-0.77% 52 | 29/03/2018,1.2302,1.2308,1.2335,1.2282,-0.05% 53 | 30/03/2018,1.2323,1.2301,1.2331,1.2293,0.17% 54 | 02/04/2018,1.2302,1.2323,1.2346,1.2282,-0.17% 55 | 03/04/2018,1.2270,1.2304,1.2336,1.2253,-0.26% 56 | 04/04/2018,1.2278,1.2269,1.2315,1.2256,0.07% 57 | 
05/04/2018,1.2240,1.2278,1.2292,1.2218,-0.31% 58 | 06/04/2018,1.2283,1.2240,1.2292,1.2214,0.35% 59 | 09/04/2018,1.2321,1.2279,1.2331,1.2261,0.31% 60 | 10/04/2018,1.2356,1.2320,1.2379,1.2303,0.28% 61 | 11/04/2018,1.2367,1.2357,1.2397,1.2347,0.09% 62 | 12/04/2018,1.2327,1.2368,1.2382,1.2300,-0.32% 63 | 13/04/2018,1.2331,1.2327,1.2348,1.2307,0.03% 64 | 16/04/2018,1.2380,1.2330,1.2395,1.2325,0.40% 65 | 17/04/2018,1.2370,1.2378,1.2413,1.2335,-0.08% 66 | 18/04/2018,1.2373,1.2369,1.2398,1.2343,0.02% 67 | 19/04/2018,1.2346,1.2373,1.2402,1.2329,-0.22% 68 | 20/04/2018,1.2288,1.2346,1.2354,1.2250,-0.47% 69 | 23/04/2018,1.2209,1.2289,1.2292,1.2198,-0.64% 70 | 24/04/2018,1.2232,1.2209,1.2247,1.2182,0.19% 71 | 25/04/2018,1.2161,1.2232,1.2240,1.2160,-0.58% 72 | 26/04/2018,1.2103,1.2161,1.2211,1.2096,-0.48% 73 | 27/04/2018,1.2130,1.2105,1.2135,1.2055,0.22% 74 | 30/04/2018,1.2079,1.2125,1.2141,1.2064,-0.42% 75 | 01/05/2018,1.1993,1.2076,1.2086,1.1981,-0.71% 76 | 02/05/2018,1.1951,1.1992,1.2033,1.1938,-0.35% 77 | 03/05/2018,1.1988,1.1951,1.2009,1.1946,0.31% 78 | 04/05/2018,1.1960,1.1989,1.1997,1.1911,-0.23% 79 | 07/05/2018,1.1922,1.1953,1.1980,1.1898,-0.32% 80 | 08/05/2018,1.1864,1.1924,1.1941,1.1838,-0.49% 81 | 09/05/2018,1.1850,1.1863,1.1898,1.1822,-0.12% 82 | 10/05/2018,1.1915,1.1852,1.1948,1.1843,0.55% 83 | 11/05/2018,1.1944,1.1917,1.1970,1.1891,0.24% 84 | 14/05/2018,1.1927,1.1947,1.1998,1.1926,-0.14% 85 | 15/05/2018,1.1838,1.1925,1.1939,1.1817,-0.75% 86 | 16/05/2018,1.1807,1.1837,1.1854,1.1762,-0.26% 87 | 17/05/2018,1.1795,1.1808,1.1839,1.1775,-0.10% 88 | 18/05/2018,1.1778,1.1796,1.1825,1.1749,-0.14% 89 | 21/05/2018,1.1791,1.1764,1.1797,1.1717,0.11% 90 | 22/05/2018,1.1779,1.1792,1.1830,1.1754,-0.10% 91 | 23/05/2018,1.1697,1.1778,1.1790,1.1675,-0.70% 92 | 24/05/2018,1.1720,1.1695,1.1750,1.1688,0.20% 93 | 25/05/2018,1.1652,1.1720,1.1735,1.1645,-0.58% 94 | 28/05/2018,1.1623,1.1651,1.1729,1.1605,-0.25% 95 | 29/05/2018,1.1540,1.1625,1.1641,1.1508,-0.71% 96 | 30/05/2018,1.1663,1.1540,1.1678,1.1518,1.07% 97 | 31/05/2018,1.1692,1.1665,1.1726,1.1641,0.25% 98 | 01/06/2018,1.1661,1.1691,1.1718,1.1617,-0.27% 99 | 04/06/2018,1.1699,1.1656,1.1744,1.1652,0.33% 100 | 05/06/2018,1.1718,1.1698,1.1733,1.1652,0.16% 101 | 06/06/2018,1.1773,1.1717,1.1797,1.1708,0.47% 102 | 07/06/2018,1.1799,1.1774,1.1842,1.1772,0.22% 103 | 08/06/2018,1.1768,1.1800,1.1812,1.1726,-0.26% 104 | 11/06/2018,1.1784,1.1766,1.1822,1.1754,0.14% 105 | 12/06/2018,1.1745,1.1784,1.1811,1.1733,-0.33% 106 | 13/06/2018,1.1791,1.1745,1.1803,1.1725,0.39% 107 | 14/06/2018,1.1569,1.1791,1.1853,1.1562,-1.88% 108 | 15/06/2018,1.1609,1.1568,1.1629,1.1543,0.35% 109 | 18/06/2018,1.1623,1.1609,1.1625,1.1563,0.12% 110 | 19/06/2018,1.1590,1.1624,1.1646,1.1530,-0.28% 111 | 20/06/2018,1.1572,1.1590,1.1602,1.1537,-0.16% 112 | 21/06/2018,1.1603,1.1572,1.1635,1.1509,0.27% 113 | 22/06/2018,1.1657,1.1604,1.1677,1.1600,0.47% 114 | 25/06/2018,1.1704,1.1659,1.1715,1.1627,0.40% 115 | 26/06/2018,1.1647,1.1704,1.1722,1.1635,-0.49% 116 | 27/06/2018,1.1554,1.1647,1.1674,1.1541,-0.80% 117 | 28/06/2018,1.1569,1.1555,1.1602,1.1527,0.13% 118 | 29/06/2018,1.1685,1.1569,1.1692,1.1558,1.00% 119 | 02/07/2018,1.1641,1.1651,1.1699,1.1590,-0.38% 120 | 03/07/2018,1.1657,1.1641,1.1675,1.1621,0.14% 121 | 04/07/2018,1.1656,1.1658,1.1684,1.1629,-0.01% 122 | 05/07/2018,1.1691,1.1655,1.1722,1.1649,0.30% 123 | 06/07/2018,1.1747,1.1690,1.1769,1.1679,0.48% 124 | 09/07/2018,1.1751,1.1748,1.1792,1.1731,0.03% 125 | 10/07/2018,1.1744,1.1751,1.1764,1.1690,-0.06% 126 | 
11/07/2018,1.1674,1.1745,1.1760,1.1665,-0.60% 127 | 12/07/2018,1.1672,1.1674,1.1697,1.1648,-0.02% 128 | 13/07/2018,1.1685,1.1669,1.1688,1.1610,0.11% 129 | 16/07/2018,1.1710,1.1676,1.1726,1.1674,0.21% 130 | 17/07/2018,1.1661,1.1711,1.1746,1.1650,-0.42% 131 | 18/07/2018,1.1639,1.1660,1.1665,1.1601,-0.19% 132 | 19/07/2018,1.1643,1.1639,1.1680,1.1574,0.03% 133 | 20/07/2018,1.1720,1.1644,1.1740,1.1626,0.66% 134 | 23/07/2018,1.1692,1.1730,1.1752,1.1682,-0.24% 135 | 24/07/2018,1.1686,1.1692,1.1719,1.1654,-0.05% 136 | 25/07/2018,1.1728,1.1684,1.1739,1.1663,0.36% 137 | 26/07/2018,1.1643,1.1728,1.1744,1.1639,-0.72% 138 | 27/07/2018,1.1658,1.1643,1.1665,1.1621,0.13% 139 | 30/07/2018,1.1706,1.1660,1.1721,1.1648,0.41% 140 | 31/07/2018,1.1691,1.1706,1.1745,1.1682,-0.13% 141 | 01/08/2018,1.1660,1.1691,1.1701,1.1656,-0.27% 142 | 02/08/2018,1.1584,1.1660,1.1669,1.1582,-0.65% 143 | 03/08/2018,1.1567,1.1583,1.1611,1.1559,-0.15% 144 | 06/08/2018,1.1554,1.1571,1.1574,1.1527,-0.11% 145 | 07/08/2018,1.1599,1.1555,1.1610,1.1547,0.39% 146 | 08/08/2018,1.1611,1.1598,1.1629,1.1571,0.10% 147 | 09/08/2018,1.1527,1.1609,1.1620,1.1525,-0.72% 148 | 10/08/2018,1.1411,1.1526,1.1537,1.1384,-1.01% 149 | 13/08/2018,1.1409,1.1389,1.1434,1.1364,-0.02% 150 | 14/08/2018,1.1344,1.1410,1.1431,1.1330,-0.57% 151 | 15/08/2018,1.1345,1.1344,1.1357,1.1299,0.01% 152 | 16/08/2018,1.1377,1.1345,1.1411,1.1335,0.28% 153 | 17/08/2018,1.1439,1.1377,1.1447,1.1365,0.54% 154 | 20/08/2018,1.1481,1.1442,1.1486,1.1394,0.37% 155 | 21/08/2018,1.1571,1.1482,1.1602,1.1479,0.78% 156 | 22/08/2018,1.1597,1.1570,1.1624,1.1551,0.22% 157 | 23/08/2018,1.1539,1.1596,1.1601,1.1530,-0.50% 158 | 24/08/2018,1.1623,1.1540,1.1641,1.1533,0.73% 159 | 27/08/2018,1.1678,1.1631,1.1694,1.1592,0.47% 160 | 28/08/2018,1.1695,1.1678,1.1735,1.1662,0.15% 161 | 29/08/2018,1.1708,1.1695,1.1712,1.1652,0.11% 162 | 30/08/2018,1.1671,1.1707,1.1720,1.1641,-0.32% 163 | 31/08/2018,1.1601,1.1671,1.1692,1.1585,-0.60% 164 | 03/09/2018,1.1624,1.1596,1.1629,1.1583,0.20% 165 | 04/09/2018,1.1582,1.1623,1.1629,1.1528,-0.36% 166 | 05/09/2018,1.1629,1.1581,1.1640,1.1542,0.41% 167 | 06/09/2018,1.1622,1.1629,1.1660,1.1603,-0.06% 168 | 07/09/2018,1.1553,1.1624,1.1651,1.1550,-0.59% 169 | 10/09/2018,1.1594,1.1560,1.1617,1.1526,0.35% 170 | 11/09/2018,1.1607,1.1594,1.1645,1.1565,0.11% 171 | 12/09/2018,1.1626,1.1604,1.1651,1.1570,0.16% 172 | 13/09/2018,1.1690,1.1627,1.1703,1.1608,0.55% 173 | 14/09/2018,1.1630,1.1690,1.1724,1.1619,-0.51% 174 | 17/09/2018,1.1683,1.1623,1.1699,1.1617,0.46% 175 | 18/09/2018,1.1667,1.1684,1.1726,1.1651,-0.14% 176 | 19/09/2018,1.1673,1.1667,1.1716,1.1648,0.05% 177 | 20/09/2018,1.1777,1.1671,1.1786,1.1665,0.89% 178 | 21/09/2018,1.1750,1.1776,1.1804,1.1731,-0.23% 179 | 24/09/2018,1.1748,1.1751,1.1817,1.1723,-0.02% 180 | 25/09/2018,1.1772,1.1748,1.1793,1.1730,0.20% 181 | 26/09/2018,1.1740,1.1767,1.1799,1.1726,-0.27% 182 | 27/09/2018,1.1641,1.1740,1.1759,1.1637,-0.84% 183 | 28/09/2018,1.1609,1.1641,1.1652,1.1567,-0.27% 184 | 01/10/2018,1.1578,1.1609,1.1630,1.1563,-0.27% 185 | 02/10/2018,1.1548,1.1578,1.1582,1.1506,-0.26% 186 | 03/10/2018,1.1478,1.1548,1.1595,1.1465,-0.61% 187 | 04/10/2018,1.1515,1.1478,1.1543,1.1464,0.32% 188 | 05/10/2018,1.1524,1.1513,1.1550,1.1483,0.08% 189 | 08/10/2018,1.1492,1.1520,1.1536,1.1459,-0.28% 190 | 09/10/2018,1.1490,1.1491,1.1504,1.1430,-0.02% 191 | 10/10/2018,1.1520,1.1491,1.1547,1.1479,0.26% 192 | 11/10/2018,1.1594,1.1519,1.1600,1.1517,0.64% 193 | 12/10/2018,1.1563,1.1592,1.1611,1.1534,-0.27% 194 | 15/10/2018,1.1579,1.1559,1.1608,1.1537,0.14% 195 | 
16/10/2018,1.1575,1.1580,1.1623,1.1564,-0.03% 196 | 17/10/2018,1.1501,1.1575,1.1585,1.1495,-0.64% 197 | 18/10/2018,1.1453,1.1500,1.1528,1.1448,-0.42% 198 | 19/10/2018,1.1515,1.1451,1.1536,1.1433,0.54% 199 | 22/10/2018,1.1465,1.1507,1.1552,1.1455,-0.43% 200 | 23/10/2018,1.1471,1.1466,1.1495,1.1438,0.05% 201 | 24/10/2018,1.1392,1.1470,1.1477,1.1378,-0.69% 202 | 25/10/2018,1.1376,1.1392,1.1434,1.1355,-0.14% 203 | 26/10/2018,1.1403,1.1376,1.1422,1.1334,0.24% 204 | 29/10/2018,1.1373,1.1408,1.1418,1.1361,-0.26% 205 | 30/10/2018,1.1344,1.1372,1.1387,1.1338,-0.25% 206 | 31/10/2018,1.1312,1.1345,1.1363,1.1301,-0.28% 207 | 01/11/2018,1.1409,1.1311,1.1426,1.1306,0.86% 208 | 02/11/2018,1.1387,1.1408,1.1458,1.1372,-0.19% 209 | 05/11/2018,1.1407,1.1391,1.1425,1.1356,0.18% 210 | 06/11/2018,1.1427,1.1406,1.1440,1.1389,0.18% 211 | 07/11/2018,1.1426,1.1427,1.1502,1.1395,-0.01% 212 | 08/11/2018,1.1363,1.1425,1.1448,1.1352,-0.55% 213 | 09/11/2018,1.1336,1.1364,1.1371,1.1315,-0.24% 214 | 12/11/2018,1.1218,1.1331,1.1333,1.1214,-1.04% 215 | 13/11/2018,1.1291,1.1219,1.1296,1.1216,0.65% 216 | 14/11/2018,1.1310,1.1290,1.1349,1.1262,0.17% 217 | 15/11/2018,1.1328,1.1309,1.1364,1.1271,0.16% 218 | 16/11/2018,1.1420,1.1330,1.1423,1.1320,0.81% 219 | 19/11/2018,1.1453,1.1403,1.1467,1.1393,0.29% 220 | 20/11/2018,1.1371,1.1453,1.1473,1.1357,-0.72% 221 | 21/11/2018,1.1385,1.1371,1.1426,1.1363,0.12% 222 | 22/11/2018,1.1408,1.1382,1.1434,1.1378,0.20% 223 | 23/11/2018,1.1341,1.1403,1.1422,1.1325,-0.59% 224 | 26/11/2018,1.1328,1.1343,1.1385,1.1323,-0.11% 225 | 27/11/2018,1.1288,1.1326,1.1344,1.1276,-0.35% 226 | 28/11/2018,1.1366,1.1291,1.1387,1.1264,0.69% 227 | 29/11/2018,1.1393,1.1367,1.1403,1.1348,0.24% 228 | 30/11/2018,1.1317,1.1395,1.1402,1.1304,-0.67% 229 | 03/12/2018,1.1354,1.1315,1.1382,1.1315,0.33% 230 | 04/12/2018,1.1345,1.1354,1.1421,1.1318,-0.08% 231 | 05/12/2018,1.1344,1.1342,1.1361,1.1309,-0.01% 232 | 06/12/2018,1.1376,1.1345,1.1414,1.1321,0.28% 233 | 07/12/2018,1.1378,1.1377,1.1425,1.1359,0.02% 234 | 10/12/2018,1.1356,1.1401,1.1444,1.1349,-0.19% 235 | 11/12/2018,1.1316,1.1361,1.1402,1.1304,-0.35% 236 | 12/12/2018,1.1368,1.1317,1.1388,1.1312,0.46% 237 | 13/12/2018,1.1363,1.1369,1.1396,1.1330,-0.04% 238 | 14/12/2018,1.1309,1.1362,1.1374,1.1268,-0.48% 239 | 17/12/2018,1.1347,1.1308,1.1360,1.1299,0.34% 240 | 18/12/2018,1.1361,1.1346,1.1402,1.1336,0.12% 241 | 19/12/2018,1.1377,1.1361,1.1442,1.1360,0.14% 242 | 20/12/2018,1.1446,1.1377,1.1487,1.1371,0.61% 243 | 21/12/2018,1.1369,1.1447,1.1477,1.1353,-0.67% 244 | 24/12/2018,1.1400,1.1370,1.1439,1.1350,0.27% 245 | 25/12/2018,1.1362,1.1400,1.1429,1.1361,-0.33% 246 | 26/12/2018,1.1353,1.1363,1.1423,1.1342,-0.08% 247 | 27/12/2018,1.1430,1.1353,1.1457,1.1349,0.68% 248 | 28/12/2018,1.1438,1.1429,1.1478,1.1424,0.07% 249 | 31/12/2018,1.1470,1.1443,1.1470,1.1421,0.28% 250 | 01/01/2019,1.1464,1.1466,1.1481,1.1425,-0.05% 251 | 02/01/2019,1.1343,1.1464,1.1499,1.1325,-1.06% 252 | 03/01/2019,1.1394,1.1345,1.1413,1.1309,0.45% 253 | 04/01/2019,1.1395,1.1393,1.1422,1.1345,0.01% 254 | 07/01/2019,1.1476,1.1392,1.1485,1.1392,0.71% 255 | 08/01/2019,1.1442,1.1476,1.1487,1.1422,-0.30% 256 | 09/01/2019,1.1543,1.1444,1.1556,1.1435,0.88% 257 | 10/01/2019,1.1500,1.1542,1.1572,1.1485,-0.37% 258 | 11/01/2019,1.1469,1.1499,1.1542,1.1457,-0.27% 259 | 14/01/2019,1.1477,1.1471,1.1485,1.1440,0.07% 260 | 15/01/2019,1.1417,1.1473,1.1494,1.1381,-0.52% 261 | 16/01/2019,1.1400,1.1414,1.1429,1.1377,-0.15% 262 | 17/01/2019,1.1398,1.1396,1.1410,1.1369,-0.02% 263 | 18/01/2019,1.1363,1.1389,1.1413,1.1352,-0.31% 264 
| 21/01/2019,1.1366,1.1371,1.1409,1.1355,0.03% 265 | 22/01/2019,1.1359,1.1365,1.1386,1.1334,-0.06% 266 | 23/01/2019,1.1381,1.1358,1.1397,1.1348,0.19% 267 | 24/01/2019,1.1306,1.1380,1.1398,1.1287,-0.66% 268 | 25/01/2019,1.1415,1.1306,1.1421,1.1298,0.96% 269 | 28/01/2019,1.1434,1.1404,1.1445,1.1388,0.17% 270 | 29/01/2019,1.1432,1.1428,1.1453,1.1410,-0.02% 271 | 30/01/2019,1.1479,1.1433,1.1503,1.1404,0.41% 272 | 31/01/2019,1.1446,1.1480,1.1517,1.1434,-0.29% 273 | 01/02/2019,1.1456,1.1447,1.1490,1.1433,0.09% 274 | 04/02/2019,1.1438,1.1457,1.1467,1.1421,-0.16% 275 | 05/02/2019,1.1415,1.1442,1.1456,1.1400,-0.20% 276 | 06/02/2019,1.1362,1.1406,1.1420,1.1359,-0.46% 277 | 07/02/2019,1.1341,1.1360,1.1375,1.1322,-0.18% 278 | 08/02/2019,1.1330,1.1346,1.1355,1.1320,-0.10% 279 | 11/02/2019,1.1276,1.1328,1.1331,1.1264,-0.48% 280 | 12/02/2019,1.1325,1.1280,1.1342,1.1256,0.43% 281 | 13/02/2019,1.1267,1.1326,1.1346,1.1260,-0.51% 282 | 14/02/2019,1.1301,1.1262,1.1313,1.1247,0.30% 283 | 15/02/2019,1.1295,1.1295,1.1313,1.1233,-0.05% 284 | 18/02/2019,1.1311,1.1288,1.1337,1.1284,0.14% 285 | 19/02/2019,1.1341,1.1310,1.1358,1.1274,0.27% 286 | 20/02/2019,1.1337,1.1342,1.1374,1.1324,-0.04% 287 | 21/02/2019,1.1336,1.1338,1.1368,1.1320,-0.01% 288 | 22/02/2019,1.1343,1.1337,1.1360,1.1315,0.06% 289 | 25/02/2019,1.1359,1.1338,1.1369,1.1323,0.14% 290 | 26/02/2019,1.1387,1.1366,1.1404,1.1343,0.25% 291 | 27/02/2019,1.1369,1.1396,1.1404,1.1360,-0.16% 292 | 28/02/2019,1.1371,1.1371,1.1421,1.1357,0.02% 293 | 01/03/2019,1.1377,1.1372,1.1410,1.1353,0.05% 294 | 04/03/2019,1.1340,1.1380,1.1398,1.1309,-0.33% 295 | 05/03/2019,1.1308,1.1339,1.1353,1.1290,-0.28% 296 | 06/03/2019,1.1307,1.1313,1.1328,1.1286,-0.01% 297 | 07/03/2019,1.1194,1.1306,1.1322,1.1176,-1.00% 298 | 08/03/2019,1.1240,1.1194,1.1248,1.1185,0.41% 299 | 11/03/2019,1.1247,1.1238,1.1259,1.1221,0.06% 300 | 12/03/2019,1.1286,1.1250,1.1308,1.1243,0.35% 301 | 13/03/2019,1.1325,1.1287,1.1341,1.1277,0.35% 302 | 14/03/2019,1.1302,1.1329,1.1342,1.1293,-0.20% 303 | 15/03/2019,1.1325,1.1305,1.1346,1.1296,0.20% 304 | 18/03/2019,1.1336,1.1323,1.1360,1.1317,0.10% 305 | 19/03/2019,1.1349,1.1337,1.1364,1.1332,0.11% 306 | 20/03/2019,1.1411,1.1351,1.1450,1.1335,0.55% 307 | 21/03/2019,1.1373,1.1414,1.1439,1.1342,-0.33% 308 | 22/03/2019,1.1313,1.1375,1.1394,1.1272,-0.53% 309 | 25/03/2019,1.1311,1.1294,1.1333,1.1284,-0.02% 310 | 26/03/2019,1.1264,1.1313,1.1329,1.1262,-0.42% 311 | 27/03/2019,1.1247,1.1269,1.1287,1.1242,-0.15% 312 | 28/03/2019,1.1220,1.1244,1.1264,1.1213,-0.24% 313 | 29/03/2019,1.1217,1.1221,1.1249,1.1208,-0.03% 314 | 01/04/2019,1.1212,1.1218,1.1250,1.1202,-0.04% 315 | 02/04/2019,1.1202,1.1216,1.1219,1.1183,-0.09% 316 | 03/04/2019,1.1234,1.1205,1.1256,1.1200,0.29% 317 | 04/04/2019,1.1220,1.1235,1.1250,1.1205,-0.12% 318 | 05/04/2019,1.1214,1.1223,1.1247,1.1209,-0.05% 319 | 08/04/2019,1.1259,1.1218,1.1277,1.1213,0.40% 320 | 09/04/2019,1.1261,1.1272,1.1286,1.1253,0.02% 321 | 10/04/2019,1.1273,1.1264,1.1288,1.1229,0.11% 322 | 11/04/2019,1.1250,1.1273,1.1291,1.1249,-0.20% 323 | 12/04/2019,1.1300,1.1254,1.1327,1.1252,0.44% 324 | 15/04/2019,1.1307,1.1300,1.1322,1.1294,0.06% 325 | 16/04/2019,1.1280,1.1303,1.1315,1.1279,-0.24% 326 | 17/04/2019,1.1294,1.1281,1.1326,1.1279,0.12% 327 | 18/04/2019,1.1229,1.1297,1.1307,1.1226,-0.58% 328 | 19/04/2019,1.1246,1.1241,1.1254,1.1228,0.15% 329 | 22/04/2019,1.1255,1.1244,1.1264,1.1236,0.08% 330 | 23/04/2019,1.1225,1.1257,1.1264,1.1192,-0.27% 331 | 24/04/2019,1.1152,1.1229,1.1233,1.1140,-0.65% 332 | 25/04/2019,1.1130,1.1161,1.1164,1.1118,-0.20% 
333 | 26/04/2019,1.1148,1.1131,1.1176,1.1111,0.16% 334 | 29/04/2019,1.1185,1.1152,1.1192,1.1141,0.33% 335 | 30/04/2019,1.1215,1.1186,1.1231,1.1175,0.27% 336 | 01/05/2019,1.1194,1.1215,1.1266,1.1187,-0.19% 337 | 02/05/2019,1.1176,1.1202,1.1221,1.1170,-0.16% 338 | 03/05/2019,1.1200,1.1178,1.1207,1.1135,0.21% 339 | 06/05/2019,1.1197,1.1172,1.1211,1.1159,-0.03% 340 | 07/05/2019,1.1190,1.1199,1.1222,1.1165,-0.06% 341 | 08/05/2019,1.1191,1.1191,1.1215,1.1182,0.01% 342 | 09/05/2019,1.1220,1.1192,1.1253,1.1172,0.26% 343 | 10/05/2019,1.1233,1.1215,1.1255,1.1214,0.12% 344 | 13/05/2019,1.1223,1.1242,1.1266,1.1221,-0.09% 345 | 14/05/2019,1.1203,1.1224,1.1247,1.1200,-0.18% 346 | 15/05/2019,1.1200,1.1204,1.1226,1.1177,-0.03% 347 | 16/05/2019,1.1172,1.1202,1.1227,1.1165,-0.25% 348 | 17/05/2019,1.1156,1.1174,1.1185,1.1154,-0.14% 349 | 20/05/2019,1.1170,1.1158,1.1177,1.1149,0.13% 350 | 21/05/2019,1.1158,1.1171,1.1188,1.1141,-0.11% 351 | 22/05/2019,1.1152,1.1167,1.1181,1.1147,-0.05% 352 | 23/05/2019,1.1180,1.1151,1.1189,1.1107,0.25% 353 | 24/05/2019,1.1202,1.1182,1.1214,1.1174,0.20% 354 | 27/05/2019,1.1196,1.1196,1.1216,1.1185,-0.05% 355 | 28/05/2019,1.1160,1.1199,1.1204,1.1158,-0.32% 356 | 29/05/2019,1.1129,1.1161,1.1174,1.1123,-0.28% 357 | 30/05/2019,1.1130,1.1132,1.1145,1.1115,0.01% 358 | 31/05/2019,1.1167,1.1135,1.1180,1.1125,0.33% 359 | 03/06/2019,1.1240,1.1168,1.1264,1.1156,0.65% 360 | 04/06/2019,1.1251,1.1241,1.1279,1.1226,0.10% 361 | 05/06/2019,1.1219,1.1251,1.1307,1.1219,-0.28% 362 | 06/06/2019,1.1274,1.1229,1.1309,1.1214,0.49% 363 | 07/06/2019,1.1331,1.1276,1.1349,1.1250,0.51% 364 | 10/06/2019,1.1312,1.1324,1.1331,1.1289,-0.17% 365 | 11/06/2019,1.1329,1.1312,1.1338,1.1301,0.15% 366 | 12/06/2019,1.1287,1.1326,1.1344,1.1281,-0.37% 367 | 13/06/2019,1.1275,1.1287,1.1305,1.1267,-0.11% 368 | 14/06/2019,1.1207,1.1283,1.1291,1.1201,-0.60% 369 | 17/06/2019,1.1217,1.1216,1.1248,1.1204,0.09% 370 | 18/06/2019,1.1191,1.1219,1.1243,1.1180,-0.23% 371 | 19/06/2019,1.1224,1.1193,1.1254,1.1186,0.29% 372 | 20/06/2019,1.1291,1.1226,1.1319,1.1226,0.60% 373 | 21/06/2019,1.1366,1.1292,1.1379,1.1282,0.66% 374 | 24/06/2019,1.1396,1.1361,1.1405,1.1361,0.26% 375 | 25/06/2019,1.1365,1.1399,1.1413,1.1343,-0.27% 376 | 26/06/2019,1.1368,1.1365,1.1392,1.1348,0.03% 377 | 27/06/2019,1.1368,1.1369,1.1382,1.1348,0.00% 378 | 28/06/2019,1.1368,1.1370,1.1394,1.1351,0.00% 379 | 01/07/2019,1.1285,1.1376,1.1376,1.1280,-0.73% 380 | 02/07/2019,1.1283,1.1286,1.1321,1.1274,-0.02% 381 | 03/07/2019,1.1277,1.1285,1.1312,1.1268,-0.05% 382 | 04/07/2019,1.1284,1.1278,1.1296,1.1272,0.06% 383 | 05/07/2019,1.1224,1.1285,1.1288,1.1207,-0.53% 384 | 08/07/2019,1.1214,1.1223,1.1235,1.1207,-0.09% 385 | 09/07/2019,1.1206,1.1215,1.1220,1.1193,-0.07% 386 | 10/07/2019,1.1249,1.1208,1.1264,1.1201,0.38% 387 | 11/07/2019,1.1252,1.1251,1.1286,1.1244,0.03% 388 | 12/07/2019,1.1269,1.1254,1.1276,1.1237,0.15% 389 | 15/07/2019,1.1257,1.1269,1.1285,1.1252,-0.11% 390 | 16/07/2019,1.1209,1.1257,1.1264,1.1201,-0.43% 391 | 17/07/2019,1.1223,1.1211,1.1235,1.1200,0.12% 392 | 18/07/2019,1.1275,1.1224,1.1281,1.1205,0.46% 393 | 19/07/2019,1.1220,1.1277,1.1283,1.1203,-0.49% 394 | 22/07/2019,1.1208,1.1218,1.1226,1.1206,-0.11% 395 | 23/07/2019,1.1151,1.1210,1.1211,1.1145,-0.51% 396 | 24/07/2019,1.1139,1.1152,1.1159,1.1127,-0.11% 397 | 25/07/2019,1.1145,1.1141,1.1188,1.1101,0.05% 398 | 26/07/2019,1.1125,1.1147,1.1151,1.1112,-0.18% 399 | 29/07/2019,1.1144,1.1127,1.1151,1.1113,0.17% 400 | 30/07/2019,1.1153,1.1144,1.1162,1.1132,0.08% 401 | 31/07/2019,1.1074,1.1155,1.1163,1.1059,-0.71% 
402 | 01/08/2019,1.1083,1.1076,1.1096,1.1027,0.08% 403 | 02/08/2019,1.1107,1.1085,1.1118,1.1070,0.22% 404 | 05/08/2019,1.1202,1.1103,1.1214,1.1103,0.86% 405 | 06/08/2019,1.1198,1.1203,1.1250,1.1168,-0.04% 406 | 07/08/2019,1.1197,1.1199,1.1243,1.1179,-0.01% 407 | 08/08/2019,1.1178,1.1199,1.1233,1.1177,-0.17% 408 | 09/08/2019,1.1198,1.1180,1.1223,1.1179,0.18% 409 | 12/08/2019,1.1212,1.1204,1.1231,1.1162,0.13% 410 | 13/08/2019,1.1169,1.1214,1.1229,1.1170,-0.38% 411 | 14/08/2019,1.1138,1.1171,1.1192,1.1130,-0.28% 412 | 15/08/2019,1.1106,1.1139,1.1159,1.1091,-0.29% 413 | 16/08/2019,1.1089,1.1107,1.1114,1.1066,-0.15% 414 | 19/08/2019,1.1076,1.1092,1.1114,1.1076,-0.12% 415 | 20/08/2019,1.1099,1.1079,1.1107,1.1065,0.21% 416 | 21/08/2019,1.1083,1.1100,1.1108,1.1080,-0.14% 417 | 22/08/2019,1.1078,1.1085,1.1114,1.1063,-0.05% 418 | 23/08/2019,1.1144,1.1079,1.1154,1.1051,0.60% 419 | 26/08/2019,1.1100,1.1145,1.1166,1.1093,-0.39% 420 | 27/08/2019,1.1090,1.1103,1.1116,1.1085,-0.09% 421 | 28/08/2019,1.1077,1.1091,1.1099,1.1072,-0.12% 422 | 29/08/2019,1.1055,1.1077,1.1092,1.1042,-0.20% 423 | 30/08/2019,1.0989,1.1056,1.1062,1.0963,-0.60% 424 | 02/09/2019,1.0966,1.0989,1.1000,1.0957,-0.21% 425 | 03/09/2019,1.0972,1.0970,1.0979,1.0926,0.05% 426 | 04/09/2019,1.1033,1.0974,1.1039,1.0968,0.56% 427 | 05/09/2019,1.1033,1.1037,1.1085,1.1016,0.00% 428 | 06/09/2019,1.1027,1.1034,1.1057,1.1020,-0.05% 429 | 09/09/2019,1.1046,1.1024,1.1069,1.1015,0.17% 430 | 10/09/2019,1.1043,1.1047,1.1061,1.1030,-0.03% 431 | 11/09/2019,1.1009,1.1044,1.1056,1.0984,-0.31% 432 | 12/09/2019,1.1061,1.1011,1.1087,1.0927,0.47% 433 | 13/09/2019,1.1073,1.1063,1.1110,1.1055,0.11% 434 | 16/09/2019,1.1000,1.1087,1.1093,1.0993,-0.66% 435 | 17/09/2019,1.1071,1.1000,1.1076,1.0990,0.65% 436 | 18/09/2019,1.1029,1.1073,1.1077,1.1013,-0.38% 437 | 19/09/2019,1.1040,1.1030,1.1075,1.1022,0.10% 438 | 20/09/2019,1.1017,1.1042,1.1069,1.0996,-0.21% 439 | 23/09/2019,1.0991,1.1016,1.1027,1.0965,-0.24% 440 | 24/09/2019,1.1018,1.0994,1.1025,1.0983,0.25% 441 | 25/09/2019,1.0941,1.1020,1.1025,1.0937,-0.70% 442 | 26/09/2019,1.0921,1.0942,1.0969,1.0908,-0.18% 443 | 27/09/2019,1.0938,1.0921,1.0960,1.0905,0.16% 444 | 30/09/2019,1.0898,1.0940,1.0948,1.0884,-0.37% 445 | 01/10/2019,1.0930,1.0900,1.0943,1.0878,0.29% 446 | 02/10/2019,1.0958,1.0933,1.0964,1.0904,0.26% 447 | 03/10/2019,1.0964,1.0959,1.1000,1.0941,0.05% 448 | 04/10/2019,1.0976,1.0963,1.0999,1.0956,0.11% 449 | 07/10/2019,1.0970,1.0976,1.1001,1.0962,-0.05% 450 | 08/10/2019,1.0954,1.0971,1.0997,1.0941,-0.15% 451 | 09/10/2019,1.0969,1.0956,1.0991,1.0951,0.14% 452 | 10/10/2019,1.1004,1.0971,1.1035,1.0970,0.32% 453 | 11/10/2019,1.1040,1.1006,1.1063,1.1000,0.33% 454 | 14/10/2019,1.1028,1.1044,1.1050,1.1013,-0.11% 455 | 15/10/2019,1.1031,1.1028,1.1047,1.0991,0.03% 456 | 16/10/2019,1.1070,1.1033,1.1086,1.1021,0.35% 457 | 17/10/2019,1.1122,1.1072,1.1141,1.1064,0.47% 458 | 18/10/2019,1.1169,1.1125,1.1173,1.1114,0.42% 459 | 21/10/2019,1.1148,1.1143,1.1180,1.1137,-0.19% 460 | 22/10/2019,1.1124,1.1150,1.1158,1.1118,-0.22% 461 | 23/10/2019,1.1129,1.1125,1.1141,1.1106,0.04% 462 | 24/10/2019,1.1104,1.1131,1.1164,1.1092,-0.22% 463 | 25/10/2019,1.1078,1.1104,1.1123,1.1072,-0.23% 464 | 28/10/2019,1.1098,1.1082,1.1108,1.1076,0.18% 465 | 29/10/2019,1.1110,1.1099,1.1120,1.1073,0.11% 466 | 30/10/2019,1.1148,1.1112,1.1152,1.1079,0.34% 467 | 31/10/2019,1.1150,1.1151,1.1176,1.1130,0.02% 468 | 01/11/2019,1.1165,1.1152,1.1172,1.1128,0.13% 469 | 04/11/2019,1.1126,1.1165,1.1176,1.1124,-0.35% 470 | 05/11/2019,1.1074,1.1128,1.1141,1.1063,-0.47% 
471 | 06/11/2019,1.1065,1.1075,1.1094,1.1064,-0.08% 472 | 07/11/2019,1.1049,1.1066,1.1092,1.1035,-0.14% 473 | 08/11/2019,1.1016,1.1050,1.1056,1.1016,-0.30% 474 | 11/11/2019,1.1032,1.1022,1.1044,1.1016,0.15% 475 | 12/11/2019,1.1007,1.1033,1.1040,1.1002,-0.23% 476 | 13/11/2019,1.1006,1.1010,1.1021,1.0994,-0.01% 477 | 14/11/2019,1.1021,1.1006,1.1028,1.0989,0.14% 478 | 15/11/2019,1.1050,1.1021,1.1058,1.1014,0.26% 479 | 18/11/2019,1.1070,1.1052,1.1091,1.1048,0.18% 480 | 19/11/2019,1.1078,1.1072,1.1085,1.1062,0.07% 481 | 20/11/2019,1.1072,1.1078,1.1083,1.1053,-0.05% 482 | 21/11/2019,1.1057,1.1074,1.1098,1.1051,-0.14% 483 | 22/11/2019,1.1022,1.1058,1.1086,1.1014,-0.32% 484 | 25/11/2019,1.1013,1.1024,1.1033,1.1004,-0.08% 485 | 26/11/2019,1.1018,1.1014,1.1027,1.1006,0.05% 486 | 27/11/2019,1.0998,1.1020,1.1026,1.0991,-0.18% 487 | 28/11/2019,1.1007,1.0999,1.1019,1.0999,0.08% 488 | 29/11/2019,1.1015,1.1008,1.1029,1.0980,0.07% 489 | 02/12/2019,1.1077,1.1014,1.1090,1.1003,0.56% 490 | 03/12/2019,1.1081,1.1078,1.1094,1.1065,0.04% 491 | 04/12/2019,1.1076,1.1082,1.1116,1.1066,-0.05% 492 | 05/12/2019,1.1102,1.1079,1.1109,1.1077,0.23% 493 | 06/12/2019,1.1057,1.1105,1.1111,1.1040,-0.41% 494 | 09/12/2019,1.1062,1.1055,1.1079,1.1052,0.05% 495 | 10/12/2019,1.1092,1.1063,1.1099,1.1062,0.27% 496 | 11/12/2019,1.1128,1.1092,1.1145,1.1070,0.32% 497 | 12/12/2019,1.1128,1.1130,1.1155,1.1102,0.00% 498 | 13/12/2019,1.1119,1.1129,1.1200,1.1111,-0.08% 499 | 16/12/2019,1.1142,1.1122,1.1159,1.1122,0.21% 500 | 17/12/2019,1.1149,1.1143,1.1176,1.1128,0.06% 501 | 18/12/2019,1.1111,1.1150,1.1156,1.1110,-0.34% 502 | 19/12/2019,1.1120,1.1113,1.1145,1.1107,0.08% 503 | 20/12/2019,1.1078,1.1122,1.1127,1.1065,-0.38% 504 | 23/12/2019,1.1086,1.1081,1.1097,1.1069,0.07% 505 | 24/12/2019,1.1087,1.1089,1.1095,1.1069,0.01% 506 | 25/12/2019,1.1090,1.1089,1.1107,1.1073,0.03% 507 | 26/12/2019,1.1096,1.1091,1.1110,1.1082,0.05% 508 | 27/12/2019,1.1175,1.1097,1.1188,1.1095,0.71% 509 | 30/12/2019,1.1197,1.1175,1.1223,1.1170,0.20% 510 | 31/12/2019,1.1210,1.1198,1.1240,1.1198,0.12% 511 | 01/01/2020,1.1210,1.1213,1.1223,1.1205,0.00% 512 | 02/01/2020,1.1170,1.1212,1.1227,1.1163,-0.36% 513 | 03/01/2020,1.1158,1.1172,1.1181,1.1125,-0.11% 514 | 06/01/2020,1.1193,1.1168,1.1207,1.1155,0.31% 515 | 07/01/2020,1.1151,1.1198,1.1198,1.1133,-0.38% 516 | 08/01/2020,1.1103,1.1154,1.1169,1.1101,-0.43% 517 | 09/01/2020,1.1105,1.1106,1.1121,1.1092,0.02% 518 | 10/01/2020,1.1120,1.1106,1.1130,1.1085,0.14% 519 | 13/01/2020,1.1133,1.1125,1.1148,1.1112,0.12% 520 | 14/01/2020,1.1127,1.1135,1.1145,1.1104,-0.05% 521 | 15/01/2020,1.1149,1.1130,1.1164,1.1118,0.20% 522 | 16/01/2020,1.1135,1.1151,1.1173,1.1128,-0.13% 523 | 17/01/2020,1.1088,1.1136,1.1143,1.1085,-0.42% 524 | 20/01/2020,1.1094,1.1092,1.1104,1.1076,0.05% 525 | 21/01/2020,1.1082,1.1095,1.1119,1.1080,-0.11% 526 | 22/01/2020,1.1091,1.1084,1.1099,1.1070,0.08% 527 | 23/01/2020,1.1052,1.1092,1.1109,1.1036,-0.35% 528 | 24/01/2020,1.1023,1.1055,1.1063,1.1020,-0.26% 529 | 27/01/2020,1.1016,1.1021,1.1039,1.1009,-0.06% 530 | 28/01/2020,1.1020,1.1019,1.1026,1.0998,0.04% 531 | 29/01/2020,1.1009,1.1022,1.1028,1.0991,-0.10% 532 | 30/01/2020,1.1030,1.1010,1.1040,1.1006,0.19% 533 | 31/01/2020,1.1093,1.1032,1.1097,1.1016,0.57% 534 | 03/02/2020,1.1058,1.1092,1.1096,1.1035,-0.32% 535 | 04/02/2020,1.1042,1.1059,1.1065,1.1033,-0.14% 536 | 05/02/2020,1.0997,1.1044,1.1049,1.0993,-0.41% 537 | 06/02/2020,1.0980,1.0999,1.1014,1.0964,-0.15% 538 | 07/02/2020,1.0943,1.0983,1.0986,1.0941,-0.34% 539 | 10/02/2020,1.0909,1.0951,1.0958,1.0907,-0.31% 
540 | 11/02/2020,1.0914,1.0911,1.0926,1.0891,0.05% 541 | 12/02/2020,1.0871,1.0916,1.0927,1.0864,-0.39% 542 | 13/02/2020,1.0840,1.0874,1.0890,1.0833,-0.29% 543 | 14/02/2020,1.0830,1.0841,1.0862,1.0827,-0.09% 544 | 17/02/2020,1.0834,1.0832,1.0852,1.0821,0.04% 545 | 18/02/2020,1.0791,1.0838,1.0839,1.0785,-0.40% 546 | 19/02/2020,1.0804,1.0791,1.0812,1.0782,0.12% 547 | 20/02/2020,1.0783,1.0806,1.0821,1.0776,-0.19% 548 | 21/02/2020,1.0843,1.0785,1.0864,1.0783,0.56% 549 | 24/02/2020,1.0852,1.0841,1.0872,1.0805,0.08% 550 | 25/02/2020,1.0879,1.0854,1.0891,1.0829,0.25% 551 | 26/02/2020,1.0879,1.0881,1.0910,1.0855,0.00% 552 | 27/02/2020,1.0998,1.0881,1.1006,1.0878,1.09% 553 | 28/02/2020,1.1025,1.1001,1.1054,1.0950,0.25% 554 | 02/03/2020,1.1132,1.1003,1.1186,1.1003,0.97% 555 | 03/03/2020,1.1171,1.1133,1.1214,1.1095,0.35% 556 | 04/03/2020,1.1134,1.1172,1.1187,1.1094,-0.33% 557 | 05/03/2020,1.1239,1.1136,1.1245,1.1119,0.94% 558 | 06/03/2020,1.1285,1.1238,1.1356,1.1212,0.41% 559 | 09/03/2020,1.1447,1.1292,1.1494,1.1284,1.44% 560 | 10/03/2020,1.1279,1.1446,1.1459,1.1274,-1.47% 561 | 11/03/2020,1.1267,1.1280,1.1367,1.1257,-0.11% 562 | 12/03/2020,1.1183,1.1269,1.1334,1.1055,-0.75% 563 | 13/03/2020,1.1105,1.1185,1.1222,1.1054,-0.70% 564 | 16/03/2020,1.1181,1.1085,1.1237,1.1046,0.68% 565 | 17/03/2020,1.0996,1.1183,1.1190,1.0955,-1.65% 566 | 18/03/2020,1.0913,1.0997,1.1046,1.0802,-0.75% 567 | 19/03/2020,1.0690,1.0915,1.0982,1.0654,-2.04% 568 | 20/03/2020,1.0694,1.0691,1.0832,1.0637,0.04% 569 | 23/03/2020,1.0721,1.0696,1.0828,1.0637,0.25% 570 | 24/03/2020,1.0787,1.0726,1.0889,1.0722,0.62% 571 | 25/03/2020,1.0880,1.0789,1.0895,1.0761,0.86% 572 | 26/03/2020,1.1028,1.0881,1.1059,1.0869,1.36% 573 | 27/03/2020,1.1140,1.1030,1.1148,1.0953,1.02% 574 | 30/03/2020,1.1046,1.1136,1.1145,1.1010,-0.84% 575 | 31/03/2020,1.1029,1.1049,1.1056,1.0926,-0.15% 576 | 01/04/2020,1.0962,1.1031,1.1040,1.0903,-0.61% 577 | 02/04/2020,1.0856,1.0964,1.0970,1.0820,-0.97% 578 | 03/04/2020,1.0808,1.0858,1.0865,1.0773,-0.44% 579 | 06/04/2020,1.0791,1.0813,1.0835,1.0768,-0.16% 580 | 07/04/2020,1.0889,1.0793,1.0926,1.0783,0.91% 581 | 08/04/2020,1.0856,1.0891,1.0904,1.0829,-0.30% 582 | 09/04/2020,1.0927,1.0857,1.0953,1.0841,0.65% 583 | 10/04/2020,1.0935,1.0930,1.0953,1.0919,0.07% 584 | 13/04/2020,1.0913,1.0941,1.0969,1.0892,-0.20% 585 | 14/04/2020,1.0979,1.0914,1.0988,1.0904,0.60% 586 | 15/04/2020,1.0907,1.0980,1.0991,1.0856,-0.66% 587 | 16/04/2020,1.0835,1.0911,1.0913,1.0817,-0.66% 588 | 17/04/2020,1.0876,1.0835,1.0893,1.0812,0.38% 589 | 20/04/2020,1.0862,1.0875,1.0898,1.0841,-0.13% 590 | 21/04/2020,1.0856,1.0862,1.0882,1.0816,-0.06% 591 | 22/04/2020,1.0822,1.0857,1.0886,1.0802,-0.31% 592 | 23/04/2020,1.0776,1.0824,1.0848,1.0755,-0.43% 593 | 24/04/2020,1.0820,1.0778,1.0831,1.0726,0.41% 594 | 27/04/2020,1.0828,1.0809,1.0862,1.0808,0.07% 595 | 28/04/2020,1.0818,1.0829,1.0890,1.0810,-0.09% 596 | 29/04/2020,1.0873,1.0819,1.0886,1.0818,0.51% 597 | 30/04/2020,1.0955,1.0872,1.0973,1.0833,0.75% 598 | 01/05/2020,1.0983,1.0956,1.1019,1.0934,0.26% 599 | 04/05/2020,1.0906,1.0980,1.0980,1.0895,-0.70% 600 | 05/05/2020,1.0838,1.0907,1.0927,1.0826,-0.62% 601 | 06/05/2020,1.0794,1.0836,1.0848,1.0782,-0.41% 602 | 07/05/2020,1.0832,1.0795,1.0835,1.0766,0.35% 603 | 08/05/2020,1.0840,1.0833,1.0876,1.0814,0.07% 604 | 11/05/2020,1.0806,1.0838,1.0851,1.0800,-0.31% 605 | 12/05/2020,1.0846,1.0807,1.0885,1.0784,0.37% 606 | 13/05/2020,1.0816,1.0848,1.0897,1.0811,-0.28% 607 | 14/05/2020,1.0804,1.0819,1.0826,1.0775,-0.11% 608 | 15/05/2020,1.0815,1.0805,1.0851,1.0789,0.10% 
609 | 18/05/2020,1.0912,1.0816,1.0927,1.0799,0.90% 610 | 19/05/2020,1.0921,1.0913,1.0977,1.0901,0.08% 611 | 20/05/2020,1.0977,1.0928,1.1000,1.0918,0.51% 612 | 21/05/2020,1.0949,1.0979,1.1010,1.0936,-0.26% 613 | 22/05/2020,1.0900,1.0949,1.0961,1.0885,-0.45% 614 | 25/05/2020,1.0899,1.0903,1.0915,1.0870,-0.01% 615 | 26/05/2020,1.0980,1.0899,1.0997,1.0891,0.74% 616 | 27/05/2020,1.1003,1.0983,1.1032,1.0934,0.21% 617 | 28/05/2020,1.1076,1.1005,1.1094,1.0991,0.66% 618 | 29/05/2020,1.1098,1.1078,1.1146,1.1070,0.20% 619 | 01/06/2020,1.1134,1.1097,1.1155,1.1095,0.32% 620 | 02/06/2020,1.1169,1.1137,1.1197,1.1114,0.31% 621 | 03/06/2020,1.1232,1.1172,1.1257,1.1166,0.56% 622 | 04/06/2020,1.1336,1.1234,1.1363,1.1194,0.93% 623 | 05/06/2020,1.1284,1.1337,1.1385,1.1278,-0.46% 624 | 08/06/2020,1.1292,1.1289,1.1321,1.1267,0.07% 625 | 09/06/2020,1.1340,1.1297,1.1365,1.1241,0.43% 626 | 10/06/2020,1.1369,1.1340,1.1423,1.1322,0.26% 627 | 11/06/2020,1.1297,1.1373,1.1404,1.1287,-0.63% 628 | 12/06/2020,1.1254,1.1299,1.1341,1.1212,-0.38% 629 | 15/06/2020,1.1322,1.1245,1.1334,1.1226,0.60% 630 | 16/06/2020,1.1263,1.1325,1.1353,1.1228,-0.52% 631 | 17/06/2020,1.1243,1.1264,1.1295,1.1207,-0.18% 632 | 18/06/2020,1.1202,1.1244,1.1264,1.1185,-0.36% 633 | 19/06/2020,1.1175,1.1205,1.1255,1.1168,-0.24% 634 | 22/06/2020,1.1258,1.1185,1.1271,1.1168,0.74% 635 | 23/06/2020,1.1306,1.1259,1.1350,1.1233,0.43% 636 | 24/06/2020,1.1250,1.1308,1.1327,1.1248,-0.50% 637 | 25/06/2020,1.1217,1.1251,1.1261,1.1190,-0.29% 638 | 26/06/2020,1.1217,1.1218,1.1241,1.1194,0.00% 639 | 29/06/2020,1.1240,1.1227,1.1289,1.1216,0.21% 640 | 30/06/2020,1.1231,1.1242,1.1263,1.1191,-0.08% 641 | 01/07/2020,1.1250,1.1233,1.1276,1.1185,0.17% 642 | 02/07/2020,1.1238,1.1252,1.1303,1.1222,-0.11% 643 | 03/07/2020,1.1248,1.1239,1.1254,1.1219,0.09% 644 | 06/07/2020,1.1308,1.1242,1.1346,1.1240,0.53% 645 | 07/07/2020,1.1270,1.1308,1.1334,1.1258,-0.34% 646 | 08/07/2020,1.1329,1.1273,1.1352,1.1262,0.52% 647 | 09/07/2020,1.1281,1.1329,1.1372,1.1279,-0.42% 648 | 10/07/2020,1.1298,1.1285,1.1326,1.1255,0.15% 649 | 13/07/2020,1.1340,1.1300,1.1375,1.1297,0.37% 650 | 14/07/2020,1.1396,1.1343,1.1409,1.1324,0.49% 651 | 15/07/2020,1.1410,1.1400,1.1452,1.1391,0.12% 652 | 16/07/2020,1.1383,1.1411,1.1443,1.1370,-0.24% 653 | 17/07/2020,1.1426,1.1384,1.1444,1.1377,0.38% 654 | 20/07/2020,1.1444,1.1410,1.1468,1.1401,0.16% 655 | 21/07/2020,1.1526,1.1447,1.1540,1.1422,0.72% 656 | 22/07/2020,1.1568,1.1528,1.1602,1.1507,0.36% 657 | 23/07/2020,1.1594,1.1575,1.1628,1.1539,0.22% 658 | 24/07/2020,1.1654,1.1597,1.1659,1.1580,0.52% 659 | 27/07/2020,1.1751,1.1646,1.1782,1.1637,0.83% 660 | 28/07/2020,1.1714,1.1753,1.1774,1.1698,-0.31% 661 | 29/07/2020,1.1790,1.1718,1.1807,1.1714,0.65% 662 | 30/07/2020,1.1846,1.1788,1.1849,1.1731,0.47% 663 | 31/07/2020,1.1774,1.1846,1.1910,1.1760,-0.61% 664 | 03/08/2020,1.1761,1.1783,1.1797,1.1696,-0.11% 665 | 04/08/2020,1.1800,1.1765,1.1807,1.1720,0.33% 666 | 05/08/2020,1.1861,1.1802,1.1906,1.1793,0.52% 667 | 06/08/2020,1.1875,1.1863,1.1917,1.1818,0.12% 668 | 07/08/2020,1.1786,1.1877,1.1884,1.1755,-0.75% 669 | 10/08/2020,1.1736,1.1788,1.1803,1.1736,-0.42% 670 | 11/08/2020,1.1739,1.1738,1.1808,1.1722,0.03% 671 | 12/08/2020,1.1782,1.1740,1.1816,1.1710,0.37% 672 | 13/08/2020,1.1812,1.1786,1.1865,1.1781,0.25% 673 | 14/08/2020,1.1841,1.1815,1.1852,1.1782,0.25% 674 | 17/08/2020,1.1869,1.1840,1.1882,1.1829,0.24% 675 | 18/08/2020,1.1929,1.1870,1.1967,1.1865,0.51% 676 | 19/08/2020,1.1836,1.1931,1.1954,1.1830,-0.78% 677 | 20/08/2020,1.1859,1.1839,1.1869,1.1802,0.19% 678 | 
21/08/2020,1.1795,1.1860,1.1883,1.1754,-0.54% 679 | 24/08/2020,1.1787,1.1787,1.1851,1.1783,-0.07% 680 | 25/08/2020,1.1833,1.1793,1.1845,1.1783,0.39% 681 | 26/08/2020,1.1829,1.1834,1.1841,1.1772,-0.03% 682 | 27/08/2020,1.1821,1.1831,1.1902,1.1762,-0.07% 683 | 28/08/2020,1.1903,1.1822,1.1920,1.1810,0.69% 684 | 31/08/2020,1.1936,1.1899,1.1967,1.1883,0.28% 685 | 01/09/2020,1.1910,1.1936,1.2012,1.1901,-0.22% 686 | 02/09/2020,1.1853,1.1911,1.1931,1.1822,-0.48% 687 | 03/09/2020,1.1849,1.1852,1.1866,1.1789,-0.03% 688 | 04/09/2020,1.1838,1.1850,1.1866,1.1781,-0.09% 689 | 07/09/2020,1.1817,1.1833,1.1849,1.1811,-0.18% 690 | 08/09/2020,1.1779,1.1817,1.1829,1.1765,-0.32% 691 | 09/09/2020,1.1802,1.1777,1.1834,1.1753,0.20% 692 | 10/09/2020,1.1813,1.1801,1.1918,1.1800,0.09% 693 | 11/09/2020,1.1845,1.1814,1.1875,1.1812,0.27% 694 | 14/09/2020,1.1868,1.1840,1.1889,1.1830,0.19% 695 | 15/09/2020,1.1845,1.1865,1.1901,1.1839,-0.19% 696 | 16/09/2020,1.1814,1.1850,1.1883,1.1787,-0.26% 697 | 17/09/2020,1.1847,1.1816,1.1854,1.1737,0.28% 698 | 18/09/2020,1.1837,1.1848,1.1871,1.1826,-0.08% 699 | 21/09/2020,1.1769,1.1848,1.1873,1.1731,-0.57% 700 | 22/09/2020,1.1706,1.1770,1.1774,1.1691,-0.54% 701 | 23/09/2020,1.1659,1.1708,1.1719,1.1651,-0.40% 702 | 24/09/2020,1.1672,1.1660,1.1688,1.1626,0.11% 703 | 25/09/2020,1.1630,1.1671,1.1686,1.1612,-0.36% 704 | 28/09/2020,1.1664,1.1638,1.1679,1.1615,0.29% 705 | 29/09/2020,1.1742,1.1666,1.1746,1.1660,0.67% 706 | 30/09/2020,1.1718,1.1743,1.1756,1.1684,-0.20% 707 | 01/10/2020,1.1747,1.1719,1.1770,1.1717,0.25% 708 | 02/10/2020,1.1713,1.1748,1.1751,1.1695,-0.29% 709 | 05/10/2020,1.1781,1.1716,1.1798,1.1707,0.58% 710 | 06/10/2020,1.1734,1.1782,1.1809,1.1731,-0.40% 711 | 07/10/2020,1.1760,1.1735,1.1783,1.1724,0.22% 712 | 08/10/2020,1.1758,1.1762,1.1782,1.1732,-0.02% 713 | 09/10/2020,1.1824,1.1759,1.1832,1.1753,0.56% 714 | 12/10/2020,1.1813,1.1819,1.1827,1.1786,-0.09% 715 | 13/10/2020,1.1744,1.1813,1.1817,1.1731,-0.58% 716 | 14/10/2020,1.1746,1.1746,1.1772,1.1719,0.02% 717 | 15/10/2020,1.1706,1.1747,1.1760,1.1688,-0.34% 718 | 16/10/2020,1.1718,1.1708,1.1746,1.1694,0.10% 719 | 19/10/2020,1.1766,1.1722,1.1795,1.1703,0.41% 720 | 20/10/2020,1.1821,1.1769,1.1841,1.1760,0.47% 721 | 21/10/2020,1.1861,1.1822,1.1882,1.1821,0.34% 722 | 22/10/2020,1.1816,1.1862,1.1868,1.1811,-0.38% 723 | 23/10/2020,1.1859,1.1819,1.1866,1.1787,0.36% 724 | 26/10/2020,1.1808,1.1858,1.1861,1.1803,-0.43% 725 | 27/10/2020,1.1795,1.1810,1.1839,1.1793,-0.11% 726 | 28/10/2020,1.1744,1.1796,1.1798,1.1717,-0.43% 727 | 29/10/2020,1.1674,1.1746,1.1760,1.1650,-0.60% 728 | 30/10/2020,1.1647,1.1674,1.1705,1.1639,-0.23% 729 | 02/11/2020,1.1640,1.1658,1.1658,1.1622,-0.06% 730 | 03/11/2020,1.1711,1.1640,1.1740,1.1633,0.61% 731 | 04/11/2020,1.1722,1.1715,1.1770,1.1603,0.09% 732 | 05/11/2020,1.1821,1.1725,1.1861,1.1710,0.84% 733 | 06/11/2020,1.1872,1.1826,1.1891,1.1795,0.43% 734 | 09/11/2020,1.1813,1.1882,1.1920,1.1795,-0.50% 735 | 10/11/2020,1.1814,1.1812,1.1844,1.1780,0.01% 736 | 11/11/2020,1.1777,1.1810,1.1834,1.1745,-0.31% 737 | 12/11/2020,1.1804,1.1777,1.1824,1.1758,0.23% 738 | 13/11/2020,1.1832,1.1805,1.1838,1.1798,0.24% 739 | 16/11/2020,1.1853,1.1829,1.1869,1.1813,0.18% 740 | 17/11/2020,1.1861,1.1852,1.1895,1.1842,0.07% 741 | 18/11/2020,1.1852,1.1863,1.1892,1.1848,-0.08% 742 | 19/11/2020,1.1873,1.1853,1.1883,1.1816,0.18% 743 | 20/11/2020,1.1853,1.1875,1.1891,1.1849,-0.17% 744 | 23/11/2020,1.1840,1.1853,1.1906,1.1800,-0.11% 745 | 24/11/2020,1.1888,1.1842,1.1897,1.1837,0.41% 746 | 25/11/2020,1.1913,1.1891,1.1931,1.1882,0.21% 747 | 
26/11/2020,1.1913,1.1916,1.1941,1.1884,0.00% 748 | 27/11/2020,1.1962,1.1914,1.1965,1.1907,0.41% 749 | 30/11/2020,1.1928,1.1957,1.2004,1.1924,-0.28% 750 | 01/12/2020,1.2070,1.1928,1.2077,1.1926,1.19% 751 | 02/12/2020,1.2115,1.2070,1.2119,1.2040,0.37% 752 | 03/12/2020,1.2140,1.2115,1.2177,1.2100,0.21% 753 | 04/12/2020,1.2120,1.2142,1.2178,1.2110,-0.16% 754 | 07/12/2020,1.2108,1.2128,1.2167,1.2078,-0.10% 755 | 08/12/2020,1.2101,1.2109,1.2134,1.2095,-0.06% 756 | 09/12/2020,1.2081,1.2103,1.2148,1.2059,-0.17% 757 | 10/12/2020,1.2136,1.2083,1.2160,1.2075,0.46% 758 | 11/12/2020,1.2111,1.2137,1.2163,1.2105,-0.21% 759 | 14/12/2020,1.2143,1.2131,1.2177,1.2115,0.26% 760 | 15/12/2020,1.2151,1.2143,1.2170,1.2121,0.07% 761 | 16/12/2020,1.2197,1.2152,1.2213,1.2125,0.38% 762 | 17/12/2020,1.2266,1.2199,1.2274,1.2191,0.57% 763 | 18/12/2020,1.2255,1.2268,1.2273,1.2225,-0.09% 764 | 21/12/2020,1.2242,1.2242,1.2254,1.2129,-0.11% 765 | 22/12/2020,1.2161,1.2244,1.2257,1.2151,-0.66% 766 | 23/12/2020,1.2185,1.2163,1.2221,1.2154,0.20% 767 | 24/12/2020,1.2186,1.2188,1.2217,1.2177,0.01% 768 | 25/12/2020,1.2204,1.2186,1.2212,1.2165,0.15% 769 | 28/12/2020,1.2214,1.2202,1.2251,1.2181,0.08% 770 | 29/12/2020,1.2247,1.2217,1.2276,1.2207,0.27% 771 | 30/12/2020,1.2295,1.2248,1.2311,1.2246,0.39% 772 | 31/12/2020,1.2213,1.2298,1.2310,1.2209,-0.67% 773 | 01/01/2021,1.2212,1.2216,1.2217,1.2210,-0.01% 774 | 04/01/2021,1.2248,1.2231,1.2310,1.2223,0.29% 775 | 05/01/2021,1.2294,1.2247,1.2307,1.2246,0.38% 776 | 06/01/2021,1.2325,1.2296,1.2350,1.2265,0.25% 777 | 07/01/2021,1.2270,1.2327,1.2346,1.2244,-0.45% 778 | 08/01/2021,1.2218,1.2273,1.2284,1.2191,-0.42% 779 | 11/01/2021,1.2149,1.2217,1.2227,1.2132,-0.56% 780 | 12/01/2021,1.2207,1.2152,1.2211,1.2136,0.48% 781 | 13/01/2021,1.2157,1.2208,1.2224,1.2140,-0.41% 782 | 14/01/2021,1.2156,1.2157,1.2179,1.2111,-0.01% 783 | 15/01/2021,1.2078,1.2155,1.2163,1.2074,-0.64% 784 | 18/01/2021,1.2076,1.2085,1.2091,1.2054,-0.02% 785 | --------------------------------------------------------------------------------
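
A minimal sketch (not part of the repository) of how the daily rows above could be loaded into OHLC records. The column order (Date, Close, Open, High, Low, Change %), the DD/MM/YYYY date format, and the file path real_net/ma2c/price_data/EUR_USD.csv are assumptions inferred from the excerpt; adjust them to the actual header and file name used by the env code.

import csv
from datetime import datetime

def load_eurusd_rows(path="real_net/ma2c/price_data/EUR_USD.csv"):
    # Assumed column order: Date, Close, Open, High, Low, Change %
    rows = []
    with open(path, newline="") as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row (not shown in the excerpt above)
        for rec in reader:
            date, close, open_, high, low, change = rec[:6]
            rows.append({
                "date": datetime.strptime(date, "%d/%m/%Y"),   # rows use DD/MM/YYYY
                "close": float(close),
                "open": float(open_),
                "high": float(high),
                "low": float(low),
                "change_pct": float(change.rstrip("%")),       # e.g. "-0.54%" -> -0.54
            })
    return rows

if __name__ == "__main__":
    data = load_eurusd_rows()
    print(len(data), "rows,", data[0]["date"].date(), "->", data[-1]["date"].date())

If the file is stored newest-first (as reverse_line.py suggests for some exports), sort the returned list by the "date" key before feeding it to the environment.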