├── figures
│   ├── general_framework.png
│   └── imputation_framework.png
├── requirements.txt
├── main.py
├── preprocess.py
├── README.md
├── postprocess.py
├── llm_imputation.py
├── graph_search.py
└── data
    └── house.csv
/figures/general_framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xinrui17/LLM-Forest/HEAD/figures/general_framework.png
--------------------------------------------------------------------------------
/figures/imputation_framework.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xinrui17/LLM-Forest/HEAD/figures/imputation_framework.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
# Python 3.9.12 (interpreter version; not installable via pip)
numpy==1.22.4
pandas==1.4.2
networkx==3.2.1
torch==2.2.1
openai==1.10.0
mistralai==1.0.2

lightgbm==4.6.0
xgboost==2.0.2
missforest==1.1.3
missingpy==0.2.0
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import argparse

from graph_search import neighbor_search
from llm_imputation import llm_imputation
from preprocess import preprocess
from postprocess import postprocess


def parse_global_args(parser):
    parser.add_argument("--data_path", type=str, default="./data/", help="path to the dataset directory")
    parser.add_argument("--model_name", type=str, default="gpt", help="gpt or mixtral")
    parser.add_argument("--dataset", type=str, default="house", help="dataset name")
    parser.add_argument("--num_round", type=int, default=3, help="number of trees in the forest")
    parser.add_argument("--group_size", type=int, default=2, help="number of feature graphs to merge per group")
    parser.add_argument("--num_neighbors", type=int, default=5, help="number of neighbors in the context")
    return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = parse_global_args(parser)
    args = parser.parse_args()
    preprocess(args.dataset, args.data_path)
    neighbor_search(args)
    # Rounds are 1-indexed to match the neighbor files written by graph_search
    for round_id in range(1, args.num_round + 1):
        llm_imputation(args.model_name, args.data_path, args.dataset, round_id)
    postprocess(args.dataset, args.data_path, args.num_round)
--------------------------------------------------------------------------------
/preprocess.py:
--------------------------------------------------------------------------------
import pandas as pd
import json
import os


def generate_description_from_row(row):
    description = []
    missing_features = []

    for col in row.index:
        value = row[col]
        if pd.isna(value):
            missing_features.append(col)  # Track missing features
        else:
            description.append(f"{col}: {value}")

    # Add information about missing features, if any
    if missing_features:
        description.append(f"the house has missing features: {', '.join(missing_features)}")

    return ",".join(description)


def preprocess(dataset, data_path):
    df = pd.read_csv(os.path.join(data_path, f"{dataset}.csv"))
    with open(os.path.join(data_path, f'{dataset}_descriptions.json'), 'w') as outfile:
        outfile.write('[')

        first_entry = True
        for index, row in df.iterrows():
            try:
                description = generate_description_from_row(row)
                description_entry = {"user": index, "description": description}

                if not first_entry:
                    outfile.write(',\n')  # Comma and newline before every entry except the first
                else:
                    first_entry = False

                json.dump(description_entry, outfile)
                print("Generated Description for index", index, ":\n", description, "\n---\n")

            except Exception as e:
                print(f"An error occurred at index {index}: {e}")

        outfile.write('\n]')  # Close the JSON list
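
# Illustrative example (made-up values, not from house.csv): for a row with
# rm=6.4 and age=52.0 where crim and tax are NaN, generate_description_from_row
# returns a string like
#   "rm: 6.4,age: 52.0,the house has missing features: crim, tax"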
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# 🌲 LLM-Forest

This repository contains the implementation of [**LLM-Forest**](https://arxiv.org/abs/2410.21520), our ACL 2025 Findings paper.
LLM-Forest is a **task-agnostic framework** that ensembles the outputs of multiple large language models (LLMs) obtained from diverse generated prompts, inspired by the idea of random forests.

---

## General Framework

LLM-Forest constructs diverse prompts for a given task (e.g., question answering, text generation, classification), obtains multiple outputs from LLMs, and aggregates them through self-evaluated confidence-weighted voting.

![General framework of LLM-Forest](figures/general_framework.png)
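As a toy illustration of the voting step (a minimal sketch, not the repository's implementation; the actual aggregation lives in `postprocess.py` and also averages tied answers), assume each "tree" returns an answer plus a self-reported confidence label:

```python
from collections import defaultdict

# Confidence weights, mirroring the high/medium/low ranks used in postprocess.py
CONFIDENCE_WEIGHT = {"high": 1.0, "medium": 0.4, "low": 0.3}

def confidence_weighted_vote(candidates):
    """candidates: list of (answer, confidence_label) pairs, one per LLM tree."""
    scores = defaultdict(float)
    for answer, confidence in candidates:
        scores[answer] += CONFIDENCE_WEIGHT.get(confidence.lower(), 0.0)
    return max(scores, key=scores.get)

# Three trees vote; one high-confidence answer outweighs two weaker ones
print(confidence_weighted_vote([("42", "high"), ("41", "medium"), ("41", "low")]))  # -> 42
```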
---

## LLM-Forest for Data Imputation

We instantiate LLM-Forest for **tabular data imputation**, where the goal is to fill in missing values in health datasets.
The process includes graph construction, prompt generation, LLM-based imputation, and ensemble aggregation, yielding an **imputed table** for downstream tasks (e.g., classification); see the sketch below.

![LLM-Forest imputation framework](figures/imputation_framework.png)
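The four stages map one-to-one onto the scripts in this repository. A minimal end-to-end sketch, mirroring `main.py` (argument values are illustrative, and an API key must be set in `llm_imputation.py`):

```python
from argparse import Namespace

from preprocess import preprocess           # table rows -> textual descriptions
from graph_search import neighbor_search    # bipartite feature graphs -> neighbor contexts
from llm_imputation import llm_imputation   # one LLM "tree" per round
from postprocess import postprocess         # confidence-weighted ensembling

args = Namespace(data_path="./data/", dataset="house", model_name="gpt",
                 num_round=3, group_size=2, num_neighbors=5)

preprocess(args.dataset, args.data_path)
neighbor_search(args)
for round_id in range(1, args.num_round + 1):
    llm_imputation(args.model_name, args.data_path, args.dataset, round_id)
postprocess(args.dataset, args.data_path, args.num_round)
```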
---

## Reproducing the Results

There are **no strict environment requirements** for this project; it runs under most standard Python setups.
You can simply install the minimal dependencies with:

```bash
pip install -r requirements.txt
```

To reproduce the results in our paper, here is an example for the Diabetes dataset with GPT-4:

```bash
python main.py \
    --data_path ./data/ \
    --dataset diabetes \
    --model_name gpt \
    --num_round 3 \
    --group_size 2 \
    --num_neighbors 5
```

Note that `--data_path` is the directory containing `<dataset>.csv`; the final imputed table is written to `<data_path>/<dataset>_result.csv`.

---

## Citation

If you find this work useful, please cite our paper:

```bibtex
@inproceedings{he-etal-2025-llm,
    title = "{LLM}-Forest: Ensemble Learning of {LLM}s with Graph-Augmented Prompts for Data Imputation",
    author = "He, Xinrui and
      Ban, Yikun and
      Zou, Jiaru and
      Wei, Tianxin and
      Cook, Curtiss and
      He, Jingrui",
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.361/",
    doi = "10.18653/v1/2025.findings-acl.361",
    pages = "6921--6936",
    ISBN = "979-8-89176-256-5"
}
```
--------------------------------------------------------------------------------
/postprocess.py:
--------------------------------------------------------------------------------
import pandas as pd
import json
import re
import os


def extract_json_blocks(record_str):
    json_matches = re.findall(r'```json\s*(\{.*?\})\s*```', record_str, re.DOTALL)
    inferred_values = {}
    confidence_levels = {}

    if json_matches:
        try:
            # Assume the first JSON block contains inferred values
            inferred_values = json.loads(json_matches[0])
            # Assume the second JSON block contains confidence levels, if present
            if len(json_matches) > 1:
                confidence_levels = json.loads(json_matches[1])
        except json.JSONDecodeError as e:
            print(f"JSON decoding error: {e}")

    return inferred_values, confidence_levels
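
# Illustrative example (made-up reply text, not real model output): a response
# containing
#   ```json
#   {"crim": 0.26, "tax": 307.0}
#   ```
#   ```json
#   {"crim": "high", "tax": "medium"}
#   ```
# parses to inferred_values == {"crim": 0.26, "tax": 307.0}
# and confidence_levels == {"crim": "high", "tax": "medium"}.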

def parse_free_text_records(record_str):
    inferred_values = {}
    confidence_levels = {}

    try:
        inferred_matches = re.findall(r'"([^"]+)":\s*([\d.]+)', record_str)
        for key, value in inferred_matches:
            inferred_values[key.strip()] = float(value)

        confidence_matches = re.findall(r'"([^"]+)":\s*"([^"]+)"', record_str)
        for key, value in confidence_matches:
            confidence_levels[key.strip()] = value.strip()

    except Exception as e:
        print(f"Error parsing free-text record: {e}")

    return inferred_values, confidence_levels


def postprocess(dataset, data_path, num_round):
    imputed_data_list = []
    confidence_data_list = []
    confidence_rank = {"high": 1, "medium": 0.4, "low": 0.3}

    data = pd.read_csv(os.path.join(data_path, f"{dataset}.csv"))
    original_columns = data.columns.tolist()
    # Map shortened column names (first 9 characters, lowercased) to original names
    column_mapping = {col[:9].lower(): col for col in original_columns}

    for round_id in range(1, num_round + 1):
        json_path = os.path.join(data_path, f'{dataset}_imputation_{round_id}.json')
        with open(json_path, 'r') as file:
            imputed_data = json.load(file)

        inferred_data = data.copy()
        confidence_data = data.copy()

        for entry in imputed_data:
            patient_id = entry['patient']
            updated_records_str = entry['UpdatedRecords']

            inferred_values, confidence_levels = extract_json_blocks(updated_records_str)

            # If JSON extraction fails, fall back to free-text parsing
            if not inferred_values and not confidence_levels:
                inferred_values, confidence_levels = parse_free_text_records(updated_records_str)

            # Skip the record if both methods fail
            if not inferred_values:
                print(f"No valid data found for patient {patient_id}. Skipping.")
                continue

            for key, value in inferred_values.items():
                normalized_key = key[:9].lower()
                if normalized_key in column_mapping:
                    original_key = column_mapping[normalized_key]
                    inferred_data.at[patient_id, original_key] = value
                else:
                    print(f"Unknown key in inferred values for patient {patient_id}: {key}")

            for key, value in confidence_levels.items():
                normalized_key = key[:9].lower()
                if normalized_key in column_mapping:
                    original_key = column_mapping[normalized_key]
                    confidence_data.at[patient_id, original_key] = value
                else:
                    print(f"Unknown key in confidence levels for patient {patient_id}: {key}")

        inferred_csv_path = os.path.join(data_path, f'{dataset}_{round_id}_values.csv')
        confidence_csv_path = os.path.join(data_path, f'{dataset}_{round_id}_confidence.csv')
        imputed_data_list.append(inferred_data)  # Keep the DataFrame, not the file path
        confidence_data_list.append(confidence_data)
        inferred_data.to_csv(inferred_csv_path, index=False)
        confidence_data.to_csv(confidence_csv_path, index=False)

        print(f"Inferred values saved to {inferred_csv_path}")
        print(f"Confidence levels saved to {confidence_csv_path}")

    # Ensemble step: for each originally missing cell, vote among the rounds,
    # keeping the value(s) with the highest self-reported confidence
    for patient_id in data.index:
        for column in data.columns:
            normalized_key = column[:9].lower()

            if normalized_key not in column_mapping:
                print("unknown normalized_key")
                continue

            if pd.isna(data.at[patient_id, column]):
                imputed_values = []
                confidences = []

                for i in range(len(imputed_data_list)):
                    imputed_data = imputed_data_list[i]
                    confidence_data = confidence_data_list[i]

                    if patient_id < len(imputed_data):
                        imputed_value = imputed_data.at[patient_id, column_mapping[normalized_key]]
                        confidence = confidence_data.at[patient_id, column_mapping[normalized_key]]

                        if pd.notna(imputed_value) and pd.notna(confidence):
                            confidence_score = confidence_rank.get(str(confidence).strip().lower(), 0)  # Default to 0 if unrecognized
                            imputed_values.append(imputed_value)
                            confidences.append(confidence_score)

                if len(imputed_values) > 0:
                    max_confidence = max(confidences)
                    max_confidence_indices = [i for i, conf in enumerate(confidences) if conf == max_confidence]

                    if len(max_confidence_indices) > 1:
                        # Break ties among equally confident rounds by averaging
                        final_value = sum(imputed_values[i] for i in max_confidence_indices) / len(max_confidence_indices)
                    else:
                        final_value = imputed_values[max_confidence_indices[0]]

                    data.at[patient_id, column] = final_value

    updated_csv_path = os.path.join(data_path, f'{dataset}_result.csv')
    data.to_csv(updated_csv_path, index=False)

    print(f"Updated data saved to {updated_csv_path}")
--------------------------------------------------------------------------------
/llm_imputation.py:
--------------------------------------------------------------------------------
import json
import os
from openai import OpenAI
from mistralai import Mistral
import time


descriptions = """
## Below is the detailed information about the house's features. It follows the format
Name - Description

# crim - per capita crime rate by town
# zn - proportion of residential land zoned for lots over 25,000 sq.ft.
# indus - proportion of non-retail business acres per town
# chas - Charles River dummy variable (1 if tract bounds river; 0 otherwise)
# nox - nitric oxides concentration (parts per 10 million)
# rm - average number of rooms per dwelling
# age - proportion of owner-occupied units built prior to 1940
# dis - weighted distances to five Boston employment centres
# rad - index of accessibility to radial highways
# tax - full-value property-tax rate per $10,000
# ptratio - pupil-teacher ratio by town
# b - 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
# lstat - % lower status of the population
# medv - median value of owner-occupied homes in $1000's
"""

def read_json_file(file_path):
    """Reads a JSON file and returns the data."""
    with open(file_path, 'r') as file:
        data = json.load(file)
    return data


def append_to_json_file(data, file_path):
    """Appends a record to a JSON list file, creating the list if needed."""
    try:
        with open(file_path, 'r+') as file:
            file_data = json.load(file)
            file_data.append(data)
            file.seek(0)
            json.dump(file_data, file, indent=4)
    except json.JSONDecodeError:
        with open(file_path, 'w') as file:
            json.dump([data], file, indent=4)

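# Each appended entry has the shape later consumed by postprocess.py, e.g.
# (illustrative): {"patient": 0, "UpdatedRecords": "<model reply with two ```json blocks>"}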
def build_messages(patient_id, records):
    """Builds the prompt messages shared by the GPT and Mixtral branches."""
    return [
        {"role": "system", "content": f"You are a helpful assistant tasked with filling in the missing values for house {patient_id} in the Boston Housing Dataset. You must give results for all the missing values. You have the flexibility to determine the best approach for each missing feature by analyzing both the house's own records and the data of other houses. Possible methods may include, in no particular sequence, (1) using the most common values of the similar houses (2) using the most similar houses' values (3) uncovering patterns and relationships between the features (4) applying your knowledge in the related domains (5)... After performing the imputation, ensure your inferred results are consistent with domain knowledge and validate them using common sense and reasonable assumptions within the related domains."},
        {"role": "user", "content": "Here are some patterns observed based on the correlations and feature distributions: crim and rad have a strong positive correlation of 0.814, indicating that higher crime rates (crim) are associated with higher accessibility to radial highways (rad). indus and nox have a strong positive correlation of 0.780, showing that areas with higher proportions of non-retail business acres (indus) also tend to have higher nitric oxide concentrations (nox). rad and tax have a positive correlation (0.69), showing that towns with better highway accessibility (rad) tend to have higher property tax rates (tax). This could reflect higher infrastructure investment in these areas. indus and rad have a moderate positive correlation of 0.564, suggesting that industrial areas are more likely to have better access to radial highways (rad). indus and zn have a moderate negative correlation of -0.537, implying that regions with higher industrial activity (indus) are less likely to have a higher proportion of residential land zoned for lots over 25,000 sq. ft. (zn). For crim, most values cluster around 0.63. The majority of zn values are concentrated at 0, reflecting that many towns have minimal or no large residential zoning. A few towns show much higher values, indicating skewness. For rm, the mean is around 6.24, with a moderate spread, indicating a relatively consistent distribution of housing room sizes. For ptratio, the majority of values are centered around 18.5, showing minimal variability and suggesting uniformity in educational resources. For b, most values cluster around 356.0, with a relatively narrow spread and a few extreme outliers showing significant deviation. For chas, the majority of values are concentrated at 0, indicating that most towns do not bound the Charles River. A small portion of the towns has a value of 1."},
        {"role": "user", "content": f'''The data is from the Boston Housing dataset, containing information collected by the U.S. Census Service concerning housing in the area of Boston, Mass., and aiming to predict the price, in which the median value of a home is to be predicted. Some values are missing in the original records due to various reasons. Given the feature description {descriptions} and the house's records {records},
imagine that you are working as an analyst to complete this dataset for further research purposes. Provide your understanding of the missing features and your explanations for choosing the appropriate values before the final inference. Give the imputation results in a succinct JSON format strictly starting with ```json with the following structure: "feature name": "inferred value". Output your confidence level for the imputation results in a JSON format starting with ```json with the following structure: "feature name": "confidence level" at the end, without any explanations. In the results, use the exact feature names as those in the feature description.'''},
    ]


def imputation_missing_value(client, model_name, patient_id, records):
    """Imputes the missing values for one house record via the chosen LLM."""
    messages = build_messages(patient_id, records)
    try:
        if model_name == 'gpt':
            response = client.chat.completions.create(
                model="gpt-4o",
                temperature=0.001,
                messages=messages
            )
            print(response.choices[0].message.content)
            return response.choices[0].message.content
        elif model_name == 'mixtral':
            retry_count = 0
            max_retries = 50
            backoff_factor = 2  # Exponential backoff factor
            while retry_count < max_retries:
                try:
                    chat_response = client.chat.complete(
                        model="open-mixtral-8x22b",
                        temperature=0.001,
                        messages=messages
                    )
                    print(chat_response.choices[0].message.content)
                    return chat_response.choices[0].message.content

                except Exception as e:
                    # Back off and retry on rate-limit / quota errors
                    if "Status 429" in str(e) or "Status 403" in str(e):
                        retry_count += 1
                        wait_time = backoff_factor ** retry_count
                        print(f"Rate limit exceeded. Retrying in {wait_time} seconds...")
                        time.sleep(wait_time)
                    else:
                        print(f"An error occurred: {e}")
                        return "Failed to generate description due to an error."

    except Exception as e:
        print(f"An error occurred: {e}")
        return "Failed to generate description due to an error."
    print("Maximum retries reached. Skipping this patient.")
    return "Failed to generate description after retries."

def process_and_write_patient_record(client, model_name, round_id, patient, output_file_path):
    """Imputes a single patient's record and appends the result to the JSON file."""
    imputation_results = imputation_missing_value(client, model_name, patient["user"], patient[f"UpdatedRecords_Round_{round_id}"])

    updated_patient_data = {
        "patient": patient["user"],
        "UpdatedRecords": imputation_results
    }
    append_to_json_file(updated_patient_data, output_file_path)


def llm_imputation(model_name, data_path, dataset, round_id):
    if model_name == "gpt":
        client = OpenAI(api_key='put_your_key_here')
    elif model_name == "mixtral":
        client = Mistral(api_key="put_your_key_here")

    input_file_path = os.path.join(data_path, f'{dataset}_neighbors_{round_id}.json')
    output_file_path = os.path.join(data_path, f'{dataset}_imputation_{round_id}.json')
    open(output_file_path, 'w').close()  # Start each round with an empty output file
    patient_data = read_json_file(input_file_path)

    for patient in patient_data:
        process_and_write_patient_record(client, model_name, round_id, patient, output_file_path)

    print(f"Updated user data has been written to {output_file_path}")
--------------------------------------------------------------------------------
/graph_search.py:
--------------------------------------------------------------------------------
import pandas as pd
import networkx as nx
import numpy as np
import random
import json
import ast
from collections import Counter
import time
import os


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def load_neighbors_from_txt(file_path):
    round_neighbors = []
    current_round_neighbors = {}
    current_round = None

    with open(file_path, 'r') as file:
        for line in file:
            if 'Neighbors found in Round' in line:
                if current_round_neighbors:
                    round_neighbors.append(current_round_neighbors)
                    current_round_neighbors = {}
                current_round = int(line.strip().split()[-1][:-1])  # Extract round number
            elif line.strip():
                parts = line.split(":")
                patient_id = int(parts[0].split()[1])
                neighbors = ast.literal_eval(parts[1].strip())  # Safely parse the list literal
                current_round_neighbors[patient_id] = neighbors
    # Append the last round
    if current_round_neighbors:
        round_neighbors.append(current_round_neighbors)

    return round_neighbors

def dynamic_binning(data, feature, num_bins=10, min_patients_per_bin=5):
    """
    Dynamically bins a feature to ensure each bin has at least min_patients_per_bin patients.
    Handles cases where distinct values are fewer than the number of bins.
    """
    unique_values = sorted(data[feature].dropna().unique())
    if len(unique_values) <= num_bins:
        bins_assigned = pd.cut(data[feature], bins=len(unique_values), labels=False, include_lowest=True)
        bin_edges = unique_values + [unique_values[-1] + 1]  # Extend to cover the last value
        return bins_assigned, bin_edges

    _, bin_edges = pd.cut(data[feature], bins=num_bins, retbins=True, labels=False, include_lowest=True)
    bins_assigned = pd.cut(data[feature], bins=bin_edges, labels=False, include_lowest=True)
    bin_counts = bins_assigned.value_counts().sort_index()

    # Adjust bins dynamically
    max_iterations = 100
    iterations = 0

    while bin_counts.min() < min_patients_per_bin:
        if iterations >= max_iterations:
            print(f"Warning: Max iterations reached for {feature}.")
            break

        new_bin_edges = [bin_edges[0]]  # Always include the first bin edge

        i = 0
        while i < len(bin_counts) - 1:  # Iterate through bins
            current_count = bin_counts.iloc[i]
            next_count = bin_counts.iloc[i + 1]

            if current_count < min_patients_per_bin:
                merged_count = current_count + next_count
                if merged_count >= min_patients_per_bin:
                    bin_counts.iloc[i + 1] = merged_count
                    bin_counts.iloc[i] = 0  # Mark the current bin as merged
                    new_bin_edges.append(bin_edges[i + 2])  # Skip the next edge
                    i += 2
                else:
                    bin_counts.iloc[i + 1] = merged_count
                    bin_counts.iloc[i] = 0
                    i += 1
            else:
                new_bin_edges.append(bin_edges[i + 1])
                i += 1

        if bin_counts.iloc[-1] < min_patients_per_bin:
            bin_counts.iloc[-2] += bin_counts.iloc[-1]
            bin_counts.iloc[-1] = 0
            new_bin_edges = new_bin_edges[:-1]  # Remove the second-to-last edge

        new_bin_edges.append(bin_edges[-1])
        new_bin_edges = sorted(set(new_bin_edges))
        bin_edges = new_bin_edges
        bins_assigned = pd.cut(data[feature], bins=bin_edges, labels=False, include_lowest=True)
        bin_counts = bins_assigned.value_counts().sort_index()
        iterations += 1

    return bins_assigned, bin_edges
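
# Illustrative usage (made-up data): ten unique ages split into 5 bins of 2 rows
# each would violate min_patients_per_bin=5, so adjacent underpopulated bins are
# merged until every remaining bin holds enough patients (or the cap is hit):
#   df = pd.DataFrame({"age": list(range(20, 30))})
#   bins, edges = dynamic_binning(df, "age", num_bins=5, min_patients_per_bin=5)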


def create_bipartite_graph_for_binned(binned_data, data, feature, bin_edges):
    G = nx.Graph()
    patients = data.index.tolist()
    bin_edges = np.array(bin_edges)

    # Compute bin centers
    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
    bin_nodes = [f'{feature}_bin_{i}' for i in range(len(bin_centers))]

    G.add_nodes_from(patients, bipartite=0)
    G.add_nodes_from(bin_nodes, bipartite=1)

    # Connect each patient to their respective bin
    for patient in patients:
        bin_index = binned_data.at[patient, feature]
        if pd.notna(bin_index):
            # Cast to int so the node name matches the bin nodes created above
            bin_node = f'{feature}_bin_{int(bin_index)}'
            bin_center = bin_centers[int(bin_index)]
            value = data.at[patient, feature]

            # Calculate the relative distance to the bin center
            if bin_center != 0:
                distance_to_center = abs(value - bin_center) / bin_center
            else:
                distance_to_center = abs(value - bin_center)

            # Closer values get larger edge weights
            weight = 1 / (2 + distance_to_center)
            G.add_edge(patient, bin_node, weight=weight)

    return G
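
# Worked example of the edge weight: a value exactly at its bin center has
# relative distance 0, so weight = 1 / (2 + 0) = 0.5; a value at twice the
# center has distance 1 and weight 1/3, down-weighting borderline assignments.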


def bin_continuous_features(data, num_bins=10, min_patients_per_bin=4):
    """
    Bins all continuous features dynamically in the dataset.
    Handles cases with fewer distinct values than the number of bins.
    """
    binned_data = data.copy()
    bin_edges_dict = {}

    for col in data.columns:
        if pd.api.types.is_numeric_dtype(data[col]):
            bins_assigned, bin_edges = dynamic_binning(data, col, num_bins=num_bins, min_patients_per_bin=min_patients_per_bin)
            binned_data[col] = bins_assigned
            bin_edges_dict[col] = bin_edges

    return binned_data, bin_edges_dict


def group_graphs_randomly(graphs, group_size=3):
    remaining_features = list(graphs.keys())
    graph_groups = []

    while len(remaining_features) >= group_size:
        group = random.sample(remaining_features, group_size)
        graph_groups.append(group)
        for g in group:
            remaining_features.remove(g)

    # Handle any remaining graphs (fewer than group_size)
    if len(remaining_features) > 0:
        graph_groups.append(remaining_features)

    return graph_groups


# Randomly pair graphs for merging
def pair_graphs_randomly(graphs):
    remaining_features = list(graphs.keys())
    graph_pairs = []

    while len(remaining_features) > 1:
        pair = random.sample(remaining_features, 2)
        graph_pairs.append(pair)
        remaining_features.remove(pair[0])
        remaining_features.remove(pair[1])

    if len(remaining_features) == 1:
        graph_pairs.append([remaining_features[0]])

    return graph_pairs

def merge_three_graphs_by_patient_connections(G1, G2, G3, threshold=3):
    merged_graph = nx.Graph()

    patients = [n for n, d in G1.nodes(data=True) if d.get('bipartite') == 0]
    values1 = [n for n, d in G1.nodes(data=True) if d.get('bipartite') == 1]
    values2 = [n for n, d in G2.nodes(data=True) if d.get('bipartite') == 1]
    values3 = [n for n, d in G3.nodes(data=True) if d.get('bipartite') == 1]

    # Count common patients for every triple of value nodes across the three graphs
    value_pairs_common_patients = []
    for v1 in values1:
        for v2 in values2:
            for v3 in values3:
                common_patients = set(G1.neighbors(v1)) & set(G2.neighbors(v2)) & set(G3.neighbors(v3))
                if len(common_patients) >= threshold:
                    value_pairs_common_patients.append((v1, v2, v3, len(common_patients)))

    # Sort triples by the number of common patients (descending order)
    value_pairs_common_patients.sort(key=lambda x: x[3], reverse=True)

    # Merge nodes based on common patient connections
    merged_values = set()
    for v1, v2, v3, common_count in value_pairs_common_patients:
        if v1 in merged_values or v2 in merged_values or v3 in merged_values:
            continue  # Skip if any value has already been merged

        merged_graph.add_node((v1, v2, v3), bipartite=1)
        merged_values.add(v1)
        merged_values.add(v2)
        merged_values.add(v3)

    # Add edges between patients and merged value nodes
    for patient in patients:
        merged_graph.add_node(patient, bipartite=0)
        for v1, v2, v3, _ in value_pairs_common_patients:
            weight1 = G1[patient][v1]['weight'] if G1.has_edge(patient, v1) else 0
            weight2 = G2[patient][v2]['weight'] if G2.has_edge(patient, v2) else 0
            weight3 = G3[patient][v3]['weight'] if G3.has_edge(patient, v3) else 0
            if weight1 > 0 or weight2 > 0 or weight3 > 0:
                if merged_graph.has_node((v1, v2, v3)):
                    merged_graph.add_edge(patient, (v1, v2, v3), weight=weight1 + weight2 + weight3)

    # Add remaining unmerged value nodes to the graph (all are value nodes, bipartite=1)
    for v in set(values1 + values2 + values3) - merged_values:
        merged_graph.add_node(v, bipartite=1)
        for patient in G1.neighbors(v) if v in values1 else (G2.neighbors(v) if v in values2 else G3.neighbors(v)):
            weight = G1[patient][v]['weight'] if G1.has_edge(patient, v) else (G2[patient][v]['weight'] if G2.has_edge(patient, v) else G3[patient][v]['weight'])
            merged_graph.add_edge(patient, v, weight=weight)

    return merged_graph


# Merge two graphs by computing common patients between value nodes
def merge_two_graphs_by_patient_connections(G1, G2, threshold=3):
    merged_graph = nx.Graph()

    patients = [n for n, d in G1.nodes(data=True) if d.get('bipartite') == 0]
    values1 = [n for n, d in G1.nodes(data=True) if d.get('bipartite') == 1]
    values2 = [n for n, d in G2.nodes(data=True) if d.get('bipartite') == 1]

    # Count common patients for every pair of value nodes across the two graphs
    value_pairs_common_patients = []
    for v1 in values1:
        for v2 in values2:
            common_patients = set(G1.neighbors(v1)) & set(G2.neighbors(v2))
            if len(common_patients) >= threshold:
                value_pairs_common_patients.append((v1, v2, len(common_patients)))

    # Sort value pairs by the number of common patients (descending order);
    # the count is the third element of each tuple
    value_pairs_common_patients.sort(key=lambda x: x[2], reverse=True)

    # Merge nodes based on common patient connections
    merged_values = set()
    for v1, v2, common_count in value_pairs_common_patients:
        if v1 in merged_values or v2 in merged_values:
            continue  # Skip if either value has already been merged

        merged_graph.add_node((v1, v2), bipartite=1)
        merged_values.add(v1)
        merged_values.add(v2)

    # Add edges between patients and merged value nodes
    for patient in patients:
        merged_graph.add_node(patient, bipartite=0)
        for v1, v2, _ in value_pairs_common_patients:
            weight1 = G1[patient][v1]['weight'] if G1.has_edge(patient, v1) else 0
            weight2 = G2[patient][v2]['weight'] if G2.has_edge(patient, v2) else 0
            if weight1 > 0 or weight2 > 0:
                if merged_graph.has_node((v1, v2)):
                    merged_graph.add_edge(patient, (v1, v2), weight=weight1 + weight2)

    # Add remaining unmerged value nodes to the graph (all are value nodes, bipartite=1)
    for v in set(values1 + values2) - merged_values:
        merged_graph.add_node(v, bipartite=1)
        for patient in G1.neighbors(v) if v in values1 else G2.neighbors(v):
            weight = G1[patient][v]['weight'] if G1.has_edge(patient, v) else G2[patient][v]['weight']
            merged_graph.add_edge(patient, v, weight=weight)

    return merged_graph


def merge_and_process_graphs(graphs, group_size=2):
    graph_groups = group_graphs_randomly(graphs, group_size=group_size)
    merged_graphs = []

    for group in graph_groups:
        if len(group) == 3:
            g1, g2, g3 = group
            merged_graph = merge_three_graphs_by_patient_connections(graphs[g1], graphs[g2], graphs[g3], threshold=10)
            merged_graphs.append(merged_graph)
        elif len(group) == 2:
            g1, g2 = group
            merged_graph = merge_two_graphs_by_patient_connections(graphs[g1], graphs[g2], threshold=5)
            merged_graphs.append(merged_graph)
        else:
            merged_graphs.append(graphs[group[0]])

    return merged_graphs


def list_depth(lst):
    if isinstance(lst, list):
        return 1 + max(list_depth(item) for item in lst) if lst else 1
    return 0


def flatten_node(node):
    if isinstance(node, tuple):
        return '_'.join(map(str, node))
    return node


def weighted_random_walk(graph, start_node, steps=5):
    if start_node not in graph:
        raise ValueError(f"The node {start_node} is not present in the graph.")

    current_node = start_node
    for _ in range(steps):
        neighbors = list(graph.neighbors(current_node))
        if not neighbors:
            break
        weights = np.array([graph[current_node][neighbor]['weight'] for neighbor in neighbors])
        sigmoid_weights = sigmoid(weights)
        probabilities = sigmoid_weights / sigmoid_weights.sum()
        # Flatten tuple-valued merged nodes so np.random.choice can sample them
        neighbors_flat = [flatten_node(neighbor) for neighbor in neighbors]
        next_node_flat = np.random.choice(neighbors_flat, p=probabilities)
        next_node = neighbors[neighbors_flat.index(next_node_flat)]
        current_node = next_node
        yield current_node

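# Worked example of the transition probabilities: for edge weights [0.5, 1.0],
# sigmoid gives [0.622, 0.731], which normalize to roughly [0.46, 0.54];
# heavier edges are favored, but lighter ones are still explored.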
# Get top neighbors by visit count using repeated weighted random walks
def top_neighbors_by_weight(graph, start_node, steps=10, max_neighbors=10, num_walks=50, feature_value_count=1):
    neighbors_weights = Counter()
    for _ in range(num_walks):
        for neighbor in weighted_random_walk(graph, start_node, steps):
            neighbors_weights[neighbor] += 1

    sorted_neighbors = neighbors_weights.most_common(max_neighbors)
    # Keep only patient nodes (bipartite == 0), with their visit counts
    top_patient_nodes_with_weights = {
        neighbor: neighbors_weights[neighbor] for neighbor, _ in sorted_neighbors if graph.nodes[neighbor].get('bipartite') == 0
    }

    return top_patient_nodes_with_weights

def save_neighbors_to_txt(all_neighbors, file_path='house_neighbors_list.txt'):
    with open(file_path, 'w') as file:
        for round_num, neighbors in enumerate(all_neighbors, 1):
            file.write(f"Neighbors found in Round {round_num}:\n")
            for patient, neighbor_dict in neighbors.items():
                file.write(f"Patient {patient}: {list(neighbor_dict.keys())}\n")
            file.write("\n")


def load_json_data(file_path):
    with open(file_path, 'r') as file:
        data = json.load(file)
    return data


def save_json_data(data, file_path):
    with open(file_path, 'w') as file:
        json.dump(data, file, indent=4)


def update_patient_records_with_neighbors(patients_data, final_neighbors, round_num):
    for patient in patients_data:
        patient_id = patient['user']
        if patient_id in final_neighbors:
            neighbor_records = []
            i = 1
            for neighbor_id in final_neighbors[patient_id].keys():
                neighbor_data = next((item for item in patients_data if item['user'] == neighbor_id), None)
                if neighbor_data:
                    neighbor_record = neighbor_data.get('description', 'No record available')
                    neighbor_records.append(f"Similar house records {i}: {neighbor_record}")
                    i += 1

            original_record = patient.get('description', 'No original record available')
            neighbors_record = ' '.join(neighbor_records)
            updated_record = (
                f"the information of the similar houses for house {patient_id} is: {neighbors_record}\n"
                f"Only infer the missing values in house {patient_id}'s records: {original_record} "
            )
            patient[f'UpdatedRecords_Round_{round_num}'] = updated_record

def find_neighbors_and_update_json(data, patients_data, graphs, dataset, data_path, rounds, group_size, num_neighbors):
    all_neighbors = []  # To store neighbors from all rounds

    previous_neighbors = {patient: set() for patient in data.index.tolist()}
    for round_num in range(1, rounds + 1):
        print(f"Starting round {round_num}")
        current_neighbors = {}
        merged_graphs = merge_and_process_graphs(graphs, group_size=group_size)

        for patient in data.index.tolist():
            aggregated_neighbors = Counter()
            for merged_graph in merged_graphs:
                feature_value_count = len(set(n for n, d in merged_graph.nodes(data=True) if d.get('bipartite') == 1))
                neighbors = top_neighbors_by_weight(merged_graph, patient, steps=2, max_neighbors=100, num_walks=100, feature_value_count=feature_value_count)

                for neighbor, weight in neighbors.items():
                    aggregated_neighbors[neighbor] += weight * feature_value_count
            # Keep the num_neighbors most frequently visited patients
            filtered_neighbors = aggregated_neighbors.most_common(num_neighbors)

            current_neighbors[patient] = {neighbor: weight for neighbor, weight in filtered_neighbors}

            previous_neighbors[patient].update(current_neighbors[patient].keys())

        all_neighbors.append(current_neighbors)

        update_patient_records_with_neighbors(patients_data, current_neighbors, round_num)
        json_file_path = os.path.join(data_path, f'{dataset}_neighbors_{round_num}.json')
        save_json_data(patients_data, json_file_path)

    return all_neighbors

def neighbor_search(args):
    data = pd.read_csv(os.path.join(args.data_path, f'{args.dataset}.csv'))
    binned_data, bin_edges_dict = bin_continuous_features(data, num_bins=args.num_neighbors)

    graphs = {}
    for feature in binned_data.columns:
        if feature in bin_edges_dict:
            graphs[feature] = create_bipartite_graph_for_binned(binned_data, data, feature, bin_edges_dict[feature])

    original_file_path = os.path.join(args.data_path, f'{args.dataset}_descriptions.json')
    patients_data = load_json_data(original_file_path)

    all_neighbors = find_neighbors_and_update_json(data, patients_data, graphs, dataset=args.dataset, data_path=args.data_path, rounds=args.num_round, group_size=args.group_size, num_neighbors=args.num_neighbors)

    save_neighbors_to_txt(all_neighbors, file_path=os.path.join(args.data_path, f'{args.dataset}_neighbors.txt'))
--------------------------------------------------------------------------------
/data/house.csv:
--------------------------------------------------------------------------------
1 | crim,zn,indus,chas,nox,rm,age,dis,rad,tax,ptratio,b,lstat,medv
2 | ,0.0,18.1,0.0,0.614,,,2.1007,,,20.2,349.48,24.91,12.0
3 | 0.62739,0.0,8.14,,,5.834,56.5,,4.0,307.0,,,8.47,19.9
4 | ,35.0,,,,,,6.6407,1.0,,,362.25,7.83,19.4
5 | ,0.0,,,0.614,6.103,85.1,2.0218,24.0,,20.2,2.52,23.29,13.4
6 | ,0.0,,0.0,,5.727,,3.7965,4.0,,,,11.28,18.2
7 | ,0.0,7.38,0.0,,6.431,,5.4159,,287.0,19.6,,,24.6
8 | ,0.0,,0.0,,,34.5,,,,20.2,396.9,8.01,21.1
9 | 0.02055,,,0.0,0.41,6.383,35.7,,,313.0,17.3,396.9,,24.7
10 | 15.1772,,18.1,,,6.152,100.0,1.9142,,,,9.32,,8.7
11 | ,0.0,18.1,,0.597,6.852,100.0,,,666.0,20.2,179.36,,27.5
12 | 0.03738,0.0,5.19,,0.515,,38.5,,5.0,224.0,20.2,389.4,6.75,20.7
13 | 0.06888,0.0,,0.0,0.488,6.144,62.2,2.5979,3.0,,,396.9,,36.2
14 | 0.41238,,,0.0,0.504,7.163,79.9,3.2157,,,,372.08,6.36,31.6
15 | ,0.0,,0.0,0.713,,95.0,2.2222,24.0,,20.2,,15.17,11.7
16 | 0.06588,0.0,2.46,,0.488,7.765,83.3,,3.0,193.0,17.8,,7.56,39.8
17 | ,0.0,8.14,,0.538,5.599,,4.4546,4.0,307.0,21.0,,16.51,13.9
18 | 0.17331,0.0,9.69,,,5.707,,2.3817,6.0,,,396.9,12.01,21.8
19 | 0.08244,,,0.0,0.428,,18.5,6.1899,,300.0,,379.41,,23.7
20 | ,,,,0.431,,,,7.0,,19.1,,12.5,17.6
21 | ,,5.86,,0.431,6.487,13.0,7.3967,7.0,330.0,19.1,396.28,5.9,24.4
22 | 73.5341,0.0,18.1,0.0,,5.957,,1.8026,,666.0,20.2,,,8.8
23 | 0.15098,0.0,,,,,82.6,2.7474,6.0,432.0,17.8,394.51,10.3,19.2
24 | ,,6.91,0.0,0.448,,,,3.0,,17.9,,5.81,25.3
25 | 0.35114,0.0,7.38,0.0,0.493,,,4.7211,5.0,,19.6,396.9,7.7,20.4
26 | ,85.0,4.15,,0.429,6.516,27.7,,4.0,,,392.43,,23.1
27 | 0.09103,0.0,,0.0,0.488,7.155,92.2,2.7006,,,17.8,394.12,4.82,37.9
28 | ,0.0,19.58,,0.871,,82.6,1.7455,5.0,403.0,,88.01,15.02,15.6
29 | ,20.0,3.33,,0.4429,7.82,,,,216.0,,387.31,3.76,45.4
30 | 0.38735,0.0,,0.0,0.581,5.613,95.6,,,188.0,19.1,359.29,,15.7
31 | ,0.0,3.24,0.0,0.46,,,,4.0,,16.9,,,22.6
32 | 1.35472,,8.14,,,6.072,100.0,,4.0,307.0,21.0,,13.04,14.5
33 | ,0.0,,,,6.092,95.4,2.548,,432.0,,396.9,,18.7
34 | ,,,0.0,,5.186,,1.5296,,403.0,14.7,,28.32,17.8
35 | 6.44405,,18.1,,0.584,6.425,,,24.0,,20.2,97.95,12.03,16.1
36 | 0.03306,,5.19,0.0,0.515,6.059,,,,,20.2,,,20.6
37 | ,,1.32,,0.411,6.816,,8.3248,,,15.1,392.9,,31.6
38 | 0.01439,,2.93,0.0,,,18.8,6.2196,1.0,,,,,29.1
39 | ,0.0,8.14,,,5.924,94.1,,4.0,,21.0,394.33,,15.6
40 | 0.7842,,,,0.538,5.99,,4.2579,4.0,307.0,21.0,386.75,,17.5
41 | ,,,0.0,0.4,6.345,,7.8278,,358.0,14.8,368.24,,22.5
42 | 0.04379,80.0,,,0.398,5.787,31.1,,4.0,337.0,,396.9,10.24,19.4
43 | 0.37578,,10.59,1.0,,5.404,88.6,3.665,,,,395.24,23.98,19.3
44 | ,0.0,,,0.693,5.531,,1.6074,,,,329.46,27.38,8.5
45 | ,28.0,15.04,,0.464,,77.3,3.615,,,,,10.59,20.6
46 | ,,19.58,1.0,0.871,6.129,,1.7494,,,14.7,321.02,15.12,17.0
47 | ,0.0,,0.0,,6.406,,,24.0,666.0,,,19.52,17.1
48 | ,0.0,8.14,0.0,,,100.0,4.0952,4.0,,21.0,394.54,19.88,14.5
49 | 0.52693,0.0,,0.0,,,83.0,,8.0,,,382.0,,50.0
50 | ,0.0,18.1,0.0,,6.436,87.9,2.3158,,666.0,20.2,100.19,16.22,14.3
51 | ,,18.1,0.0,0.74,,96.6,2.198,24.0,666.0,,,16.44,12.6
52 | 0.02985,0.0,,,0.458,6.43,,6.0622,3.0,,,394.12,5.21,28.7
53 | 0.13158,,,,0.547,,72.5,2.7301,,432.0,,,12.04,21.2
54 | ,0.0,6.91,,0.448,5.682,33.8,5.1004,,,,396.9,10.21,19.3
55 | ,0.0,,,0.538,,29.3,4.4986,,307.0,21.0,386.85,,23.1
56 | 15.5757,,18.1,0.0,0.58,5.926,71.0,2.9084,24.0,666.0,20.2,368.74,,19.1
57 | 4.54192,0.0,18.1,0.0,0.77,,,2.5182,,666.0,,,,25.0
58 | ,0.0,2.18,0.0,0.458,,45.8,,3.0,,18.7,394.63,,33.4
59 | 67.9208,,18.1,0.0,0.693,5.683,,1.4254,,666.0,20.2,384.97,22.98,5.0
60 | 0.06047,,2.46,0.0,0.488,6.153,68.8,3.2797,3.0,193.0,17.8,,13.15,29.6
61 | 0.14932,,5.13,0.0,0.453,5.741,,,8.0,,19.7,,,18.7
62 | ,,,,0.52,6.195,54.4,,5.0,384.0,20.9,393.49,,21.7
63 | 0.18159,,7.38,,,6.376,54.3,4.5404,,,,,,23.1
64 | 0.76162,20.0,,,0.647,,,1.9865,5.0,264.0,13.0,392.4,,22.8
65 | 1.00245,0.0,8.14,0.0,,6.674,87.3,,4.0,307.0,21.0,380.23,,21.0
66 | 0.52014,,,0.0,,,91.5,,5.0,264.0,13.0,386.86,5.91,48.8
67 | 10.233,0.0,18.1,0.0,0.614,6.185,,2.1705,,,20.2,379.7,18.03,14.6
68 | 0.67191,0.0,8.14,,0.538,,90.3,,,,21.0,376.88,14.81,16.6
69 | 0.14455,12.5,7.87,,0.524,,96.1,,5.0,311.0,,396.9,,27.1
70 | 0.11132,,27.74,,,,,2.1099,4.0,711.0,20.1,396.9,13.35,20.1
71 | 0.12802,,8.56,,0.52,,97.1,,5.0,384.0,20.9,395.24,,19.8
72 | 0.08014,,5.96,0.0,,5.85,,3.9342,5.0,279.0,19.2,,8.77,21.0
73 | ,,19.58,0.0,0.605,6.943,,1.8773,5.0,403.0,,363.43,,41.3
74 | 3.56868,0.0,18.1,,0.58,6.437,75.0,,24.0,666.0,,393.37,14.36,23.2
75 | ,0.0,,0.0,,,73.1,2.4775,6.0,432.0,17.8,338.63,,20.4
76 | ,,10.01,0.0,0.547,,,2.2565,6.0,432.0,17.8,388.74,10.45,18.5
77 | 0.06664,0.0,,,0.51,6.546,,3.1323,,,16.6,,5.33,29.4
78 | ,45.0,3.44,,,7.178,,,5.0,,15.2,,2.87,36.4
79 | ,,6.96,0.0,,,58.7,3.9175,,,,,7.73,24.4
80 | ,,19.58,,,,,1.3459,,,,,29.29,11.8
81 | ,0.0,18.1,,,4.906,100.0,1.1742,24.0,,,,,13.8
82 | 7.99248,,,0.0,0.7,5.52,,1.5331,24.0,666.0,20.2,,24.56,12.3
83 | ,0.0,18.1,1.0,0.77,,,,24.0,666.0,,377.73,,17.8
84 | ,,,,,6.826,27.6,,,,17.6,393.45,4.16,33.1
85 | 0.35809,,,1.0,0.507,,,,8.0,307.0,17.4,391.7,9.71,26.7
86 | 6.71772,0.0,18.1,0.0,0.713,,92.6,,,,,0.32,17.44,13.4
87 | ,,21.89,,0.624,,,1.4394,4.0,437.0,,396.9,34.41,14.4
88 | ,0.0,18.1,,0.631,6.683,96.8,1.3567,24.0,666.0,20.2,,3.73,50.0
89 | 0.05789,,,0.0,0.409,5.878,21.4,6.498,4.0,345.0,,,,22.0
90 | 3.83684,0.0,18.1,,0.77,6.251,,2.2955,24.0,666.0,,,,19.9
91 | ,0.0,19.58,,0.605,6.319,96.1,2.1,,,,297.09,11.1,23.8
92 | 0.17783,,,0.0,0.585,5.569,73.5,2.3999,6.0,,19.2,,,17.5
93 | 13.3598,0.0,18.1,0.0,0.693,5.887,,1.7821,24.0,666.0,20.2,396.9,16.35,12.7
94 | ,0.0,18.1,0.0,0.693,5.987,,,24.0,666.0,,,26.77,5.6
95 | 0.02187,60.0,2.93,0.0,,,,6.2196,1.0,265.0,15.6,393.37,5.03,31.1
96 | ,22.0,5.86,,0.431,6.718,,7.8265,,330.0,,,6.56,26.2
97 | 0.26363,,8.56,0.0,,,91.2,2.5451,,,20.9,391.23,15.55,19.4
98 | ,0.0,18.1,0.0,0.718,,,1.8589,,666.0,,,15.02,16.7
99 | ,0.0,,,0.871,,,1.4191,,403.0,,,,13.8
100 | 0.04203,,,0.0,,6.442,,,4.0,270.0,18.2,395.01,8.16,22.9
101 | 1.12658,0.0,19.58,,,,,1.6102,,,14.7,343.28,12.12,15.3
102 | 0.62356,0.0,6.2,,0.507,6.879,,3.2721,,,,,9.93,27.5
103 | ,,2.18,,,7.236,41.1,,7.0,222.0,,393.68,6.93,36.1
104 | ,,4.86,0.0,0.426,6.167,46.7,,,281.0,,,7.51,22.9
105 | 0.16439,22.0,5.86,,,,,7.8265,7.0,,,,,24.5
106 | ,0.0,19.58,0.0,,,93.0,,5.0,403.0,,,9.81,25.0
107 | 1.51902,0.0,19.58,1.0,,8.375,93.9,2.162,5.0,403.0,14.7,,,50.0
108 | 0.0315,95.0,1.47,0.0,,,15.3,7.6534,3.0,402.0,17.0,396.9,,34.9
109 | 0.46296,0.0,6.2,0.0,0.504,,76.9,3.6715,8.0,,17.4,,,31.7
110 | 0.07896,0.0,12.83,0.0,0.437,,,4.2515,,398.0,,394.92,6.78,24.1
111 | 0.79041,,9.9,,,6.122,,2.6403,4.0,304.0,18.4,,5.98,22.1
112 | ,0.0,,,0.713,,,2.4358,24.0,666.0,20.2,50.92,,14.1
113 | 0.36894,22.0,5.86,0.0,,,8.4,8.9067,7.0,,,396.9,3.54,42.8
114 | 0.14476,0.0,10.01,,0.547,5.731,65.2,2.7592,6.0,,17.8,391.5,,19.3
115 | 0.00906,90.0,,,0.4,7.088,20.8,,,285.0,15.3,394.72,7.85,32.2
116 | 0.09266,,6.09,0.0,0.433,,,5.4917,7.0,329.0,16.1,,,26.4
117 | 2.81838,0.0,18.1,,0.532,5.762,,,24.0,666.0,20.2,392.92,,21.8
118 | 3.8497,,18.1,1.0,0.77,,,2.5052,24.0,,20.2,391.34,13.27,21.7
119 | ,0.0,,,0.693,5.349,96.0,,24.0,666.0,,396.9,19.77,8.3
120 | ,0.0,,0.0,0.504,,,3.3751,,307.0,,377.51,3.92,46.7
121 | 0.53412,,,0.0,0.647,7.52,89.4,,5.0,264.0,,388.37,7.26,43.1
122 | 0.51183,0.0,6.2,,,,,,,307.0,,,4.73,31.5
123 | ,,18.1,0.0,0.7,,100.0,1.4672,,666.0,20.2,396.9,,10.5
124 | ,,,0.0,0.614,6.484,93.6,2.3053,24.0,,20.2,396.21,18.68,16.7
125 | 0.09744,0.0,5.96,,0.499,5.841,,3.3779,,279.0,19.2,377.56,,20.0
126 | 0.04011,,1.52,,0.404,7.287,,,,329.0,,396.9,4.08,33.3
127 | 0.54452,0.0,21.89,,0.624,,,1.6687,,437.0,,396.9,,17.8
128 | 4.89822,0.0,,0.0,,,,,24.0,,20.2,375.52,3.26,50.0
129 | 0.19657,22.0,5.86,,0.431,6.226,79.2,,,330.0,19.1,376.14,10.15,20.5
130 | 0.03871,,5.32,0.0,0.405,,,7.3172,6.0,,16.6,396.9,,23.2
131 | ,0.0,,0.0,0.671,6.38,,1.3861,,,20.2,396.9,,13.1
132 | ,25.0,5.13,,,5.927,,6.932,8.0,284.0,,396.9,,19.6
133 | ,0.0,10.01,,0.547,6.715,81.6,2.6775,6.0,432.0,,395.59,10.16,22.8
134 | 0.05302,0.0,3.41,0.0,0.489,7.079,,3.4145,2.0,270.0,17.8,,,28.7
135 | ,,3.97,0.0,,7.014,84.6,2.1329,,264.0,13.0,384.07,14.79,30.7
136 | 0.08829,,7.87,0.0,,,66.6,5.5605,,311.0,15.2,,,22.9
137 | ,0.0,,1.0,0.718,,82.9,1.9047,24.0,666.0,,,5.29,21.9
138 | ,,,,,,,2.1675,,,,,,23.9
139 | 0.01301,35.0,1.52,0.0,,,49.3,,1.0,284.0,15.5,394.74,,32.7
140 | 1.34284,,19.58,0.0,0.605,6.066,100.0,,5.0,403.0,,353.89,,24.3
141 | ,0.0,19.58,0.0,,6.122,,1.618,,403.0,,,,21.5
142 | ,,4.05,0.0,0.51,6.315,73.4,3.3175,5.0,296.0,16.6,395.6,6.29,24.6
143 | 7.67202,0.0,18.1,0.0,0.693,5.747,98.9,1.6334,24.0,666.0,20.2,393.1,19.92,8.5
144 | 0.08308,0.0,2.46,0.0,0.488,5.604,,,,,,,13.98,26.4
145 | 0.40202,0.0,9.9,0.0,,,,3.5325,,304.0,,395.21,,23.1
146 | 0.22489,,,0.0,0.524,,94.3,,,,15.2,392.52,,15.0
147 | ,,18.1,0.0,0.7,4.368,91.2,1.4395,,,20.2,,30.63,8.8
148 | 0.21161,,,,,6.137,87.4,2.7147,5.0,384.0,20.9,,13.44,19.3
149 | 0.04462,,,0.0,0.426,,70.4,5.4007,,281.0,,,7.22,23.9
150 | 0.17505,,5.96,0.0,0.499,5.966,,3.8473,5.0,,19.2,,10.13,24.7
151 | 0.24522,,9.9,0.0,0.544,5.782,71.7,,4.0,304.0,18.4,396.9,,19.8
152 | 1.80028,,19.58,0.0,,,79.2,2.4259,,403.0,,227.61,12.14,23.8
153 | 6.39312,,18.1,,0.584,6.162,97.4,2.206,24.0,666.0,20.2,302.76,24.1,13.3
154 | 0.05561,70.0,2.24,0.0,,,,7.8278,5.0,358.0,,,4.74,29.0
155 | 0.05372,,13.92,0.0,0.437,,,,4.0,,,,7.39,27.1
156 | ,,1.52,,,,38.3,,2.0,,,392.2,,34.6
157 | ,0.0,18.1,0.0,0.671,6.794,98.8,,24.0,666.0,,396.9,21.24,13.3
158 | ,0.0,,0.0,,5.628,,1.5166,5.0,,14.7,169.27,16.65,15.6
159 | ,0.0,18.1,,0.693,6.405,96.0,1.6768,24.0,,20.2,,19.37,12.5
160 | ,,,,0.871,4.926,95.7,1.4608,,403.0,,,29.53,14.6
161 | 7.36711,,18.1,0.0,0.679,,,1.9356,,,20.2,96.73,21.52,11.0
162 | ,52.5,5.32,,0.405,,,7.3172,,,16.6,371.72,,24.8
163 | 0.15038,,25.65,0.0,0.581,,,1.9444,,,19.1,370.31,25.41,17.3
164 | 0.20746,0.0,27.74,0.0,,,98.0,1.8226,,,20.1,,,8.1
165 | ,0.0,2.89,0.0,0.445,,,3.4952,2.0,276.0,,,11.34,21.4
166 | 4.0974,0.0,,0.0,0.871,,,1.4118,5.0,,14.7,,26.42,15.6
167 | 0.09252,30.0,4.93,0.0,0.428,6.606,42.2,,,,16.6,383.78,7.37,23.3
168 | ,40.0,,,0.447,6.854,42.8,4.2673,4.0,254.0,,396.9,2.98,32.0
169 | 0.12083,,2.89,0.0,0.445,8.069,76.0,3.4952,,276.0,,,4.21,38.7
170 | 0.01709,,2.02,0.0,,,36.1,,5.0,,17.0,384.46,4.5,30.1
171 | 0.09299,,25.65,0.0,,,92.9,2.0869,2.0,,19.1,,,20.5
172 | ,0.0,,0.0,,,95.6,2.847,,,17.8,396.9,5.68,32.5
173 | 0.02177,,2.03,0.0,0.415,7.61,15.7,,,348.0,14.7,,3.11,42.3
174 | 0.33983,,,0.0,,6.108,,8.0555,7.0,,19.1,390.18,9.16,24.3
175 | 2.37857,0.0,18.1,,0.583,5.871,,3.724,24.0,,20.2,370.73,,20.6
176 | ,34.0,6.09,0.0,0.433,6.59,,,7.0,,,395.75,9.5,22.0
177 | ,,1.91,0.0,,,,,,334.0,22.0,,8.05,18.2
178 | ,0.0,,,,5.757,,1.413,,666.0,,2.6,10.11,15.0
179 | 9.91655,,,0.0,,5.852,77.8,,24.0,666.0,20.2,,29.97,6.3
180 | 0.01965,80.0,1.76,0.0,,6.23,31.5,9.0892,1.0,241.0,18.2,341.6,,20.1
181 | 0.16902,0.0,,0.0,,5.986,,,2.0,188.0,,385.02,14.81,21.4
182 | 0.05479,33.0,,0.0,0.472,6.616,58.1,,7.0,,18.4,,,28.4
183 | 0.6147,,6.2,0.0,0.507,,80.8,3.2721,8.0,307.0,17.4,396.9,7.6,30.1
184 | 12.0482,,18.1,0.0,,,87.6,1.9512,,666.0,20.2,,14.1,20.8
185 | 0.11425,0.0,,1.0,,,92.4,3.3633,5.0,276.0,16.4,393.74,10.5,23.0
186 | 0.88125,0.0,21.89,,,5.637,94.7,,4.0,,21.2,396.9,,14.3
187 | 8.79212,0.0,,0.0,0.584,5.565,70.6,2.0635,24.0,666.0,20.2,3.65,17.16,11.7
188 | ,,,0.0,,7.148,27.7,,4.0,245.0,19.2,396.9,3.56,37.3
189 | 0.05023,35.0,6.06,0.0,,5.706,28.4,6.6407,,304.0,16.9,394.02,12.43,17.1
190 | 88.9762,0.0,18.1,,0.671,6.968,,,,666.0,20.2,,17.21,10.4
191 | 5.82401,0.0,18.1,,,6.242,64.7,3.4242,,,,396.9,10.74,23.0
192 | 5.20177,,,1.0,0.77,6.127,,2.7227,24.0,666.0,,395.43,,22.7
193 | 0.14103,0.0,13.92,0.0,,,58.0,6.32,4.0,289.0,,,,20.3
194 | ,0.0,13.92,0.0,0.437,6.009,42.3,5.5027,4.0,,16.0,,10.4,21.7
195 | 6.53876,,,1.0,0.631,7.016,,1.2024,24.0,666.0,20.2,392.05,,50.0
196 | 13.6781,,,0.0,,,,1.8206,24.0,666.0,,,34.02,8.4
197 | 0.12329,0.0,10.01,,0.547,5.913,92.9,2.3534,6.0,432.0,,394.95,,18.8
198 | 0.0578,0.0,,0.0,0.488,6.98,58.4,,3.0,193.0,17.8,396.9,5.04,37.2
199 | 2.63548,,9.9,0.0,0.544,,37.8,,4.0,304.0,,,,16.1
200 | 0.02498,0.0,1.89,0.0,,6.54,59.7,,1.0,422.0,,389.96,,16.5
201 | ,,,0.0,,6.316,38.1,6.4584,5.0,224.0,20.2,389.71,,22.2
202 | ,0.0,,0.0,0.583,,53.2,,,666.0,,,11.45,20.6
203 | 8.20058,0.0,18.1,0.0,0.713,5.936,80.3,2.7792,,666.0,20.2,,,13.5
204 | ,0.0,6.2,0.0,,8.247,70.4,,,,17.4,,,48.3
205 | 0.3692,,9.9,0.0,,6.567,87.3,,4.0,304.0,,,,23.8
206 | 2.24236,,,,,5.854,,2.422,5.0,403.0,,395.11,11.64,22.7
207 | 0.32264,0.0,21.89,0.0,,5.942,,1.9669,,437.0,21.2,,,17.4
208 | 0.04666,,,,,7.107,36.6,7.309,,,12.6,354.31,,30.3
209 | 0.66351,20.0,,0.0,0.647,7.333,,1.8946,5.0,,13.0,383.29,,36.0
210 | ,,,0.0,,,73.3,3.8384,8.0,,,385.91,,41.7
211 | 0.17134,0.0,10.01,0.0,,5.928,88.2,,6.0,,17.8,,15.76,18.3
212 | 0.06899,0.0,25.65,,0.581,5.87,69.7,2.2577,2.0,188.0,,,,22.0
213 | 0.07244,,1.69,,0.411,,,10.7103,,,,392.33,7.79,18.6
214 | 0.31533,0.0,6.2,,0.504,8.266,,2.8944,,307.0,17.4,,4.14,44.8
215 | 20.7162,,,0.0,0.659,4.138,100.0,,24.0,,20.2,,,11.9
216 | 0.06151,0.0,5.19,,,5.968,58.5,,,,20.2,396.9,,18.7
217 | 0.25915,0.0,,0.0,0.624,5.693,,1.7883,,,21.2,,17.19,16.2
218 | 0.01096,55.0,2.25,,0.389,,,,1.0,300.0,15.3,394.72,8.23,22.0
219 | 18.0846,0.0,18.1,,,6.434,100.0,,24.0,666.0,,27.25,,7.2
220 | ,0.0,8.56,0.0,0.52,6.127,85.2,,5.0,384.0,20.9,,,20.4
221 | ,0.0,18.1,0.0,,4.138,100.0,,,666.0,20.2,396.9,37.97,13.8
222 | ,0.0,,,0.713,,98.3,,24.0,,,304.21,19.31,13.0
223 | 0.32982,0.0,21.89,0.0,0.624,,95.4,2.4699,,437.0,,388.69,,18.4
224 | 13.5222,0.0,,0.0,0.631,,100.0,1.5106,24.0,,20.2,,,23.1
225 | ,0.0,,0.0,,,40.0,,3.0,233.0,17.9,389.39,9.55,21.2
226 | ,0.0,9.69,0.0,,,,2.7986,,391.0,,393.29,17.6,23.1
227 | 0.03584,80.0,3.37,0.0,0.398,,17.8,6.6115,4.0,,,,4.67,23.5
228 | ,90.0,,1.0,0.401,7.923,,5.885,1.0,198.0,13.6,,3.16,50.0
229 | 0.05735,,4.49,0.0,,6.63,56.1,4.4377,3.0,247.0,18.5,,6.53,26.6
230 | 0.1029,30.0,,,0.428,,,7.0355,6.0,300.0,16.6,372.75,11.22,22.2
231 | ,,,,0.488,7.831,53.6,,3.0,193.0,17.8,392.63,4.45,50.0
232 | ,,18.1,,,,,1.9096,,666.0,20.2,,24.39,8.3
233 | ,,19.58,,,6.51,100.0,1.7659,5.0,403.0,,,,23.3
234 | ,12.5,,,,5.889,,,5.0,,,390.5,,21.7
235 | 0.06417,,,0.0,0.499,,68.2,,,279.0,19.2,,,18.9
236 | 0.77299,0.0,,0.0,0.538,6.495,,,4.0,307.0,21.0,,,18.4
237 | 1.20742,0.0,19.58,,0.605,5.875,94.6,,5.0,,14.7,292.29,14.43,17.4
238 | 3.32105,0.0,19.58,1.0,,5.403,,1.3216,,403.0,14.7,,26.82,13.4
239 | ,,,,0.693,6.404,100.0,,,666.0,20.2,,20.31,12.1
240 | ,,,0.0,0.429,6.939,34.5,8.7921,1.0,,,,,26.6
241 | 0.40771,,,1.0,,6.164,,3.048,,307.0,17.4,395.24,21.46,21.7
242 | ,,,,,,57.8,3.4952,2.0,,,357.98,,28.4
243 | 0.04337,21.0,5.64,0.0,0.439,,63.0,,,243.0,,393.97,9.43,20.5
244 | ,30.0,4.93,,0.428,6.897,,6.3361,6.0,300.0,16.6,391.25,,22.0
245 | 15.288,,18.1,0.0,0.671,6.649,,1.3449,,,20.2,,23.24,13.9
246 | 9.18702,,18.1,0.0,,,,1.5804,,666.0,,396.9,,11.3
247 | 0.06642,,4.05,0.0,0.51,,74.4,2.9153,5.0,296.0,16.6,,6.92,29.9
248 | ,0.0,,0.0,0.448,,2.9,,3.0,233.0,17.9,,,26.6
249 | 22.0511,,,0.0,0.74,,,1.8662,24.0,666.0,20.2,391.45,,10.5
250 | 5.29305,0.0,18.1,0.0,,,82.5,,24.0,666.0,20.2,,18.76,23.2
251 | 0.22969,0.0,10.59,0.0,,6.326,52.5,4.3549,,277.0,18.6,,10.97,24.4
252 | 0.06129,,3.33,1.0,,7.645,49.7,,5.0,216.0,14.9,377.07,3.01,46.0
253 | 0.04819,,3.64,0.0,,6.108,32.0,,1.0,,16.4,,,21.9
254 | ,,,0.0,,,90.8,1.8195,24.0,,,21.57,,7.5
255 | 0.06905,0.0,,0.0,,7.147,,6.0622,3.0,222.0,,,5.33,36.2
256 | 0.01538,90.0,3.75,0.0,,7.454,34.2,6.3361,3.0,,,,3.11,44.0
257 | 8.24809,0.0,18.1,,0.713,,99.3,2.4527,,666.0,20.2,375.87,16.74,17.8
258 | 0.14866,0.0,8.56,0.0,0.52,6.727,,,,,20.9,,9.42,27.5
259 | ,,6.2,0.0,,8.04,86.5,3.2157,8.0,,17.4,,,37.6
260 | ,0.0,,,,6.833,94.3,,24.0,,20.2,,,14.1
261 | 0.14052,,10.59,0.0,0.489,,,3.9454,4.0,277.0,18.6,385.81,,28.1
262 | 12.2472,0.0,,,0.584,5.837,59.7,1.9976,,,,24.65,15.69,10.2
263 | ,,19.58,,,5.88,97.3,2.3887,5.0,403.0,,348.13,,19.1
264 | ,0.0,,,,7.82,36.9,3.4952,,,,,3.57,43.8
265 | 0.03615,80.0,4.95,0.0,0.411,6.63,,5.1167,,245.0,,396.9,4.7,27.9
266 | ,0.0,10.59,,,,,3.9454,,,18.6,393.63,,25.0
267 | ,25.0,,,0.453,5.966,93.4,,8.0,,,378.08,,16.0
268 | ,0.0,6.91,0.0,0.448,,,,,,17.9,392.74,18.8,16.6
269 | 1.38799,0.0,,0.0,0.538,5.95,,3.99,,307.0,,232.6,,13.2
270 | 0.57834,,3.97,0.0,0.575,8.297,67.0,,5.0,,,,7.44,50.0
271 | 0.24103,0.0,7.38,,0.493,6.083,43.7,,5.0,287.0,,396.9,12.79,22.2
272 | ,95.0,1.47,0.0,,7.135,,,,402.0,17.0,384.3,4.45,32.9
273 | ,,,,,6.655,98.2,2.3552,24.0,,,355.29,,15.2
274 | 0.95577,,8.14,0.0,,,,,,307.0,,306.38,17.28,14.8
275 | 8.64476,,18.1,,,,,1.7912,24.0,666.0,20.2,396.9,15.17,13.8
276 | 0.537,0.0,6.2,0.0,,5.981,,,8.0,307.0,17.4,,11.65,24.3
277 | ,20.0,,0.0,0.647,,,2.1121,,,13.0,392.8,9.59,33.8
278 | 0.0459,,5.32,0.0,,,45.6,7.3172,6.0,293.0,,396.9,7.6,22.3
279 | ,0.0,19.58,1.0,0.605,,,2.0407,5.0,403.0,,,1.92,50.0
280 | 9.33889,0.0,,,0.679,,95.6,1.9682,,,20.2,,,9.5
281 | ,,,,,5.857,98.2,1.6686,,,,,21.32,13.3
282 | 0.11027,25.0,,,0.453,,67.8,,,284.0,19.7,396.9,,22.2
283 | ,,,,,,,,4.0,,,394.67,16.96,18.1
284 | ,0.0,21.89,0.0,0.624,6.431,98.8,,4.0,,21.2,,,18.0
285 | 5.73116,,,,0.532,,77.0,3.4106,,666.0,20.2,395.28,7.01,25.0
286 | 0.21124,12.5,7.87,0.0,0.524,5.631,,,,,15.2,386.63,29.93,16.5
287 | ,0.0,,0.0,,,28.9,5.4159,,287.0,,396.9,,23.0
288 | ,,,0.0,0.58,,,,24.0,666.0,,396.9,14.76,20.1
289 | 0.01951,17.5,1.38,,,,59.5,9.2229,,,,,8.05,33.0
290 | 0.04417,,2.24,,,,47.4,,,358.0,,,6.07,24.8
291 | ,0.0,8.14,0.0,0.538,6.096,84.5,,4.0,,21.0,,,18.2
292 | ,0.0,,0.0,0.871,5.272,94.0,,,,14.7,88.63,16.14,13.1
293 | 0.03359,75.0,2.95,0.0,,,15.8,5.4011,3.0,,,,1.98,34.9
294 | 17.8667,0.0,,0.0,,,100.0,1.3861,24.0,,20.2,393.74,21.78,10.2
295 | ,0.0,18.1,,0.655,,48.2,,24.0,666.0,20.2,334.4,14.13,19.9
296 | ,0.0,,0.0,0.659,5.608,100.0,,24.0,666.0,20.2,,12.13,27.9
297 | 0.0456,0.0,13.89,,0.55,5.888,,,,276.0,16.4,392.8,,23.3
298 | 0.21038,,3.33,0.0,0.4429,,,4.1007,,216.0,14.9,396.9,,35.1
299 | 9.39063,,18.1,,,5.627,93.9,,,,,396.9,22.88,12.8
300 | 0.10959,,,0.0,0.573,6.794,,2.3889,1.0,273.0,21.0,393.45,6.48,22.0
301 | 0.03041,0.0,,0.0,,5.895,59.6,,,224.0,,,,18.5
302 | ,,6.2,1.0,0.507,,76.5,4.148,8.0,,,388.45,,25.1
303 | 0.25199,0.0,,,,,,,4.0,277.0,,389.43,18.06,22.5
304 | 0.21719,0.0,10.59,1.0,0.489,5.807,53.8,,4.0,277.0,,390.94,,22.4
305 | 0.12932,0.0,13.92,0.0,,,,,4.0,,16.0,396.9,,28.6
306 | 6.65492,,,,,6.317,83.0,,,666.0,20.2,396.9,,19.5
307 | 0.21409,,5.86,0.0,,6.438,8.9,,7.0,330.0,19.1,377.07,3.59,24.8
308 | ,0.0,9.69,0.0,0.585,5.926,42.6,2.3817,,391.0,,,13.59,24.5
309 | 7.83932,0.0,18.1,0.0,,,65.4,2.9634,24.0,666.0,20.2,396.9,13.22,21.4
310 | 0.1,,,,0.433,,17.7,5.4917,7.0,,16.1,390.43,4.86,33.1
311 | ,40.0,1.25,0.0,0.429,6.49,44.4,,,335.0,19.7,396.9,5.98,22.9
312 | 0.09065,,6.96,,0.464,5.92,61.5,3.9175,,223.0,18.6,391.34,13.65,20.7
313 | ,,2.03,,,6.162,38.4,,2.0,348.0,,,7.43,24.1
314 | 1.46336,0.0,19.58,,,7.489,,1.9709,,,14.7,374.43,1.73,50.0
315 | ,0.0,,0.0,0.448,,,,,233.0,17.9,394.46,7.44,24.7
316 | 0.07013,0.0,,0.0,0.55,,,,5.0,276.0,,392.78,9.69,28.7
317 | ,0.0,,,,6.343,100.0,,,,,396.9,20.32,7.2
318 | ,45.0,,0.0,0.437,,,,5.0,398.0,15.2,,5.1,37.0
319 | 0.3494,,,0.0,0.544,5.972,76.7,3.1025,4.0,304.0,18.4,,,20.3
320 | ,,,0.0,0.647,,100.0,,5.0,,13.0,391.93,,30.1
321 | ,,,0.0,,,96.7,,5.0,384.0,20.9,,,19.5
322 | 0.04981,,5.64,,0.439,,21.4,6.8147,,243.0,,396.9,,23.4
323 | 8.15174,0.0,18.1,,0.7,5.39,98.9,1.7281,24.0,666.0,20.2,,20.85,11.5
324 | 0.02731,,7.07,,0.469,6.421,,4.9671,,242.0,17.8,396.9,9.14,21.6
325 | ,0.0,18.1,0.0,,6.341,96.4,2.072,,666.0,,,17.79,14.9
326 | 0.15086,0.0,27.74,0.0,,5.454,92.7,,,711.0,20.1,395.09,18.06,15.2
327 | 0.21977,,,0.0,,5.602,62.0,6.0877,3.0,233.0,17.9,396.9,,19.4
328 | ,0.0,,,,6.824,76.5,,,666.0,20.2,48.45,22.74,8.4
329 | 0.04113,25.0,,0.0,,6.727,33.5,5.4007,4.0,281.0,19.0,,5.29,28.0
330 | 0.13642,0.0,10.59,,0.489,5.891,,,,277.0,,,,22.6
331 | 1.61282,0.0,8.14,0.0,,6.096,96.9,,4.0,,21.0,248.31,20.34,13.5
332 | ,0.0,18.1,,0.584,6.348,,,24.0,666.0,,,,14.5
333 | 0.82526,20.0,3.97,0.0,,7.327,94.5,,5.0,264.0,,,,31.0
334 | ,0.0,,0.0,0.679,6.202,,1.8629,24.0,666.0,20.2,18.82,,10.9
335 | 3.69695,0.0,18.1,,,,,1.7523,24.0,666.0,,316.03,14.0,21.9
336 | ,,,0.0,0.489,6.405,73.9,3.0921,,270.0,17.8,393.55,8.2,22.0
337 | 0.05497,,5.19,,,,45.4,4.8122,5.0,224.0,20.2,396.9,9.74,19.0
338 | 14.3337,0.0,18.1,,,6.229,88.0,1.9512,24.0,666.0,20.2,383.32,,21.4
339 | 0.0536,21.0,,,0.439,6.511,21.1,6.8147,4.0,,16.8,,5.28,25.0
340 | 0.03113,,,,,6.014,48.5,8.0136,3.0,352.0,18.8,385.64,10.53,17.5
341 | ,,3.97,0.0,0.647,7.206,91.6,1.9301,5.0,264.0,13.0,,,36.5
342 | ,,,,,6.095,65.1,,,,16.6,394.62,,20.1
343 | 0.62976,,8.14,0.0,,,,,,307.0,21.0,396.9,8.26,20.4
344 | ,0.0,,,0.544,,77.7,3.945,4.0,,18.4,396.42,,16.2
345 | ,0.0,3.41,,0.489,7.007,,3.4217,2.0,,17.8,,5.5,23.6
346 | ,0.0,,0.0,0.7,,89.5,1.5184,24.0,666.0,,396.9,31.99,7.4
347 | ,20.0,6.96,1.0,0.464,,,4.3665,,223.0,18.6,,,35.2
348 | 2.01019,0.0,19.58,,0.605,7.929,96.2,,5.0,403.0,,369.3,,50.0
349 | ,0.0,,,0.46,,,,4.0,,16.9,,,19.3
350 | ,0.0,,0.0,0.585,6.019,65.3,2.4091,6.0,,19.2,396.9,12.92,21.2
351 | 0.97617,0.0,21.89,0.0,0.624,5.757,,2.346,,437.0,21.2,262.76,17.31,15.6
352 | ,33.0,2.18,,,,71.9,3.0992,,,18.4,396.9,6.47,33.4
353 | ,0.0,18.1,0.0,0.583,6.114,,3.5459,,,20.2,,14.98,19.1
354 | 0.47547,0.0,,,0.544,6.113,58.8,4.0019,4.0,,18.4,,12.73,21.0
355 | 0.12757,30.0,4.93,0.0,0.428,6.393,7.8,7.0355,6.0,,,374.71,,23.7
356 | 0.0136,,,,,,,,3.0,,21.1,,,18.9
357 | 4.22239,,18.1,,0.77,5.803,,,,666.0,,,,16.8
358 | 0.08873,21.0,,,0.439,5.963,45.7,,4.0,243.0,,395.56,13.45,19.7
359 | 3.69311,0.0,18.1,,0.713,6.376,,,,,,391.43,14.65,17.7
360 | 0.08447,0.0,,0.0,0.51,5.859,,2.7019,,296.0,16.6,393.23,,22.6
361 | ,,18.1,0.0,0.74,,,,24.0,666.0,20.2,43.06,,11.8
362 | 0.0837,45.0,3.44,0.0,,,38.9,,5.0,398.0,,396.9,5.39,34.9
363 | ,0.0,,,,6.12,,2.2875,1.0,273.0,21.0,396.9,9.08,20.6
364 | 5.82115,0.0,,0.0,0.713,,89.9,2.8016,24.0,,20.2,,10.29,20.2
365 | 0.07875,,3.44,0.0,0.437,6.782,41.1,,,,15.2,393.87,6.68,32.0
366 | 2.44953,,,0.0,0.605,,95.2,,5.0,403.0,,,,22.3
367 | ,,5.13,0.0,,,,,8.0,284.0,19.7,,6.86,23.3
368 | 0.25387,0.0,,,0.448,,95.3,5.87,,,17.9,396.9,,14.4
369 | 0.03049,55.0,3.78,0.0,0.484,6.874,,,5.0,370.0,17.6,,4.61,31.2
370 | ,0.0,6.2,0.0,,,61.5,,8.0,,,376.75,,24.0
371 | ,,5.86,0.0,0.431,6.957,6.8,,,330.0,19.1,,,29.6
372 | 0.85204,,,0.0,0.538,5.965,89.2,,,307.0,,,13.83,19.6
373 | ,0.0,,,,,82.8,3.2628,4.0,,,,,21.6
374 | 6.80117,0.0,,0.0,0.713,6.081,84.4,2.7175,,666.0,20.2,,,20.0
375 | ,,,1.0,0.605,6.25,92.6,1.7984,,403.0,,338.92,5.5,27.0
376 | ,40.0,,1.0,,7.267,,4.7872,,,,,6.05,33.2
377 | 9.96654,0.0,,0.0,0.74,,100.0,1.9784,,666.0,20.2,,,15.4
378 | ,45.0,3.44,0.0,,6.739,30.8,6.4798,5.0,,,,,30.5
379 | ,,18.1,0.0,0.7,,,,24.0,,20.2,396.9,,7.2
380 | 0.08265,,13.92,,0.437,6.127,18.4,5.5027,4.0,,,396.9,8.58,23.9
381 | ,,18.1,0.0,0.597,5.155,100.0,1.5894,,666.0,20.2,210.97,20.08,16.3
382 | 0.02543,55.0,,0.0,,6.696,,,5.0,,,,,23.9
383 | ,20.0,,0.0,0.647,8.704,86.9,1.801,5.0,264.0,,389.7,5.12,50.0
384 | 0.49298,0.0,,,,6.635,82.5,,4.0,304.0,18.4,,,22.8
385 | 2.73397,0.0,,0.0,0.871,,,1.5257,5.0,403.0,,351.85,21.45,15.4
386 | 0.34006,0.0,,,0.624,6.458,,,4.0,437.0,21.2,395.04,12.6,19.2
387 | ,0.0,19.58,0.0,,5.404,,1.5916,,403.0,,341.6,13.28,19.6
388 | ,,,,0.77,,81.3,,24.0,666.0,20.2,390.74,12.67,22.6
389 | 0.0686,0.0,2.89,,,7.416,62.5,3.4952,,276.0,,396.9,,33.2
390 | 8.26725,,,,,5.875,,,24.0,,20.2,347.88,,50.0
391 | 0.07151,,4.49,0.0,,,,3.7476,3.0,247.0,18.5,395.15,8.44,22.2
392 | 7.75223,,18.1,0.0,,6.301,83.7,,24.0,666.0,20.2,272.21,,14.9
393 | ,,,,,,32.2,5.8736,4.0,430.0,,368.57,9.09,19.8
394 | 0.28955,0.0,10.59,,0.489,5.412,9.8,3.5875,4.0,,,,29.55,23.7
395 | ,0.0,18.1,0.0,0.655,5.952,84.7,2.8715,24.0,,20.2,22.01,17.15,19.0
396 | 0.07165,0.0,25.65,0.0,0.581,6.004,,2.1974,2.0,188.0,19.1,,14.27,20.3
397 | 0.04741,0.0,,0.0,0.573,6.03,80.8,2.505,,273.0,21.0,396.9,,11.9
398 | 1.25179,,,0.0,0.538,5.57,98.1,3.7979,4.0,307.0,21.0,376.57,21.02,13.6
399 | 0.12579,45.0,,0.0,0.437,,29.1,,,,,,4.56,29.8
400 | 0.15876,,,,0.413,,,,,,,,,21.7
401 | ,,8.56,0.0,0.52,,91.9,,5.0,,20.9,,,19.5
402 | 0.29916,20.0,,,0.464,,42.1,4.429,3.0,,18.6,,,21.1
403 | 0.01501,,,,0.435,6.635,29.7,8.344,4.0,,17.0,390.94,5.99,24.5
404 | 11.1604,0.0,18.1,,0.74,6.629,94.6,,24.0,,20.2,109.85,23.27,13.4
405 | ,0.0,,,0.52,,85.4,,,,,,,18.6
406 |
--------------------------------------------------------------------------------