├── .python-version
├── .gitignore
├── data
│   ├── ai-knowledge-graph-example.png
│   └── industrial-revolution.txt
├── src
│   ├── knowledge_graph
│   │   ├── __init__.py
│   │   ├── config.py
│   │   ├── text_utils.py
│   │   ├── llm.py
│   │   ├── prompts.py
│   │   ├── main.py
│   │   ├── visualization.py
│   │   ├── entity_standardization.py
│   │   └── templates
│   │       └── graph_template.html
│   └── generate_graph.py
├── generate-graph.py
├── pyproject.toml
├── requirements.txt
├── config.toml
├── json_to_html.py
├── LICENSE
├── README.md
└── uv.lock
/.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | */__pycache__ 2 | */.DS_Store 3 | .DS_Store 4 | # PyVis generated libraries 5 | lib/ 6 | 7 | *.egg-info 8 | __pycache__ 9 | -------------------------------------------------------------------------------- /data/ai-knowledge-graph-example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/robert-mcdermott/ai-knowledge-graph/HEAD/data/ai-knowledge-graph-example.png -------------------------------------------------------------------------------- /src/knowledge_graph/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Knowledge Graph Generator and Visualizer. 3 | 4 | A tool that takes text input and generates an interactive knowledge graph visualization. 5 | """ 6 | 7 | from src.knowledge_graph.visualization import visualize_knowledge_graph, sample_data_visualization 8 | from src.knowledge_graph.llm import call_llm, extract_json_from_text 9 | from src.knowledge_graph.config import load_config 10 | 11 | __version__ = "0.1.0" 12 | -------------------------------------------------------------------------------- /src/generate_graph.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Knowledge Graph Generator and Visualizer. 4 | This script is the entry point for generating knowledge graphs from textual data. 5 | """ 6 | import sys 7 | import os 8 | 9 | # Add the current directory to the Python path to find the module 10 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 11 | 12 | from src.knowledge_graph.main import main 13 | 14 | if __name__ == "__main__": 15 | main() -------------------------------------------------------------------------------- /generate-graph.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Knowledge Graph Generator and Visualizer. 4 | This script serves as a backward-compatible entry point to the refactored code.
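It adds the repository root to sys.path and then delegates to src.knowledge_graph.main.main().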
5 | """ 6 | import sys 7 | import os 8 | 9 | # Add the current directory to the path 10 | current_dir = os.path.dirname(os.path.abspath(__file__)) 11 | sys.path.insert(0, current_dir) 12 | 13 | from src.knowledge_graph.main import main 14 | 15 | if __name__ == "__main__": 16 | # Pass command line arguments to the main function 17 | main() 18 | -------------------------------------------------------------------------------- /src/knowledge_graph/config.py: -------------------------------------------------------------------------------- 1 | """Configuration utilities for the knowledge graph generator.""" 2 | import tomli 3 | import os 4 | 5 | def load_config(config_file="config.toml"): 6 | """ 7 | Load configuration from TOML file. 8 | 9 | Args: 10 | config_file: Path to the TOML configuration file 11 | 12 | Returns: 13 | Dictionary containing the configuration or None if loading fails 14 | """ 15 | try: 16 | with open(config_file, "rb") as f: 17 | return tomli.load(f) 18 | except Exception as e: 19 | print(f"Error loading config file: {e}") 20 | return None -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "ai-knowledge-graph" 3 | version = "0.6.1" 4 | description = "Takes a text document and generates an interactive knowledge graph" 5 | authors = [{ name = "Robert McDermott", email = "robert.c.mcdermott@gmail.com" }, ] 6 | readme = "README.md" 7 | requires-python = ">=3.12" 8 | dependencies = [ 9 | "networkx>=3.4.2", 10 | "pyvis>=0.3.2", 11 | "pyvis-network>=0.0.6", 12 | "requests>=2.32.3", 13 | "tomli>=2.2.1", 14 | "python-louvain>=0.16" 15 | ] 16 | 17 | [build-system] 18 | requires = ["setuptools>=61.0"] 19 | build-backend = "setuptools.build_meta" 20 | 21 | [project.scripts] 22 | generate-graph = "src.knowledge_graph.main:main" 23 | 24 | [tool.setuptools] 25 | package-dir = {"" = "."} 26 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | asttokens==3.0.0 2 | certifi==2025.1.31 3 | charset-normalizer==3.4.1 4 | decorator==5.2.1 5 | executing==2.2.0 6 | idna==3.10 7 | ipython==9.0.2 8 | ipython-pygments-lexers==1.1.1 9 | jedi==0.19.2 10 | jinja2==3.1.6 11 | jsonpickle==4.0.2 12 | markupsafe==3.0.2 13 | matplotlib-inline==0.1.7 14 | networkx==3.4.2 15 | numpy==2.2.4 16 | pandas==2.2.3 17 | parso==0.8.4 18 | pexpect==4.9.0 19 | prompt-toolkit==3.0.50 20 | ptyprocess==0.7.0 21 | pure-eval==0.2.3 22 | pygments==2.19.1 23 | python-dateutil==2.9.0.post0 24 | python-louvain==0.16 25 | pytz==2025.1 26 | pyvis==0.3.2 27 | pyvis-network==0.0.6 28 | requests==2.32.3 29 | six==1.17.0 30 | stack-data==0.6.3 31 | tomli==2.2.1 32 | traitlets==5.14.3 33 | tzdata==2025.1 34 | urllib3==2.3.0 35 | wcwidth==0.2.13 36 | -------------------------------------------------------------------------------- /config.toml: -------------------------------------------------------------------------------- 1 | [llm] 2 | model = "gemma3" 3 | #model = "claude-3.5-sonnet-v2" 4 | #model = "gpt4o" 5 | #model = "llama3-2-90b-instruct-v1:0" 6 | api_key = "sk-1234" 7 | base_url = "http://localhost:11434/v1/chat/completions" 8 | #base_url = "http://localhost:4000/v1/chat/completions" 9 | max_tokens = 8192 10 | #max_tokens = 4096 11 | temperature = 0.8 12 | 13 | [chunking] 14 | chunk_size = 100 # Number of words per chunk 15 | overlap = 20 # Number 
of words to overlap between chunks 16 | 17 | [standardization] 18 | enabled = true # Whether to enable entity standardization 19 | use_llm_for_entities = true # Whether to use LLM for additional entity resolution 20 | 21 | [inference] 22 | enabled = true # Whether to enable relationship inference 23 | use_llm_for_inference = true # Whether to use LLM for relationship inference 24 | apply_transitive = true # Whether to apply transitive inference rules 25 | 26 | [visualization] 27 | edge_smooth = false # Options: false, "dynamic", "continuous", "discrete", "diagonalCross", 28 | # "straightCross", "horizontal", "vertical", "curvedCW", "curvedCCW", "cubicBezier": true = "continuous" 29 | -------------------------------------------------------------------------------- /src/knowledge_graph/text_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Text processing utilities for the knowledge graph generator. 3 | """ 4 | 5 | def chunk_text(text, chunk_size=500, overlap=50): 6 | """ 7 | Split a text into chunks of words with overlap. 8 | 9 | Args: 10 | text: The input text to chunk 11 | chunk_size: The size of each chunk in words 12 | overlap: The number of words to overlap between chunks 13 | 14 | Returns: 15 | List of text chunks 16 | """ 17 | # Split text into words 18 | words = text.split() 19 | 20 | # If text is smaller than chunk size, return it as a single chunk 21 | if len(words) <= chunk_size: 22 | return [text] 23 | 24 | # Create chunks with overlap 25 | chunks = [] 26 | start = 0 27 | 28 | while start < len(words): 29 | # Calculate end position for this chunk 30 | end = min(start + chunk_size, len(words)) 31 | 32 | # Join words for this chunk 33 | chunk = ' '.join(words[start:end]) 34 | chunks.append(chunk) 35 | 36 | # Move start position for next chunk, accounting for overlap 37 | start = end - overlap 38 | 39 | # If we're near the end and the last chunk would be too small, just exit 40 | if start < len(words) and start + chunk_size - overlap >= len(words): 41 | # Add remaining words as the final chunk 42 | final_chunk = ' '.join(words[start:]) 43 | chunks.append(final_chunk) 44 | break 45 | 46 | return chunks -------------------------------------------------------------------------------- /json_to_html.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Utility script to convert existing JSON knowledge graph data to HTML visualization. 4 | This allows testing the visualization features without running the full pipeline. 5 | """ 6 | 7 | import json 8 | import sys 9 | import os 10 | 11 | # Add the src directory to Python path so we can import our modules 12 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src')) 13 | 14 | from knowledge_graph.visualization import visualize_knowledge_graph 15 | 16 | def json_to_html(json_file, output_file): 17 | """ 18 | Convert JSON knowledge graph data to HTML visualization. 
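The JSON file is expected to contain a list of subject/predicate/object triples, in the same format the main pipeline saves alongside its HTML output.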
19 | 20 | Args: 21 | json_file: Path to JSON file containing triples data 22 | output_file: Path to save the HTML visualization 23 | """ 24 | try: 25 | # Load JSON data 26 | with open(json_file, 'r', encoding='utf-8') as f: 27 | triples = json.load(f) 28 | 29 | print(f"Loaded {len(triples)} triples from {json_file}") 30 | 31 | # Generate HTML visualization 32 | stats = visualize_knowledge_graph(triples, output_file) 33 | 34 | print(f"Generated HTML visualization: {output_file}") 35 | print("Graph Statistics:") 36 | print(f" Nodes: {stats['nodes']}") 37 | print(f" Edges: {stats['edges']}") 38 | print(f" Original Edges: {stats.get('original_edges', 'N/A')}") 39 | print(f" Inferred Edges: {stats.get('inferred_edges', 'N/A')}") 40 | print(f" Communities: {stats['communities']}") 41 | 42 | print(f"\nTo view the visualization, open: file://{os.path.abspath(output_file)}") 43 | 44 | except Exception as e: 45 | print(f"Error: {e}") 46 | sys.exit(1) 47 | 48 | if __name__ == "__main__": 49 | if len(sys.argv) != 3: 50 | print("Usage: python json_to_html.py ") 51 | print("Example: python json_to_html.py docs/industrialRev.json test_inferred_filter.html") 52 | sys.exit(1) 53 | 54 | json_file = sys.argv[1] 55 | output_file = sys.argv[2] 56 | 57 | json_to_html(json_file, output_file) 58 | -------------------------------------------------------------------------------- /src/knowledge_graph/llm.py: -------------------------------------------------------------------------------- 1 | """LLM interaction utilities for knowledge graph generation.""" 2 | import requests 3 | import json 4 | import re 5 | 6 | def call_llm(model, user_prompt, api_key, system_prompt=None, max_tokens=1000, temperature=0.2, base_url=None) -> str: 7 | """ 8 | Call the language model API. 9 | 10 | Args: 11 | model: The model name to use 12 | user_prompt: The user prompt to send 13 | api_key: The API key for authentication 14 | system_prompt: Optional system prompt to set context 15 | max_tokens: Maximum number of tokens to generate 16 | temperature: Sampling temperature 17 | base_url: The base URL for the API endpoint 18 | 19 | Returns: 20 | The model's response as a string 21 | """ 22 | headers = { 23 | 'Content-Type': 'application/json', 24 | 'Authorization': f"Bearer {api_key}" 25 | } 26 | 27 | messages = [] 28 | 29 | if system_prompt: 30 | messages.append({ 31 | 'role': 'system', 32 | 'content': system_prompt 33 | }) 34 | 35 | messages.append({ 36 | 'role': 'user', 37 | 'content': [ 38 | { 39 | 'type': 'text', 40 | 'text': user_prompt 41 | } 42 | ] 43 | }) 44 | 45 | payload = { 46 | 'model': model, 47 | 'messages': messages, 48 | 'max_tokens': max_tokens, 49 | 'temperature': temperature 50 | } 51 | 52 | response = requests.post( 53 | base_url, 54 | headers=headers, 55 | json=payload 56 | ) 57 | 58 | if response.status_code == 200: 59 | return response.json()['choices'][0]['message']['content'] 60 | else: 61 | raise Exception(f"API request failed: {response.text}") 62 | 63 | def extract_json_from_text(text): 64 | """ 65 | Extract JSON array from text that might contain additional content. 
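Handles responses wrapped in code fences, repairs common issues such as unquoted keys and trailing commas, and can rebuild a truncated array from its complete objects.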
66 | 67 | Args: 68 | text: Text that may contain JSON 69 | 70 | Returns: 71 | The parsed JSON if found, None otherwise 72 | """ 73 | # First, check if the text is wrapped in code blocks with triple backticks 74 | code_block_pattern = r'```(?:json)?\s*([\s\S]*?)```' 75 | code_match = re.search(code_block_pattern, text) 76 | if code_match: 77 | text = code_match.group(1).strip() 78 | print("Found JSON in code block, extracting content...") 79 | 80 | try: 81 | # Try direct parsing in case the response is already clean JSON 82 | return json.loads(text) 83 | except json.JSONDecodeError: 84 | # Look for opening and closing brackets of a JSON array 85 | start_idx = text.find('[') 86 | if start_idx == -1: 87 | print("No JSON array start found in text") 88 | return None 89 | 90 | # Simple bracket counting to find matching closing bracket 91 | bracket_count = 0 92 | complete_json = False 93 | for i in range(start_idx, len(text)): 94 | if text[i] == '[': 95 | bracket_count += 1 96 | elif text[i] == ']': 97 | bracket_count -= 1 98 | if bracket_count == 0: 99 | # Found the matching closing bracket 100 | json_str = text[start_idx:i+1] 101 | complete_json = True 102 | break 103 | 104 | # Handle complete JSON array 105 | if complete_json: 106 | try: 107 | return json.loads(json_str) 108 | except json.JSONDecodeError: 109 | print("Found JSON-like structure but couldn't parse it.") 110 | print("Trying to fix common formatting issues...") 111 | 112 | # Try to fix missing quotes around keys 113 | fixed_json = re.sub(r'(\s*)(\w+)(\s*):(\s*)', r'\1"\2"\3:\4', json_str) 114 | # Fix trailing commas 115 | fixed_json = re.sub(r',(\s*[\]}])', r'\1', fixed_json) 116 | 117 | try: 118 | return json.loads(fixed_json) 119 | except: 120 | print("Could not fix JSON format issues") 121 | else: 122 | # Handle incomplete JSON - try to complete it 123 | print("Found incomplete JSON array, attempting to complete it...") 124 | 125 | # Get all complete objects from the array 126 | objects = [] 127 | obj_start = -1 128 | obj_end = -1 129 | brace_count = 0 130 | 131 | # First find all complete objects 132 | for i in range(start_idx + 1, len(text)): 133 | if text[i] == '{': 134 | if brace_count == 0: 135 | obj_start = i 136 | brace_count += 1 137 | elif text[i] == '}': 138 | brace_count -= 1 139 | if brace_count == 0: 140 | obj_end = i 141 | objects.append(text[obj_start:obj_end+1]) 142 | 143 | if objects: 144 | # Reconstruct a valid JSON array with complete objects 145 | reconstructed_json = "[\n" + ",\n".join(objects) + "\n]" 146 | try: 147 | return json.loads(reconstructed_json) 148 | except json.JSONDecodeError: 149 | print("Couldn't parse reconstructed JSON array.") 150 | print("Trying to fix common formatting issues...") 151 | 152 | # Try to fix missing quotes around keys 153 | fixed_json = re.sub(r'(\s*)(\w+)(\s*):(\s*)', r'\1"\2"\3:\4', reconstructed_json) 154 | # Fix trailing commas 155 | fixed_json = re.sub(r',(\s*[\]}])', r'\1', fixed_json) 156 | 157 | try: 158 | return json.loads(fixed_json) 159 | except: 160 | print("Could not fix JSON format issues in reconstructed array") 161 | 162 | print("No complete JSON array could be extracted") 163 | return None -------------------------------------------------------------------------------- /src/knowledge_graph/prompts.py: -------------------------------------------------------------------------------- 1 | """Centralized repository for all LLM prompts used in the knowledge graph system.""" 2 | 3 | # Phase 1: Main extraction prompts 4 | MAIN_SYSTEM_PROMPT = """ 5 | You are an 
advanced AI system specialized in knowledge extraction and knowledge graph generation. 6 | Your expertise includes identifying consistent entity references and meaningful relationships in text. 7 | CRITICAL INSTRUCTION: All relationships (predicates) MUST be no more than 3 words maximum. Ideally 1-2 words. This is a hard limit. 8 | """ 9 | 10 | MAIN_USER_PROMPT = """ 11 | Your task: Read the text below (delimited by triple backticks) and identify all Subject-Predicate-Object (S-P-O) relationships in each sentence. Then produce a single JSON array of objects, each representing one triple. 12 | 13 | Follow these rules carefully: 14 | 15 | - Entity Consistency: Use consistent names for entities throughout the document. For example, if "John Smith" is mentioned as "John", "Mr. Smith", and "John Smith" in different places, use a single consistent form (preferably the most complete one) in all triples. 16 | - Atomic Terms: Identify distinct key terms (e.g., objects, locations, organizations, acronyms, people, conditions, concepts, feelings). Avoid merging multiple ideas into one term (they should be as "atomistic" as possible). 17 | - Unified References: Replace any pronouns (e.g., "he," "she," "it," "they," etc.) with the actual referenced entity, if identifiable. 18 | - Pairwise Relationships: If multiple terms co-occur in the same sentence (or a short paragraph that makes them contextually related), create one triple for each pair that has a meaningful relationship. 19 | - CRITICAL INSTRUCTION: Predicates MUST be 1-3 words maximum. Never more than 3 words. Keep them extremely concise. 20 | - Ensure that all possible relationships are identified in the text and are captured in an S-P-O relation. 21 | - Standardize terminology: If the same concept appears with slight variations (e.g., "artificial intelligence" and "AI"), use the most common or canonical form consistently. 22 | - Make all the text of S-P-O text lower-case, even Names of people and places. 23 | - If a person is mentioned by name, create a relation to their location, profession and what they are known for (invented, wrote, started, title, etc.) if known and if it fits the context of the informaiton. 24 | 25 | Important Considerations: 26 | - Aim for precision in entity naming - use specific forms that distinguish between similar but different entities 27 | - Maximize connectedness by using identical entity names for the same concepts throughout the document 28 | - Consider the entire context when identifying entity references 29 | - ALL PREDICATES MUST BE 3 WORDS OR FEWER - this is a hard requirement 30 | 31 | Output Requirements: 32 | 33 | - Do not include any text or commentary outside of the JSON. 34 | - Return only the JSON array, with each triple as an object containing "subject", "predicate", and "object". 35 | - Make sure the JSON is valid and properly formatted. 36 | 37 | Example of the desired output structure: 38 | 39 | [ 40 | { 41 | "subject": "Term A", 42 | "predicate": "relates to", // Notice: only 2 words 43 | "object": "Term B" 44 | }, 45 | { 46 | "subject": "Term C", 47 | "predicate": "uses", // Notice: only 1 word 48 | "object": "Term D" 49 | } 50 | ] 51 | 52 | Important: Only output the JSON array (with the S-P-O objects) and nothing else 53 | 54 | Text to analyze (between triple backticks): 55 | """ 56 | 57 | # Phase 2: Entity standardization prompts 58 | ENTITY_RESOLUTION_SYSTEM_PROMPT = """ 59 | You are an expert in entity resolution and knowledge representation. 
60 | Your task is to standardize entity names from a knowledge graph to ensure consistency. 61 | """ 62 | 63 | def get_entity_resolution_user_prompt(entity_list): 64 | return f""" 65 | Below is a list of entity names extracted from a knowledge graph. 66 | Some may refer to the same real-world entities but with different wording. 67 | 68 | Please identify groups of entities that refer to the same concept, and provide a standardized name for each group. 69 | Return your answer as a JSON object where the keys are the standardized names and the values are arrays of all variant names that should map to that standard name. 70 | Only include entities that have multiple variants or need standardization. 71 | 72 | Entity list: 73 | {entity_list} 74 | 75 | Format your response as valid JSON like this: 76 | {{ 77 | "standardized name 1": ["variant 1", "variant 2"], 78 | "standardized name 2": ["variant 3", "variant 4", "variant 5"] 79 | }} 80 | """ 81 | 82 | # Phase 3: Community relationship inference prompts 83 | RELATIONSHIP_INFERENCE_SYSTEM_PROMPT = """ 84 | You are an expert in knowledge representation and inference. 85 | Your task is to infer plausible relationships between disconnected entities in a knowledge graph. 86 | """ 87 | 88 | def get_relationship_inference_user_prompt(entities1, entities2, triples_text): 89 | return f""" 90 | I have a knowledge graph with two disconnected communities of entities. 91 | 92 | Community 1 entities: {entities1} 93 | Community 2 entities: {entities2} 94 | 95 | Here are some existing relationships involving these entities: 96 | {triples_text} 97 | 98 | Please infer 2-3 plausible relationships between entities from Community 1 and entities from Community 2. 99 | Return your answer as a JSON array of triples in the following format: 100 | 101 | [ 102 | {{ 103 | "subject": "entity from community 1", 104 | "predicate": "inferred relationship", 105 | "object": "entity from community 2" 106 | }}, 107 | ... 108 | ] 109 | 110 | Only include highly plausible relationships with clear predicates. 111 | IMPORTANT: The inferred relationships (predicates) MUST be no more than 3 words maximum. Preferably 1-2 words. Never more than 3. 112 | For predicates, use short phrases that clearly describe the relationship. 113 | IMPORTANT: Make sure the subject and object are different entities - avoid self-references. 114 | """ 115 | 116 | # Phase 4: Within-community relationship inference prompts 117 | WITHIN_COMMUNITY_INFERENCE_SYSTEM_PROMPT = """ 118 | You are an expert in knowledge representation and inference. 119 | Your task is to infer plausible relationships between semantically related entities that are not yet connected in a knowledge graph. 120 | """ 121 | 122 | def get_within_community_inference_user_prompt(pairs_text, triples_text): 123 | return f""" 124 | I have a knowledge graph with several entities that appear to be semantically related but are not directly connected. 125 | 126 | Here are some pairs of entities that might be related: 127 | {pairs_text} 128 | 129 | Here are some existing relationships involving these entities: 130 | {triples_text} 131 | 132 | Please infer plausible relationships between these disconnected pairs. 133 | Return your answer as a JSON array of triples in the following format: 134 | 135 | [ 136 | {{ 137 | "subject": "entity1", 138 | "predicate": "inferred relationship", 139 | "object": "entity2" 140 | }}, 141 | ... 142 | ] 143 | 144 | Only include highly plausible relationships with clear predicates. 
145 | IMPORTANT: The inferred relationships (predicates) MUST be no more than 3 words maximum. Preferably 1-2 words. Never more than 3. 146 | IMPORTANT: Make sure that the subject and object are different entities - avoid self-references. 147 | """ -------------------------------------------------------------------------------- /data/industrial-revolution.txt: -------------------------------------------------------------------------------- 1 | The Industrial Revolution, originating in Great Britain during the late 18th century, represented one of the most significant turning points in human history. Marked by a transition from manual labor and agrarian economies to machine-based manufacturing, this era was driven by groundbreaking inventions and socioeconomic shifts. Early innovations, particularly in textiles, such as James Hargreaves' spinning jenny and Richard Arkwright's water frame, accelerated production dramatically, enabling factories to replace cottage industries. 2 | 3 | A key catalyst of the First Industrial Revolution was the refinement of the steam engine by Scottish engineer James Watt, enabling widespread adoption in manufacturing, mining, and transportation. Steam-powered locomotives and ships revolutionized transport, linking previously distant markets. Railways, exemplified by Britain's Liverpool-Manchester Railway and America's Transcontinental Railroad, interconnected continents and facilitated rapid urbanization. Industrial cities such as Manchester, Birmingham, Pittsburgh, and Lowell became epicenters of production, trade, and migration, although often suffering from overcrowded and unsanitary conditions. 4 | 5 | Simultaneously, advances in agriculture, notably the development of Eli Whitney's cotton gin and Cyrus McCormick’s mechanical reaper, allowed more efficient food and raw material production, feeding urban expansion. Economically, capitalism matured significantly, shaped by thinkers like Adam Smith, whose seminal work, The Wealth of Nations, laid ideological foundations for free-market economies. 6 | 7 | However, the initial industrial boom led to harsh labor conditions, prompting social movements for reform. Activists such as Robert Owen promoted improved worker conditions, founding utopian communities like New Lanark. This activism spurred legislation like Britain's Factory Acts, which limited child labor and introduced workplace safety regulations. 8 | 9 | The Second Industrial Revolution, occurring roughly between the late 19th and early 20th centuries, built upon these foundations through breakthroughs in electricity, steel, chemicals, and communications. Innovations included the mass-production methods pioneered by Henry Ford, notably the assembly line, dramatically reducing costs and reshaping consumer markets. Simultaneously, the invention of the telephone by Alexander Graham Bell and the radio by Guglielmo Marconi revolutionized global communications, enhancing interconnectedness and cultural exchange. 10 | 11 | Technological advancements like the Bessemer Process, developed by Henry Bessemer, revolutionized steel production, driving construction and infrastructure projects such as skyscrapers and bridges. Electrification, spearheaded by inventors like Thomas Edison and Nikola Tesla, brought electric lighting, motors, and appliances into homes and factories, transforming daily life and further accelerating urban growth. 
12 | 13 | This period also experienced the rise of significant labor movements and workers' rights organizations, responding to conditions in rapidly growing industrial enterprises. Ideological critiques of capitalism emerged prominently through philosophers and activists like Karl Marx and Friedrich Engels, whose influential works, including The Communist Manifesto, profoundly impacted political systems worldwide. 14 | 15 | Following World War II, humanity entered the Third Industrial Revolution, often termed the Digital Revolution, beginning around the mid-20th century and extending into the late 20th century. This era was characterized by transformative developments in electronics, computing, and automation. The invention of the transistor by researchers at Bell Labs and later, the integrated circuit, paved the way for modern computing devices, drastically altering industries, from manufacturing and finance to entertainment and healthcare. 16 | 17 | Personal computing, popularized by pioneers like Steve Jobs, Steve Wozniak, and Bill Gates, democratized access to information technology, transforming societal interactions and business practices. The creation of the Internet, initially developed by agencies like DARPA and expanded globally with contributions from visionaries such as Tim Berners-Lee, enabled instantaneous communication and unprecedented information sharing worldwide, ushering in the Information Age. 18 | 19 | Advancements in automation and robotics significantly altered manufacturing industries, exemplified by companies like Toyota, which pioneered robotic assembly lines to improve efficiency and product consistency. These technologies enabled mass customization and just-in-time production, reshaping global supply chains and economic structures. 20 | 21 | As the Digital Revolution matured, the emergence of mobile communications, represented by the development of cellular technologies by companies such as Motorola, Nokia, and later Apple and Samsung, further reshaped global interactions. The rise of e-commerce, spearheaded by platforms like Amazon and Alibaba, transformed retail, logistics, and consumer behavior at a global scale. 22 | 23 | In recent decades, humanity has begun experiencing the Fourth Industrial Revolution, distinguished by the fusion of physical, digital, and biological domains. This revolution centers around groundbreaking technologies such as artificial intelligence (AI), machine learning, robotics, biotechnology, quantum computing, blockchain, and the Internet of Things (IoT). AI development, advanced by research institutions and corporations including OpenAI, Google, Microsoft, and IBM, has enabled capabilities such as natural language processing, predictive analytics, autonomous vehicles, and smart assistants like ChatGPT. 24 | 25 | Machine learning algorithms are now fundamental in fields like healthcare, finance, and manufacturing, enabling sophisticated predictive modeling and optimization of processes. Autonomous vehicles, championed by companies such as Tesla, Waymo, and Cruise, have begun reshaping transportation and logistics, promising significant changes in urban planning and infrastructure. 26 | 27 | Blockchain technology, exemplified by cryptocurrencies like Bitcoin and Ethereum, has challenged traditional financial and legal institutions by introducing decentralized financial systems and smart contracts. 
Biotechnology advancements, such as CRISPR-Cas9 gene editing, have introduced possibilities of addressing previously incurable diseases, significantly altering medicine and ethical considerations. 28 | 29 | Quantum computing, currently in early stages of development by organizations such as IBM, Google, and research institutions like MIT, promises unprecedented computational power, potentially revolutionizing fields like cryptography, materials science, and pharmaceuticals. 30 | 31 | As the Fourth Industrial Revolution unfolds, critical questions emerge concerning economic inequality, ethics, data privacy, and employment. Debates on how best to manage technological progress are shaped by concerns around automation’s impact on employment, privacy implications of pervasive data collection, and the ethical dimensions of artificial intelligence and biotechnology. Institutions such as the World Economic Forum, the United Nations, and governments globally are actively engaging with these topics to manage technological progress responsibly. 32 | 33 | Collectively, these successive industrial revolutions have profoundly transformed society, reshaped global economic systems, altered cultural norms, and significantly influenced human interactions with technology and the environment. Understanding this extensive and interconnected narrative is essential for grasping the complex interplay between technological innovation, socioeconomic shifts, global politics, and human civilization as a whole. 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2025 Robert McDermott (robert.c.mcdermott@gmail.com) 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. -------------------------------------------------------------------------------- /src/knowledge_graph/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Knowledge Graph Generator and Visualizer main module. 
3 | """ 4 | import argparse 5 | import json 6 | import os 7 | import sys 8 | 9 | # Add the parent directory to the Python path for imports 10 | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) 11 | 12 | from src.knowledge_graph.config import load_config 13 | from src.knowledge_graph.llm import call_llm, extract_json_from_text 14 | from src.knowledge_graph.visualization import visualize_knowledge_graph, sample_data_visualization 15 | from src.knowledge_graph.text_utils import chunk_text 16 | from src.knowledge_graph.entity_standardization import standardize_entities, infer_relationships, limit_predicate_length 17 | from src.knowledge_graph.prompts import MAIN_SYSTEM_PROMPT, MAIN_USER_PROMPT 18 | 19 | def process_with_llm(config, input_text, debug=False): 20 | """ 21 | Process input text with LLM to extract triples. 22 | 23 | Args: 24 | config: Configuration dictionary 25 | input_text: Text to analyze 26 | debug: If True, print detailed debug information 27 | 28 | Returns: 29 | List of extracted triples or None if processing failed 30 | """ 31 | # Use prompts from the prompts module 32 | system_prompt = MAIN_SYSTEM_PROMPT 33 | user_prompt = MAIN_USER_PROMPT 34 | user_prompt += f"```\n{input_text}```\n" 35 | 36 | # LLM configuration 37 | model = config["llm"]["model"] 38 | api_key = config["llm"]["api_key"] 39 | max_tokens = config["llm"]["max_tokens"] 40 | temperature = config["llm"]["temperature"] 41 | base_url = config["llm"]["base_url"] 42 | 43 | # Process with LLM 44 | metadata = {} 45 | response = call_llm(model, user_prompt, api_key, system_prompt, max_tokens, temperature, base_url) 46 | 47 | # Print raw response only if debug mode is on 48 | if debug: 49 | print("Raw LLM response:") 50 | print(response) 51 | print("\n---\n") 52 | 53 | # Extract JSON from the response 54 | result = extract_json_from_text(response) 55 | 56 | if result: 57 | # Validate and filter triples to ensure they have all required fields 58 | valid_triples = [] 59 | invalid_count = 0 60 | 61 | for item in result: 62 | if isinstance(item, dict) and "subject" in item and "predicate" in item and "object" in item: 63 | # Add metadata to valid items 64 | valid_triples.append(dict(item, **metadata)) 65 | else: 66 | invalid_count += 1 67 | 68 | if invalid_count > 0: 69 | print(f"Warning: Filtered out {invalid_count} invalid triples missing required fields") 70 | 71 | if not valid_triples: 72 | print("Error: No valid triples found in LLM response") 73 | return None 74 | 75 | # Apply predicate length limit to all valid triples 76 | for triple in valid_triples: 77 | triple["predicate"] = limit_predicate_length(triple["predicate"]) 78 | 79 | # Print extracted JSON only if debug mode is on 80 | if debug: 81 | print("Extracted JSON:") 82 | print(json.dumps(valid_triples, indent=2)) # Pretty print the JSON 83 | 84 | return valid_triples 85 | else: 86 | # Always print error messages even if debug is off 87 | print("\n\nERROR ### Could not extract valid JSON from response: ", response, "\n\n") 88 | return None 89 | 90 | def process_text_in_chunks(config, full_text, debug=False): 91 | """ 92 | Process a large text by breaking it into chunks with overlap, 93 | and then processing each chunk separately. 
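Entity standardization and relationship inference are then applied to the combined results when enabled in the configuration.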
94 | 95 | Args: 96 | config: Configuration dictionary 97 | full_text: The complete text to process 98 | debug: If True, print detailed debug information 99 | 100 | Returns: 101 | List of all extracted triples from all chunks 102 | """ 103 | # Get chunking parameters from config 104 | chunk_size = config.get("chunking", {}).get("chunk_size", 500) 105 | overlap = config.get("chunking", {}).get("overlap", 50) 106 | 107 | # Split text into chunks 108 | text_chunks = chunk_text(full_text, chunk_size, overlap) 109 | 110 | print("=" * 50) 111 | print("PHASE 1: INITIAL TRIPLE EXTRACTION") 112 | print("=" * 50) 113 | print(f"Processing text in {len(text_chunks)} chunks (size: {chunk_size} words, overlap: {overlap} words)") 114 | 115 | # Process each chunk 116 | all_results = [] 117 | for i, chunk in enumerate(text_chunks): 118 | print(f"Processing chunk {i+1}/{len(text_chunks)} ({len(chunk.split())} words)") 119 | 120 | # Process the chunk with LLM 121 | chunk_results = process_with_llm(config, chunk, debug) 122 | 123 | if chunk_results: 124 | # Add chunk information to each triple 125 | for item in chunk_results: 126 | item["chunk"] = i + 1 127 | 128 | # Add to overall results 129 | all_results.extend(chunk_results) 130 | else: 131 | print(f"Warning: Failed to extract triples from chunk {i+1}") 132 | 133 | print(f"\nExtracted a total of {len(all_results)} triples from all chunks") 134 | 135 | # Apply entity standardization if enabled 136 | if config.get("standardization", {}).get("enabled", False): 137 | print("\n" + "="*50) 138 | print("PHASE 2: ENTITY STANDARDIZATION") 139 | print("="*50) 140 | print(f"Starting with {len(all_results)} triples and {len(get_unique_entities(all_results))} unique entities") 141 | 142 | all_results = standardize_entities(all_results, config) 143 | 144 | print(f"After standardization: {len(all_results)} triples and {len(get_unique_entities(all_results))} unique entities") 145 | 146 | # Apply relationship inference if enabled 147 | if config.get("inference", {}).get("enabled", False): 148 | print("\n" + "="*50) 149 | print("PHASE 3: RELATIONSHIP INFERENCE") 150 | print("="*50) 151 | print(f"Starting with {len(all_results)} triples") 152 | 153 | # Count existing relationships 154 | relationship_counts = {} 155 | for triple in all_results: 156 | relationship_counts[triple["predicate"]] = relationship_counts.get(triple["predicate"], 0) + 1 157 | 158 | print("Top 5 relationship types before inference:") 159 | for pred, count in sorted(relationship_counts.items(), key=lambda x: x[1], reverse=True)[:5]: 160 | print(f" - {pred}: {count} occurrences") 161 | 162 | all_results = infer_relationships(all_results, config) 163 | 164 | # Count relationships after inference 165 | relationship_counts_after = {} 166 | for triple in all_results: 167 | relationship_counts_after[triple["predicate"]] = relationship_counts_after.get(triple["predicate"], 0) + 1 168 | 169 | print("\nTop 5 relationship types after inference:") 170 | for pred, count in sorted(relationship_counts_after.items(), key=lambda x: x[1], reverse=True)[:5]: 171 | print(f" - {pred}: {count} occurrences") 172 | 173 | # Count inferred relationships 174 | inferred_count = sum(1 for triple in all_results if triple.get("inferred", False)) 175 | print(f"\nAdded {inferred_count} inferred relationships") 176 | print(f"Final knowledge graph: {len(all_results)} triples") 177 | 178 | return all_results 179 | 180 | def get_unique_entities(triples): 181 | """ 182 | Get the set of unique entities from the triples. 
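Both subjects and objects are treated as entities; predicates are ignored.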
183 | 184 | Args: 185 | triples: List of triple dictionaries 186 | 187 | Returns: 188 | Set of unique entity names 189 | """ 190 | entities = set() 191 | for triple in triples: 192 | if not isinstance(triple, dict): 193 | continue 194 | if "subject" in triple: 195 | entities.add(triple["subject"]) 196 | if "object" in triple: 197 | entities.add(triple["object"]) 198 | return entities 199 | 200 | def main(): 201 | """Main entry point for the knowledge graph generator.""" 202 | # Parse command line arguments 203 | parser = argparse.ArgumentParser(description='Knowledge Graph Generator and Visualizer') 204 | parser.add_argument('--test', action='store_true', help='Generate a test visualization with sample data') 205 | parser.add_argument('--config', type=str, default='config.toml', help='Path to configuration file') 206 | parser.add_argument('--output', type=str, default='knowledge_graph.html', help='Output HTML file path') 207 | parser.add_argument('--input', type=str, required=False, help='Path to input text file (required unless --test is used)') 208 | parser.add_argument('--debug', action='store_true', help='Enable debug output (raw LLM responses and extracted JSON)') 209 | parser.add_argument('--no-standardize', action='store_true', help='Disable entity standardization') 210 | parser.add_argument('--no-inference', action='store_true', help='Disable relationship inference') 211 | 212 | args = parser.parse_args() 213 | 214 | # Load configuration 215 | config = load_config(args.config) 216 | if not config: 217 | print(f"Failed to load configuration from {args.config}. Exiting.") 218 | return 219 | 220 | # If test flag is provided, generate a sample visualization 221 | if args.test: 222 | print("Generating sample data visualization...") 223 | sample_data_visualization(args.output, config=config) 224 | print(f"\nSample visualization saved to {args.output}") 225 | print(f"To view the visualization, open the following file in your browser:") 226 | print(f"file://{os.path.abspath(args.output)}") 227 | return 228 | 229 | # For normal processing, input file is required 230 | if not args.input: 231 | print("Error: --input is required unless --test is used") 232 | parser.print_help() 233 | return 234 | 235 | # Override configuration settings with command line arguments 236 | if args.no_standardize: 237 | config.setdefault("standardization", {})["enabled"] = False 238 | if args.no_inference: 239 | config.setdefault("inference", {})["enabled"] = False 240 | 241 | # Load input text from file 242 | try: 243 | with open(args.input, 'r', encoding='utf-8') as f: 244 | input_text = f.read() 245 | print(f"Using input text from file: {args.input}") 246 | except Exception as e: 247 | print(f"Error reading input file {args.input}: {e}") 248 | return 249 | 250 | # Process text in chunks 251 | result = process_text_in_chunks(config, input_text, args.debug) 252 | 253 | if result: 254 | # Save the raw data as JSON for potential reuse 255 | json_output = args.output.replace('.html', '.json') 256 | try: 257 | with open(json_output, 'w', encoding='utf-8') as f: 258 | json.dump(result, f, indent=2) 259 | print(f"Saved raw knowledge graph data to {json_output}") 260 | except Exception as e: 261 | print(f"Warning: Could not save raw data to {json_output}: {e}") 262 | 263 | # Visualize the knowledge graph 264 | stats = visualize_knowledge_graph(result, args.output, config=config) 265 | print("\nKnowledge Graph Statistics:") 266 | print(f"Nodes: {stats['nodes']}") 267 | print(f"Edges: {stats['edges']}") 268 | 
print(f"Communities: {stats['communities']}") 269 | 270 | # Provide command to open the visualization in a browser 271 | print("\nTo view the visualization, open the following file in your browser:") 272 | print(f"file://{os.path.abspath(args.output)}") 273 | else: 274 | print("Knowledge graph generation failed due to errors in LLM processing.") 275 | 276 | if __name__ == "__main__": 277 | main() -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![ai-knowledge-graph-example](https://github.com/robert-mcdermott/ai-knowledge-graph/blob/main/data/ai-knowledge-graph-example.png) 2 | 3 | # AI Powered Knowledge Graph Generator 4 | 5 | This system takes an unstructured text document, uses an LLM of your choice to extract knowledge in the form of Subject-Predicate-Object (SPO) triples, and visualizes the relationships as an interactive knowledge graph. 6 | A demo of a knowledge graph created with this project can be found here: [Industrial-Revolution Knowledge Graph](https://robert-mcdermott.github.io/ai-knowledge-graph/) 7 | 8 | 9 | ## Features 10 | 11 | - **Text Chunking**: Automatically splits large documents into manageable chunks for processing 12 | - **Knowledge Extraction**: Uses AI to identify entities and their relationships 13 | - **Entity Standardization**: Ensures consistent entity naming across document chunks 14 | - **Relationship Inference**: Discovers additional relationships between disconnected parts of the graph 15 | - **Interactive Visualization**: Creates an interactive graph visualization 16 | - **Works with Any OpenAI Compatible API Endpoint**: Ollama, LM Studio, OpenAI, vLLM, LiteLLM (provides access to AWS Bedrock, Azure OpenAI, Anthropic and many other LLM services) 17 | 18 | ## Requirements 19 | 20 | - Python 3.12+ 21 | - Required packages (install using `pip install -r requirements.txt` or `uv sync`) 22 | 23 | ## Quick Start 24 | 25 | 1. Clone this repository 26 | 2. Install dependencies: `pip install -r requirements.txt` 27 | 3. Configure your settings in `config.toml` 28 | 4. Run the system: 29 | 30 | ```bash 31 | python generate-graph.py --input your_text_file.txt --output knowledge_graph.html 32 | ``` 33 | 34 | Or with UV: 35 | 36 | ```bash 37 | uv run generate-graph.py --input your_text_file.txt --output knowledge_graph.html 38 | ``` 39 | Or installing and using as a module: 40 | 41 | ```bash 42 | pip install --upgrade -e . 
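# editable install; exposes the generate-graph console script defined under [project.scripts] in pyproject.toml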
43 | generate-graph --input your_text_file.txt --output knowledge_graph.html 44 | ``` 45 | 46 | ## Configuration 47 | 48 | The system can be configured using the `config.toml` file: 49 | 50 | ```toml 51 | [llm] 52 | model = "gemma3" # Google open weight model 53 | api_key = "sk-1234" 54 | base_url = "http://localhost:11434/v1/chat/completions" # Local Ollama instance running locally (but can be any OpenAI compatible endpoint) 55 | max_tokens = 8192 56 | temperature = 0.2 57 | 58 | [chunking] 59 | chunk_size = 200 # Number of words per chunk 60 | overlap = 20 # Number of words to overlap between chunks 61 | 62 | [standardization] 63 | enabled = true # Enable entity standardization 64 | use_llm_for_entities = true # Use LLM for additional entity resolution 65 | 66 | [inference] 67 | enabled = true # Enable relationship inference 68 | use_llm_for_inference = true # Use LLM for relationship inference 69 | apply_transitive = true # Apply transitive inference rules 70 | ``` 71 | 72 | ## Command Line Options 73 | 74 | - `--input FILE`: Input text file to process 75 | - `--output FILE`: Output HTML file for visualization (default: knowledge_graph.html) 76 | - `--config FILE`: Path to config file (default: config.toml) 77 | - `--debug`: Enable debug output with raw LLM responses 78 | - `--no-standardize`: Disable entity standardization 79 | - `--no-inference`: Disable relationship inference 80 | - `--test`: Generate sample visualization using test data 81 | 82 | ### Usage message (--help) 83 | 84 | ```bash 85 | generate-graph --help 86 | usage: generate-graph [-h] [--test] [--config CONFIG] [--output OUTPUT] [--input INPUT] [--debug] [--no-standardize] [--no-inference] 87 | 88 | Knowledge Graph Generator and Visualizer 89 | 90 | options: 91 | -h, --help show this help message and exit 92 | --test Generate a test visualization with sample data 93 | --config CONFIG Path to configuration file 94 | --output OUTPUT Output HTML file path 95 | --input INPUT Path to input text file (required unless --test is used) 96 | --debug Enable debug output (raw LLM responses and extracted JSON) 97 | --no-standardize Disable entity standardization 98 | --no-inference Disable relationship inference 99 | ``` 100 | 101 | ### Example Run 102 | 103 | **Command:** 104 | 105 | ```bash 106 | generate-graph --input data/industrial-revolution.txt --output industrial-revolution-kg.html 107 | ``` 108 | **Console Output:** 109 | 110 | ```markdown 111 | Using input text from file: data/industrial-revolution.txt 112 | ================================================== 113 | PHASE 1: INITIAL TRIPLE EXTRACTION 114 | ================================================== 115 | Processing text in 13 chunks (size: 100 words, overlap: 20 words) 116 | Processing chunk 1/13 (100 words) 117 | Processing chunk 2/13 (100 words) 118 | Processing chunk 3/13 (100 words) 119 | Processing chunk 4/13 (100 words) 120 | Processing chunk 5/13 (100 words) 121 | Processing chunk 6/13 (100 words) 122 | Processing chunk 7/13 (100 words) 123 | Processing chunk 8/13 (100 words) 124 | Processing chunk 9/13 (100 words) 125 | Processing chunk 10/13 (100 words) 126 | Processing chunk 11/13 (100 words) 127 | Processing chunk 12/13 (86 words) 128 | Processing chunk 13/13 (20 words) 129 | 130 | Extracted a total of 216 triples from all chunks 131 | 132 | ================================================== 133 | PHASE 2: ENTITY STANDARDIZATION 134 | ================================================== 135 | Starting with 216 triples and 201 unique entities 136 | 
Standardizing entity names across all triples... 137 | Applied LLM-based entity standardization for 15 entity groups 138 | Standardized 201 entities into 181 standard forms 139 | After standardization: 216 triples and 160 unique entities 140 | 141 | ================================================== 142 | PHASE 3: RELATIONSHIP INFERENCE 143 | ================================================== 144 | Starting with 216 triples 145 | Top 5 relationship types before inference: 146 | - enables: 20 occurrences 147 | - impacts: 15 occurrences 148 | - enabled: 12 occurrences 149 | - pioneered: 10 occurrences 150 | - invented: 9 occurrences 151 | Inferring additional relationships between entities... 152 | Identified 9 disconnected communities in the graph 153 | Inferred 3 new relationships between communities 154 | Inferred 3 new relationships between communities 155 | Inferred 3 new relationships between communities 156 | Inferred 3 new relationships between communities 157 | Inferred 3 new relationships between communities 158 | Inferred 3 new relationships between communities 159 | Inferred 3 new relationships between communities 160 | Inferred 3 new relationships between communities 161 | Inferred 3 new relationships between communities 162 | Inferred 3 new relationships between communities 163 | Inferred 9 new relationships within communities 164 | Inferred 2 new relationships within communities 165 | Inferred 88 relationships based on lexical similarity 166 | Added -22 inferred relationships 167 | 168 | Top 5 relationship types after inference: 169 | - related to: 65 occurrences 170 | - advances via Artificial Intelligence: 36 occurrences 171 | - pioneered via computing: 26 occurrences 172 | - enables via computing: 24 occurrences 173 | - enables: 21 occurrences 174 | 175 | Added 370 inferred relationships 176 | Final knowledge graph: 564 triples 177 | Saved raw knowledge graph data to /mnt/c/Users/rmcdermo/Documents/industrial-revolution-kg.json 178 | Processing 564 triples for visualization 179 | Found 161 unique nodes 180 | Found 355 inferred relationships 181 | Detected 9 communities using Louvain method 182 | Nodes in NetworkX graph: 161 183 | Edges in NetworkX graph: 537 184 | Knowledge graph visualization saved to /mnt/c/Users/rmcdermo/Documents/industrial-revolution-kg.html 185 | Graph Statistics: { 186 | "nodes": 161, 187 | "edges": 564, 188 | "original_edges": 209, 189 | "inferred_edges": 355, 190 | "communities": 9 191 | } 192 | 193 | Knowledge Graph Statistics: 194 | Nodes: 161 195 | Edges: 564 196 | Communities: 9 197 | 198 | To view the visualization, open the following file in your browser: 199 | file:///mnt/c/Users/rmcdermo/Documents/industrial-revolution-kg.html 200 | ``` 201 | 202 | ## How It Works 203 | 204 | 1. **Chunking**: The document is split into overlapping chunks to fit within the LLM's context window 205 | 2. **First Pass - SPO Extraction**: 206 | - Each chunk is processed by the LLM to extract Subject-Predicate-Object triplets 207 | - Implemented in the `process_with_llm` function 208 | - The LLM identifies entities and their relationships within each text segment 209 | - Results are collected across all chunks to form the initial knowledge graph 210 | 3. 
**Second Pass - Entity Standardization**: 211 | - Basic standardization through text normalization 212 | - Optional LLM-assisted entity alignment (controlled by `standardization.use_llm_for_entities` config) 213 | - When enabled, the LLM reviews all unique entities from the graph and identifies groups that refer to the same concept 214 | - This resolves cases where the same entity appears differently across chunks (e.g., "AI", "artificial intelligence", "AI system") 215 | - Standardization helps create a more coherent and navigable knowledge graph 216 | 4. **Third Pass - Relationship Inference**: 217 | - Automatic inference of transitive relationships 218 | - Optional LLM-assisted inference between disconnected graph components (controlled by `inference.use_llm_for_inference` config) 219 | - When enabled, the LLM analyzes representative entities from disconnected communities and infers plausible relationships 220 | - This reduces graph fragmentation by adding logical connections not explicitly stated in the text 221 | - Both rule-based and LLM-based inference methods work together to create a more comprehensive graph 222 | 5. **Visualization**: An interactive HTML visualization is generated using the PyVis library 223 | 224 | Both the second and third passes are optional and can be disabled in the configuration to minimize LLM usage or control these processes manually. 225 | 226 | ## Visualization Features 227 | 228 | - **Color-coded Communities**: Node colors represent different communities 229 | - **Node Size**: Nodes sized by importance (degree, betweenness, eigenvector centrality) 230 | - **Relationship Types**: Original relationships shown as solid lines, inferred relationships as dashed lines 231 | - **Interactive Controls**: Zoom, pan, hover for details, filtering and physics controls 232 | - **Light (default) and Dark mode themes**. 233 | 234 | ## Project Layout 235 | 236 | ``` 237 | . 238 | ├── config.toml # Main configuration file for the system 239 | ├── generate-graph.py # Entry point when run directly as a script 240 | ├── pyproject.toml # Python project metadata and build configuration 241 | ├── requirements.txt # Python dependencies for 'pip' users 242 | ├── uv.lock # Python dependencies for 'uv' users 243 | └── src/ # Source code 244 | ├── generate_graph.py # Main entry point script when run as a module 245 | └── knowledge_graph/ # Core package 246 | ├── __init__.py # Package initialization 247 | ├── config.py # Configuration loading and validation 248 | ├── entity_standardization.py # Entity standardization algorithms 249 | ├── llm.py # LLM interaction and response processing 250 | ├── main.py # Main program flow and orchestration 251 | ├── prompts.py # Centralized collection of LLM prompts 252 | ├── text_utils.py # Text processing and chunking utilities 253 | ├── visualization.py # Knowledge graph visualization generator 254 | └── templates/ # HTML templates for visualization 255 | └── graph_template.html # Base template for interactive graph 256 | ``` 257 | 258 | ## Program Flow 259 | 260 | This diagram illustrates the program flow. 
261 | 262 | ```mermaid 263 | flowchart TD 264 | %% Main entry points 265 | A[main.py - Entry Point] --> B{Parse Arguments} 266 | 267 | %% Test mode branch 268 | B -->|--test flag| C[sample_data_visualization] 269 | C --> D[visualize_knowledge_graph] 270 | 271 | %% Normal processing branch 272 | B -->|normal processing| E[load_config] 273 | E --> F[process_text_in_chunks] 274 | 275 | %% Text processing 276 | F --> G[chunk_text] 277 | G --> H[process_with_llm] 278 | 279 | %% LLM processing 280 | H --> I[call_llm] 281 | I --> J[extract_json_from_text] 282 | 283 | %% Entity standardization phase 284 | F --> K{standardization enabled?} 285 | K -->|yes| L[standardize_entities] 286 | K -->|no| M{inference enabled?} 287 | L --> M 288 | 289 | %% Relationship inference phase 290 | M -->|yes| N[infer_relationships] 291 | M -->|no| O[visualize_knowledge_graph] 292 | N --> O 293 | 294 | %% Visualization components 295 | O --> P[_calculate_centrality_metrics] 296 | O --> Q[_detect_communities] 297 | O --> R[_calculate_node_sizes] 298 | O --> S[_add_nodes_and_edges_to_network] 299 | O --> T[_get_visualization_options] 300 | O --> U[_save_and_modify_html] 301 | 302 | %% Subprocesses 303 | L --> L1[_resolve_entities_with_llm] 304 | N --> N1[_identify_communities] 305 | N --> N2[_infer_relationships_with_llm] 306 | N --> N3[_infer_within_community_relationships] 307 | N --> N4[_apply_transitive_inference] 308 | N --> N5[_infer_relationships_by_lexical_similarity] 309 | N --> N6[_deduplicate_triples] 310 | 311 | %% File outputs 312 | U --> V[HTML Visualization] 313 | F --> W[JSON Data Export] 314 | 315 | %% Prompts usage 316 | Y[prompts.py] --> H 317 | Y --> L1 318 | Y --> N2 319 | Y --> N3 320 | 321 | %% Module dependencies 322 | subgraph Modules 323 | main.py 324 | config.py 325 | text_utils.py 326 | llm.py 327 | entity_standardization.py 328 | visualization.py 329 | prompts.py 330 | end 331 | 332 | %% Phases 333 | subgraph Phase 1: Triple Extraction 334 | G 335 | H 336 | I 337 | J 338 | end 339 | 340 | subgraph Phase 2: Entity Standardization 341 | L 342 | L1 343 | end 344 | 345 | subgraph Phase 3: Relationship Inference 346 | N 347 | N1 348 | N2 349 | N3 350 | N4 351 | N5 352 | N6 353 | end 354 | 355 | subgraph Phase 4: Visualization 356 | O 357 | P 358 | Q 359 | R 360 | S 361 | T 362 | U 363 | end 364 | ``` 365 | 366 | ## Program Flow Description 367 | 368 | 1. **Entry Point**: The program starts in `main.py` which parses command-line arguments. 369 | 370 | 2. **Mode Selection**: 371 | - If `--test` flag is provided, it generates a sample visualization 372 | - Otherwise, it processes the input text file 373 | 374 | 3. **Configuration**: Loads settings from `config.toml` using `config.py` 375 | 376 | 4. **Text Processing**: 377 | - Breaks text into chunks with overlap using `text_utils.py` 378 | - Processes each chunk with the LLM to extract triples 379 | - Uses prompts from `prompts.py` to guide the LLM's extraction process 380 | 381 | 5. **Entity Standardization** (optional): 382 | - Standardizes entity names across all triples 383 | - May use LLM for entity resolution in ambiguous cases 384 | - Uses specialized prompts from `prompts.py` for entity resolution 385 | 386 | 6. **Relationship Inference** (optional): 387 | - Identifies communities in the graph 388 | - Infers relationships between disconnected communities 389 | - Applies transitive inference and lexical similarity rules 390 | - Uses specialized prompts from `prompts.py` for relationship inference 391 | - Deduplicates triples 392 | 393 | 7. 
**Visualization**: 394 | - Calculates centrality metrics and community detection 395 | - Determines node sizes and colors based on importance 396 | - Creates an interactive HTML visualization using PyVis 397 | - Customizes the HTML with templates 398 | 399 | 8. **Output**: 400 | - Saves the knowledge graph as both HTML and JSON 401 | - Displays statistics about nodes, edges, and communities -------------------------------------------------------------------------------- /src/knowledge_graph/visualization.py: -------------------------------------------------------------------------------- 1 | """Visualization utilities for knowledge graphs.""" 2 | import networkx as nx 3 | import json 4 | import re 5 | import os 6 | from pyvis.network import Network 7 | 8 | # HTML template for visualization is now stored in a separate file 9 | def _load_html_template(): 10 | """Load the HTML template from the template file.""" 11 | template_path = os.path.join(os.path.dirname(__file__), 'templates', 'graph_template.html') 12 | try: 13 | with open(template_path, 'r', encoding='utf-8') as f: 14 | return f.read() 15 | except Exception as e: 16 | print(f"Warning: Could not load template file: {e}") 17 | return '
' # Fallback to basic template 18 | 19 | def visualize_knowledge_graph(triples, output_file="knowledge_graph.html", edge_smooth=None, config=None): 20 | """ 21 | Create and visualize a knowledge graph from subject-predicate-object triples. 22 | 23 | Args: 24 | triples: List of dictionaries with 'subject', 'predicate', and 'object' keys 25 | output_file: HTML file to save the visualization 26 | edge_smooth: Edge smoothing setting (overrides config): 27 | false, "dynamic", "continuous", "discrete", "diagonalCross", 28 | "straightCross", "horizontal", "vertical", "curvedCW", "curvedCCW", "cubicBezier" 29 | config: Configuration dictionary (optional) 30 | 31 | Returns: 32 | Dictionary with graph statistics 33 | """ 34 | # Determine edge smoothing from config if not explicitly provided 35 | if edge_smooth is None and config is not None: 36 | edge_smooth = config.get("visualization", {}).get("edge_smooth", False) 37 | elif edge_smooth is None: 38 | edge_smooth = False 39 | 40 | if not triples: 41 | print("Warning: No triples provided for visualization") 42 | return {"nodes": 0, "edges": 0, "communities": 0} 43 | 44 | print(f"Processing {len(triples)} triples for visualization") 45 | 46 | # Create a directed graph 47 | G = nx.DiGraph() 48 | 49 | # Dictionary to store node groups for community visualization 50 | node_communities = {} 51 | 52 | # Set of all unique nodes 53 | all_nodes = set() 54 | 55 | # Track inferred vs. original relationships 56 | inferred_edges = set() 57 | 58 | # Add all subjects and objects as nodes 59 | for triple in triples: 60 | subject = triple["subject"] 61 | obj = triple["object"] 62 | all_nodes.add(subject) 63 | all_nodes.add(obj) 64 | 65 | # Mark inferred relationships 66 | if triple.get("inferred", False): 67 | inferred_edges.add((subject, obj)) 68 | 69 | print(f"Found {len(all_nodes)} unique nodes") 70 | print(f"Found {len(inferred_edges)} inferred relationships") 71 | 72 | # Create an undirected graph for community detection and centrality measures 73 | G_undirected = nx.Graph() 74 | 75 | for triple in triples: 76 | G_undirected.add_edge(triple["subject"], triple["object"]) 77 | 78 | # Calculate centrality metrics 79 | centrality_metrics = _calculate_centrality_metrics(G_undirected, all_nodes) 80 | betweenness = centrality_metrics["betweenness"] 81 | degree = centrality_metrics["degree"] 82 | eigenvector = centrality_metrics["eigenvector"] 83 | 84 | # Calculate communities 85 | node_communities, community_count = _detect_communities(G_undirected, all_nodes) 86 | 87 | # Define colors for communities - these are standard colorblind-friendly colors 88 | colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf'] 89 | 90 | # Calculate node sizes based on centrality metrics 91 | node_sizes = _calculate_node_sizes(all_nodes, betweenness, degree, eigenvector) 92 | 93 | # Add nodes to the graph with community colors and sizes 94 | for node in all_nodes: 95 | community = node_communities[node] 96 | G.add_node( 97 | node, 98 | color=colors[community % len(colors)], # Ensure we don't go out of bounds 99 | label=node, # Explicit label 100 | title=f"{node} - Connections: {degree.get(node, 0)}", # Simple tooltip without HTML tags 101 | size=node_sizes[node] 102 | ) 103 | 104 | # Add edges with predicates as labels 105 | for triple in triples: 106 | subject = triple["subject"] 107 | obj = triple["object"] 108 | 109 | # Determine if this is an inferred relationship 110 | is_inferred = triple.get("inferred", False) 111 | 112 | G.add_edge( 113 | 
subject, 114 | obj, 115 | title=triple["predicate"], 116 | label=triple["predicate"], 117 | arrows="to", # Add arrow direction 118 | width=1, # Edge width 119 | dashes=is_inferred, # Use dashed lines for inferred relationships 120 | color="#555555" if is_inferred else None # Lighter color for inferred relationships 121 | ) 122 | 123 | # Create a PyVis network with explicit configuration 124 | net = Network( 125 | height="100%", 126 | width="100%", 127 | directed=True, 128 | notebook=False, 129 | cdn_resources='in_line', # Include resources in-line to ensure independence 130 | bgcolor="#ffffff", 131 | font_color=True, 132 | select_menu=False, 133 | filter_menu=False 134 | ) 135 | 136 | # Dump some debug info 137 | print(f"Nodes in NetworkX graph: {G.number_of_nodes()}") 138 | print(f"Edges in NetworkX graph: {G.number_of_edges()}") 139 | 140 | # Add nodes and edges from NetworkX graph - do this explicitly for better control 141 | _add_nodes_and_edges_to_network(net, G) 142 | 143 | # Set visualization options 144 | options = _get_visualization_options(edge_smooth) 145 | 146 | # Set all options in one go with proper JSON 147 | net.set_options(json.dumps(options)) 148 | 149 | 150 | # Save the network as HTML and modify with custom template 151 | _save_and_modify_html(net, output_file, community_count, all_nodes, triples) 152 | 153 | # Return statistics 154 | original_edges = len(triples) - len(inferred_edges) 155 | stats = { 156 | "nodes": len(all_nodes), 157 | "edges": len(triples), 158 | "original_edges": original_edges, 159 | "inferred_edges": len(inferred_edges), 160 | "communities": len(set(node_communities.values())) 161 | } 162 | print(f"Graph Statistics: {json.dumps(stats, indent=2)}") 163 | return stats 164 | 165 | def _calculate_centrality_metrics(G_undirected, all_nodes): 166 | """Calculate centrality metrics for the graph nodes.""" 167 | # Betweenness centrality - nodes that bridge communities are more important 168 | betweenness = nx.betweenness_centrality(G_undirected) 169 | 170 | # Degree centrality - nodes with more connections are more important 171 | degree = dict(G_undirected.degree()) 172 | 173 | # Eigenvector centrality - nodes connected to high-value nodes are more important 174 | try: 175 | eigenvector = nx.eigenvector_centrality(G_undirected, max_iter=1000) 176 | except: 177 | # If eigenvector calculation fails (can happen with certain graph structures) 178 | eigenvector = {node: 0.5 for node in all_nodes} 179 | 180 | return { 181 | "betweenness": betweenness, 182 | "degree": degree, 183 | "eigenvector": eigenvector 184 | } 185 | 186 | def _detect_communities(G_undirected, all_nodes): 187 | """Detect communities in the graph.""" 188 | try: 189 | # Attempt to detect communities using Louvain method 190 | import community as community_louvain 191 | partition = community_louvain.best_partition(G_undirected) 192 | community_count = len(set(partition.values())) 193 | print(f"Detected {community_count} communities using Louvain method") 194 | return partition, community_count 195 | except: 196 | # Fallback: assign community IDs based on degree for simplicity 197 | node_communities = {} 198 | for node in all_nodes: 199 | node_degree = G_undirected.degree(node) if node in G_undirected else 0 200 | # Ensure we have at least 0 as a community ID 201 | community_id = max(0, node_degree) % 8 # Using modulo 8 to limit number of colors 202 | node_communities[node] = community_id 203 | community_count = len(set(node_communities.values())) 204 | print(f"Using degree-based communities 
({community_count} communities)") 205 | return node_communities, community_count 206 | 207 | def _calculate_node_sizes(all_nodes, betweenness, degree, eigenvector): 208 | """Calculate node sizes based on centrality metrics.""" 209 | # Find max values for normalization 210 | max_betweenness = max(betweenness.values()) if betweenness else 1 211 | max_degree = max(degree.values()) if degree else 1 212 | max_eigenvector = max(eigenvector.values()) if eigenvector else 1 213 | 214 | node_sizes = {} 215 | for node in all_nodes: 216 | # Normalize and combine metrics with weights 217 | degree_norm = degree.get(node, 1) / max_degree 218 | betweenness_norm = betweenness.get(node, 0) / max_betweenness if max_betweenness > 0 else 0 219 | eigenvector_norm = eigenvector.get(node, 0) / max_eigenvector if max_eigenvector > 0 else 0 220 | 221 | # Calculate a weighted importance score (adjust weights as needed) 222 | importance = 0.5 * degree_norm + 0.3 * betweenness_norm + 0.2 * eigenvector_norm 223 | 224 | # Scale node size - ensure minimum size and reasonable maximum 225 | node_sizes[node] = 10 + (20 * importance) # Size range from 10 to 30 226 | 227 | return node_sizes 228 | 229 | def _add_nodes_and_edges_to_network(net, G): 230 | """Add nodes and edges from NetworkX graph to PyVis network.""" 231 | # Add nodes with all their attributes 232 | for node_id in G.nodes(): 233 | node_data = G.nodes[node_id] 234 | net.add_node( 235 | node_id, 236 | color=node_data.get('color', '#4daf4a'), 237 | label=str(node_id), # Ensure label is a string 238 | title=str(node_data.get('title', node_id)), # Ensure title is a string 239 | shape="dot", 240 | size=node_data.get('size', 10), 241 | font={'color': '#000000'} # Explicitly set font color to black 242 | ) 243 | 244 | # Add edges with all their attributes 245 | for edge in G.edges(data=True): 246 | source, target, data = edge 247 | 248 | # Support for dashed lines for inferred relationships 249 | edge_options = { 250 | 'title': data.get('title', ''), 251 | 'label': data.get('label', ''), 252 | 'arrows': "to" 253 | } 254 | 255 | # Add dashes if specified 256 | if data.get('dashes', False): 257 | edge_options['dashes'] = True 258 | 259 | # Add color if specified 260 | if data.get('color'): 261 | edge_options['color'] = data.get('color') 262 | 263 | net.add_edge(source, target, **edge_options) 264 | 265 | def _get_visualization_options(edge_smooth=False): 266 | """ 267 | Get options for PyVis visualization. 
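    Illustrative example (based on the handling below): passing edge_smooth="curvedCW"
    yields options whose "edges" section contains {"smooth": {"type": "curvedCW"}},
    while edge_smooth=False (or the string "false") disables smoothing entirely.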
268 | 269 | Args: 270 | edge_smooth: Edge smoothing setting: 271 | false, "dynamic", "continuous", "discrete", "diagonalCross", 272 | "straightCross", "horizontal", "vertical", "curvedCW", "curvedCCW", "cubicBezier" 273 | """ 274 | # Configure physics for better visualization 275 | physics_options = { 276 | "enabled": True, # Physics on by default 277 | "solver": "forceAtlas2Based", 278 | "forceAtlas2Based": { 279 | "gravitationalConstant": -50, 280 | "centralGravity": 0.01, 281 | "springLength": 100, 282 | "springConstant": 0.08 283 | }, 284 | "stabilization": { 285 | "iterations": 200, # Increased for better layout 286 | "enabled": True 287 | } 288 | } 289 | 290 | # Determine edge smoothing based on parameter 291 | if isinstance(edge_smooth, str): 292 | if edge_smooth.lower() == "false": 293 | edge_smoothing = False 294 | else: 295 | edge_smoothing = {'type': edge_smooth} 296 | elif edge_smooth: 297 | edge_smoothing = {'type': 'continuous'} # Default curved type 298 | else: 299 | edge_smoothing = False 300 | 301 | # Full options for visualization 302 | return { 303 | "physics": physics_options, 304 | "edges": { 305 | "color": {"inherit": True}, 306 | "font": {"size": 11}, 307 | "smooth": edge_smoothing # Apply edge smoothing setting 308 | }, 309 | "nodes": { 310 | "font": {"size": 14, "face": "Tahoma"}, 311 | "scaling": {"min": 10, "max": 50}, # Ensure nodes are visible 312 | "tooltipDelay": 200 313 | }, 314 | "interaction": { 315 | "hover": True, 316 | "navigationButtons": True, 317 | "keyboard": True, 318 | "tooltipDelay": 200 319 | }, 320 | "layout": { 321 | "improvedLayout": True 322 | } 323 | } 324 | 325 | def _save_and_modify_html(net, output_file, community_count, all_nodes, triples): 326 | """Save the network as HTML and modify with custom template.""" 327 | # Instead of letting PyVis write to a file, we'll access its HTML directly 328 | # and write it ourselves with explicit UTF-8 encoding 329 | 330 | # Generate the HTML content 331 | # This happens internally in PyVis without writing to a file 332 | net.generate_html() 333 | 334 | # Get the HTML from PyVis's internal html attribute 335 | html = net.html 336 | 337 | # Add our custom controls by replacing the div with our template 338 | html = html.replace('
', _load_html_template()) 339 | 340 | # Fix the duplicate title issue 341 | # Remove the default PyVis header 342 | html = re.sub(r'
\s*

.*?

\s*
', '', html) 343 | 344 | # Replace the other h1 with our enhanced title 345 | html = html.replace('

', f'

Knowledge Graph - {len(all_nodes)} Nodes, {len(triples)} Relationships, {community_count} Communities

') 346 | 347 | # Write the HTML directly to the output file with explicit UTF-8 encoding 348 | with open(output_file, 'w', encoding='utf-8') as f: 349 | f.write(html) 350 | 351 | print(f"Knowledge graph visualization saved to {output_file}") 352 | 353 | def sample_data_visualization(output_file="sample_knowledge_graph.html", edge_smooth=None, config=None): 354 | """ 355 | Generate a visualization using sample data to test the functionality. 356 | 357 | Args: 358 | output_file: Path to save the sample graph HTML 359 | edge_smooth: Edge smoothing setting (overrides config): 360 | false, "dynamic", "continuous", "discrete", "diagonalCross", 361 | "straightCross", "horizontal", "vertical", "curvedCW", "curvedCCW", "cubicBezier" 362 | config: Configuration dictionary (optional) 363 | """ 364 | # Sample data representing knowledge graph triples 365 | sample_triples = [ 366 | {"subject": "Industrial Revolution", "predicate": "began in", "object": "Great Britain"}, 367 | {"subject": "Industrial Revolution", "predicate": "characterized by", "object": "machine manufacturing"}, 368 | {"subject": "Industrial Revolution", "predicate": "led to", "object": "urbanization"}, 369 | {"subject": "Industrial Revolution", "predicate": "led to", "object": "rise of capitalism"}, 370 | {"subject": "Industrial Revolution", "predicate": "led to", "object": "new labor movements"}, 371 | {"subject": "Industrial Revolution", "predicate": "fueled by", "object": "technological innovations"}, 372 | {"subject": "James Watt", "predicate": "developed", "object": "steam engine"}, 373 | {"subject": "James Watt", "predicate": "born in", "object": "Scottland"}, 374 | {"subject": "Scottland", "predicate": "a country in", "object": "Europe"}, 375 | {"subject": "steam engine", "predicate": "revolutionized", "object": "transportation"}, 376 | {"subject": "steam engine", "predicate": "revolutionized", "object": "manufacturing processes"}, 377 | {"subject": "steam engine", "predicate": "spread to", "object": "Europe"}, 378 | {"subject": "steam engine", "predicate": "lead to", "object": "Industrial Revolution"}, 379 | {"subject": "steam engine", "predicate": "spread to", "object": "North America"}, 380 | {"subject": "technological innovations", "predicate": "led to", "object": "Digital Computers"}, 381 | {"subject": "Digital Computers", "predicate": "enabled", "object": "Artificial Intelligence"}, 382 | {"subject": "Artificial Intelligence", "predicate": "will replace", "object": "Humanity"}, 383 | {"subject": "Artificial Intelligence", "predicate": "led to", "object": "LLMs"}, 384 | {"subject": "Robert McDermott", "predicate": "likes", "object": "LLMs"}, 385 | {"subject": "Robert McDermott", "predicate": "owns", "object": "Digital Computers"}, 386 | {"subject": "Robert McDermott", "predicate": "lives in", "object": "North America"} 387 | ] 388 | 389 | # Determine edge smoothing from config if not explicitly provided 390 | if edge_smooth is None and config is not None: 391 | edge_smooth = config.get("visualization", {}).get("edge_smooth", False) 392 | elif edge_smooth is None: 393 | edge_smooth = False 394 | 395 | # Generate the visualization 396 | print(f"Generating sample visualization with {len(sample_triples)} triples") 397 | 398 | # Display edge smoothing type 399 | if edge_smooth is False: 400 | edge_style = "Straight (no smoothing)" 401 | elif isinstance(edge_smooth, str): 402 | edge_style = edge_smooth 403 | else: 404 | edge_style = "continuous (default curved)" 405 | 406 | print(f"Edge style: {edge_style}") 407 | stats = 
visualize_knowledge_graph(sample_triples, output_file, edge_smooth=edge_smooth, config=config) 408 | 409 | print("\nSample Knowledge Graph Statistics:") 410 | print(f"Nodes: {stats['nodes']}") 411 | print(f"Edges: {stats['edges']}") 412 | print(f"Communities: {stats['communities']}") 413 | 414 | print(f"\nVisualization saved to {output_file}") 415 | print(f"To view, open: file://{os.path.abspath(output_file)}") 416 | 417 | if __name__ == "__main__": 418 | # Run sample visualization when this module is run directly 419 | from src.knowledge_graph.config import load_config 420 | 421 | # Try to load config, fall back to defaults if not found 422 | config = load_config() 423 | if config is None: 424 | config = {"visualization": {"edge_smooth": False}} 425 | print("No config.toml found, using default settings") 426 | 427 | # Create sample visualizations with different edge types 428 | examples = [ 429 | ("sample_knowledge_graph_straight.html", False, "Straight edges (no smoothing)"), 430 | ("sample_knowledge_graph_curvedCW.html", "curvedCW", "Curved clockwise"), 431 | ("sample_knowledge_graph_curvedCCW.html", "curvedCCW", "Curved counter-clockwise"), 432 | ("sample_knowledge_graph_dynamic.html", "dynamic", "Dynamic edges"), 433 | ("sample_knowledge_graph_cubicBezier.html", "cubicBezier", "Cubic Bezier curves"), 434 | ] 435 | 436 | # Create example visualizations 437 | for filename, edge_type, description in examples: 438 | print(f"\nCreating visualization with {description}...") 439 | config_example = {"visualization": {"edge_smooth": edge_type}} 440 | sample_data_visualization(filename, config=config_example) 441 | 442 | # Create visualization using config.toml settings 443 | print("\nCreating visualization using configuration from config.toml...") 444 | sample_data_visualization("sample_knowledge_graph_config.html", config=config) 445 | 446 | # Determine edge style from config for output message 447 | config_edge_type = config.get("visualization", {}).get("edge_smooth", False) 448 | if config_edge_type is False: 449 | config_description = "straight edges (no smoothing)" 450 | else: 451 | config_description = f"edge style '{config_edge_type}'" 452 | 453 | print(f"\nCreated sample visualizations:") 454 | for filename, _, description in examples: 455 | print(f"- {filename}: {description}") 456 | print(f"- sample_knowledge_graph_config.html: Using {config_description} from config.toml") 457 | print("\nTo view these visualizations, open the HTML files in your browser.") -------------------------------------------------------------------------------- /src/knowledge_graph/entity_standardization.py: -------------------------------------------------------------------------------- 1 | """Entity standardization and relationship inference for knowledge graphs.""" 2 | import re 3 | from collections import defaultdict 4 | from src.knowledge_graph.llm import call_llm 5 | from src.knowledge_graph.prompts import ( 6 | ENTITY_RESOLUTION_SYSTEM_PROMPT, 7 | get_entity_resolution_user_prompt, 8 | RELATIONSHIP_INFERENCE_SYSTEM_PROMPT, 9 | get_relationship_inference_user_prompt, 10 | WITHIN_COMMUNITY_INFERENCE_SYSTEM_PROMPT, 11 | get_within_community_inference_user_prompt 12 | ) 13 | 14 | def limit_predicate_length(predicate, max_words=3): 15 | """ 16 | Enforce a maximum word limit on predicates. 
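    Illustrative example: with the default max_words=3,
    limit_predicate_length("was instrumental in the development of") first truncates
    to "was instrumental in", then drops the trailing stop word, returning
    "was instrumental".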
17 | 18 | Args: 19 | predicate: The original predicate string 20 | max_words: Maximum number of words allowed (default: 3) 21 | 22 | Returns: 23 | Shortened predicate with no more than max_words 24 | """ 25 | words = predicate.split() 26 | if len(words) <= max_words: 27 | return predicate 28 | 29 | # If too long, use only the first max_words words 30 | shortened = ' '.join(words[:max_words]) 31 | 32 | # Remove trailing prepositions or articles if they're the last word 33 | stop_words = {'a', 'an', 'the', 'of', 'with', 'by', 'to', 'from', 'in', 'on', 'for'} 34 | last_word = shortened.split()[-1].lower() 35 | if last_word in stop_words and len(words) > 1: 36 | shortened = ' '.join(shortened.split()[:-1]) 37 | 38 | return shortened 39 | 40 | def standardize_entities(triples, config): 41 | """ 42 | Standardize entity names across all triples. 43 | 44 | Args: 45 | triples: List of dictionaries with 'subject', 'predicate', and 'object' keys 46 | config: Configuration dictionary 47 | 48 | Returns: 49 | List of triples with standardized entity names 50 | """ 51 | if not triples: 52 | return triples 53 | 54 | print("Standardizing entity names across all triples...") 55 | 56 | # Validate input triples to ensure they have the required fields 57 | valid_triples = [] 58 | invalid_count = 0 59 | 60 | for triple in triples: 61 | if isinstance(triple, dict) and "subject" in triple and "predicate" in triple and "object" in triple: 62 | valid_triples.append(triple) 63 | else: 64 | invalid_count += 1 65 | 66 | if invalid_count > 0: 67 | print(f"Warning: Filtered out {invalid_count} invalid triples missing required fields") 68 | 69 | if not valid_triples: 70 | print("Error: No valid triples found for entity standardization") 71 | return [] 72 | 73 | # 1. Extract all unique entities 74 | all_entities = set() 75 | for triple in valid_triples: 76 | all_entities.add(triple["subject"].lower()) 77 | all_entities.add(triple["object"].lower()) 78 | 79 | # 2. Group similar entities - first by exact match after lowercasing and removing stopwords 80 | standardized_entities = {} 81 | entity_groups = defaultdict(list) 82 | 83 | # Helper function to normalize text for comparison 84 | def normalize_text(text): 85 | # Convert to lowercase 86 | text = text.lower() 87 | # Remove common stopwords that might appear in entity names 88 | stopwords = {"the", "a", "an", "of", "and", "or", "in", "on", "at", "to", "for", "with", "by", "as"} 89 | words = [word for word in re.findall(r'\b\w+\b', text) if word not in stopwords] 90 | return " ".join(words) 91 | 92 | # Process entities in order of complexity (longer entities first) 93 | sorted_entities = sorted(all_entities, key=lambda x: (-len(x), x)) 94 | 95 | # First pass: Standard normalization 96 | for entity in sorted_entities: 97 | normalized = normalize_text(entity) 98 | if normalized: # Skip empty strings 99 | entity_groups[normalized].append(entity) 100 | 101 | # 3. 
For each group, choose the most representative name 102 | for group_key, variants in entity_groups.items(): 103 | if len(variants) == 1: 104 | # Only one variant, use it directly 105 | standardized_entities[variants[0]] = variants[0] 106 | else: 107 | # Multiple variants, choose the most common or the shortest one as standard 108 | # Sort by frequency in triples, then by length (shorter is better) 109 | variant_counts = defaultdict(int) 110 | for triple in valid_triples: 111 | for variant in variants: 112 | if triple["subject"].lower() == variant: 113 | variant_counts[variant] += 1 114 | if triple["object"].lower() == variant: 115 | variant_counts[variant] += 1 116 | 117 | # Choose the most common variant as the standard form 118 | standard_form = sorted(variants, key=lambda x: (-variant_counts[x], len(x)))[0] 119 | for variant in variants: 120 | standardized_entities[variant] = standard_form 121 | 122 | # 4. Second pass: check for root word relationships 123 | # This handles cases like "capitalism" and "capitalist decay" 124 | additional_standardizations = {} 125 | 126 | # Get all standardized entity names (after first pass) 127 | standard_forms = set(standardized_entities.values()) 128 | sorted_standards = sorted(standard_forms, key=len) 129 | 130 | for i, entity1 in enumerate(sorted_standards): 131 | e1_words = set(entity1.split()) 132 | 133 | for entity2 in sorted_standards[i+1:]: 134 | if entity1 == entity2: 135 | continue 136 | 137 | # Check if one entity is a subset of the other 138 | e2_words = set(entity2.split()) 139 | 140 | # If one entity contains all words from the other 141 | if e1_words.issubset(e2_words) and len(e1_words) > 0: 142 | # The shorter one is likely the more general concept 143 | additional_standardizations[entity2] = entity1 144 | elif e2_words.issubset(e1_words) and len(e2_words) > 0: 145 | additional_standardizations[entity1] = entity2 146 | else: 147 | # Check for stemming/root similarities 148 | stems1 = {word[:4] for word in e1_words if len(word) > 4} 149 | stems2 = {word[:4] for word in e2_words if len(word) > 4} 150 | 151 | shared_stems = stems1.intersection(stems2) 152 | 153 | if shared_stems and (len(shared_stems) / max(len(stems1), len(stems2))) > 0.5: 154 | # Use the shorter entity as the standard 155 | if len(entity1) <= len(entity2): 156 | additional_standardizations[entity2] = entity1 157 | else: 158 | additional_standardizations[entity1] = entity2 159 | 160 | # Apply additional standardizations 161 | for entity, standard in additional_standardizations.items(): 162 | standardized_entities[entity] = standard 163 | 164 | # 5. Apply standardization to all triples 165 | standardized_triples = [] 166 | for triple in valid_triples: 167 | subj_lower = triple["subject"].lower() 168 | obj_lower = triple["object"].lower() 169 | 170 | standardized_triple = { 171 | "subject": standardized_entities.get(subj_lower, triple["subject"]), 172 | "predicate": limit_predicate_length(triple["predicate"]), 173 | "object": standardized_entities.get(obj_lower, triple["object"]), 174 | "chunk": triple.get("chunk", 0) 175 | } 176 | standardized_triples.append(standardized_triple) 177 | 178 | # 6. Optional: Use LLM to help with entity resolution for ambiguous cases 179 | if config.get("standardization", {}).get("use_llm_for_entities", False): 180 | standardized_triples = _resolve_entities_with_llm(standardized_triples, config) 181 | 182 | # 7. 
Filter out self-referencing triples 183 | filtered_triples = [triple for triple in standardized_triples if triple["subject"] != triple["object"]] 184 | if len(filtered_triples) < len(standardized_triples): 185 | print(f"Removed {len(standardized_triples) - len(filtered_triples)} self-referencing triples") 186 | 187 | print(f"Standardized {len(all_entities)} entities into {len(set(standardized_entities.values()))} standard forms") 188 | return filtered_triples 189 | 190 | def infer_relationships(triples, config): 191 | """ 192 | Infer additional relationships between entities to reduce isolated communities. 193 | 194 | Args: 195 | triples: List of dictionaries with standardized entity names 196 | config: Configuration dictionary 197 | 198 | Returns: 199 | List of triples with additional inferred relationships 200 | """ 201 | if not triples or len(triples) < 2: 202 | return triples 203 | 204 | print("Inferring additional relationships between entities...") 205 | 206 | # Validate input triples to ensure they have the required fields 207 | valid_triples = [] 208 | invalid_count = 0 209 | 210 | for triple in triples: 211 | if isinstance(triple, dict) and "subject" in triple and "predicate" in triple and "object" in triple: 212 | valid_triples.append(triple) 213 | else: 214 | invalid_count += 1 215 | 216 | if invalid_count > 0: 217 | print(f"Warning: Filtered out {invalid_count} invalid triples missing required fields") 218 | 219 | if not valid_triples: 220 | print("Error: No valid triples found for relationship inference") 221 | return [] 222 | 223 | # Create a graph representation for easier traversal 224 | graph = defaultdict(set) 225 | all_entities = set() 226 | for triple in valid_triples: 227 | subj = triple["subject"] 228 | obj = triple["object"] 229 | graph[subj].add(obj) 230 | all_entities.add(subj) 231 | all_entities.add(obj) 232 | 233 | # Find disconnected communities 234 | communities = _identify_communities(graph) 235 | print(f"Identified {len(communities)} disconnected communities in the graph") 236 | 237 | new_triples = [] 238 | 239 | # Use LLM to infer relationships between isolated communities if configured 240 | if config.get("inference", {}).get("use_llm_for_inference", True): 241 | # Infer relationships between different communities 242 | community_triples = _infer_relationships_with_llm(valid_triples, communities, config) 243 | if community_triples: 244 | new_triples.extend(community_triples) 245 | 246 | # Infer relationships within the same communities for semantically related entities 247 | within_community_triples = _infer_within_community_relationships(valid_triples, communities, config) 248 | if within_community_triples: 249 | new_triples.extend(within_community_triples) 250 | 251 | # Apply transitive inference rules 252 | transitive_triples = _apply_transitive_inference(valid_triples, graph) 253 | if transitive_triples: 254 | new_triples.extend(transitive_triples) 255 | 256 | # Infer relationships based on lexical similarity 257 | lexical_triples = _infer_relationships_by_lexical_similarity(all_entities, valid_triples) 258 | if lexical_triples: 259 | new_triples.extend(lexical_triples) 260 | 261 | # Add new triples to the original set 262 | if new_triples: 263 | valid_triples.extend(new_triples) 264 | 265 | # De-duplicate triples 266 | unique_triples = _deduplicate_triples(valid_triples) 267 | 268 | # Final pass: ensure all predicates follow the 3-word limit 269 | for triple in unique_triples: 270 | triple["predicate"] = limit_predicate_length(triple["predicate"]) 271 
| 272 | # Filter out self-referencing triples 273 | filtered_triples = [triple for triple in unique_triples if triple["subject"] != triple["object"]] 274 | if len(filtered_triples) < len(unique_triples): 275 | print(f"Removed {len(unique_triples) - len(filtered_triples)} self-referencing triples") 276 | 277 | print(f"Added {len(filtered_triples) - len(triples)} inferred relationships") 278 | return filtered_triples 279 | 280 | def _identify_communities(graph): 281 | """ 282 | Identify disconnected communities in the graph. 283 | 284 | Args: 285 | graph: Dictionary representing the graph structure 286 | 287 | Returns: 288 | List of sets, where each set contains nodes in a community 289 | """ 290 | # Get all nodes 291 | all_nodes = set(graph.keys()).union(*[graph[node] for node in graph]) 292 | 293 | # Track visited nodes 294 | visited = set() 295 | communities = [] 296 | 297 | # Depth-first search to find connected components 298 | def dfs(node, community): 299 | visited.add(node) 300 | community.add(node) 301 | 302 | # Visit outgoing edges 303 | for neighbor in graph.get(node, []): 304 | if neighbor not in visited: 305 | dfs(neighbor, community) 306 | 307 | # Visit incoming edges (we need to check all nodes) 308 | for source, targets in graph.items(): 309 | if node in targets and source not in visited: 310 | dfs(source, community) 311 | 312 | # Find all communities 313 | for node in all_nodes: 314 | if node not in visited: 315 | community = set() 316 | dfs(node, community) 317 | communities.append(community) 318 | 319 | return communities 320 | 321 | def _apply_transitive_inference(triples, graph): 322 | """ 323 | Apply transitive inference to find new relationships. 324 | 325 | Args: 326 | triples: List of triple dictionaries 327 | graph: Dictionary representing the graph structure 328 | 329 | Returns: 330 | List of new inferred triples 331 | """ 332 | new_triples = [] 333 | 334 | # Predicates by subject-object pairs 335 | predicates = {} 336 | for triple in triples: 337 | key = (triple["subject"], triple["object"]) 338 | predicates[key] = triple["predicate"] 339 | 340 | # Find transitive relationships: A -> B -> C implies A -> C 341 | for subj in graph: 342 | for mid in graph[subj]: 343 | for obj in graph.get(mid, []): 344 | # Only consider paths where A->B->C and A!=C 345 | if subj != obj and (subj, obj) not in predicates: 346 | # Create a new predicate combining the two relationships 347 | pred1 = predicates.get((subj, mid), "relates to") 348 | pred2 = predicates.get((mid, obj), "relates to") 349 | 350 | # Generate a new predicate based on the transitive relationship 351 | new_pred = f"indirectly {pred1}" if pred1 == pred2 else f"{pred1} via {mid}" 352 | 353 | # Add the new transitive relationship 354 | new_triples.append({ 355 | "subject": subj, 356 | "predicate": limit_predicate_length(new_pred), 357 | "object": obj, 358 | "inferred": True # Mark as inferred 359 | }) 360 | 361 | return new_triples 362 | 363 | def _deduplicate_triples(triples): 364 | """ 365 | Remove duplicate triples, keeping the original (non-inferred) ones. 
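    Illustrative example: if the same (subject, predicate, object) key, say
    ("steam engine", "enabled", "factories"), appears once as an extracted triple
    and again marked "inferred": True, only one copy is kept and the extracted
    (non-inferred) version wins.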
366 | 367 | Args: 368 | triples: List of triple dictionaries 369 | 370 | Returns: 371 | List of unique triples 372 | """ 373 | # Use tuple of (subject, predicate, object) as key 374 | unique_triples = {} 375 | 376 | for triple in triples: 377 | key = (triple["subject"], triple["predicate"], triple["object"]) 378 | # Keep original triples (not inferred) when duplicates exist 379 | if key not in unique_triples or not triple.get("inferred", False): 380 | unique_triples[key] = triple 381 | 382 | return list(unique_triples.values()) 383 | 384 | def _resolve_entities_with_llm(triples, config): 385 | """ 386 | Use LLM to help resolve entity references and standardize entity names. 387 | 388 | Args: 389 | triples: List of triples with potentially non-standardized entities 390 | config: Configuration dictionary 391 | 392 | Returns: 393 | List of triples with LLM-assisted entity standardization 394 | """ 395 | # Extract all unique entities 396 | all_entities = set() 397 | for triple in triples: 398 | all_entities.add(triple["subject"]) 399 | all_entities.add(triple["object"]) 400 | 401 | # If there are too many entities, limit to the most frequent ones 402 | if len(all_entities) > 100: 403 | # Count entity occurrences 404 | entity_counts = defaultdict(int) 405 | for triple in triples: 406 | entity_counts[triple["subject"]] += 1 407 | entity_counts[triple["object"]] += 1 408 | 409 | # Keep only the top 100 most frequent entities 410 | all_entities = {entity for entity, count in 411 | sorted(entity_counts.items(), key=lambda x: -x[1])[:100]} 412 | 413 | # Prepare prompt for LLM 414 | entity_list = "\n".join(sorted(all_entities)) 415 | system_prompt = ENTITY_RESOLUTION_SYSTEM_PROMPT 416 | user_prompt = get_entity_resolution_user_prompt(entity_list) 417 | 418 | try: 419 | # LLM configuration 420 | model = config["llm"]["model"] 421 | api_key = config["llm"]["api_key"] 422 | max_tokens = config["llm"]["max_tokens"] 423 | temperature = config["llm"]["temperature"] 424 | base_url = config["llm"]["base_url"] 425 | 426 | # Call LLM 427 | response = call_llm(model, user_prompt, api_key, system_prompt, max_tokens, temperature, base_url) 428 | 429 | # Extract JSON mapping 430 | import json 431 | from src.knowledge_graph.llm import extract_json_from_text 432 | 433 | entity_mapping = extract_json_from_text(response) 434 | 435 | if entity_mapping and isinstance(entity_mapping, dict): 436 | # Apply the mapping to standardize entities 437 | entity_to_standard = {} 438 | for standard, variants in entity_mapping.items(): 439 | for variant in variants: 440 | entity_to_standard[variant] = standard 441 | # Also map the standard form to itself 442 | entity_to_standard[standard] = standard 443 | 444 | # Apply standardization to triples 445 | for triple in triples: 446 | triple["subject"] = entity_to_standard.get(triple["subject"], triple["subject"]) 447 | triple["object"] = entity_to_standard.get(triple["object"], triple["object"]) 448 | 449 | print(f"Applied LLM-based entity standardization for {len(entity_mapping)} entity groups") 450 | else: 451 | print("Could not extract valid entity mapping from LLM response") 452 | 453 | except Exception as e: 454 | print(f"Error in LLM-based entity resolution: {e}") 455 | 456 | return triples 457 | 458 | def _infer_relationships_with_llm(triples, communities, config): 459 | """ 460 | Use LLM to infer relationships between disconnected communities. 
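    Illustrative summary of the flow below: the five largest communities are compared
    pairwise, up to five representative entities are sampled from each side, and at
    most 20 existing triples mentioning them are passed to the LLM as context for
    proposing new cross-community triples.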
461 | 462 | Args: 463 | triples: List of existing triples 464 | communities: List of community sets 465 | config: Configuration dictionary 466 | 467 | Returns: 468 | List of new inferred triples 469 | """ 470 | # Skip if there's only one community 471 | if len(communities) <= 1: 472 | print("Only one community found, skipping LLM-based relationship inference") 473 | return [] 474 | 475 | # Focus on the largest communities 476 | large_communities = sorted(communities, key=len, reverse=True)[:5] 477 | 478 | # For each pair of large communities, try to infer relationships 479 | new_triples = [] 480 | 481 | for i, comm1 in enumerate(large_communities): 482 | for j, comm2 in enumerate(large_communities): 483 | if i >= j: 484 | continue # Skip self-comparisons and duplicates 485 | 486 | # Select representative entities from each community 487 | rep1 = list(comm1)[:min(5, len(comm1))] 488 | rep2 = list(comm2)[:min(5, len(comm2))] 489 | 490 | # Prepare relevant existing triples for context 491 | context_triples = [] 492 | for triple in triples: 493 | if triple["subject"] in rep1 or triple["subject"] in rep2 or \ 494 | triple["object"] in rep1 or triple["object"] in rep2: 495 | context_triples.append(triple) 496 | 497 | # Limit context size 498 | if len(context_triples) > 20: 499 | context_triples = context_triples[:20] 500 | 501 | # Convert triples to text for prompt 502 | triples_text = "\n".join([ 503 | f"{t['subject']} {t['predicate']} {t['object']}" 504 | for t in context_triples 505 | ]) 506 | 507 | # Prepare entity lists 508 | entities1 = ", ".join(rep1) 509 | entities2 = ", ".join(rep2) 510 | 511 | # Create prompt for LLM 512 | system_prompt = RELATIONSHIP_INFERENCE_SYSTEM_PROMPT 513 | user_prompt = get_relationship_inference_user_prompt(entities1, entities2, triples_text) 514 | 515 | try: 516 | # LLM configuration 517 | model = config["llm"]["model"] 518 | api_key = config["llm"]["api_key"] 519 | max_tokens = config["llm"]["max_tokens"] 520 | temperature = config["llm"]["temperature"] 521 | base_url = config["llm"]["base_url"] 522 | 523 | # Call LLM 524 | response = call_llm(model, user_prompt, api_key, system_prompt, max_tokens, temperature, base_url) 525 | 526 | # Extract JSON results 527 | from src.knowledge_graph.llm import extract_json_from_text 528 | inferred_triples = extract_json_from_text(response) 529 | 530 | if inferred_triples and isinstance(inferred_triples, list): 531 | # Mark as inferred and add to new triples 532 | for triple in inferred_triples: 533 | if "subject" in triple and "predicate" in triple and "object" in triple: 534 | # Skip self-referencing triples 535 | if triple["subject"] == triple["object"]: 536 | continue 537 | triple["inferred"] = True 538 | triple["predicate"] = limit_predicate_length(triple["predicate"]) 539 | new_triples.append(triple) 540 | 541 | print(f"Inferred {len(new_triples)} new relationships between communities") 542 | else: 543 | print("Could not extract valid inferred relationships from LLM response") 544 | 545 | except Exception as e: 546 | print(f"Error in LLM-based relationship inference: {e}") 547 | 548 | return new_triples 549 | 550 | def _infer_within_community_relationships(triples, communities, config): 551 | """ 552 | Use LLM to infer relationships between entities within the same community. 553 | Focus on entities that might be semantically related but not directly connected. 
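    Illustrative example: entities such as "steam engine" and "steam power" share a
    word, so if no edge links them they are proposed to the LLM as a candidate pair
    (up to ten such pairs per community), along with up to 20 existing triples for
    context.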
554 | 555 | Args: 556 | triples: List of existing triples 557 | communities: List of community sets 558 | config: Configuration dictionary 559 | 560 | Returns: 561 | List of new inferred triples 562 | """ 563 | new_triples = [] 564 | 565 | # Process larger communities 566 | for community in sorted(communities, key=len, reverse=True)[:3]: 567 | # Skip small communities 568 | if len(community) < 5: 569 | continue 570 | 571 | # Get all entities in this community 572 | community_entities = list(community) 573 | 574 | # Create an adjacency matrix to identify disconnected entity pairs 575 | connections = {(a, b): False for a in community_entities for b in community_entities if a != b} 576 | 577 | # Mark existing connections 578 | for triple in triples: 579 | if triple["subject"] in community_entities and triple["object"] in community_entities: 580 | connections[(triple["subject"], triple["object"])] = True 581 | 582 | # Find disconnected pairs that might be semantically related 583 | disconnected_pairs = [] 584 | for (a, b), connected in connections.items(): 585 | if not connected and a != b: # Ensure a and b are different entities 586 | # Check for potential semantic relationship (e.g., shared words) 587 | a_words = set(a.lower().split()) 588 | b_words = set(b.lower().split()) 589 | shared_words = a_words.intersection(b_words) 590 | 591 | # If they share words or one is contained in the other, they might be related 592 | if shared_words or a.lower() in b.lower() or b.lower() in a.lower(): 593 | disconnected_pairs.append((a, b)) 594 | 595 | # Limit to the most promising pairs 596 | disconnected_pairs = disconnected_pairs[:10] 597 | 598 | if not disconnected_pairs: 599 | continue 600 | 601 | # Get relevant context 602 | context_triples = [] 603 | entities_of_interest = set() 604 | for a, b in disconnected_pairs: 605 | entities_of_interest.add(a) 606 | entities_of_interest.add(b) 607 | 608 | for triple in triples: 609 | if triple["subject"] in entities_of_interest or triple["object"] in entities_of_interest: 610 | context_triples.append(triple) 611 | 612 | # Limit context size 613 | if len(context_triples) > 20: 614 | context_triples = context_triples[:20] 615 | 616 | # Convert triples to text for prompt 617 | triples_text = "\n".join([ 618 | f"{t['subject']} {t['predicate']} {t['object']}" 619 | for t in context_triples 620 | ]) 621 | 622 | # Create pairs text 623 | pairs_text = "\n".join([f"{a} and {b}" for a, b in disconnected_pairs]) 624 | 625 | # Create prompt for LLM 626 | system_prompt = WITHIN_COMMUNITY_INFERENCE_SYSTEM_PROMPT 627 | user_prompt = get_within_community_inference_user_prompt(pairs_text, triples_text) 628 | 629 | try: 630 | # LLM configuration 631 | model = config["llm"]["model"] 632 | api_key = config["llm"]["api_key"] 633 | max_tokens = config["llm"]["max_tokens"] 634 | temperature = config["llm"]["temperature"] 635 | base_url = config["llm"]["base_url"] 636 | 637 | # Call LLM 638 | response = call_llm(model, user_prompt, api_key, system_prompt, max_tokens, temperature, base_url) 639 | 640 | # Extract JSON results 641 | from src.knowledge_graph.llm import extract_json_from_text 642 | inferred_triples = extract_json_from_text(response) 643 | 644 | if inferred_triples and isinstance(inferred_triples, list): 645 | # Mark as inferred and add to new triples 646 | for triple in inferred_triples: 647 | if "subject" in triple and "predicate" in triple and "object" in triple: 648 | # Skip self-referencing triples 649 | if triple["subject"] == triple["object"]: 650 | continue 651 | 
triple["inferred"] = True 652 | triple["predicate"] = limit_predicate_length(triple["predicate"]) 653 | new_triples.append(triple) 654 | 655 | print(f"Inferred {len(inferred_triples)} new relationships within communities") 656 | else: 657 | print("Could not extract valid inferred relationships from LLM response") 658 | 659 | except Exception as e: 660 | print(f"Error in LLM-based relationship inference within communities: {e}") 661 | 662 | return new_triples 663 | 664 | def _infer_relationships_by_lexical_similarity(entities, triples): 665 | """ 666 | Infer relationships between entities based on lexical similarity. 667 | This can help connect entities like "capitalism" and "capitalist decay". 668 | 669 | Args: 670 | entities: Set of all entities 671 | triples: List of existing triples 672 | 673 | Returns: 674 | List of new inferred triples 675 | """ 676 | new_triples = [] 677 | processed_pairs = set() 678 | 679 | # Create a dictionary to track existing relationships 680 | existing_relationships = set() 681 | for triple in triples: 682 | existing_relationships.add((triple["subject"], triple["object"])) 683 | 684 | # Check for lexical similarity between entities 685 | entities_list = list(entities) 686 | for i, entity1 in enumerate(entities_list): 687 | for entity2 in entities_list[i+1:]: 688 | # Skip if already connected 689 | if (entity1, entity2) in existing_relationships or (entity2, entity1) in existing_relationships: 690 | continue 691 | 692 | # Skip if already processed this pair 693 | if (entity1, entity2) in processed_pairs or (entity2, entity1) in processed_pairs: 694 | continue 695 | 696 | # Skip if the entities are the same (prevent self-reference) 697 | if entity1 == entity2: 698 | continue 699 | 700 | processed_pairs.add((entity1, entity2)) 701 | 702 | # Check for containment or shared roots 703 | e1_lower = entity1.lower() 704 | e2_lower = entity2.lower() 705 | 706 | # Simple word overlap check 707 | e1_words = set(e1_lower.split()) 708 | e2_words = set(e2_lower.split()) 709 | shared_words = e1_words.intersection(e2_words) 710 | 711 | if shared_words: 712 | # Create relationships based on shared words 713 | main_shared = max(shared_words, key=len) 714 | 715 | if len(main_shared) >= 4: # Only consider significant shared words 716 | if e1_lower.startswith(main_shared) and not e2_lower.startswith(main_shared): 717 | new_triples.append({ 718 | "subject": entity2, 719 | "predicate": "relates to", 720 | "object": entity1, 721 | "inferred": True 722 | }) 723 | elif e2_lower.startswith(main_shared) and not e1_lower.startswith(main_shared): 724 | new_triples.append({ 725 | "subject": entity1, 726 | "predicate": "relates to", 727 | "object": entity2, 728 | "inferred": True 729 | }) 730 | else: 731 | new_triples.append({ 732 | "subject": entity1, 733 | "predicate": "related to", 734 | "object": entity2, 735 | "inferred": True 736 | }) 737 | 738 | # Check if one entity contains the other 739 | elif e1_lower in e2_lower: 740 | new_triples.append({ 741 | "subject": entity2, 742 | "predicate": "is type of", 743 | "object": entity1, 744 | "inferred": True 745 | }) 746 | elif e2_lower in e1_lower: 747 | new_triples.append({ 748 | "subject": entity1, 749 | "predicate": "is type of", 750 | "object": entity2, 751 | "inferred": True 752 | }) 753 | 754 | print(f"Inferred {len(new_triples)} relationships based on lexical similarity") 755 | return new_triples -------------------------------------------------------------------------------- /src/knowledge_graph/templates/graph_template.html: 
--------------------------------------------------------------------------------
[The HTML/JavaScript markup of this template was not preserved in this dump; only stray text nodes remain. The recoverable content is an "Edge Types:" legend with the entries "Extracted" and "Inferred", followed by large embedded script blocks (the file runs to roughly 320 lines).]
173 | 174 | 318 | 319 | -------------------------------------------------------------------------------- /uv.lock: -------------------------------------------------------------------------------- 1 | version = 1 2 | revision = 1 3 | requires-python = ">=3.12" 4 | 5 | [[package]] 6 | name = "ai-knowledge-graph" 7 | version = "0.6.1" 8 | source = { editable = "." } 9 | dependencies = [ 10 | { name = "networkx" }, 11 | { name = "python-louvain" }, 12 | { name = "pyvis" }, 13 | { name = "pyvis-network" }, 14 | { name = "requests" }, 15 | { name = "tomli" }, 16 | ] 17 | 18 | [package.metadata] 19 | requires-dist = [ 20 | { name = "networkx", specifier = ">=3.4.2" }, 21 | { name = "python-louvain", specifier = ">=0.16" }, 22 | { name = "pyvis", specifier = ">=0.3.2" }, 23 | { name = "pyvis-network", specifier = ">=0.0.6" }, 24 | { name = "requests", specifier = ">=2.32.3" }, 25 | { name = "tomli", specifier = ">=2.2.1" }, 26 | ] 27 | 28 | [[package]] 29 | name = "asttokens" 30 | version = "3.0.0" 31 | source = { registry = "https://pypi.org/simple" } 32 | sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978 } 33 | wheels = [ 34 | { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 }, 35 | ] 36 | 37 | [[package]] 38 | name = "certifi" 39 | version = "2025.1.31" 40 | source = { registry = "https://pypi.org/simple" } 41 | sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 } 42 | wheels = [ 43 | { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 }, 44 | ] 45 | 46 | [[package]] 47 | name = "charset-normalizer" 48 | version = "3.4.1" 49 | source = { registry = "https://pypi.org/simple" } 50 | sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 } 51 | wheels = [ 52 | { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105 }, 53 | { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404 }, 54 | { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 
150423 }, 55 | { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184 }, 56 | { url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268 }, 57 | { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601 }, 58 | { url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098 }, 59 | { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520 }, 60 | { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852 }, 61 | { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488 }, 62 | { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 }, 63 | { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 }, 64 | { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 }, 65 | { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, 66 | { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, 67 | { url = 
"https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, 68 | { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, 69 | { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, 70 | { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, 71 | { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, 72 | { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, 73 | { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, 74 | { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, 75 | { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, 76 | { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, 77 | { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, 78 | { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, 79 | ] 80 | 81 | [[package]] 82 | name = "colorama" 83 | version = "0.4.6" 84 | source = 
{ registry = "https://pypi.org/simple" } 85 | sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } 86 | wheels = [ 87 | { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, 88 | ] 89 | 90 | [[package]] 91 | name = "decorator" 92 | version = "5.2.1" 93 | source = { registry = "https://pypi.org/simple" } 94 | sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711 } 95 | wheels = [ 96 | { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190 }, 97 | ] 98 | 99 | [[package]] 100 | name = "executing" 101 | version = "2.2.0" 102 | source = { registry = "https://pypi.org/simple" } 103 | sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693 } 104 | wheels = [ 105 | { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 }, 106 | ] 107 | 108 | [[package]] 109 | name = "idna" 110 | version = "3.10" 111 | source = { registry = "https://pypi.org/simple" } 112 | sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } 113 | wheels = [ 114 | { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, 115 | ] 116 | 117 | [[package]] 118 | name = "ipython" 119 | version = "9.0.2" 120 | source = { registry = "https://pypi.org/simple" } 121 | dependencies = [ 122 | { name = "colorama", marker = "sys_platform == 'win32'" }, 123 | { name = "decorator" }, 124 | { name = "ipython-pygments-lexers" }, 125 | { name = "jedi" }, 126 | { name = "matplotlib-inline" }, 127 | { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, 128 | { name = "prompt-toolkit" }, 129 | { name = "pygments" }, 130 | { name = "stack-data" }, 131 | { name = "traitlets" }, 132 | ] 133 | sdist = { url = "https://files.pythonhosted.org/packages/7d/ce/012a0f40ca58a966f87a6e894d6828e2817657cbdf522b02a5d3a87d92ce/ipython-9.0.2.tar.gz", hash = "sha256:ec7b479e3e5656bf4f58c652c120494df1820f4f28f522fb7ca09e213c2aab52", size = 4366102 } 134 | wheels = [ 135 | { url = "https://files.pythonhosted.org/packages/20/3a/917cb9e72f4e1a4ea13c862533205ae1319bd664119189ee5cc9e4e95ebf/ipython-9.0.2-py3-none-any.whl", hash = 
"sha256:143ef3ea6fb1e1bffb4c74b114051de653ffb7737a3f7ab1670e657ca6ae8c44", size = 600524 }, 136 | ] 137 | 138 | [[package]] 139 | name = "ipython-pygments-lexers" 140 | version = "1.1.1" 141 | source = { registry = "https://pypi.org/simple" } 142 | dependencies = [ 143 | { name = "pygments" }, 144 | ] 145 | sdist = { url = "https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393 } 146 | wheels = [ 147 | { url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074 }, 148 | ] 149 | 150 | [[package]] 151 | name = "jedi" 152 | version = "0.19.2" 153 | source = { registry = "https://pypi.org/simple" } 154 | dependencies = [ 155 | { name = "parso" }, 156 | ] 157 | sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287 } 158 | wheels = [ 159 | { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278 }, 160 | ] 161 | 162 | [[package]] 163 | name = "jinja2" 164 | version = "3.1.6" 165 | source = { registry = "https://pypi.org/simple" } 166 | dependencies = [ 167 | { name = "markupsafe" }, 168 | ] 169 | sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115 } 170 | wheels = [ 171 | { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899 }, 172 | ] 173 | 174 | [[package]] 175 | name = "jsonpickle" 176 | version = "4.0.2" 177 | source = { registry = "https://pypi.org/simple" } 178 | sdist = { url = "https://files.pythonhosted.org/packages/2c/6f/e73cb91f774ad1077929a29b3a4af6e5603beb446cb99a55debe240664c3/jsonpickle-4.0.2.tar.gz", hash = "sha256:3e650b9853adcdab9d9d62a88412b6d36e9a59ba423b01cacf0cd4ee80733aca", size = 315121 } 179 | wheels = [ 180 | { url = "https://files.pythonhosted.org/packages/5e/c8/e9dc0af97ce006616abbd7f522d0b45ac322a77f72bb29d901b5114a49ba/jsonpickle-4.0.2-py3-none-any.whl", hash = "sha256:cd3c90d32a68dcaa7f0e4b918bda7d4bb61f3c03b182d82dae2caf9ded0ab6b3", size = 46325 }, 181 | ] 182 | 183 | [[package]] 184 | name = "markupsafe" 185 | version = "3.0.2" 186 | source = { registry = "https://pypi.org/simple" } 187 | sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } 188 | wheels = [ 189 | { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274 }, 190 | { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348 }, 191 | { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149 }, 192 | { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118 }, 193 | { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993 }, 194 | { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178 }, 195 | { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319 }, 196 | { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 }, 197 | { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 }, 198 | { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 }, 199 | { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, 200 | { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, 201 | { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, 202 | { url = 
"https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, 203 | { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, 204 | { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, 205 | { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, 206 | { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, 207 | { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, 208 | { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, 209 | { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, 210 | { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, 211 | { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, 212 | { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, 213 | { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, 214 | { url = 
"https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, 215 | { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, 216 | { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, 217 | { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, 218 | { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, 219 | ] 220 | 221 | [[package]] 222 | name = "matplotlib-inline" 223 | version = "0.1.7" 224 | source = { registry = "https://pypi.org/simple" } 225 | dependencies = [ 226 | { name = "traitlets" }, 227 | ] 228 | sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159 } 229 | wheels = [ 230 | { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 }, 231 | ] 232 | 233 | [[package]] 234 | name = "networkx" 235 | version = "3.4.2" 236 | source = { registry = "https://pypi.org/simple" } 237 | sdist = { url = "https://files.pythonhosted.org/packages/fd/1d/06475e1cd5264c0b870ea2cc6fdb3e37177c1e565c43f56ff17a10e3937f/networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1", size = 2151368 } 238 | wheels = [ 239 | { url = "https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f", size = 1723263 }, 240 | ] 241 | 242 | [[package]] 243 | name = "numpy" 244 | version = "2.2.4" 245 | source = { registry = "https://pypi.org/simple" } 246 | sdist = { url = "https://files.pythonhosted.org/packages/e1/78/31103410a57bc2c2b93a3597340a8119588571f6a4539067546cb9a0bfac/numpy-2.2.4.tar.gz", hash = "sha256:9ba03692a45d3eef66559efe1d1096c4b9b75c0986b5dff5530c378fb8331d4f", size = 20270701 } 247 | wheels = [ 248 | { url = "https://files.pythonhosted.org/packages/a2/30/182db21d4f2a95904cec1a6f779479ea1ac07c0647f064dea454ec650c42/numpy-2.2.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a7b9084668aa0f64e64bd00d27ba5146ef1c3a8835f3bd912e7a9e01326804c4", size = 20947156 }, 249 | { url = 
"https://files.pythonhosted.org/packages/24/6d/9483566acfbda6c62c6bc74b6e981c777229d2af93c8eb2469b26ac1b7bc/numpy-2.2.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dbe512c511956b893d2dacd007d955a3f03d555ae05cfa3ff1c1ff6df8851854", size = 14133092 }, 250 | { url = "https://files.pythonhosted.org/packages/27/f6/dba8a258acbf9d2bed2525cdcbb9493ef9bae5199d7a9cb92ee7e9b2aea6/numpy-2.2.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:bb649f8b207ab07caebba230d851b579a3c8711a851d29efe15008e31bb4de24", size = 5163515 }, 251 | { url = "https://files.pythonhosted.org/packages/62/30/82116199d1c249446723c68f2c9da40d7f062551036f50b8c4caa42ae252/numpy-2.2.4-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:f34dc300df798742b3d06515aa2a0aee20941c13579d7a2f2e10af01ae4901ee", size = 6696558 }, 252 | { url = "https://files.pythonhosted.org/packages/0e/b2/54122b3c6df5df3e87582b2e9430f1bdb63af4023c739ba300164c9ae503/numpy-2.2.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3f7ac96b16955634e223b579a3e5798df59007ca43e8d451a0e6a50f6bfdfba", size = 14084742 }, 253 | { url = "https://files.pythonhosted.org/packages/02/e2/e2cbb8d634151aab9528ef7b8bab52ee4ab10e076509285602c2a3a686e0/numpy-2.2.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f92084defa704deadd4e0a5ab1dc52d8ac9e8a8ef617f3fbb853e79b0ea3592", size = 16134051 }, 254 | { url = "https://files.pythonhosted.org/packages/8e/21/efd47800e4affc993e8be50c1b768de038363dd88865920439ef7b422c60/numpy-2.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4e84a6283b36632e2a5b56e121961f6542ab886bc9e12f8f9818b3c266bfbb", size = 15578972 }, 255 | { url = "https://files.pythonhosted.org/packages/04/1e/f8bb88f6157045dd5d9b27ccf433d016981032690969aa5c19e332b138c0/numpy-2.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:11c43995255eb4127115956495f43e9343736edb7fcdb0d973defd9de14cd84f", size = 17898106 }, 256 | { url = "https://files.pythonhosted.org/packages/2b/93/df59a5a3897c1f036ae8ff845e45f4081bb06943039ae28a3c1c7c780f22/numpy-2.2.4-cp312-cp312-win32.whl", hash = "sha256:65ef3468b53269eb5fdb3a5c09508c032b793da03251d5f8722b1194f1790c00", size = 6311190 }, 257 | { url = "https://files.pythonhosted.org/packages/46/69/8c4f928741c2a8efa255fdc7e9097527c6dc4e4df147e3cadc5d9357ce85/numpy-2.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:2aad3c17ed2ff455b8eaafe06bcdae0062a1db77cb99f4b9cbb5f4ecb13c5146", size = 12644305 }, 258 | { url = "https://files.pythonhosted.org/packages/2a/d0/bd5ad792e78017f5decfb2ecc947422a3669a34f775679a76317af671ffc/numpy-2.2.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cf4e5c6a278d620dee9ddeb487dc6a860f9b199eadeecc567f777daace1e9e7", size = 20933623 }, 259 | { url = "https://files.pythonhosted.org/packages/c3/bc/2b3545766337b95409868f8e62053135bdc7fa2ce630aba983a2aa60b559/numpy-2.2.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1974afec0b479e50438fc3648974268f972e2d908ddb6d7fb634598cdb8260a0", size = 14148681 }, 260 | { url = "https://files.pythonhosted.org/packages/6a/70/67b24d68a56551d43a6ec9fe8c5f91b526d4c1a46a6387b956bf2d64744e/numpy-2.2.4-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:79bd5f0a02aa16808fcbc79a9a376a147cc1045f7dfe44c6e7d53fa8b8a79392", size = 5148759 }, 261 | { url = "https://files.pythonhosted.org/packages/1c/8b/e2fc8a75fcb7be12d90b31477c9356c0cbb44abce7ffb36be39a0017afad/numpy-2.2.4-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:3387dd7232804b341165cedcb90694565a6015433ee076c6754775e85d86f1fc", size = 6683092 }, 262 | { 
url = "https://files.pythonhosted.org/packages/13/73/41b7b27f169ecf368b52533edb72e56a133f9e86256e809e169362553b49/numpy-2.2.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f527d8fdb0286fd2fd97a2a96c6be17ba4232da346931d967a0630050dfd298", size = 14081422 }, 263 | { url = "https://files.pythonhosted.org/packages/4b/04/e208ff3ae3ddfbafc05910f89546382f15a3f10186b1f56bd99f159689c2/numpy-2.2.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bce43e386c16898b91e162e5baaad90c4b06f9dcbe36282490032cec98dc8ae7", size = 16132202 }, 264 | { url = "https://files.pythonhosted.org/packages/fe/bc/2218160574d862d5e55f803d88ddcad88beff94791f9c5f86d67bd8fbf1c/numpy-2.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:31504f970f563d99f71a3512d0c01a645b692b12a63630d6aafa0939e52361e6", size = 15573131 }, 265 | { url = "https://files.pythonhosted.org/packages/a5/78/97c775bc4f05abc8a8426436b7cb1be806a02a2994b195945600855e3a25/numpy-2.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:81413336ef121a6ba746892fad881a83351ee3e1e4011f52e97fba79233611fd", size = 17894270 }, 266 | { url = "https://files.pythonhosted.org/packages/b9/eb/38c06217a5f6de27dcb41524ca95a44e395e6a1decdc0c99fec0832ce6ae/numpy-2.2.4-cp313-cp313-win32.whl", hash = "sha256:f486038e44caa08dbd97275a9a35a283a8f1d2f0ee60ac260a1790e76660833c", size = 6308141 }, 267 | { url = "https://files.pythonhosted.org/packages/52/17/d0dd10ab6d125c6d11ffb6dfa3423c3571befab8358d4f85cd4471964fcd/numpy-2.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:207a2b8441cc8b6a2a78c9ddc64d00d20c303d79fba08c577752f080c4007ee3", size = 12636885 }, 268 | { url = "https://files.pythonhosted.org/packages/fa/e2/793288ede17a0fdc921172916efb40f3cbc2aa97e76c5c84aba6dc7e8747/numpy-2.2.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8120575cb4882318c791f839a4fd66161a6fa46f3f0a5e613071aae35b5dd8f8", size = 20961829 }, 269 | { url = "https://files.pythonhosted.org/packages/3a/75/bb4573f6c462afd1ea5cbedcc362fe3e9bdbcc57aefd37c681be1155fbaa/numpy-2.2.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a761ba0fa886a7bb33c6c8f6f20213735cb19642c580a931c625ee377ee8bd39", size = 14161419 }, 270 | { url = "https://files.pythonhosted.org/packages/03/68/07b4cd01090ca46c7a336958b413cdbe75002286295f2addea767b7f16c9/numpy-2.2.4-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:ac0280f1ba4a4bfff363a99a6aceed4f8e123f8a9b234c89140f5e894e452ecd", size = 5196414 }, 271 | { url = "https://files.pythonhosted.org/packages/a5/fd/d4a29478d622fedff5c4b4b4cedfc37a00691079623c0575978d2446db9e/numpy-2.2.4-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:879cf3a9a2b53a4672a168c21375166171bc3932b7e21f622201811c43cdd3b0", size = 6709379 }, 272 | { url = "https://files.pythonhosted.org/packages/41/78/96dddb75bb9be730b87c72f30ffdd62611aba234e4e460576a068c98eff6/numpy-2.2.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f05d4198c1bacc9124018109c5fba2f3201dbe7ab6e92ff100494f236209c960", size = 14051725 }, 273 | { url = "https://files.pythonhosted.org/packages/00/06/5306b8199bffac2a29d9119c11f457f6c7d41115a335b78d3f86fad4dbe8/numpy-2.2.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2f085ce2e813a50dfd0e01fbfc0c12bbe5d2063d99f8b29da30e544fb6483b8", size = 16101638 }, 274 | { url = "https://files.pythonhosted.org/packages/fa/03/74c5b631ee1ded596945c12027649e6344614144369fd3ec1aaced782882/numpy-2.2.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:92bda934a791c01d6d9d8e038363c50918ef7c40601552a58ac84c9613a665bc", size = 15571717 }, 275 | { url = "https://files.pythonhosted.org/packages/cb/dc/4fc7c0283abe0981e3b89f9b332a134e237dd476b0c018e1e21083310c31/numpy-2.2.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ee4d528022f4c5ff67332469e10efe06a267e32f4067dc76bb7e2cddf3cd25ff", size = 17879998 }, 276 | { url = "https://files.pythonhosted.org/packages/e5/2b/878576190c5cfa29ed896b518cc516aecc7c98a919e20706c12480465f43/numpy-2.2.4-cp313-cp313t-win32.whl", hash = "sha256:05c076d531e9998e7e694c36e8b349969c56eadd2cdcd07242958489d79a7286", size = 6366896 }, 277 | { url = "https://files.pythonhosted.org/packages/3e/05/eb7eec66b95cf697f08c754ef26c3549d03ebd682819f794cb039574a0a6/numpy-2.2.4-cp313-cp313t-win_amd64.whl", hash = "sha256:188dcbca89834cc2e14eb2f106c96d6d46f200fe0200310fc29089657379c58d", size = 12739119 }, 278 | ] 279 | 280 | [[package]] 281 | name = "pandas" 282 | version = "2.2.3" 283 | source = { registry = "https://pypi.org/simple" } 284 | dependencies = [ 285 | { name = "numpy" }, 286 | { name = "python-dateutil" }, 287 | { name = "pytz" }, 288 | { name = "tzdata" }, 289 | ] 290 | sdist = { url = "https://files.pythonhosted.org/packages/9c/d6/9f8431bacc2e19dca897724cd097b1bb224a6ad5433784a44b587c7c13af/pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667", size = 4399213 } 291 | wheels = [ 292 | { url = "https://files.pythonhosted.org/packages/17/a3/fb2734118db0af37ea7433f57f722c0a56687e14b14690edff0cdb4b7e58/pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9", size = 12529893 }, 293 | { url = "https://files.pythonhosted.org/packages/e1/0c/ad295fd74bfac85358fd579e271cded3ac969de81f62dd0142c426b9da91/pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4", size = 11363475 }, 294 | { url = "https://files.pythonhosted.org/packages/c6/2a/4bba3f03f7d07207481fed47f5b35f556c7441acddc368ec43d6643c5777/pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3", size = 15188645 }, 295 | { url = "https://files.pythonhosted.org/packages/38/f8/d8fddee9ed0d0c0f4a2132c1dfcf0e3e53265055da8df952a53e7eaf178c/pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319", size = 12739445 }, 296 | { url = "https://files.pythonhosted.org/packages/20/e8/45a05d9c39d2cea61ab175dbe6a2de1d05b679e8de2011da4ee190d7e748/pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8", size = 16359235 }, 297 | { url = "https://files.pythonhosted.org/packages/1d/99/617d07a6a5e429ff90c90da64d428516605a1ec7d7bea494235e1c3882de/pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a", size = 14056756 }, 298 | { url = "https://files.pythonhosted.org/packages/29/d4/1244ab8edf173a10fd601f7e13b9566c1b525c4f365d6bee918e68381889/pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13", size = 11504248 }, 299 | { url = 
"https://files.pythonhosted.org/packages/64/22/3b8f4e0ed70644e85cfdcd57454686b9057c6c38d2f74fe4b8bc2527214a/pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", size = 12477643 }, 300 | { url = "https://files.pythonhosted.org/packages/e4/93/b3f5d1838500e22c8d793625da672f3eec046b1a99257666c94446969282/pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", size = 11281573 }, 301 | { url = "https://files.pythonhosted.org/packages/f5/94/6c79b07f0e5aab1dcfa35a75f4817f5c4f677931d4234afcd75f0e6a66ca/pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", size = 15196085 }, 302 | { url = "https://files.pythonhosted.org/packages/e8/31/aa8da88ca0eadbabd0a639788a6da13bb2ff6edbbb9f29aa786450a30a91/pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", size = 12711809 }, 303 | { url = "https://files.pythonhosted.org/packages/ee/7c/c6dbdb0cb2a4344cacfb8de1c5808ca885b2e4dcfde8008266608f9372af/pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", size = 16356316 }, 304 | { url = "https://files.pythonhosted.org/packages/57/b7/8b757e7d92023b832869fa8881a992696a0bfe2e26f72c9ae9f255988d42/pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", size = 14022055 }, 305 | { url = "https://files.pythonhosted.org/packages/3b/bc/4b18e2b8c002572c5a441a64826252ce5da2aa738855747247a971988043/pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", size = 11481175 }, 306 | { url = "https://files.pythonhosted.org/packages/76/a3/a5d88146815e972d40d19247b2c162e88213ef51c7c25993942c39dbf41d/pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", size = 12615650 }, 307 | { url = "https://files.pythonhosted.org/packages/9c/8c/f0fd18f6140ddafc0c24122c8a964e48294acc579d47def376fef12bcb4a/pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", size = 11290177 }, 308 | { url = "https://files.pythonhosted.org/packages/ed/f9/e995754eab9c0f14c6777401f7eece0943840b7a9fc932221c19d1abee9f/pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", size = 14651526 }, 309 | { url = "https://files.pythonhosted.org/packages/25/b0/98d6ae2e1abac4f35230aa756005e8654649d305df9a28b16b9ae4353bff/pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", size = 11871013 }, 310 | { url = "https://files.pythonhosted.org/packages/cc/57/0f72a10f9db6a4628744c8e8f0df4e6e21de01212c7c981d31e50ffc8328/pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", size = 15711620 }, 311 | { url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = 
"sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436 }, 312 | ] 313 | 314 | [[package]] 315 | name = "parso" 316 | version = "0.8.4" 317 | source = { registry = "https://pypi.org/simple" } 318 | sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609 } 319 | wheels = [ 320 | { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 }, 321 | ] 322 | 323 | [[package]] 324 | name = "pexpect" 325 | version = "4.9.0" 326 | source = { registry = "https://pypi.org/simple" } 327 | dependencies = [ 328 | { name = "ptyprocess" }, 329 | ] 330 | sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } 331 | wheels = [ 332 | { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, 333 | ] 334 | 335 | [[package]] 336 | name = "prompt-toolkit" 337 | version = "3.0.50" 338 | source = { registry = "https://pypi.org/simple" } 339 | dependencies = [ 340 | { name = "wcwidth" }, 341 | ] 342 | sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087 } 343 | wheels = [ 344 | { url = "https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816 }, 345 | ] 346 | 347 | [[package]] 348 | name = "ptyprocess" 349 | version = "0.7.0" 350 | source = { registry = "https://pypi.org/simple" } 351 | sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } 352 | wheels = [ 353 | { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, 354 | ] 355 | 356 | [[package]] 357 | name = "pure-eval" 358 | version = "0.2.3" 359 | source = { registry = "https://pypi.org/simple" } 360 | sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752 } 361 | wheels = [ 362 | { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842 }, 363 | ] 364 | 
365 | [[package]] 366 | name = "pygments" 367 | version = "2.19.1" 368 | source = { registry = "https://pypi.org/simple" } 369 | sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } 370 | wheels = [ 371 | { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, 372 | ] 373 | 374 | [[package]] 375 | name = "python-dateutil" 376 | version = "2.9.0.post0" 377 | source = { registry = "https://pypi.org/simple" } 378 | dependencies = [ 379 | { name = "six" }, 380 | ] 381 | sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } 382 | wheels = [ 383 | { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, 384 | ] 385 | 386 | [[package]] 387 | name = "python-louvain" 388 | version = "0.16" 389 | source = { registry = "https://pypi.org/simple" } 390 | dependencies = [ 391 | { name = "networkx" }, 392 | { name = "numpy" }, 393 | ] 394 | sdist = { url = "https://files.pythonhosted.org/packages/7c/0d/8787b021d52eb8764c0bb18ab95f720cf554902044c6a5cb1865daf45763/python-louvain-0.16.tar.gz", hash = "sha256:b7ba2df5002fd28d3ee789a49532baad11fe648e4f2117cf0798e7520a1da56b", size = 204641 } 395 | 396 | [[package]] 397 | name = "pytz" 398 | version = "2025.1" 399 | source = { registry = "https://pypi.org/simple" } 400 | sdist = { url = "https://files.pythonhosted.org/packages/5f/57/df1c9157c8d5a05117e455d66fd7cf6dbc46974f832b1058ed4856785d8a/pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e", size = 319617 } 401 | wheels = [ 402 | { url = "https://files.pythonhosted.org/packages/eb/38/ac33370d784287baa1c3d538978b5e2ea064d4c1b93ffbd12826c190dd10/pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57", size = 507930 }, 403 | ] 404 | 405 | [[package]] 406 | name = "pyvis" 407 | version = "0.3.2" 408 | source = { registry = "https://pypi.org/simple" } 409 | dependencies = [ 410 | { name = "ipython" }, 411 | { name = "jinja2" }, 412 | { name = "jsonpickle" }, 413 | { name = "networkx" }, 414 | ] 415 | wheels = [ 416 | { url = "https://files.pythonhosted.org/packages/ab/4b/e37e4e5d5ee1179694917b445768bdbfb084f5a59ecd38089d3413d4c70f/pyvis-0.3.2-py3-none-any.whl", hash = "sha256:5720c4ca8161dc5d9ab352015723abb7a8bb8fb443edeb07f7a322db34a97555", size = 756038 }, 417 | ] 418 | 419 | [[package]] 420 | name = "pyvis-network" 421 | version = "0.0.6" 422 | source = { registry = "https://pypi.org/simple" } 423 | dependencies = [ 424 | { name = "pandas" }, 425 | ] 426 | sdist = { url = "https://files.pythonhosted.org/packages/80/d0/a891da8eb87276756c6ff0fa9c9506ad61948bc71e547146750b45324f4e/pyvis-network-0.0.6.tar.gz", hash = "sha256:c5dc11cd5e8de2f02b181741b8599ef2388c1d8f6eb65d721163c68d51803781", size = 48432 } 427 | 
wheels = [ 428 | { url = "https://files.pythonhosted.org/packages/3e/b7/9ed53162e01d69ba5b3465f896c10e41e98ff71b2ee016b28a5e654e120e/pyvis_network-0.0.6-py3-none-any.whl", hash = "sha256:8d110456e1af4f634bf7a3e306077104e9b6d511aa37bab03c9eb152e3bbccf3", size = 49560 }, 429 | ] 430 | 431 | [[package]] 432 | name = "requests" 433 | version = "2.32.3" 434 | source = { registry = "https://pypi.org/simple" } 435 | dependencies = [ 436 | { name = "certifi" }, 437 | { name = "charset-normalizer" }, 438 | { name = "idna" }, 439 | { name = "urllib3" }, 440 | ] 441 | sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } 442 | wheels = [ 443 | { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, 444 | ] 445 | 446 | [[package]] 447 | name = "six" 448 | version = "1.17.0" 449 | source = { registry = "https://pypi.org/simple" } 450 | sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031 } 451 | wheels = [ 452 | { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 }, 453 | ] 454 | 455 | [[package]] 456 | name = "stack-data" 457 | version = "0.6.3" 458 | source = { registry = "https://pypi.org/simple" } 459 | dependencies = [ 460 | { name = "asttokens" }, 461 | { name = "executing" }, 462 | { name = "pure-eval" }, 463 | ] 464 | sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707 } 465 | wheels = [ 466 | { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, 467 | ] 468 | 469 | [[package]] 470 | name = "tomli" 471 | version = "2.2.1" 472 | source = { registry = "https://pypi.org/simple" } 473 | sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } 474 | wheels = [ 475 | { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762 }, 476 | { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453 }, 477 | { url = 
"https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486 }, 478 | { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349 }, 479 | { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159 }, 480 | { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243 }, 481 | { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645 }, 482 | { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584 }, 483 | { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875 }, 484 | { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418 }, 485 | { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, 486 | { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, 487 | { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, 488 | { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, 489 | { url = 
"https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, 490 | { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, 491 | { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, 492 | { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, 493 | { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, 494 | { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, 495 | { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, 496 | ] 497 | 498 | [[package]] 499 | name = "traitlets" 500 | version = "5.14.3" 501 | source = { registry = "https://pypi.org/simple" } 502 | sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621 } 503 | wheels = [ 504 | { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359 }, 505 | ] 506 | 507 | [[package]] 508 | name = "tzdata" 509 | version = "2025.1" 510 | source = { registry = "https://pypi.org/simple" } 511 | sdist = { url = "https://files.pythonhosted.org/packages/43/0f/fa4723f22942480be4ca9527bbde8d43f6c3f2fe8412f00e7f5f6746bc8b/tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694", size = 194950 } 512 | wheels = [ 513 | { url = "https://files.pythonhosted.org/packages/0f/dd/84f10e23edd882c6f968c21c2434fe67bd4a528967067515feca9e611e5e/tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639", size = 346762 }, 514 | ] 515 | 516 | [[package]] 517 | name = "urllib3" 518 | version = "2.3.0" 519 | source = { registry = "https://pypi.org/simple" } 520 | sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = 
"sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 } 521 | wheels = [ 522 | { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 }, 523 | ] 524 | 525 | [[package]] 526 | name = "wcwidth" 527 | version = "0.2.13" 528 | source = { registry = "https://pypi.org/simple" } 529 | sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } 530 | wheels = [ 531 | { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, 532 | ] 533 | --------------------------------------------------------------------------------