├── mean_connection_time.png
├── mean_requests_per_second.png
├── mean_tls_handshake_time.png
├── mean_total_response_time.png
├── requirements.txt
├── model.py
├── LICENSE
├── benchmark_analytics.py
├── benchmark_tests.py
├── benchmark.py
├── README.md
├── .gitignore
└── factory.py

/mean_connection_time.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/perodriguezl/python-http-libraries-benchmark/HEAD/mean_connection_time.png
--------------------------------------------------------------------------------

/mean_requests_per_second.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/perodriguezl/python-http-libraries-benchmark/HEAD/mean_requests_per_second.png
--------------------------------------------------------------------------------

/mean_tls_handshake_time.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/perodriguezl/python-http-libraries-benchmark/HEAD/mean_tls_handshake_time.png
--------------------------------------------------------------------------------

/mean_total_response_time.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/perodriguezl/python-http-libraries-benchmark/HEAD/mean_total_response_time.png
--------------------------------------------------------------------------------

/requirements.txt:
--------------------------------------------------------------------------------
aiohttp
httpx
requests
urllib3
pycurl
coverage
pandas
matplotlib
seaborn
niquests
--------------------------------------------------------------------------------

/model.py:
--------------------------------------------------------------------------------
class BenchmarkResult:
    """Container for the metrics collected in a single package run."""

    def __init__(self, requests_per_sec, total_time, avg_conn_time, avg_tls_time=None):
        self.requests_per_sec = requests_per_sec
        self.total_time = total_time
        self.avg_conn_time = avg_conn_time
        self.avg_tls_time = avg_tls_time  # None for clients that do not expose TLS timing
--------------------------------------------------------------------------------

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2025 Pedro

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------

/benchmark_analytics.py:
--------------------------------------------------------------------------------
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Load CSV
df = pd.read_csv("benchmark_results.csv")

# List of packages
packages = ["aiohttp", "httpx", "pycurl", "requests", "urllib3"]

# Plot configuration: metric key -> (title, y-axis label, output file)
metrics_info = {
    "req_sec": ("Mean Requests per Second Across Runs", "Mean Requests/sec", "mean_requests_per_second.png"),
    "total": ("Mean Total Response Time Across Runs", "Mean Total Time (s)", "mean_total_response_time.png"),
    "conn_avg": ("Mean Connection Time Across Runs", "Mean Connection Time (s)", "mean_connection_time.png"),
    "tls_avg": ("Mean TLS Handshake Time Across Runs", "Mean TLS Time (s)", "mean_tls_handshake_time.png"),
}


def plot_metric(metric_key, title, ylabel, filename):
    plt.figure(figsize=(10, 6))
    for pkg in packages:
        col_name = f"{metric_key}_{pkg}"
        series = df[col_name]
        if metric_key == "tls_avg":
            # Clients without TLS timing write the literal string "N/A";
            # drop those rows before converting to float.
            series = series[series != "N/A"]
        series = series.astype(float)
        # Cumulative (expanding) mean: the value at run n is the mean of runs 1..n.
        mean_value = series.expanding().mean()
        sns.lineplot(x=mean_value.index, y=mean_value, label=pkg)
    plt.title(title)
    plt.xlabel("Run #")
    plt.ylabel(ylabel)
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(filename)
    plt.show()


# Generate all plots
for metric_key, (title, ylabel, filename) in metrics_info.items():
    plot_metric(metric_key, title, ylabel, filename)
--------------------------------------------------------------------------------
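Note: the plots show cumulative means. `Series.expanding().mean()` yields, at run n, the average of runs 1 through n, so the curves stabilize as runs accumulate. A minimal standalone illustration of that behavior (not part of the repo):

```python
import pandas as pd

s = pd.Series([10.0, 20.0, 30.0])
# Expanding mean: [10.0, 15.0, 20.0]; each point averages all runs so far.
print(s.expanding().mean().tolist())
```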
/benchmark_tests.py:
--------------------------------------------------------------------------------
import asyncio
import unittest
from unittest.mock import AsyncMock, MagicMock, patch

import benchmark


class TestMockedBenchmarks(unittest.TestCase):

    @patch("benchmark.PackageFactory.get_package")
    def test_run_package_mocked(self, mock_get_package):
        # Create a mocked BenchmarkResult-like object
        mock_result = MagicMock()
        mock_result.requests_per_sec = 100.0
        mock_result.total_time = 2.0
        mock_result.avg_conn_time = 0.02
        mock_result.avg_tls_time = 0.01

        # Create a mock package exposing both run_sync and run_async
        mock_package = MagicMock()
        mock_package.run_sync.return_value = mock_result
        mock_package.run_async = AsyncMock(return_value=mock_result)

        mock_get_package.return_value = mock_package

        # Exercise run_package for both an async and a sync client
        aio_result = asyncio.run(benchmark.run_package("aiohttp"))
        sync_result = asyncio.run(benchmark.run_package("requests"))

        self.assertEqual(aio_result.requests_per_sec, 100.0)
        self.assertEqual(sync_result.avg_tls_time, 0.01)

    # run_benchmarks() drives run_package via asyncio.run(), so the patch must be
    # an AsyncMock; a plain MagicMock would not return an awaitable coroutine.
    @patch("benchmark.run_package", new_callable=AsyncMock)
    @patch("benchmark.PackageFactory.get_package")
    def test_run_benchmarks_mocked(self, mock_get_package, mock_run_package):
        # Set up the mock result
        mock_result = MagicMock()
        mock_result.requests_per_sec = 99.0
        mock_result.total_time = 1.5
        mock_result.avg_conn_time = 0.015
        mock_result.avg_tls_time = 0.005

        mock_run_package.return_value = mock_result

        # Shadow the builtin range in benchmark's namespace so the loop only runs
        # twice; create=True is required because `range` is not an existing
        # attribute of the module. Note this still appends one mocked row to
        # benchmark_results.csv (run 0 is discarded as warm-up).
        with patch("benchmark.range", return_value=range(2), create=True):
            benchmark.run_benchmarks()

        # Ensure the mock was used
        self.assertTrue(mock_run_package.called)


if __name__ == "__main__":
    unittest.main()
--------------------------------------------------------------------------------

/benchmark.py:
--------------------------------------------------------------------------------
import asyncio
import csv
import random
import os
import sys
from platform import system
from datetime import datetime
from factory import PackageFactory

CSV_FILE = "benchmark_results.csv"
NUM_REQUESTS_PER_PACKAGE_RUN = 100
MAX_RETRIES = 3

async def run_package(package_name):
    package = PackageFactory.get_package(package_name)
    retries = 0
    while retries < MAX_RETRIES:
        try:
            if package_name in ["aiohttp", "httpx"]:
                return await package.run_async()
            else:
                return package.run_sync()
        except Exception as e:
            retries += 1
            print(f"Error while running {package_name} (attempt {retries}): {e}")
            if retries >= MAX_RETRIES:
                print(f"Exceeded max retries for {package_name}. Exiting.")
                sys.exit(1)
            print("Retrying after 30 seconds...")
            # Non-blocking sleep; time.sleep() would stall the event loop.
            await asyncio.sleep(30)


def run_benchmarks():
    packages = ["aiohttp", "httpx", "pycurl", "requests", "urllib3"]
    metrics = ["req_sec", "total", "conn_avg", "tls_avg"]

    fieldnames = ["start_time", "end_time", "num_requests"] + [f"{metric}_{pkg}" for metric in metrics for pkg in packages]

    file_exists = os.path.isfile(CSV_FILE)

    with open(CSV_FILE, mode='a', newline='') as file:
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        if not file_exists:
            writer.writeheader()

        for run in range(101):  # run 0 is a warm-up; the remaining 100 runs are recorded
            print(f"Benchmark run: {run + 1}")
            # Randomize execution order so no library benefits from a fixed position
            random.shuffle(packages)

            start_time = datetime.now().isoformat()

            results = {}

            for pkg_name in packages:
                result = asyncio.run(run_package(pkg_name))
                results[f"req_sec_{pkg_name}"] = f"{result.requests_per_sec:.2f}"
                results[f"total_{pkg_name}"] = f"{result.total_time:.2f}"
                results[f"conn_avg_{pkg_name}"] = f"{result.avg_conn_time:.4f}"
                results[f"tls_avg_{pkg_name}"] = f"{result.avg_tls_time:.4f}" if result.avg_tls_time is not None else "N/A"

            end_time = datetime.now().isoformat()

            # Discard the warm-up run: it executes fully but is not written to the CSV.
            if run == 0:
                continue

            results["start_time"] = start_time
            results["end_time"] = end_time
            results["num_requests"] = NUM_REQUESTS_PER_PACKAGE_RUN

            writer.writerow(results)

            print("Benchmark Results:")
            for key, value in results.items():
                print(f"{key}: {value}")
            print("-" * 40)

if __name__ == "__main__":
    if system() == "Windows":
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    run_benchmarks()
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# 🐍 python-http-libraries-benchmark

This project benchmarks popular Python HTTP libraries to compare their performance across core networking metrics. The goal is to offer insight into how each library performs under real-world request loads.

---

## 📦 HTTP Libraries Benchmarked

This benchmark includes five widely used Python HTTP libraries (`requests`, `urllib3`, `httpx`, `aiohttp`, and `pycurl`), chosen for their relevance, diversity, and real-world usage. `requests` is the most popular and beginner-friendly library, while `urllib3` offers low-level control and powers `requests` internally. `httpx` and `aiohttp` provide modern asynchronous support for high-concurrency applications. Finally, `pycurl` wraps the high-performance C-based libcurl library, often used in system-level or legacy environments. Together, they represent a broad spectrum of HTTP client capabilities in Python, from ease of use to advanced performance tuning.

- [`aiohttp`](https://docs.aiohttp.org/)
- [`httpx`](https://www.python-httpx.org/)
- [`requests`](https://docs.python-requests.org/)
- [`urllib3`](https://urllib3.readthedocs.io/)
- [`pycurl`](http://pycurl.io/)

---

## 📊 Metrics Captured

Each library is evaluated using the following metrics:

- **Requests per Second (`req/sec`)**: Measures throughput. A higher number indicates the library handles more traffic efficiently.
- **Total Time**: Measures how long the library takes to complete a full batch of requests.
- **Average Connection Time**: The average wall-clock time per individual request, measured in `factory.py` from request start until the response is handled.
- **Average TLS Handshake Time**: Captured only where the client exposes it: `pycurl` reports libcurl's `APPCONNECT_TIME`, `requests` records a rough proxy, and the remaining libraries log `N/A`.

These metrics are chosen to reflect practical usage in applications such as REST APIs, microservices, and integrations.

---

## 🎯 Benchmarking Strategy

To ensure fair and unbiased comparisons:

- Each run sends a fixed number of requests per library (`NUM_REQUESTS_PER_PACKAGE_RUN = 100`) to the target configured in `factory.py` (`TEST_URL`, which defaults to `http://localhost`).
- The script performs **101 complete benchmark runs** (`range(101)` in `benchmark.py`), where the first is used as warm-up and excluded from the CSV output.
- **Randomized Execution Order**: For every run, the order in which HTTP libraries are tested is randomized. This prevents any one library from benefiting from system or network caching effects due to consistent positioning.

---

## 📁 Output

Benchmark results are stored in `benchmark_results.csv`. Each row represents one full benchmarking round and includes:

- Start and end timestamps
- Requests per second, total duration, average connection time, and average TLS handshake time (`N/A` where unavailable) for each library
- Number of requests executed

---

## ▶️ How to Run

1. **Install requirements:**

   ```bash
   pip install -r requirements.txt
   ```

2. **Running the benchmark:**

   ```bash
   python benchmark.py
   ```

3. **Running Analytics:**

   ```bash
   python benchmark_analytics.py
   ```
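
4. **Running the tests (optional):** `benchmark_tests.py` contains mocked unit tests, and `coverage` is already listed in `requirements.txt`, so a standard `unittest` invocation should work:

   ```bash
   python -m unittest benchmark_tests
   # or, with coverage:
   coverage run -m unittest benchmark_tests && coverage report
   ```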

## Results

### Environment

- **OS:** Windows 11 Home
- **Processor:** Intel(R) Core(TM) i5-10400 CPU @ 2.90GHz
- **RAM:** 12.0 GB
- **Location Type:** Localhost

![Mean Connection Time](mean_connection_time.png)
![Mean Requests/sec](mean_requests_per_second.png)
![Mean TLS Handshake Time](mean_tls_handshake_time.png)
![Mean Total Response Time](mean_total_response_time.png)
--------------------------------------------------------------------------------
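For reference, the columns in `benchmark_results.csv` follow the `{metric}_{package}` pattern built in `benchmark.py`, grouped metric-major. An abbreviated sketch of the header row (truncated here with `...` for brevity):

```text
start_time,end_time,num_requests,req_sec_aiohttp,req_sec_httpx,req_sec_pycurl,req_sec_requests,req_sec_urllib3,total_aiohttp,...,tls_avg_requests,tls_avg_urllib3
```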

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# Ruff stuff:
.ruff_cache/

# PyPI configuration file
.pypirc

*.csv
.idea/
--------------------------------------------------------------------------------

/factory.py:
--------------------------------------------------------------------------------
import asyncio
import time
import requests
import httpx
import aiohttp
import urllib3
import pycurl
from io import BytesIO
from model import BenchmarkResult

# Plain-HTTP target: with an http:// URL there is no TLS handshake,
# so the TLS metrics below are only meaningful for https:// targets.
TEST_URL = "http://localhost"
NUM_REQUESTS_PER_PACKAGE_RUN = 100
CONCURRENT_REQUESTS = 1

class Package:
    async def run_async(self):
        pass

    def run_sync(self):
        pass

class AiohttpPackage(Package):
    async def run_async(self):
        semaphore = asyncio.Semaphore(CONCURRENT_REQUESTS)
        conn_times = []

        async def fetch(session, url):
            async with semaphore:
                # Timed from request start until the body is fully read; recorded
                # as "connection time" for comparability with the sync clients.
                start_conn = time.time()
                async with session.get(url) as response:
                    await response.read()
                conn_time = time.time() - start_conn
                conn_times.append(conn_time)

        async with aiohttp.ClientSession() as session:
            start_total = time.time()
            tasks = [fetch(session, TEST_URL) for _ in range(NUM_REQUESTS_PER_PACKAGE_RUN)]
            await asyncio.gather(*tasks)
            duration = time.time() - start_total

        return BenchmarkResult(NUM_REQUESTS_PER_PACKAGE_RUN/duration, duration, sum(conn_times)/NUM_REQUESTS_PER_PACKAGE_RUN, avg_tls_time=None)

class HttpxPackage(Package):
    async def run_async(self):
        semaphore = asyncio.Semaphore(CONCURRENT_REQUESTS)
        conn_times = []

        async def fetch(client, url):
            async with semaphore:
                start_conn = time.time()
                await client.get(url)  # httpx reads the body by default
                conn_time = time.time() - start_conn
                conn_times.append(conn_time)

        async with httpx.AsyncClient() as client:
            start_total = time.time()
            tasks = [fetch(client, TEST_URL) for _ in range(NUM_REQUESTS_PER_PACKAGE_RUN)]
            await asyncio.gather(*tasks)
            duration = time.time() - start_total

        return BenchmarkResult(NUM_REQUESTS_PER_PACKAGE_RUN/duration, duration, sum(conn_times)/NUM_REQUESTS_PER_PACKAGE_RUN, avg_tls_time=None)

class PycurlPackage(Package):
    def run_sync(self):
        total_conn_time = 0
        total_tls_time = 0

        start_total = time.time()
        for _ in range(NUM_REQUESTS_PER_PACKAGE_RUN):
            buffer = BytesIO()
            c = pycurl.Curl()
            c.setopt(c.URL, TEST_URL)
            c.setopt(c.WRITEDATA, buffer)

            start_conn = time.time()
            c.perform()
            conn_time = time.time() - start_conn
            total_conn_time += conn_time

            # libcurl's APPCONNECT_TIME: seconds from transfer start until the
            # SSL/TLS handshake completed (0.0 for plain-HTTP transfers).
            tls_time = c.getinfo(pycurl.APPCONNECT_TIME)
            total_tls_time += tls_time
            c.close()

        duration = time.time() - start_total

        return BenchmarkResult(NUM_REQUESTS_PER_PACKAGE_RUN/duration, duration, total_conn_time/NUM_REQUESTS_PER_PACKAGE_RUN, total_tls_time/NUM_REQUESTS_PER_PACKAGE_RUN)

class RequestsPackage(Package):
    def run_sync(self):
        total_conn_time = 0
        total_tls_time = 0

        start_total = time.time()
        for _ in range(NUM_REQUESTS_PER_PACKAGE_RUN):
            start_conn = time.time()
            response = requests.get(TEST_URL, stream=True, timeout=(2.0, 5.0))
            conn_time = time.time() - start_conn
            total_conn_time += conn_time
            # requests does not expose handshake timing; `elapsed` measures the
            # time until the response headers arrived and is used here as a
            # rough proxy that overstates the pure TLS handshake time.
            total_tls_time += response.elapsed.total_seconds()
            response.close()  # stream=True: release the socket without reading the body

        duration = time.time() - start_total

        return BenchmarkResult(NUM_REQUESTS_PER_PACKAGE_RUN/duration, duration, total_conn_time/NUM_REQUESTS_PER_PACKAGE_RUN, total_tls_time/NUM_REQUESTS_PER_PACKAGE_RUN)

class Urllib3Package(Package):
    def run_sync(self):
        http = urllib3.PoolManager()
        total_conn_time = 0

        start_total = time.time()
        for _ in range(NUM_REQUESTS_PER_PACKAGE_RUN):
            start_conn = time.time()
            response = http.request('GET', TEST_URL, preload_content=False, timeout=urllib3.Timeout(connect=2.0, read=5.0))
            conn_time = time.time() - start_conn
            total_conn_time += conn_time
            # Read and discard the remaining body so the connection can be
            # safely returned to the pool for reuse.
            response.drain_conn()

        duration = time.time() - start_total

        return BenchmarkResult(NUM_REQUESTS_PER_PACKAGE_RUN/duration, duration, total_conn_time/NUM_REQUESTS_PER_PACKAGE_RUN, avg_tls_time=None)

class PackageFactory:
    @staticmethod
    def get_package(package_name):
        if package_name == "aiohttp":
            return AiohttpPackage()
        elif package_name == "httpx":
            return HttpxPackage()
        elif package_name == "pycurl":
            return PycurlPackage()
        elif package_name == "requests":
            return RequestsPackage()
        elif package_name == "urllib3":
            return Urllib3Package()
        raise ValueError(f"Unknown package: {package_name}")  # fail fast instead of silently returning None
--------------------------------------------------------------------------------
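
A minimal sketch of driving `factory.py` directly for a one-off run. The `main()` wrapper here is illustrative and not part of the repo; it assumes a server is listening at `TEST_URL`:

```python
import asyncio

from factory import PackageFactory


def main():
    # aiohttp and httpx expose run_async(); the other packages are synchronous.
    result = asyncio.run(PackageFactory.get_package("aiohttp").run_async())
    print(f"aiohttp: {result.requests_per_sec:.2f} req/s over {result.total_time:.2f}s")

    result = PackageFactory.get_package("requests").run_sync()
    print(f"requests: {result.requests_per_sec:.2f} req/s over {result.total_time:.2f}s")


if __name__ == "__main__":
    main()
```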