├── src
│   ├── __init__.py
│   ├── __main__.py
│   ├── crawler_config.py
│   ├── mytypes.py
│   ├── renderer.py
│   ├── crawler.py
│   ├── main.py
│   └── helpers.py
├── INPUT.json
├── tests
│   ├── __init__.py
│   ├── test_html.py
│   ├── test_renderer.py
│   └── test_helpers.py
├── .actor
│   ├── actor.json
│   ├── dataset_schema.json
│   ├── input_schema.json
│   └── Dockerfile
├── Makefile
├── .github
│   └── workflows
│       └── master.yml
├── .dockerignore
├── .gitignore
├── pyproject.toml
├── README.md
└── LICENSE
/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/INPUT.json:
--------------------------------------------------------------------------------
1 | {
2 | "startUrl": "https://docs.apify.com/cli/docs"
3 | }
--------------------------------------------------------------------------------
/src/__main__.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from .main import main
4 |
5 | # Execute the llms.txt generator actor entry point.
6 | asyncio.run(main())
7 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from pathlib import Path
3 |
4 | parent_dir = Path(__file__).parent
5 | src_dir = parent_dir.parent / 'src'
6 |
7 | sys.path.append(str(src_dir))
8 |
--------------------------------------------------------------------------------
/.actor/actor.json:
--------------------------------------------------------------------------------
1 | {
2 | "actorSpecification": 1,
3 | "name": "llmstxt-generator",
4 | "title": "Generate /llms.txt for the given site",
5 | "description": "Generates /llms.txt for the given site",
6 | "version": "0.2",
7 | "buildTag": "latest",
8 | "input": "./input_schema.json",
9 | "storages": {
10 | "dataset": "./dataset_schema.json"
11 | },
12 | "dockerfile": "./Dockerfile"
13 | }
14 |
--------------------------------------------------------------------------------
/.actor/dataset_schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "actorSpecification": 1,
3 | "views": {
4 | "overview": {
5 | "title": "Overview",
6 | "transformation": {
7 | "fields": ["llms.txt"]
8 | },
9 | "display": {
10 | "component": "table",
11 | "properties": {
12 | "llms.txt": {
13 | "label": "llms.txt",
14 | "format": "text"
15 | }
16 | }
17 | }
18 | }
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: clean install-dev lint type-check unit-test format
2 |
3 | DIRS_WITH_CODE = src/ tests/
4 |
5 | clean:
6 | rm -rf .mypy_cache .pytest_cache .ruff_cache build dist htmlcov .coverage
7 |
8 | install-dev:
9 | uv sync --all-extras
10 |
11 | lint:
12 | uv run ruff check $(DIRS_WITH_CODE)
13 |
14 | type-check:
15 | uv run mypy $(DIRS_WITH_CODE)
16 |
17 | format:
18 | uv run ruff format $(DIRS_WITH_CODE)
19 |
20 | unit-test:
21 | uv run pytest tests/
22 |
--------------------------------------------------------------------------------
/src/crawler_config.py:
--------------------------------------------------------------------------------
1 | CRAWLER_CONFIG = {
2 |     'htmlTransformer': 'none',
3 |     # 'none' is a dummy value used to prevent the removal of any elements;
4 |     # the values below are overridden by get_crawler_actor_config (default depth 1)
5 |     'maxCrawlDepth': 0,  # fallback of 0 to crawl only the root page in case it is not set
6 |     'maxCrawlPages': 10,  # fallback of 10 in case it is not set
7 | 'saveHtmlAsFile': True,
8 | 'startUrls': [
9 | # is populated by get_crawler_actor_config
10 | ],
11 | 'useSitemaps': False,
12 | }
13 |
--------------------------------------------------------------------------------
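The values above are conservative fallbacks that a caller is expected to override per run. A minimal sketch of such an override step, assuming a hypothetical `build_crawler_config` helper in the spirit of the `get_crawler_actor_config` function referenced in the comments (which is not part of this repository):

```python
from src.crawler_config import CRAWLER_CONFIG


def build_crawler_config(start_url: str, max_crawl_depth: int = 1, max_crawl_pages: int = 50) -> dict:
    """Hypothetical helper: copy the defaults and apply per-run overrides."""
    config = dict(CRAWLER_CONFIG)
    config['startUrls'] = [{'url': start_url}]  # populated per run, as the comment above notes
    config['maxCrawlDepth'] = max_crawl_depth
    config['maxCrawlPages'] = max_crawl_pages
    return config
```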
/tests/test_html.py:
--------------------------------------------------------------------------------
1 | from src.helpers import get_description_from_html
2 |
3 |
4 | def test_description_meta_tag() -> None:
5 |     html = '<html><head><meta name="description" content="testdesc"></head></html>'
6 |     assert get_description_from_html(html) == 'testdesc'
7 |
8 |
9 | def test_description_meta_tag_with_capital_d() -> None:
10 |     html = '<html><head><meta name="Description" content="testdec"></head></html>'
11 |     assert get_description_from_html(html) == 'testdec'
12 |
13 |
14 | def test_no_description_meta_tag() -> None:
15 |     html = '<html><head></head></html>'
16 |     assert get_description_from_html(html) is None
17 |
--------------------------------------------------------------------------------
/src/mytypes.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import TypedDict
4 |
5 |
6 | class LinkDict(TypedDict):
7 | """Dictionary representing a single link in the `llms.txt` file."""
8 |
9 | url: str
10 | title: str
11 | description: str | None
12 |
13 |
14 | class SectionDict(TypedDict):
15 | """Dictionary representing a single section in the `llms.txt` file."""
16 |
17 | title: str
18 | links: list[LinkDict]
19 |
20 |
21 | class LLMSData(TypedDict):
22 | """Dictionary representing the data structure of the `llms.txt` file."""
23 |
24 | title: str
25 | description: str | None
26 | details: str | None
27 | sections: dict[str, SectionDict]
28 |
29 |
30 | class CrawledPage(TypedDict):
31 | """Dictionary representing the crawled pages."""
32 |
33 | url: str
34 | title: str
35 | description: str | None
36 |
--------------------------------------------------------------------------------
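Since these are `TypedDict`s, instances are plain dictionaries that type checkers verify structurally; an illustrative literal:

```python
from src.mytypes import LLMSData

data: LLMSData = {
    'title': 'example.com',
    'description': 'Example site',
    'details': None,
    'sections': {
        '/docs': {
            'title': 'Docs',
            'links': [
                {'url': 'https://example.com/docs/a', 'title': 'Page A', 'description': None},
            ],
        },
    },
}
```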
/.actor/input_schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "title": "Generate /llms.txt for the given site",
3 | "type": "object",
4 | "schemaVersion": 1,
5 | "properties": {
6 | "startUrl": {
7 | "title": "Start URL",
8 | "type": "string",
9 | "description": "The URL from which the crawler will start to generate the /llms.txt file.",
10 | "editor": "textfield",
11 | "prefill": "https://docs.apify.com/cli/docs"
12 | },
13 | "maxCrawlDepth": {
14 | "title": "Max crawl depth",
15 | "type": "integer",
16 | "description": "The maximum depth of the crawl. Default is 1.",
17 | "editor": "number",
18 | "default": 1
19 | },
20 | "maxCrawlPages": {
21 | "title": "Max crawl pages",
22 | "type": "integer",
23 | "description": "The maximum number of pages to crawl. Default is 50.",
24 | "editor": "number",
25 | "default": 50
26 | }
27 | },
28 | "required": ["startUrl"]
29 | }
30 |
--------------------------------------------------------------------------------
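An illustrative `INPUT.json` exercising all three fields defined by the schema (the values are arbitrary):

```json
{
    "startUrl": "https://docs.apify.com/cli/docs",
    "maxCrawlDepth": 2,
    "maxCrawlPages": 100
}
```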
/.github/workflows/master.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 |
4 | name: Python Lint, Check, Test
5 |
6 | on:
7 | push:
8 | branches: ["master"]
9 | pull_request:
10 | branches: ["master"]
11 |
12 | jobs:
13 | build:
14 | runs-on: ubuntu-latest
15 | timeout-minutes: 20
16 | strategy:
17 | fail-fast: false
18 | matrix:
19 | python-version: ["3.12"]
20 |
21 | steps:
22 | - name: Checkout repository
23 | uses: actions/checkout@v3
24 |
25 | - name: Set up Python ${{ matrix.python-version }}
26 | uses: actions/setup-python@v4
27 | with:
28 | python-version: ${{ matrix.python-version }}
29 |
30 |       - name: Install uv
31 | run: pip install uv
32 |
33 | - name: Install dependencies
34 | run: make install-dev
35 |
36 | - name: Run lint
37 | run: make lint
38 |
39 | - name: Run type checks
40 | run: make type-check
41 |
42 | - name: Tests (unit)
43 | run: make unit-test
44 |
--------------------------------------------------------------------------------
/.actor/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM apify/actor-python:3.12
2 |
3 | # Second, copy just pyproject.toml and uv.lock into the Actor image,
4 | # since they should be the only files that affect the dependency install in the next step,
5 | # in order to speed up the build
6 | COPY pyproject.toml uv.lock ./
7 |
8 | # Install the packages specified in pyproject.toml and uv.lock,
9 | # Print the installed Python version, pip version
10 | # and all installed packages with their versions for debugging
11 | RUN echo "Python version:" \
12 | && python --version \
13 | && echo "Pip version:" \
14 | && pip --version \
15 | && echo "Installing uv:" \
16 | && pip install --no-cache-dir uv \
17 | && echo "Installing dependencies:" \
18 | && uv sync --frozen --no-dev \
19 | && echo "All installed Python packages:" \
20 | && uv pip freeze
21 |
22 | # Next, copy the remaining files and directories with the source code.
23 | # Since we do this after installing the dependencies, quick build will be really fast
24 | # for most source file changes.
25 | COPY . ./
26 |
27 | # Use compileall to ensure the runnability of the Actor Python code.
28 | #RUN python3 -m compileall -q .
29 |
30 | # create non-root user
31 | RUN useradd -m apify && \
32 | chown -R apify:apify /usr/src/app
33 |
34 | USER apify
35 |
36 | # Specify how to launch the source code of your Actor.
37 | # Here, the "python3 -m src" command is run
38 | ENV PATH="/usr/src/app/.venv/bin:$PATH"
39 | CMD ["python3", "-m", "src"]
--------------------------------------------------------------------------------
/src/renderer.py:
--------------------------------------------------------------------------------
1 | from src.mytypes import LLMSData
2 |
3 |
4 | def render_llms_txt(data: LLMSData) -> str:
5 | """Generates llms.txt file from the provided data.
6 |
7 | Example data:
8 | {
9 | 'title': 'Example',
10 | 'description': 'Example description',
11 | 'details': 'Example details',
12 |         'sections': {
13 |             '/section-1': {
14 |                 'title': 'Section 1',
15 |                 'links': [
16 |                     {'url': 'https://example.com', 'title': 'Example', 'description': 'Example description'},
17 |                 ],
18 |             },
19 |         },
20 | }
21 | Example output:
22 | # Example
23 |
24 | > Example description
25 |
26 | Example details
27 |
28 | ## Section 1
29 |
30 | - [Example](https://example.com): Example description
31 |
32 | """
33 | result = [f'# {data["title"].strip()}\n\n']
34 |
35 | if description := data.get('description'):
36 | result.append(f'> {description.strip()}\n\n')
37 |
38 | if details := data.get('details'):
39 | result.append(f'{details.strip()}\n\n')
40 |
41 | for section_dir in sorted(data.get('sections', {})):
42 | section = data['sections'][section_dir]
43 | result.append(f'## {section["title"].strip()}\n\n')
44 | for link in section.get('links', []):
45 | link_str = f'- [{link["title"].strip()}]({link["url"].strip()})'
46 | if link_description := link.get('description'):
47 | link_str += f': {link_description.strip()}'
48 | result.append(f'{link_str}\n')
49 | result.append('\n')
50 |
51 | return ''.join(result)
52 |
--------------------------------------------------------------------------------
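A minimal usage sketch of `render_llms_txt` with an illustrative `LLMSData` literal (note that a link without a description is rendered without the trailing colon):

```python
from src.mytypes import LLMSData
from src.renderer import render_llms_txt

data: LLMSData = {
    'title': 'Example',
    'description': 'Example description',
    'details': None,
    'sections': {
        '/': {
            'title': 'Index',
            'links': [{'url': 'https://example.com', 'title': 'Example', 'description': None}],
        },
    },
}

# Prints:
# # Example
#
# > Example description
#
# ## Index
#
# - [Example](https://example.com)
print(render_llms_txt(data))
```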
/src/crawler.py:
--------------------------------------------------------------------------------
1 | from apify import ProxyConfiguration
2 | from crawlee.crawlers import BeautifulSoupCrawler, BeautifulSoupCrawlingContext
3 |
4 | from src.helpers import get_description_from_soup, get_h1_from_soup, is_description_suitable
5 | from src.mytypes import CrawledPage
6 |
7 | STATUS_CODE_OK = 200
8 |
9 |
10 | async def run_crawler(
11 | url: str, max_crawl_depth: int = 1, max_crawl_pages: int = 50, proxy: ProxyConfiguration | None = None
12 | ) -> list[CrawledPage]:
13 | """Runs the crawler and returns the results."""
14 | results: list[CrawledPage] = []
15 |
16 | async def request_handler(context: BeautifulSoupCrawlingContext) -> None:
17 | links = await context.extract_links()
18 | links = [link for link in links if link.url.startswith(url)]
19 | await context.add_requests(links)
20 |
21 | context.log.info(f'Processing {context.request.url}...')
22 | status_code = context.http_response.status_code
23 | if status_code != STATUS_CODE_OK:
24 | context.log.warning(f'Failed to fetch {context.request.url} with status code {status_code}')
25 | return
26 |
27 |         title = get_h1_from_soup(context.soup) or (context.soup.title.string if context.soup.title else None)
28 | if not title:
29 | context.log.warning(f'No title found for {context.request.url}')
30 | return
31 | description = get_description_from_soup(context.soup)
32 | data: CrawledPage = {
33 | 'url': context.request.url,
34 | 'title': title,
35 | 'description': description if is_description_suitable(description) else None,
36 | }
37 | results.append(data)
38 |
39 | crawler_kwargs = {
40 | 'request_handler': request_handler,
41 | 'max_crawl_depth': max_crawl_depth,
42 | 'max_requests_per_crawl': max_crawl_pages,
43 | }
44 | if proxy is not None:
45 | crawler_kwargs['proxy_configuration'] = proxy
46 |
47 | crawler = BeautifulSoupCrawler(**crawler_kwargs) # type: ignore[arg-type]
48 | await crawler.run([url])
49 |
50 | return results
51 |
--------------------------------------------------------------------------------
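`run_crawler` can also be exercised outside the Actor runtime; a minimal sketch without a proxy configuration (the URL and limits are arbitrary):

```python
import asyncio

from src.crawler import run_crawler


async def demo() -> None:
    # Crawl a few pages under the start URL and show what would feed the renderer.
    pages = await run_crawler('https://docs.apify.com/cli/docs', max_crawl_depth=1, max_crawl_pages=10)
    for page in pages:
        print(page['url'], page['title'], page['description'])


asyncio.run(demo())
```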
/.dockerignore:
--------------------------------------------------------------------------------
1 | .ruff_cache/
2 | .venv
3 |
4 | .git
5 | .mise.toml
6 | .nvim.lua
7 | storage
8 |
9 | # The rest is copied from https://github.com/github/gitignore/blob/main/Python.gitignore
10 |
11 | # Byte-compiled / optimized / DLL files
12 | __pycache__/
13 | *.py[cod]
14 | *$py.class
15 |
16 | # C extensions
17 | *.so
18 |
19 | # Distribution / packaging
20 | .Python
21 | build/
22 | develop-eggs/
23 | dist/
24 | downloads/
25 | eggs/
26 | .eggs/
27 | lib/
28 | lib64/
29 | parts/
30 | sdist/
31 | var/
32 | wheels/
33 | share/python-wheels/
34 | *.egg-info/
35 | .installed.cfg
36 | *.egg
37 | MANIFEST
38 |
39 | # PyInstaller
40 | # Usually these files are written by a python script from a template
41 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
42 | *.manifest
43 | *.spec
44 |
45 | # Installer logs
46 | pip-log.txt
47 | pip-delete-this-directory.txt
48 |
49 | # Unit test / coverage reports
50 | htmlcov/
51 | .tox/
52 | .nox/
53 | .coverage
54 | .coverage.*
55 | .cache
56 | nosetests.xml
57 | coverage.xml
58 | *.cover
59 | *.py,cover
60 | .hypothesis/
61 | .pytest_cache/
62 | cover/
63 |
64 | # Translations
65 | *.mo
66 | *.pot
67 |
68 | # Django stuff:
69 | *.log
70 | local_settings.py
71 | db.sqlite3
72 | db.sqlite3-journal
73 |
74 | # Flask stuff:
75 | instance/
76 | .webassets-cache
77 |
78 | # Scrapy stuff:
79 | .scrapy
80 |
81 | # Sphinx documentation
82 | docs/_build/
83 |
84 | # PyBuilder
85 | .pybuilder/
86 | target/
87 |
88 | # Jupyter Notebook
89 | .ipynb_checkpoints
90 |
91 | # IPython
92 | profile_default/
93 | ipython_config.py
94 |
95 | # pyenv
96 | # For a library or package, you might want to ignore these files since the code is
97 | # intended to run in multiple environments; otherwise, check them in:
98 | .python-version
99 |
100 | # pdm
101 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
102 | #pdm.lock
103 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
104 | # in version control.
105 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
106 | .pdm.toml
107 | .pdm-python
108 | .pdm-build/
109 |
110 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
111 | __pypackages__/
112 |
113 | # Celery stuff
114 | celerybeat-schedule
115 | celerybeat.pid
116 |
117 | # SageMath parsed files
118 | *.sage.py
119 |
120 | # Environments
121 | .env
122 | .venv
123 | env/
124 | venv/
125 | ENV/
126 | env.bak/
127 | venv.bak/
128 |
129 | # Spyder project settings
130 | .spyderproject
131 | .spyproject
132 |
133 | # Rope project settings
134 | .ropeproject
135 |
136 | # mkdocs documentation
137 | /site
138 |
139 | # mypy
140 | .mypy_cache/
141 | .dmypy.json
142 | dmypy.json
143 |
144 | # Pyre type checker
145 | .pyre/
146 |
147 | # pytype static type analyzer
148 | .pytype/
149 |
150 | # Cython debug symbols
151 | cython_debug/
152 |
153 | # PyCharm
154 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
155 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
156 | # and can be added to the global gitignore or merged into this file. For a more nuclear
157 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
158 | .idea/
159 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .ruff_cache
2 | storage
3 |
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | share/python-wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 | cover/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 | db.sqlite3
65 | db.sqlite3-journal
66 |
67 | # Flask stuff:
68 | instance/
69 | .webassets-cache
70 |
71 | # Scrapy stuff:
72 | .scrapy
73 |
74 | # Sphinx documentation
75 | docs/_build/
76 |
77 | # PyBuilder
78 | .pybuilder/
79 | target/
80 |
81 | # Jupyter Notebook
82 | .ipynb_checkpoints
83 |
84 | # IPython
85 | profile_default/
86 | ipython_config.py
87 |
88 | # pyenv
89 | # For a library or package, you might want to ignore these files since the code is
90 | # intended to run in multiple environments; otherwise, check them in:
91 | # .python-version
92 |
93 | # pipenv
94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
97 | # install all needed dependencies.
98 | #Pipfile.lock
99 |
100 | # poetry
101 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
102 | # This is especially recommended for binary packages to ensure reproducibility, and is more
103 | # commonly ignored for libraries.
104 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
105 | #poetry.lock
106 |
107 | # pdm
108 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
109 | #pdm.lock
110 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
111 | # in version control.
112 | # https://pdm.fming.dev/#use-with-ide
113 | .pdm.toml
114 |
115 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
116 | __pypackages__/
117 |
118 | # Celery stuff
119 | celerybeat-schedule
120 | celerybeat.pid
121 |
122 | # SageMath parsed files
123 | *.sage.py
124 |
125 | # Environments
126 | .env
127 | .venv
128 | env/
129 | venv/
130 | ENV/
131 | env.bak/
132 | venv.bak/
133 |
134 | # Spyder project settings
135 | .spyderproject
136 | .spyproject
137 |
138 | # Rope project settings
139 | .ropeproject
140 |
141 | # mkdocs documentation
142 | /site
143 |
144 | # mypy
145 | .mypy_cache/
146 | .dmypy.json
147 | dmypy.json
148 |
149 | # Pyre type checker
150 | .pyre/
151 |
152 | # pytype static type analyzer
153 | .pytype/
154 |
155 | # Cython debug symbols
156 | cython_debug/
157 |
158 | # PyCharm
159 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
160 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
161 | # and can be added to the global gitignore or merged into this file. For a more nuclear
162 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
163 | .idea/
164 |
165 | # docker
166 | volumes/**
167 |
--------------------------------------------------------------------------------
/src/main.py:
--------------------------------------------------------------------------------
1 | """This module defines the main entry point for the llsm.txt generator actor."""
2 |
3 | import logging
4 | from typing import TYPE_CHECKING
5 | from urllib.parse import urlparse
6 |
7 | from apify import Actor
8 |
9 | from src.crawler import run_crawler
10 |
11 | from .helpers import (
12 | clean_llms_data,
13 | get_section_dir_title,
14 | get_url_path,
15 | get_url_path_dir,
16 | is_description_suitable,
17 | normalize_url,
18 | )
19 | from .renderer import render_llms_txt
20 |
21 | if TYPE_CHECKING:
22 | from src.mytypes import LLMSData
23 |
24 | logger = logging.getLogger('apify')
25 |
26 | # sections with fewer than this number of links will be merged into the index section
27 | SECTION_MIN_LINKS = 2
28 |
29 |
30 | async def main() -> None:
31 | """Main entry point for the llms.txt generator actor."""
32 | async with Actor:
33 | actor_input = await Actor.get_input()
34 | url = actor_input.get('startUrl')
35 | if url is None:
36 | msg = 'Missing "startUrl" attribute in input!'
37 | raise ValueError(msg)
38 | url_normalized = normalize_url(url)
39 |
40 | max_crawl_depth = int(actor_input.get('maxCrawlDepth', 1))
41 | max_crawl_pages = int(actor_input.get('maxCrawlPages', 50))
42 |
43 | proxy_config = await Actor.create_proxy_configuration()
44 | results = await run_crawler(
45 | url=url, max_crawl_depth=max_crawl_depth, max_crawl_pages=max_crawl_pages, proxy=proxy_config
46 | )
47 |
48 | hostname = urlparse(url).hostname
49 |         root_title = hostname or url
50 |
51 | data: LLMSData = {'title': root_title, 'description': None, 'details': None, 'sections': {}}
52 | sections = data['sections']
53 |
54 | is_dataset_empty = True
55 | path_titles: dict[str, str] = {}
56 | sections_to_fill_title = []
57 | for item in results:
58 | is_dataset_empty = False
59 | if (item_url := item.get('url')) is None:
60 | logger.warning('Missing "url" attribute in dataset item!')
61 | continue
62 | logger.info(f'Processing page: {item_url}')
63 |
64 | description = item['description']
65 | title = item['title']
66 | path_titles[get_url_path(item_url)] = title
67 |
68 | # handle input root url separately
69 | is_root = normalize_url(item_url) == url_normalized
70 | if is_root:
71 | data['description'] = description if is_description_suitable(description) else None
72 | continue
73 |
74 | section_dir = get_url_path_dir(item_url)
75 | section_title = path_titles.get(section_dir)
76 | if section_dir not in sections:
77 | sections[section_dir] = {'title': section_title or section_dir, 'links': []}
78 | if section_title is None:
79 | sections_to_fill_title.append(section_dir)
80 |
81 | sections[section_dir]['links'].append(
82 | {
83 | 'url': item_url,
84 | 'title': title,
85 | 'description': description if is_description_suitable(description) else None,
86 | }
87 | )
88 |
89 | if is_dataset_empty:
90 | msg = (
91 | 'No pages were crawled successfully!'
92 |                 ' Please check the Actor run logs for more details.'
93 | )
94 | raise RuntimeError(msg)
95 |
96 | for section_dir in sections_to_fill_title:
97 | sections[section_dir]['title'] = get_section_dir_title(section_dir, path_titles)
98 |
99 | # move sections with less than SECTION_MIN_LINKS to the root
100 | clean_llms_data(data, section_min_links=SECTION_MIN_LINKS)
101 | output = render_llms_txt(data)
102 |
103 | # save into kv-store as a file to be able to download it
104 | store = await Actor.open_key_value_store()
105 | await store.set_value('llms.txt', output)
106 | logger.info('Saved the "llms.txt" file into the key-value store!')
107 |
108 | await Actor.push_data({'llms.txt': output})
109 | logger.info('Pushed the "llms.txt" file to the dataset!')
110 |
111 | await Actor.set_status_message('Finished! Saved the "llms.txt" file into the key-value store and dataset...')
112 |
--------------------------------------------------------------------------------
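The grouping step above keys each section by the parent directory of the page URL, so sibling pages land in the same section; a small illustration using the same helper:

```python
from src.helpers import get_url_path_dir

# Sibling pages share a parent directory and therefore a section.
for url in (
    'https://docs.apify.com/cli/docs',
    'https://docs.apify.com/cli/docs/installation',
    'https://docs.apify.com/cli/docs/reference',
):
    print(url, '->', get_url_path_dir(url))
# https://docs.apify.com/cli/docs -> /cli
# https://docs.apify.com/cli/docs/installation -> /cli/docs
# https://docs.apify.com/cli/docs/reference -> /cli/docs
```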
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | authors = [
3 | {name = "Jakub Kopecky", email = "jakub.kopecky@apify.com"},
4 | ]
5 | requires-python = "<4.0,>=3.12"
6 | dependencies = [
7 | "apify<3.0.0,>=2.1.0",
8 | "beautifulsoup4<5.0.0,>=4.12.3",
9 | "crawlee[beautifulsoup]>=0.6.9",
10 | ]
11 | name = "apify-llmstxt-generator"
12 | version = "0.2.0"
13 | description = "/llms.txt generator"
14 | readme = "README.md"
15 |
16 | [dependency-groups]
17 | dev = [
18 | "ruff<1.0.0,>=0.8.6",
19 | "mypy<2.0.0,>=1.14.1",
20 | "types-beautifulsoup4<5.0.0.0,>=4.12.0.20241020",
21 | "pytest<9.0.0,>=8.3.4",
22 | "pytest-asyncio<1.0.0,>=0.25.2",
23 | "pytest-timeout<3.0.0,>=2.3.1",
24 | ]
25 |
26 | [tool.pyright]
27 | venvPath = "."
28 | venv = ".venv"
29 |
30 | [tool.ruff]
31 | line-length = 120
32 |
33 | [tool.ruff.lint]
34 | select = ["ALL"]
35 | ignore = [
36 | "ANN401", # Dynamically typed expressions (typing.Any) are disallowed in {filename}
37 | "ASYNC109", # Async function definition with a `timeout` parameter
38 | "BLE001", # Do not catch blind exception
39 | "C901", # `{name}` is too complex
40 | "COM812", # This rule may cause conflicts when used with the formatter
41 | "D100", # Missing docstring in public module
42 | "D104", # Missing docstring in public package
43 | "D107", # Missing docstring in `__init__`
44 | "EM", # flake8-errmsg
45 | "G004", # Logging statement uses f-string
46 | "ISC001", # This rule may cause conflicts when used with the formatter
47 | "FIX", # flake8-fixme
48 | "PLR0911", # Too many return statements
49 | "PLR0913", # Too many arguments in function definition
50 | "PLR0915", # Too many statements
51 | "PTH", # flake8-use-pathlib
52 | "PYI034", # `__aenter__` methods in classes like `{name}` usually return `self` at runtime
53 | "PYI036", # The second argument in `__aexit__` should be annotated with `object` or `BaseException | None`
54 | "S102", # Use of `exec` detected
55 | "S105", # Possible hardcoded password assigned to
56 | "S106", # Possible hardcoded password assigned to argument: "{name}"
57 | "S301", # `pickle` and modules that wrap it can be unsafe when used to deserialize untrusted data, possible security issue
58 | "S303", # Use of insecure MD2, MD4, MD5, or SHA1 hash function
59 | "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes
60 | "TD002", # Missing author in TODO; try: `# TODO(): ...` or `# TODO @: ...
61 | "TRY003", # Avoid specifying long messages outside the exception class
62 | ]
63 |
64 | [tool.ruff.format]
65 | quote-style = "single"
66 | indent-style = "space"
67 |
68 | [tool.ruff.lint.per-file-ignores]
69 | "**/__init__.py" = [
70 | "F401", # Unused imports
71 | ]
72 | "**/{tests}/*" = [
73 | "D", # Everything from the pydocstyle
74 | "INP001", # File {filename} is part of an implicit namespace package, add an __init__.py
75 | "PLR2004", # Magic value used in comparison, consider replacing {value} with a constant variable
76 | "S101", # Use of assert detected
77 | "SLF001", # Private member accessed: `{name}`
78 | "T20", # flake8-print
79 | "TRY301", # Abstract `raise` to an inner function
80 | ]
81 | "**/{docs}/**" = [
82 | "D", # Everything from the pydocstyle
83 | "INP001", # File {filename} is part of an implicit namespace package, add an __init__.py
84 | "F841", # Local variable {variable} is assigned to but never used
85 | "N999", # Invalid module name
86 | ]
87 |
88 | [tool.ruff.lint.flake8-quotes]
89 | docstring-quotes = "double"
90 | inline-quotes = "single"
91 |
92 | [tool.ruff.lint.flake8-type-checking]
93 | runtime-evaluated-base-classes = [
94 | "pydantic.BaseModel",
95 | "pydantic_settings.BaseSettings",
96 | ]
97 |
98 | [tool.ruff.lint.flake8-builtins]
99 | builtins-ignorelist = ["id"]
100 |
101 | [tool.ruff.lint.pydocstyle]
102 | convention = "google"
103 |
104 | [tool.ruff.lint.pylint]
105 | max-branches = 18
106 |
107 | [tool.pytest.ini_options]
108 | addopts = "-ra"
109 | asyncio_mode = "auto"
110 | asyncio_default_fixture_loop_scope = "function"
111 | timeout = 1200
112 |
113 | [tool.mypy]
114 | python_version = "3.12"
115 | files = ["src", "tests"]
116 | check_untyped_defs = true
117 | disallow_incomplete_defs = true
118 | disallow_untyped_calls = true
119 | disallow_untyped_decorators = true
120 | disallow_untyped_defs = true
121 | no_implicit_optional = true
122 | warn_redundant_casts = true
123 | warn_return_any = true
124 | warn_unreachable = true
125 | warn_unused_ignores = true
126 |
127 | [[tool.mypy.overrides]]
128 | module = "sortedcollections"
129 | ignore_missing_imports = true
--------------------------------------------------------------------------------
/tests/test_renderer.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING
2 |
3 | from src.renderer import render_llms_txt
4 |
5 | if TYPE_CHECKING:
6 | from src.mytypes import LLMSData
7 |
8 |
9 | def test_render_llms_txt() -> None:
10 | data: LLMSData = {
11 | 'title': 'docs.apify.com',
12 | 'details': None,
13 | 'description': None,
14 | 'sections': {
15 | '/': {
16 | 'title': 'Index',
17 | 'links': [
18 | {
19 | 'url': 'https://docs.apify.com/academy',
20 | 'title': 'Web Scraping Academy',
21 | 'description': 'Learn everything about web scraping.',
22 | }
23 | ],
24 | }
25 | },
26 | }
27 |
28 | expected_output = """# docs.apify.com
29 |
30 | ## Index
31 |
32 | - [Web Scraping Academy](https://docs.apify.com/academy): Learn everything about web scraping.
33 |
34 | """
35 |
36 | assert render_llms_txt(data) == expected_output
37 |
38 |
39 | def test_render_llms_txt_with_description() -> None:
40 | data: LLMSData = {
41 | 'title': 'docs.apify.com',
42 | 'description': 'Apify documentation',
43 | 'details': None,
44 | 'sections': {
45 | '/': {
46 | 'title': 'Index',
47 | 'links': [
48 | {
49 | 'url': 'https://docs.apify.com/academy',
50 | 'title': 'Web Scraping Academy',
51 | 'description': 'Learn everything about web scraping.',
52 | }
53 | ],
54 | }
55 | },
56 | }
57 |
58 | expected_output = """# docs.apify.com
59 |
60 | > Apify documentation
61 |
62 | ## Index
63 |
64 | - [Web Scraping Academy](https://docs.apify.com/academy): Learn everything about web scraping.
65 |
66 | """
67 |
68 | assert render_llms_txt(data) == expected_output
69 |
70 |
71 | def test_render_llms_txt_with_description_and_details() -> None:
72 | data: LLMSData = {
73 | 'title': 'docs.apify.com',
74 | 'description': 'Apify documentation',
75 | 'details': 'This is the documentation for Apify',
76 | 'sections': {
77 | '/': {
78 | 'title': 'Index',
79 | 'links': [
80 | {
81 | 'url': 'https://docs.apify.com/academy',
82 | 'title': 'Web Scraping Academy',
83 | 'description': 'Learn everything about web scraping.',
84 | }
85 | ],
86 | }
87 | },
88 | }
89 |
90 | expected_output = """# docs.apify.com
91 |
92 | > Apify documentation
93 |
94 | This is the documentation for Apify
95 |
96 | ## Index
97 |
98 | - [Web Scraping Academy](https://docs.apify.com/academy): Learn everything about web scraping.
99 |
100 | """
101 |
102 | assert render_llms_txt(data) == expected_output
103 |
104 |
105 | def test_render_llms_txt_with_no_sections() -> None:
106 | data: LLMSData = {'title': 'docs.apify.com', 'description': 'Apify documentation', 'details': None, 'sections': {}}
107 |
108 | expected_output = """# docs.apify.com
109 |
110 | > Apify documentation
111 |
112 | """
113 |
114 | assert render_llms_txt(data) == expected_output
115 |
116 |
117 | def test_render_llms_txt_with_multiple_sections() -> None:
118 | data: LLMSData = {
119 | 'title': 'docs.apify.com',
120 | 'description': 'Apify documentation',
121 | 'details': None,
122 | 'sections': {
123 | '/': {
124 | 'title': 'Index',
125 | 'links': [
126 | {
127 | 'url': 'https://docs.apify.com/academy',
128 | 'title': 'Web Scraping Academy',
129 | 'description': 'Learn everything about web scraping.',
130 | }
131 | ],
132 | },
133 | '/guides': {
134 | 'title': 'Guides',
135 | 'links': [
136 | {
137 | 'url': 'https://docs.apify.com/guides/getting-started',
138 | 'title': 'Getting Started',
139 | 'description': 'Learn how to get started with Apify.',
140 | }
141 | ],
142 | },
143 | },
144 | }
145 |
146 | expected_output = """# docs.apify.com
147 |
148 | > Apify documentation
149 |
150 | ## Index
151 |
152 | - [Web Scraping Academy](https://docs.apify.com/academy): Learn everything about web scraping.
153 |
154 | ## Guides
155 |
156 | - [Getting Started](https://docs.apify.com/guides/getting-started): Learn how to get started with Apify.
157 |
158 | """
159 |
160 | assert render_llms_txt(data) == expected_output
161 |
--------------------------------------------------------------------------------
/src/helpers.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import logging
4 | from typing import TYPE_CHECKING
5 | from urllib.parse import urlparse
6 |
7 | import bs4
8 | from bs4.element import NavigableString
9 |
10 | if TYPE_CHECKING:
11 | from src.mytypes import LLMSData
12 |
13 | # not using Actor.log because pytest would then throw a warning
14 | # about a non-existent event loop
15 | logger = logging.getLogger('apify')
16 |
17 |
18 | def get_section_dir_title(section_dir: str, path_titles: dict[str, str]) -> str:
19 | """Gets the title of the section from the path titles."""
20 | current_dir = section_dir
21 | while (parent_title := path_titles.get(current_dir)) is None:
22 | current_dir = current_dir.rsplit('/', 1)[0]
23 | if not current_dir:
24 | parent_title = section_dir
25 | break
26 | return parent_title
27 |
28 |
29 | def get_h1_from_html(html: str) -> str | None:
30 | """Extracts the first h1 tag from the HTML content."""
31 | soup = bs4.BeautifulSoup(html, 'html.parser')
32 | return get_h1_from_soup(soup)
33 |
34 |
35 | def get_h1_from_soup(soup: bs4.BeautifulSoup) -> str | None:
36 | """Extracts the first h1 tag from the BeautifulSoup object."""
37 | h1 = soup.find('h1')
38 | return h1.getText() if h1 else None
39 |
40 |
41 | def clean_llms_data(data: LLMSData, section_min_links: int = 2) -> None:
42 | """Cleans the LLMS data by removing sections with low link count and moving the links to the index section.
43 |
44 | :param data: LLMS data to clean
45 | :param section_min_links: Minimum number of links in a section to keep it
46 | and not move the links to the index section
47 | """
48 | to_remove_sections: set[str] = set()
49 |
50 | if 'sections' not in data:
51 | raise ValueError('Missing "sections" attribute in the LLMS data!')
52 |
53 | sections = data['sections']
54 |
55 | for section_dir, section in sections.items():
56 | # skip the index section
57 | if section_dir == '/':
58 | continue
59 | if len(section['links']) < section_min_links:
60 | to_remove_sections.add(section_dir)
61 |
62 | if to_remove_sections:
63 | if '/' not in sections:
64 | sections['/'] = {'title': 'Index', 'links': []}
65 | for section_dir in to_remove_sections:
66 | sections['/']['links'].extend(sections[section_dir]['links'])
67 | del sections[section_dir]
68 |
69 |
70 | def get_url_path(url: str) -> str:
71 | """Get the path from the URL."""
72 | url_normalized = normalize_url(url)
73 | parsed_url = urlparse(url_normalized)
74 | return parsed_url.path or '/'
75 |
76 |
77 | def get_url_path_dir(url: str) -> str:
78 | """Get the directory path from the URL."""
79 | url_normalized = normalize_url(url)
80 | parsed_url = urlparse(url_normalized)
81 | return parsed_url.path.rsplit('/', 1)[0] or '/'
82 |
83 |
84 | def normalize_url(url: str) -> str:
85 | """Normalizes the URL by removing trailing slash."""
86 | parsed_url = urlparse(url)
87 | normalized = parsed_url._replace(path=parsed_url.path.rstrip('/'))
88 | return normalized.geturl()
89 |
90 |
91 | def get_hostname_path_string_from_url(url: str) -> str:
92 | """Extracts the hostname and path from the URL."""
93 | parsed_url = urlparse(url)
94 | if parsed_url.hostname is None or parsed_url.path is None:
95 | return url
96 | return f'{parsed_url.hostname}{parsed_url.path}'
97 |
98 |
99 | def is_description_suitable(description: str | None) -> bool:
100 | """Checks if the description is suitable for the `llms.txt` file.
101 |
102 |     Currently it only checks that the description does not contain newlines.
103 |     This check was added because of https://docs.apify.com/api/v2,
104 |     a page whose meta description tag contains a whole Markdown document.
105 | """
106 | if description is None:
107 | return False
108 | return '\n' not in description
109 |
110 |
111 | def get_description_from_html(html: str) -> None | str:
112 | """Extracts the description from the HTML content.
113 |
114 | Uses meta 'description' or 'Description' from the html.
115 | """
116 | soup = bs4.BeautifulSoup(html, 'html.parser')
117 | return get_description_from_soup(soup)
118 |
119 |
120 | def get_description_from_soup(soup: bs4.BeautifulSoup) -> None | str:
121 | """Extracts the description from the BeautifulSoup object.
122 |
123 | Uses meta 'description' or 'Description' from the html.
124 | """
125 | description = soup.find('meta', {'name': 'description'})
126 | if description is None:
127 | description = soup.find('meta', {'name': 'Description'})
128 |
129 | if description is None:
130 | return None
131 |
132 | if isinstance(description, NavigableString):
133 | return description.getText()
134 |
135 | content = description.get('content')
136 | if isinstance(content, list):
137 | return ''.join(content)
138 |
139 | return content
140 |
--------------------------------------------------------------------------------
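A few quick checks of the URL and HTML helpers, mirroring the behavior the tests exercise (the HTML snippet is illustrative):

```python
from src.helpers import get_description_from_html, get_url_path, normalize_url

assert normalize_url('https://example.com/docs/') == 'https://example.com/docs'
assert get_url_path('https://example.com') == '/'

html = '<html><head><meta name="description" content="Hello"></head></html>'
assert get_description_from_html(html) == 'Hello'
```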
/README.md:
--------------------------------------------------------------------------------
1 | # /llms.txt Generator 🚀📄
2 |
3 |
4 | [](https://apify.com/jakub.kopecky/llmstxt-generator)
5 | [](https://github.com/apify/actor-llmstxt-generator/stargazers)
6 |
7 | The **/llms.txt Generator** is an Apify Actor that helps you extract essential website content and generate an [/llms.txt](https://llmstxt.org/) file, making your content ready for AI-powered applications such as fine-tuning, indexing, and integrating large language models (LLMs) like GPT-4, ChatGPT, or LLaMA. This Actor leverages the [Website Content Crawler](https://apify.com/apify/website-content-crawler) Actor to perform deep crawls and extract text content from web pages, ensuring comprehensive data collection. The Website Content Crawler is particularly useful because it supports output in multiple formats, including Markdown, which is the format **/llms.txt** uses.
8 |
9 | ## 🌟 What is /llms.txt?
10 |
11 | The **/llms.txt** format is a markdown-based standard for providing AI-friendly content. It contains:
12 |
13 | - **Brief background information** and guidance.
14 | - **Links to additional resources** in markdown format.
15 | - **AI-focused** structure to help coders, researchers, and AI models easily access and use website content.
16 |
17 | Proposed structure:
18 |
19 | ```
20 | # Title
21 |
22 | > Optional description
23 |
24 | Optional details go here
25 |
26 | ## Section name
27 |
28 | - [Link title](https://link_url): Optional link details
29 |
30 | ## Optional
31 |
32 | - [Link title](https://link_url)
33 | ```
34 |
35 | By adding an **/llms.txt** file to your website, you make it easy for AI systems to understand, index, and use your content effectively.
36 |
37 | ---
38 |
39 | ## 🎯 Features of /llms.txt Generator
40 |
41 | Our Actor is designed to simplify and automate the creation of **/llms.txt** files. Here are its key features:
42 |
43 | - **Deep website crawling**: Extracts content from multi-level websites using the powerful [Crawlee](https://crawlee.dev) library and the [Website Content Crawler](https://apify.com/apify/website-content-crawler) Actor.
44 | - **Content extraction**: Retrieves key metadata such as titles, descriptions, and URLs for seamless integration.
45 | - **File generation**: Saves the output in the standardized **/llms.txt** format.
46 | - **Downloadable output**: The **/llms.txt** file can be downloaded from the **key-value store** in the storage section of the Actor run details.
47 | - **Resource management**: Limits the crawler Actor to 4 GB of memory to ensure compatibility with the free tier, which has an 8 GB limit. Note that this may slow down the crawling process.
48 |
49 | ---
50 |
51 | ## 🚀 How it works
52 |
53 | 1. **Input**: Provide the start URL of the website to crawl.
54 | 2. **Configuration**: Set the maximum crawl depth and other options (optional).
55 | 3. **Output**: The Actor generates a structured **/llms.txt** file with extracted content, ready for AI applications.
56 |
57 | ### Input example
58 |
59 | ```json
60 | {
61 | "startUrl": "https://docs.apify.com",
62 | "maxCrawlDepth": 1
63 | }
64 | ```
65 |
66 | ### Output example (/llms.txt)
67 |
68 | ```
69 | # docs.apify.com
70 |
71 | ## Index
72 |
73 | - [Home | Platform | Apify Documentation](https://docs.apify.com/platform): Apify is your one-stop shop for web scraping, data extraction, and RPA. Automate anything you can do manually in a browser.
74 | - [Web Scraping Academy | Academy | Apify Documentation](https://docs.apify.com/academy): Learn everything about web scraping and automation with our free courses that will turn you into an expert scraper developer.
75 | - [Apify Documentation](https://docs.apify.com/api)
76 | - [API scraping | Academy | Apify Documentation](https://docs.apify.com/academy/api-scraping): Learn all about how the professionals scrape various types of APIs with various configurations, parameters, and requirements.
77 | - [API client for JavaScript | Apify Documentation](https://docs.apify.com/api/client/js/)
78 | - [Apify API | Apify Documentation](https://docs.apify.com/api/v2)
79 | - [API client for Python | Apify Documentation](https://docs.apify.com/api/client/python/)
80 | ...
81 |
82 | ```
83 |
84 |
85 | ---
86 |
87 | ## ✨ Why use /llms.txt Generator?
88 |
89 | - **Save time**: Automates the tedious process of extracting, formatting, and organizing web content.
90 | - **Boost AI performance**: Provides clean, structured data for LLMs and AI-powered tools.
91 | - **Future-proof**: Follows a standardized format that’s gaining adoption in the AI community.
92 | - **User-friendly**: Easy integration into customer-facing products, allowing users to generate **/llms.txt** files effortlessly.
93 |
94 | ## 📖 Learn more
95 |
96 | - [Apify platform](https://apify.com)
97 | - [Apify SDK documentation](https://docs.apify.com/sdk/python)
98 | - [Crawlee library](https://crawlee.dev)
99 | - [/llms.txt proposal](https://llmstxt.org/)
100 |
101 | ---
102 |
103 | Start generating **/llms.txt** files today and empower your AI applications with clean, structured, and AI-friendly data! 🌐🤖
104 |
--------------------------------------------------------------------------------
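As the README notes, the generated file ends up in the run's key-value store; a sketch of fetching it with the Apify Python client (the token is a placeholder, and `apify-client` is assumed to be installed):

```python
from apify_client import ApifyClient

client = ApifyClient('<YOUR_APIFY_TOKEN>')  # placeholder token
run = client.actor('jakub.kopecky/llmstxt-generator').call(run_input={'startUrl': 'https://docs.apify.com'})
record = client.key_value_store(run['defaultKeyValueStoreId']).get_record('llms.txt')
print(record['value'])
```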
/tests/test_helpers.py:
--------------------------------------------------------------------------------
1 | from typing import TYPE_CHECKING
2 |
3 | from src.helpers import (
4 | clean_llms_data,
5 | get_h1_from_html,
6 | get_hostname_path_string_from_url,
7 | get_section_dir_title,
8 | get_url_path,
9 | get_url_path_dir,
10 | normalize_url,
11 | )
12 |
13 | if TYPE_CHECKING:
14 | from src.mytypes import LLMSData
15 |
16 |
17 | def test_get_section_dir_title() -> None:
18 | path_titles = {
19 | '/dir': 'Directory',
20 | '/dir/subdir': 'Subdirectory',
21 | '/dir/subdir/page': 'Page',
22 | }
23 |
24 | # Test case 1: Exact match in path_titles
25 | section_dir = '/dir/subdir'
26 | assert get_section_dir_title(section_dir, path_titles) == 'Subdirectory'
27 |
28 | # Test case 2: No exact match, but parent directory match
29 | section_dir2 = '/dir/subdir/page/subpage'
30 | assert get_section_dir_title(section_dir2, path_titles) == 'Page'
31 |
32 | # Test case 3: No match at all, should return the original section_dir
33 | section_dir3 = '/unknown/path'
34 | assert get_section_dir_title(section_dir3, path_titles) == '/unknown/path'
35 |
36 | # Test case 4: Root directory match
37 | section_dir4 = '/dir'
38 | assert get_section_dir_title(section_dir4, path_titles) == 'Directory'
39 |
40 | # Test case 5: Empty section_dir
41 | section_dir5 = ''
42 | assert get_section_dir_title(section_dir5, path_titles) == ''
43 |
44 |
45 | def test_get_url_path() -> None:
46 | url = 'https://example.com/path'
47 | assert get_url_path(url) == '/path'
48 |
49 | url2 = 'https://example.com/path/'
50 | assert get_url_path(url2) == '/path'
51 |
52 | url3 = 'https://example.com/'
53 | assert get_url_path(url3) == '/'
54 |
55 | url4 = 'https://example.com/dir/page'
56 | assert get_url_path(url4) == '/dir/page'
57 |
58 | url5 = 'https://example.com'
59 | assert get_url_path(url5) == '/'
60 |
61 |
62 | def test_get_h1_from_html() -> None:
63 | # single h1 tag
64 |     html = '<html><body><h1>Example</h1></body></html>'
65 |     assert get_h1_from_html(html) == 'Example'
66 |
67 |     # multiple h1 tags
68 |     # only the first one should be returned
69 |     html2 = '<html><body><h1>Example</h1><h1>Example 2</h1></body></html>'
70 |     assert get_h1_from_html(html2) == 'Example'
71 |
72 |     # no h1 tags
73 |     html3 = '<html><body><p>Example</p></body></html>'
74 |     assert get_h1_from_html(html3) is None
75 |
76 |     # nested h1 tag
77 |     html4 = '<html><body><div><h1>Example</h1></div></body></html>'
78 |     assert get_h1_from_html(html4) == 'Example'
79 |
80 |
81 | def test_clean_llms_data() -> None:
82 | # Test case 1: Normal case where sections with fewer links are moved to index
83 | data: LLMSData = {
84 | 'title': 'Test LLMS',
85 | 'description': None,
86 | 'details': None,
87 | 'sections': {
88 | 'section_1': {
89 | 'title': 'Section 1',
90 | 'links': [{'url': 'http://example.com', 'title': 'Example', 'description': None}],
91 | },
92 | 'section_2': {
93 | 'title': 'Section 2',
94 | 'links': [
95 | {'url': 'http://example2.com', 'title': 'Example 2', 'description': None},
96 | {'url': 'http://example3.com', 'title': 'Example 3', 'description': None},
97 | ],
98 | },
99 | },
100 | }
101 |
102 | clean_llms_data(data, section_min_links=2)
103 |
104 | assert 'section_1' not in data['sections'] # Section 1 should be removed
105 | assert 'section_2' in data['sections'] # Section 2 should remain
106 | assert '/' in data['sections'] # Index section should be created
107 | assert len(data['sections']['/']['links']) == 1 # The link from section_1 should be moved to index
108 |
109 | # Test case 2: If all sections meet the minimum link count, nothing changes
110 | data2: LLMSData = {
111 | 'title': 'Test LLMS',
112 | 'description': None,
113 | 'details': None,
114 | 'sections': {
115 | 'section_1': {
116 | 'title': 'Section 1',
117 | 'links': [
118 | {'url': 'http://example.com', 'title': 'Example', 'description': None},
119 | {'url': 'http://example2.com', 'title': 'Example 2', 'description': None},
120 | ],
121 | }
122 | },
123 | }
124 |
125 | clean_llms_data(data2, section_min_links=2)
126 |
127 | assert 'section_1' in data2['sections'] # Section 1 should remain
128 | assert '/' not in data2['sections'] # Index section should not be created
129 |
130 |     # Test case 3: Empty sections dictionary
131 | data4: LLMSData = {'title': 'Empty LLMS', 'description': None, 'details': None, 'sections': {}}
132 |
133 | clean_llms_data(data4, section_min_links=2)
134 |
135 | assert data4['sections'] == {} # Sections should remain empty
136 |
137 |     # Test case 4: Sections already have an index section
138 | data5: LLMSData = {
139 | 'title': 'LLMS with Index',
140 | 'description': None,
141 | 'details': None,
142 | 'sections': {
143 | '/': {'title': 'Index', 'links': [{'url': 'http://index.com', 'title': 'Index Link', 'description': None}]},
144 | 'section_1': {
145 | 'title': 'Section 1',
146 | 'links': [{'url': 'http://example.com', 'title': 'Example', 'description': None}],
147 | },
148 | },
149 | }
150 |
151 | clean_llms_data(data5, section_min_links=2)
152 |
153 | assert 'section_1' not in data5['sections'] # Section 1 should be removed
154 | assert '/' in data5['sections'] # Index should remain
155 | assert len(data5['sections']['/']['links']) == 2 # Index should now contain both the old and new links
156 |
157 |
158 | def test_get_url_path_dir() -> None:
159 | url = 'https://example.com/dir/subdir/page'
160 | _dir = '/dir/subdir'
161 | assert get_url_path_dir(url) == _dir
162 |
163 | url2 = 'https://example.com/page'
164 | _dir2 = '/'
165 | assert get_url_path_dir(url2) == _dir2
166 |
167 | url3 = 'https://example.com/dir/page/'
168 | _dir3 = '/dir'
169 | assert get_url_path_dir(url3) == _dir3
170 |
171 | url4 = 'https://example.com'
172 | assert get_url_path_dir(url4) == '/'
173 |
174 |
175 | def test_normalize_url() -> None:
176 | url = 'https://example.com/'
177 | url_normalized = 'https://example.com'
178 | assert normalize_url(url) == url_normalized
179 |
180 | url2 = 'https://example.com/dir/page'
181 | url2_normalized = 'https://example.com/dir/page'
182 | assert normalize_url(url2) == url2_normalized
183 |
184 | url3 = 'https://example.com/dir/page/'
185 | url3_normalized = 'https://example.com/dir/page'
186 | assert normalize_url(url3) == url3_normalized
187 |
188 |
189 | def test_get_hostname_path_string_from_url() -> None:
190 | url = 'https://example.com/path'
191 | assert get_hostname_path_string_from_url(url) == 'example.com/path'
192 |
193 | url2 = 'https://example.com/path/'
194 | assert get_hostname_path_string_from_url(url2) == 'example.com/path/'
195 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2025 Apify Technologies s.r.o.
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------