├── apis
│   ├── __init__.py
│   ├── onlyfans
│   │   ├── __init__.py
│   │   ├── decorators
│   │   │   └── decorators.py
│   │   ├── classes
│   │   │   ├── __init__.py
│   │   │   ├── create_highlight.py
│   │   │   ├── create_story.py
│   │   │   ├── create_message.py
│   │   │   ├── create_post.py
│   │   │   ├── extras.py
│   │   │   └── create_auth.py
│   │   └── onlyfans.py
│   ├── starsavn
│   │   └── __init__.py
│   └── api_helper.py
├── tests
│   ├── __init__.py
│   └── main_test.py
├── datascraper
│   ├── __init__.py
│   └── main_datascraper.py
├── .dockerignore
├── database
│   ├── databases
│   │   └── user_data
│   │       ├── alembic
│   │       │   ├── README
│   │       │   ├── script.py.mako
│   │       │   ├── env.py
│   │       │   └── versions
│   │       │       └── 5493253cc03c_content.py
│   │       ├── test_user_database.db
│   │       ├── models
│   │       │   ├── api_table.py
│   │       │   └── media_table.py
│   │       ├── user_database.py
│   │       └── alembic.ini
│   ├── archived_databases
│   │   ├── posts
│   │   │   ├── test_posts.db
│   │   │   ├── posts.py
│   │   │   ├── alembic
│   │   │   │   ├── script.py.mako
│   │   │   │   ├── versions
│   │   │   │   │   ├── 5b4bea08c27f_content.py
│   │   │   │   │   ├── 6b1b10eb67de_content.py
│   │   │   │   │   ├── 990fc1108317_content.py
│   │   │   │   │   ├── a918b6b05d2f_content.py
│   │   │   │   │   └── 194e05269f09_content.py
│   │   │   │   └── env.py
│   │   │   └── alembic.ini
│   │   ├── stories
│   │   │   ├── test_stories.db
│   │   │   ├── stories.py
│   │   │   ├── alembic
│   │   │   │   ├── script.py.mako
│   │   │   │   ├── versions
│   │   │   │   │   ├── ebc3f4bb0782_content.py
│   │   │   │   │   ├── 29f675c35eee_content.py
│   │   │   │   │   ├── e0c73f066547_content.py
│   │   │   │   │   ├── 2e4f8364f7e2_content.py
│   │   │   │   │   └── 3076beb33c1b_content.py
│   │   │   │   └── env.py
│   │   │   └── alembic.ini
│   │   └── messages
│   │       ├── test_messages.db
│   │       ├── messages.py
│   │       ├── alembic
│   │       │   ├── script.py.mako
│   │       │   ├── versions
│   │       │   │   ├── aeb9fe314556_content.py
│   │       │   │   ├── bf20242a238f_content.py
│   │       │   │   ├── d0118d8ec0b4_content.py
│   │       │   │   ├── 7c1c6e101059_content.py
│   │       │   │   └── 2c36fcc0b921_content.py
│   │       │   └── env.py
│   │       └── alembic.ini
│   └── extras
│       └── db_upgrader
│           └── start.py
├── .gitattributes
├── examples
│   ├── 1.png
│   ├── 2.png
│   ├── 3.png
│   ├── 4.png
│   ├── 5.png
│   └── 64255399-96a86700-cf21-11e9-8c62-87a483f33701.png
├── extras
│   ├── OFRenamer
│   │   ├── extra_classes
│   │   │   └── make_settings.py
│   │   ├── .gitignore
│   │   └── start_ofr.py
│   ├── OFSorter
│   │   └── ofsorter.py
│   └── OFLogin
│       └── start_ofl.py
├── Dockerfile
├── requirements.txt
├── classes
│   ├── prepare_download.py
│   ├── prepare_webhooks.py
│   ├── make_settings.py
│   └── prepare_metadata.py
├── updater.py
├── start_ofd.py
├── .gitignore
├── helpers
│   └── db_helper.py
└── README.md
/apis/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/datascraper/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/apis/onlyfans/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/apis/starsavn/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/apis/onlyfans/decorators/decorators.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.dockerignore:
-------------------------------------------------------------------------------- 1 | .git 2 | .logs 3 | .settings 4 | .sites 5 | __pycache__ -------------------------------------------------------------------------------- /database/databases/user_data/alembic/README: -------------------------------------------------------------------------------- 1 | Generic single-database configuration. -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /apis/onlyfans/classes/__init__.py: -------------------------------------------------------------------------------- 1 | from apis.onlyfans.classes.create_auth import create_auth 2 | -------------------------------------------------------------------------------- /examples/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GhostSecurityTeamIO/OnlyFans-DataScraper/HEAD/examples/1.png -------------------------------------------------------------------------------- /examples/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GhostSecurityTeamIO/OnlyFans-DataScraper/HEAD/examples/2.png -------------------------------------------------------------------------------- /examples/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GhostSecurityTeamIO/OnlyFans-DataScraper/HEAD/examples/3.png -------------------------------------------------------------------------------- /examples/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GhostSecurityTeamIO/OnlyFans-DataScraper/HEAD/examples/4.png -------------------------------------------------------------------------------- /examples/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GhostSecurityTeamIO/OnlyFans-DataScraper/HEAD/examples/5.png -------------------------------------------------------------------------------- /extras/OFRenamer/extra_classes/make_settings.py: -------------------------------------------------------------------------------- 1 | class config(object): 2 | def __init__(self, ofd_directory=""): 3 | self.ofd_directory = ofd_directory 4 | -------------------------------------------------------------------------------- /database/archived_databases/posts/test_posts.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GhostSecurityTeamIO/OnlyFans-DataScraper/HEAD/database/archived_databases/posts/test_posts.db -------------------------------------------------------------------------------- /database/archived_databases/stories/test_stories.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GhostSecurityTeamIO/OnlyFans-DataScraper/HEAD/database/archived_databases/stories/test_stories.db -------------------------------------------------------------------------------- /database/databases/user_data/test_user_database.db: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/GhostSecurityTeamIO/OnlyFans-DataScraper/HEAD/database/databases/user_data/test_user_database.db -------------------------------------------------------------------------------- /database/archived_databases/messages/test_messages.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GhostSecurityTeamIO/OnlyFans-DataScraper/HEAD/database/archived_databases/messages/test_messages.db -------------------------------------------------------------------------------- /examples/64255399-96a86700-cf21-11e9-8c62-87a483f33701.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GhostSecurityTeamIO/OnlyFans-DataScraper/HEAD/examples/64255399-96a86700-cf21-11e9-8c62-87a483f33701.png -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3-slim 2 | 3 | WORKDIR /usr/src/app 4 | 5 | COPY requirements.txt ./ 6 | RUN pip install --no-cache-dir -r requirements.txt 7 | 8 | COPY . . 9 | 10 | CMD [ "python", "./start_ofd.py" ] 11 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | beautifulsoup4 3 | urllib3 4 | win32-setctime 5 | python-socks[asyncio] 6 | psutil 7 | python-dateutil 8 | lxml 9 | mergedeep 10 | jsonpickle 11 | ujson 12 | sqlalchemy==1.4.20 13 | alembic 14 | tqdm>=4.62.0 15 | selenium 16 | selenium-wire==2.1.2 17 | user_agent 18 | aiohttp 19 | aiohttp_socks 20 | -------------------------------------------------------------------------------- /classes/prepare_download.py: -------------------------------------------------------------------------------- 1 | # Holds one user's download summary (profile link, avatar, post count) for reporting and webhooks.
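# [Editor's note] A minimal usage sketch for the class below. The values and
# the shapes assigned to `user`/`others` are illustrative assumptions, not
# taken from the scraper's real call sites:
#
#     from classes.prepare_download import start
#
#     summary = start(
#         username="example_creator",
#         link="https://onlyfans.com/example_creator",
#         image_url="https://example.com/avatar.jpg",
#         post_count=42,
#         webhook=True,
#     )
#     summary.user = {"id": 123}  # hypothetical payload
#     print(summary.username, summary.post_count)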
2 | class start(object): 3 | def __init__(self, username="", link="", image_url="", post_count=0, webhook=True): 4 | self.username = username 5 | self.link = link 6 | self.image_url = image_url 7 | self.post_count = post_count 8 | self.webhook = webhook 9 | self.user = {} 10 | self.others = [] 11 | -------------------------------------------------------------------------------- /database/archived_databases/posts/posts.py: -------------------------------------------------------------------------------- 1 | ### posts.py ### 2 | 3 | # type: ignore 4 | from database.databases.user_data.models.api_table import api_table 5 | from database.databases.user_data.models.media_table import template_media_table 6 | from sqlalchemy.orm import declarative_base 7 | 8 | Base = declarative_base() 9 | 10 | 11 | class api_table(api_table, Base): 12 | api_table.__tablename__ = "posts" 13 | 14 | 15 | class template_media_table(template_media_table, Base): 16 | pass 17 | -------------------------------------------------------------------------------- /database/archived_databases/stories/stories.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | ### stories.py ### 3 | 4 | from sqlalchemy.orm import declarative_base 5 | from database.databases.user_data.models.api_table import api_table 6 | from database.databases.user_data.models.media_table import template_media_table 7 | 8 | Base = declarative_base() 9 | 10 | 11 | class api_table(api_table, Base): 12 | api_table.__tablename__ = "stories" 13 | 14 | 15 | class template_media_table(template_media_table, Base): 16 | pass 17 | -------------------------------------------------------------------------------- /database/archived_databases/messages/messages.py: -------------------------------------------------------------------------------- 1 | ### messages.py ### 2 | 3 | # type: ignore 4 | from database.databases.user_data.models.api_table import api_table 5 | from database.databases.user_data.models.media_table import template_media_table 6 | from sqlalchemy.orm import declarative_base 7 | 8 | Base = declarative_base() 9 | 10 | 11 | class api_table(api_table, Base): 12 | api_table.__tablename__ = "messages" 13 | 14 | 15 | class template_media_table(template_media_table, Base): 16 | pass 17 | -------------------------------------------------------------------------------- /apis/onlyfans/classes/create_highlight.py: -------------------------------------------------------------------------------- 1 | class create_highlight: 2 | def __init__(self, option={}) -> None: 3 | self.id: int = option.get("id") 4 | self.userId: int = option.get("userId") 5 | self.title: str = option.get("title") 6 | self.coverStoryId: int = option.get("coverStoryId") 7 | self.cover: str = option.get("cover") 8 | self.storiesCount: int = option.get("storiesCount") 9 | self.createdAt: str = option.get("createdAt") 10 | self.stories: list = option.get("stories") 11 | -------------------------------------------------------------------------------- /classes/prepare_webhooks.py: -------------------------------------------------------------------------------- 1 | class discord(object): 2 | def __init__(self): 3 | self.embeds = [] 4 | 5 | class embed(object): 6 | def __init__(self): 7 | class image(object): 8 | def __init__(self): 9 | self.url = "" 10 | self.title = "" 11 | self.fields = [] 12 | self.image = image() 13 | 14 | def add_field(self, name, value="", inline=True): 15 | field = {} 16 | field["name"] = name 17 | field["value"] = value 18 |
field["inline"] = inline 19 | self.fields.append(field) 20 | -------------------------------------------------------------------------------- /database/databases/user_data/alembic/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision | comma,n} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | ${imports if imports else ""} 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = ${repr(up_revision)} 14 | down_revision = ${repr(down_revision)} 15 | branch_labels = ${repr(branch_labels)} 16 | depends_on = ${repr(depends_on)} 17 | 18 | 19 | def upgrade(): 20 | ${upgrades if upgrades else "pass"} 21 | 22 | 23 | def downgrade(): 24 | ${downgrades if downgrades else "pass"} 25 | -------------------------------------------------------------------------------- /database/archived_databases/posts/alembic/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision | comma,n} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | ${imports if imports else ""} 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = ${repr(up_revision)} 14 | down_revision = ${repr(down_revision)} 15 | branch_labels = ${repr(branch_labels)} 16 | depends_on = ${repr(depends_on)} 17 | 18 | 19 | def upgrade(): 20 | ${upgrades if upgrades else "pass"} 21 | 22 | 23 | def downgrade(): 24 | ${downgrades if downgrades else "pass"} 25 | -------------------------------------------------------------------------------- /database/archived_databases/stories/alembic/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision | comma,n} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | ${imports if imports else ""} 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = ${repr(up_revision)} 14 | down_revision = ${repr(down_revision)} 15 | branch_labels = ${repr(branch_labels)} 16 | depends_on = ${repr(depends_on)} 17 | 18 | 19 | def upgrade(): 20 | ${upgrades if upgrades else "pass"} 21 | 22 | 23 | def downgrade(): 24 | ${downgrades if downgrades else "pass"} 25 | -------------------------------------------------------------------------------- /database/archived_databases/messages/alembic/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision | comma,n} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | ${imports if imports else ""} 11 | 12 | # revision identifiers, used by Alembic. 
13 | revision = ${repr(up_revision)} 14 | down_revision = ${repr(down_revision)} 15 | branch_labels = ${repr(branch_labels)} 16 | depends_on = ${repr(depends_on)} 17 | 18 | 19 | def upgrade(): 20 | ${upgrades if upgrades else "pass"} 21 | 22 | 23 | def downgrade(): 24 | ${downgrades if downgrades else "pass"} 25 | -------------------------------------------------------------------------------- /database/archived_databases/messages/alembic/versions/aeb9fe314556_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: aeb9fe314556 5 | Revises: d0118d8ec0b4 6 | Create Date: 2021-02-14 19:56:59.175268 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'aeb9fe314556' 15 | down_revision = 'd0118d8ec0b4' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.add_column(sa.Column('linked', sa.String(), nullable=True)) 24 | 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | with op.batch_alter_table('medias', schema=None) as batch_op: 31 | batch_op.drop_column('linked') 32 | 33 | # ### end Alembic commands ### 34 | -------------------------------------------------------------------------------- /database/archived_databases/posts/alembic/versions/5b4bea08c27f_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: 5b4bea08c27f 5 | Revises: 194e05269f09 6 | Create Date: 2021-02-04 02:59:05.010106 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '5b4bea08c27f' 15 | down_revision = '194e05269f09' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.add_column(sa.Column('preview', sa.Integer(), nullable=True)) 24 | 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | with op.batch_alter_table('medias', schema=None) as batch_op: 31 | batch_op.drop_column('preview') 32 | 33 | # ### end Alembic commands ### 34 | -------------------------------------------------------------------------------- /database/archived_databases/posts/alembic/versions/6b1b10eb67de_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: 6b1b10eb67de 5 | Revises: 5b4bea08c27f 6 | Create Date: 2021-02-14 19:56:56.267261 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '6b1b10eb67de' 15 | down_revision = '5b4bea08c27f' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! 
### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.add_column(sa.Column('linked', sa.String(), nullable=True)) 24 | 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | with op.batch_alter_table('medias', schema=None) as batch_op: 31 | batch_op.drop_column('linked') 32 | 33 | # ### end Alembic commands ### 34 | -------------------------------------------------------------------------------- /database/archived_databases/posts/alembic/versions/990fc1108317_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: 990fc1108317 5 | Revises: a918b6b05d2f 6 | Create Date: 2021-06-20 12:42:34.173918 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '990fc1108317' 15 | down_revision = 'a918b6b05d2f' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.add_column(sa.Column('api_type', sa.String(), nullable=True)) 24 | 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | with op.batch_alter_table('medias', schema=None) as batch_op: 31 | batch_op.drop_column('api_type') 32 | 33 | # ### end Alembic commands ### 34 | -------------------------------------------------------------------------------- /database/archived_databases/stories/alembic/versions/ebc3f4bb0782_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: ebc3f4bb0782 5 | Revises: 29f675c35eee 6 | Create Date: 2021-02-14 19:56:54.040372 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'ebc3f4bb0782' 15 | down_revision = '29f675c35eee' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.add_column(sa.Column('linked', sa.String(), nullable=True)) 24 | 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | with op.batch_alter_table('medias', schema=None) as batch_op: 31 | batch_op.drop_column('linked') 32 | 33 | # ### end Alembic commands ### 34 | -------------------------------------------------------------------------------- /database/archived_databases/messages/alembic/versions/bf20242a238f_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: bf20242a238f 5 | Revises: 7c1c6e101059 6 | Create Date: 2021-06-20 12:42:35.578665 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'bf20242a238f' 15 | down_revision = '7c1c6e101059' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! 
### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.add_column(sa.Column('api_type', sa.String(), nullable=True)) 24 | 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | with op.batch_alter_table('medias', schema=None) as batch_op: 31 | batch_op.drop_column('api_type') 32 | 33 | # ### end Alembic commands ### 34 | -------------------------------------------------------------------------------- /database/archived_databases/messages/alembic/versions/d0118d8ec0b4_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: d0118d8ec0b4 5 | Revises: 2c36fcc0b921 6 | Create Date: 2021-02-04 02:59:06.516503 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'd0118d8ec0b4' 15 | down_revision = '2c36fcc0b921' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.add_column(sa.Column('preview', sa.Integer(), nullable=True)) 24 | 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | with op.batch_alter_table('medias', schema=None) as batch_op: 31 | batch_op.drop_column('preview') 32 | 33 | # ### end Alembic commands ### 34 | -------------------------------------------------------------------------------- /database/archived_databases/stories/alembic/versions/29f675c35eee_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: 29f675c35eee 5 | Revises: 3076beb33c1b 6 | Create Date: 2021-02-04 02:59:01.746229 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '29f675c35eee' 15 | down_revision = '3076beb33c1b' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.add_column(sa.Column('preview', sa.Integer(), nullable=True)) 24 | 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | with op.batch_alter_table('medias', schema=None) as batch_op: 31 | batch_op.drop_column('preview') 32 | 33 | # ### end Alembic commands ### 34 | -------------------------------------------------------------------------------- /database/archived_databases/stories/alembic/versions/e0c73f066547_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: e0c73f066547 5 | Revises: 2e4f8364f7e2 6 | Create Date: 2021-06-20 12:42:31.056065 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'e0c73f066547' 15 | down_revision = '2e4f8364f7e2' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! 
### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.add_column(sa.Column('api_type', sa.String(), nullable=True)) 24 | 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | with op.batch_alter_table('medias', schema=None) as batch_op: 31 | batch_op.drop_column('api_type') 32 | 33 | # ### end Alembic commands ### 34 | -------------------------------------------------------------------------------- /extras/OFSorter/ofsorter.py: -------------------------------------------------------------------------------- 1 | import os 2 | from itertools import chain 3 | import shutil 4 | import filecmp 5 | 6 | 7 | def sorter(user_directory, api_type, location, metadata): 8 | legacy_directory = os.path.join(user_directory, api_type, location) 9 | if not os.path.isdir(legacy_directory): 10 | return 11 | legacy_files = os.listdir(legacy_directory) 12 | metadata_directory = os.path.join( 13 | user_directory, "Metadata", api_type+".json") 14 | results = list(chain(*metadata["valid"])) 15 | for result in results: 16 | legacy_filepath = os.path.join(legacy_directory, result["filename"]) 17 | filepath = os.path.join(result["directory"], result["filename"]) 18 | if result["filename"] in legacy_files: 19 | if os.path.isfile(filepath): 20 | same_file = filecmp.cmp( 21 | legacy_filepath, filepath, shallow=False) 22 | if same_file: 23 | os.remove(filepath) 24 | else: 25 | os.remove(legacy_filepath) 26 | continue 27 | shutil.move(legacy_filepath, filepath) 28 | if not os.listdir(legacy_directory): 29 | os.removedirs(legacy_directory) 30 | -------------------------------------------------------------------------------- /database/databases/user_data/models/api_table.py: -------------------------------------------------------------------------------- 1 | ### api_table.py ### 2 | 3 | from datetime import datetime 4 | from typing import cast 5 | 6 | import sqlalchemy 7 | from sqlalchemy.orm import declarative_base # type: ignore 8 | 9 | LegacyBase = declarative_base() 10 | 11 | 12 | class api_table: 13 | __tablename__ = "" 14 | id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True) 15 | post_id = sqlalchemy.Column(sqlalchemy.Integer, unique=True, nullable=False) 16 | text = sqlalchemy.Column(sqlalchemy.String) 17 | price = cast(int, sqlalchemy.Column(sqlalchemy.Integer)) 18 | paid = sqlalchemy.Column(sqlalchemy.Integer) 19 | archived = cast(bool, sqlalchemy.Column(sqlalchemy.Boolean, default=False)) 20 | created_at = cast(datetime, sqlalchemy.Column(sqlalchemy.TIMESTAMP)) 21 | 22 | def legacy(self, table_name): 23 | class legacy_api_table(LegacyBase): 24 | __tablename__ = table_name 25 | id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True) 26 | text = sqlalchemy.Column(sqlalchemy.String) 27 | price = sqlalchemy.Column(sqlalchemy.Integer) 28 | paid = sqlalchemy.Column(sqlalchemy.Integer) 29 | created_at = sqlalchemy.Column(sqlalchemy.DATETIME) 30 | 31 | return legacy_api_table 32 | -------------------------------------------------------------------------------- /updater.py: -------------------------------------------------------------------------------- 1 | import io 2 | import requests 3 | from zipfile import ZipFile 4 | import os 5 | from pathlib import Path 6 | import shutil 7 | import time 8 | 9 | # API request limit is around 30, so it fails 10 | # local_commit = "2ca95beec5dd526b9b825497dc6227aafbaf67ad" 11 | # response = 
requests.get("https://api.github.com/repos/digitalcriminal/onlyfans/branches/master") 12 | # response_json = response.json() 13 | # commit_id = response_json["commit"]["sha"] 14 | # downloaded = requests.get(f"https://github.com/DIGITALCRIMINAL/OnlyFans/archive/{commit_id}.zip") 15 | downloaded = requests.get( 16 | f"https://github.com/DIGITALCRIMINAL/OnlyFans/archive/refs/heads/master.zip" 17 | ) 18 | content = io.BytesIO(downloaded.content) 19 | # Zip download for manual extraction 20 | # download_path = "OnlyFans DataScraper.zip" 21 | # with open(download_path, "wb") as f: 22 | # f.write(downloaded.content) 23 | with ZipFile(content, "r") as zipObject: 24 | listOfFileNames = zipObject.namelist() 25 | root = listOfFileNames[0] 26 | zipObject.extractall() 27 | all_files = [] 28 | for root, subdirs, files in os.walk(root): 29 | x = [os.path.join(root, x) for x in files] 30 | all_files.extend(x) 31 | for filepath in all_files: 32 | filepath = os.path.normpath(filepath) 33 | parents = Path(filepath).parents 34 | p = Path(filepath).parts[0] 35 | renamed = os.path.relpath(filepath, p) 36 | folder = os.path.dirname(renamed) 37 | if folder: 38 | os.makedirs(os.path.dirname(renamed), exist_ok=True) 39 | q = shutil.move(filepath, renamed) 40 | print 41 | print(f"Script has been updated, exiting in 5 seconds") 42 | time.sleep(5) 43 | -------------------------------------------------------------------------------- /database/extras/db_upgrader/start.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | 5 | def start(): 6 | print 7 | 8 | try: 9 | if __name__ == "__main__": 10 | cwd = os.getcwd() 11 | cwd2 = os.path.dirname(__file__) 12 | if cwd == cwd2: 13 | x = os.path.realpath('../../../') 14 | else: 15 | x = os.path.realpath('') 16 | sys.path.insert(0, x) 17 | while True: 18 | from helpers.db_helper import database_collection, run_revisions 19 | db_collection = database_collection() 20 | key_list = db_collection.__dict__.items() 21 | key_list = list(key_list) 22 | string = f"" 23 | count = 0 24 | for key, item in key_list: 25 | print 26 | string += f"{str(count)} = {key} | " 27 | count += 1 28 | print(string) 29 | x = input() 30 | # x = 0 31 | x = int(x) 32 | database_path = None 33 | module = key_list[x][1] 34 | if module: 35 | api_type = os.path.basename(module.__file__) 36 | database_path = module.__file__ 37 | filename = f"test_{api_type}" 38 | filename = filename.replace("py", "db") 39 | database_directory = os.path.dirname(database_path) 40 | final_database_path = os.path.join(database_directory, filename) 41 | alembic_directory = database_directory 42 | run_revisions(alembic_directory, final_database_path) 43 | print("DONE") 44 | else: 45 | print("Failed") 46 | except Exception as e: 47 | input(e) 48 | -------------------------------------------------------------------------------- /database/databases/user_data/user_database.py: -------------------------------------------------------------------------------- 1 | ### messages.py ### 2 | 3 | import copy 4 | import datetime 5 | from typing import cast 6 | 7 | import sqlalchemy 8 | from database.databases.user_data.models.api_table import api_table 9 | from database.databases.user_data.models.media_table import \ 10 | template_media_table 11 | from sqlalchemy.orm.decl_api import declarative_base 12 | from sqlalchemy.sql.schema import Column, ForeignKey, Table 13 | from sqlalchemy.sql.sqltypes import Integer 14 | 15 | Base = declarative_base() 16 | LegacyBase = declarative_base() 
17 | 18 | 19 | # class user_table(api_table,Base): 20 | # api_table.__tablename__ = "user_table" 21 | 22 | 23 | class stories_table(api_table, Base): 24 | api_table.__tablename__ = "stories" 25 | 26 | 27 | class posts_table(api_table, Base): 28 | api_table.__tablename__ = "posts" 29 | 30 | 31 | class messages_table(api_table, Base): 32 | api_table.__tablename__ = "messages" 33 | user_id = cast(int, sqlalchemy.Column(sqlalchemy.Integer)) 34 | 35 | class api_legacy_table(api_table, LegacyBase): 36 | pass 37 | 38 | 39 | # class comments_table(api_table,Base): 40 | # api_table.__tablename__ = "comments" 41 | 42 | class media_table(template_media_table, Base): 43 | class media_legacy_table(template_media_table().legacy_2(LegacyBase),LegacyBase): 44 | pass 45 | 46 | 47 | def table_picker(table_name, legacy=False): 48 | if table_name == "Stories": 49 | table = stories_table 50 | elif table_name == "Posts": 51 | table = posts_table 52 | elif table_name == "Messages": 53 | table = messages_table if not legacy else messages_table().api_legacy_table 54 | else: 55 | table = None 56 | input("Can't find table") 57 | return table 58 | -------------------------------------------------------------------------------- /database/archived_databases/posts/alembic/versions/a918b6b05d2f_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: a918b6b05d2f 5 | Revises: 6b1b10eb67de 6 | Create Date: 2021-05-31 02:56:28.192070 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'a918b6b05d2f' 15 | down_revision = '6b1b10eb67de' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.alter_column('created_at', 24 | existing_type=sa.DATETIME(), 25 | type_=sa.TIMESTAMP(), 26 | existing_nullable=True) 27 | 28 | with op.batch_alter_table('posts', schema=None) as batch_op: 29 | batch_op.add_column(sa.Column('archived', sa.Boolean(), nullable=True)) 30 | batch_op.alter_column('created_at', 31 | existing_type=sa.DATETIME(), 32 | type_=sa.TIMESTAMP(), 33 | existing_nullable=True) 34 | 35 | # ### end Alembic commands ### 36 | 37 | 38 | def downgrade(): 39 | # ### commands auto generated by Alembic - please adjust! ### 40 | with op.batch_alter_table('posts', schema=None) as batch_op: 41 | batch_op.alter_column('created_at', 42 | existing_type=sa.TIMESTAMP(), 43 | type_=sa.DATETIME(), 44 | existing_nullable=True) 45 | batch_op.drop_column('archived') 46 | 47 | with op.batch_alter_table('medias', schema=None) as batch_op: 48 | batch_op.alter_column('created_at', 49 | existing_type=sa.TIMESTAMP(), 50 | type_=sa.DATETIME(), 51 | existing_nullable=True) 52 | 53 | # ### end Alembic commands ### 54 | -------------------------------------------------------------------------------- /database/archived_databases/stories/alembic/versions/2e4f8364f7e2_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: 2e4f8364f7e2 5 | Revises: ebc3f4bb0782 6 | Create Date: 2021-05-31 02:56:17.448718 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 
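# [Editor's note] The upgrade()/downgrade() steps in these revision files use
# op.batch_alter_table() because SQLite cannot modify or drop existing columns
# in place; batch mode recreates the table and copies the rows. Typical shape
# (column name illustrative):
#
#     with op.batch_alter_table('medias', schema=None) as batch_op:
#         batch_op.add_column(sa.Column('example', sa.String(), nullable=True))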
14 | revision = '2e4f8364f7e2' 15 | down_revision = 'ebc3f4bb0782' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.alter_column('created_at', 24 | existing_type=sa.DATETIME(), 25 | type_=sa.TIMESTAMP(), 26 | existing_nullable=True) 27 | 28 | with op.batch_alter_table('stories', schema=None) as batch_op: 29 | batch_op.add_column(sa.Column('archived', sa.Boolean(), nullable=True)) 30 | batch_op.alter_column('created_at', 31 | existing_type=sa.DATETIME(), 32 | type_=sa.TIMESTAMP(), 33 | existing_nullable=True) 34 | 35 | # ### end Alembic commands ### 36 | 37 | 38 | def downgrade(): 39 | # ### commands auto generated by Alembic - please adjust! ### 40 | with op.batch_alter_table('stories', schema=None) as batch_op: 41 | batch_op.alter_column('created_at', 42 | existing_type=sa.TIMESTAMP(), 43 | type_=sa.DATETIME(), 44 | existing_nullable=True) 45 | batch_op.drop_column('archived') 46 | 47 | with op.batch_alter_table('medias', schema=None) as batch_op: 48 | batch_op.alter_column('created_at', 49 | existing_type=sa.TIMESTAMP(), 50 | type_=sa.DATETIME(), 51 | existing_nullable=True) 52 | 53 | # ### end Alembic commands ### 54 | -------------------------------------------------------------------------------- /database/archived_databases/messages/alembic/versions/7c1c6e101059_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: 7c1c6e101059 5 | Revises: aeb9fe314556 6 | Create Date: 2021-05-31 02:56:29.998095 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '7c1c6e101059' 15 | down_revision = 'aeb9fe314556' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | with op.batch_alter_table('medias', schema=None) as batch_op: 23 | batch_op.alter_column('created_at', 24 | existing_type=sa.DATETIME(), 25 | type_=sa.TIMESTAMP(), 26 | existing_nullable=True) 27 | 28 | with op.batch_alter_table('messages', schema=None) as batch_op: 29 | batch_op.add_column(sa.Column('archived', sa.Boolean(), nullable=True)) 30 | batch_op.alter_column('created_at', 31 | existing_type=sa.DATETIME(), 32 | type_=sa.TIMESTAMP(), 33 | existing_nullable=True) 34 | 35 | # ### end Alembic commands ### 36 | 37 | 38 | def downgrade(): 39 | # ### commands auto generated by Alembic - please adjust! 
### 40 | with op.batch_alter_table('messages', schema=None) as batch_op: 41 | batch_op.alter_column('created_at', 42 | existing_type=sa.TIMESTAMP(), 43 | type_=sa.DATETIME(), 44 | existing_nullable=True) 45 | batch_op.drop_column('archived') 46 | 47 | with op.batch_alter_table('medias', schema=None) as batch_op: 48 | batch_op.alter_column('created_at', 49 | existing_type=sa.TIMESTAMP(), 50 | type_=sa.DATETIME(), 51 | existing_nullable=True) 52 | 53 | # ### end Alembic commands ### 54 | -------------------------------------------------------------------------------- /database/archived_databases/posts/alembic/versions/194e05269f09_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: 194e05269f09 5 | Revises: 6 | Create Date: 2021-01-08 20:25:16.796179 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '194e05269f09' 15 | down_revision = None 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | op.create_table('medias', 23 | sa.Column('id', sa.Integer(), nullable=False), 24 | sa.Column('media_id', sa.Integer(), nullable=True), 25 | sa.Column('post_id', sa.Integer(), nullable=False), 26 | sa.Column('link', sa.String(), nullable=True), 27 | sa.Column('directory', sa.String(), nullable=True), 28 | sa.Column('filename', sa.String(), nullable=True), 29 | sa.Column('size', sa.Integer(), nullable=True), 30 | sa.Column('media_type', sa.String(), nullable=True), 31 | sa.Column('downloaded', sa.Integer(), nullable=True), 32 | sa.Column('created_at', sa.DATETIME(), nullable=True), 33 | sa.PrimaryKeyConstraint('id'), 34 | sa.UniqueConstraint('media_id') 35 | ) 36 | op.create_table('posts', 37 | sa.Column('id', sa.Integer(), nullable=False), 38 | sa.Column('post_id', sa.Integer(), nullable=False), 39 | sa.Column('text', sa.String(), nullable=True), 40 | sa.Column('price', sa.Integer(), nullable=True), 41 | sa.Column('paid', sa.Integer(), nullable=True), 42 | sa.Column('created_at', sa.DATETIME(), nullable=True), 43 | sa.PrimaryKeyConstraint('id'), 44 | sa.UniqueConstraint('post_id') 45 | ) 46 | # ### end Alembic commands ### 47 | 48 | 49 | def downgrade(): 50 | # ### commands auto generated by Alembic - please adjust! ### 51 | op.drop_table('posts') 52 | op.drop_table('medias') 53 | # ### end Alembic commands ### 54 | -------------------------------------------------------------------------------- /database/archived_databases/stories/alembic/versions/3076beb33c1b_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: 3076beb33c1b 5 | Revises: 6 | Create Date: 2021-01-08 23:09:26.868834 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '3076beb33c1b' 15 | down_revision = None 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! 
### 22 | op.create_table('medias', 23 | sa.Column('id', sa.Integer(), nullable=False), 24 | sa.Column('media_id', sa.Integer(), nullable=True), 25 | sa.Column('post_id', sa.Integer(), nullable=False), 26 | sa.Column('link', sa.String(), nullable=True), 27 | sa.Column('directory', sa.String(), nullable=True), 28 | sa.Column('filename', sa.String(), nullable=True), 29 | sa.Column('size', sa.Integer(), nullable=True), 30 | sa.Column('media_type', sa.String(), nullable=True), 31 | sa.Column('downloaded', sa.Integer(), nullable=True), 32 | sa.Column('created_at', sa.DATETIME(), nullable=True), 33 | sa.PrimaryKeyConstraint('id'), 34 | sa.UniqueConstraint('media_id') 35 | ) 36 | op.create_table('stories', 37 | sa.Column('id', sa.Integer(), nullable=False), 38 | sa.Column('post_id', sa.Integer(), nullable=False), 39 | sa.Column('text', sa.String(), nullable=True), 40 | sa.Column('price', sa.Integer(), nullable=True), 41 | sa.Column('paid', sa.Integer(), nullable=True), 42 | sa.Column('created_at', sa.DATETIME(), nullable=True), 43 | sa.PrimaryKeyConstraint('id'), 44 | sa.UniqueConstraint('post_id') 45 | ) 46 | # ### end Alembic commands ### 47 | 48 | 49 | def downgrade(): 50 | # ### commands auto generated by Alembic - please adjust! ### 51 | op.drop_table('stories') 52 | op.drop_table('medias') 53 | # ### end Alembic commands ### 54 | -------------------------------------------------------------------------------- /database/archived_databases/messages/alembic/versions/2c36fcc0b921_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | 3 | """content 4 | 5 | Revision ID: 2c36fcc0b921 6 | Revises: 7 | Create Date: 2021-01-08 20:25:52.456387 8 | 9 | """ 10 | from alembic import op 11 | import sqlalchemy as sa 12 | 13 | 14 | # revision identifiers, used by Alembic. 15 | revision = '2c36fcc0b921' 16 | down_revision = None 17 | branch_labels = None 18 | depends_on = None 19 | 20 | 21 | def upgrade(): 22 | # ### commands auto generated by Alembic - please adjust! ### 23 | op.create_table('medias', 24 | sa.Column('id', sa.Integer(), nullable=False), 25 | sa.Column('media_id', sa.Integer(), nullable=True), 26 | sa.Column('post_id', sa.Integer(), nullable=False), 27 | sa.Column('link', sa.String(), nullable=True), 28 | sa.Column('directory', sa.String(), nullable=True), 29 | sa.Column('filename', sa.String(), nullable=True), 30 | sa.Column('size', sa.Integer(), nullable=True), 31 | sa.Column('media_type', sa.String(), nullable=True), 32 | sa.Column('downloaded', sa.Integer(), nullable=True), 33 | sa.Column('created_at', sa.DATETIME(), nullable=True), 34 | sa.PrimaryKeyConstraint('id'), 35 | sa.UniqueConstraint('media_id') 36 | ) 37 | op.create_table('messages', 38 | sa.Column('id', sa.Integer(), nullable=False), 39 | sa.Column('post_id', sa.Integer(), nullable=False), 40 | sa.Column('text', sa.String(), nullable=True), 41 | sa.Column('price', sa.Integer(), nullable=True), 42 | sa.Column('paid', sa.Integer(), nullable=True), 43 | sa.Column('created_at', sa.DATETIME(), nullable=True), 44 | sa.PrimaryKeyConstraint('id'), 45 | sa.UniqueConstraint('post_id') 46 | ) 47 | # ### end Alembic commands ### 48 | 49 | 50 | def downgrade(): 51 | # ### commands auto generated by Alembic - please adjust! 
### 52 | op.drop_table('messages') 53 | op.drop_table('medias') 54 | # ### end Alembic commands ### 55 | -------------------------------------------------------------------------------- /apis/onlyfans/classes/create_story.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | 4 | class create_story: 5 | def __init__(self, option={}) -> None: 6 | self.id: int = option.get("id") 7 | self.userId: int = option.get("userId") 8 | self.createdAt: str = option.get("createdAt") 9 | self.expiredAt: str = option.get("expiredAt") 10 | self.isReady: bool = option.get("isReady") 11 | self.viewersCount: int = option.get("viewersCount") 12 | self.viewers: list = option.get("viewers") 13 | self.canLike: bool = option.get("canLike") 14 | self.mediaCount: int = option.get("mediaCount") 15 | self.isWatched: bool = option.get("isWatched") 16 | self.isLiked: bool = option.get("isLiked") 17 | self.canDelete: bool = option.get("canDelete") 18 | self.isHighlightCover: bool = option.get("isHighlightCover") 19 | self.isLastInHighlight: bool = option.get("isLastInHighlight") 20 | self.media: list = option.get("media") 21 | self.question: Any = option.get("question") 22 | self.placedContents: list = option.get("placedContents") 23 | self.answered: int = option.get("answered") 24 | 25 | async def link_picker(self, media, video_quality): 26 | link = "" 27 | if "source" in media: 28 | quality_key = "source" 29 | source = media[quality_key] 30 | link = source[quality_key] 31 | if link: 32 | if media["type"] == "video": 33 | qualities = media["videoSources"] 34 | qualities = dict(sorted(qualities.items(), reverse=False)) 35 | qualities[quality_key] = source[quality_key] 36 | for quality, quality_link in qualities.items(): 37 | video_quality = video_quality.removesuffix("p") 38 | if quality == video_quality: 39 | if quality_link: 40 | link = quality_link 41 | break 42 | 43 | 44 | 45 | if "src" in media: 46 | link = media["src"] 47 | return link -------------------------------------------------------------------------------- /database/archived_databases/posts/alembic.ini: -------------------------------------------------------------------------------- 1 | # A generic, single database configuration. 2 | 3 | [alembic] 4 | # path to migration scripts 5 | script_location = alembic 6 | 7 | # template used to generate migration files 8 | # file_template = %%(rev)s_%%(slug)s 9 | 10 | # timezone to use when rendering the date 11 | # within the migration file as well as the filename. 12 | # string value is passed to dateutil.tz.gettz() 13 | # leave blank for localtime 14 | # timezone = 15 | 16 | # max length of characters to apply to the 17 | # "slug" field 18 | # truncate_slug_length = 40 19 | 20 | # set to 'true' to run the environment during 21 | # the 'revision' command, regardless of autogenerate 22 | # revision_environment = false 23 | 24 | # set to 'true' to allow .pyc and .pyo files without 25 | # a source .py file to be detected as revisions in the 26 | # versions/ directory 27 | # sourceless = false 28 | 29 | # version location specification; this defaults 30 | # to alembic/versions.
When using multiple version 31 | # directories, initial revisions must be specified with --version-path 32 | # version_locations = %(here)s/bar %(here)s/bat alembic/versions 33 | 34 | # the output encoding used when revision files 35 | # are written from script.py.mako 36 | # output_encoding = utf-8 37 | 38 | sqlalchemy.url = driver://user:pass@localhost/dbname 39 | 40 | 41 | [post_write_hooks] 42 | # post_write_hooks defines scripts or Python functions that are run 43 | # on newly generated revision scripts. See the documentation for further 44 | # detail and examples 45 | 46 | # format using "black" - use the console_scripts runner, against the "black" entrypoint 47 | # hooks=black 48 | # black.type=console_scripts 49 | # black.entrypoint=black 50 | # black.options=-l 79 51 | 52 | # Logging configuration 53 | [loggers] 54 | keys = root,sqlalchemy,alembic 55 | 56 | [handlers] 57 | keys = console 58 | 59 | [formatters] 60 | keys = generic 61 | 62 | [logger_root] 63 | level = WARN 64 | handlers = console 65 | qualname = 66 | 67 | [logger_sqlalchemy] 68 | level = WARN 69 | handlers = 70 | qualname = sqlalchemy.engine 71 | 72 | [logger_alembic] 73 | level = INFO 74 | handlers = 75 | qualname = alembic 76 | 77 | [handler_console] 78 | class = StreamHandler 79 | args = (sys.stderr,) 80 | level = NOTSET 81 | formatter = generic 82 | 83 | [formatter_generic] 84 | format = %(levelname)-5.5s [%(name)s] %(message)s 85 | datefmt = %H:%M:%S 86 | -------------------------------------------------------------------------------- /database/archived_databases/messages/alembic.ini: -------------------------------------------------------------------------------- 1 | # A generic, single database configuration. 2 | 3 | [alembic] 4 | # path to migration scripts 5 | script_location = alembic 6 | 7 | # template used to generate migration files 8 | # file_template = %%(rev)s_%%(slug)s 9 | 10 | # timezone to use when rendering the date 11 | # within the migration file as well as the filename. 12 | # string value is passed to dateutil.tz.gettz() 13 | # leave blank for localtime 14 | # timezone = 15 | 16 | # max length of characters to apply to the 17 | # "slug" field 18 | # truncate_slug_length = 40 19 | 20 | # set to 'true' to run the environment during 21 | # the 'revision' command, regardless of autogenerate 22 | # revision_environment = false 23 | 24 | # set to 'true' to allow .pyc and .pyo files without 25 | # a source .py file to be detected as revisions in the 26 | # versions/ directory 27 | # sourceless = false 28 | 29 | # version location specification; this defaults 30 | # to alembic/versions. When using multiple version 31 | # directories, initial revisions must be specified with --version-path 32 | # version_locations = %(here)s/bar %(here)s/bat alembic/versions 33 | 34 | # the output encoding used when revision files 35 | # are written from script.py.mako 36 | # output_encoding = utf-8 37 | 38 | sqlalchemy.url = driver://user:pass@localhost/dbname 39 | 40 | 41 | [post_write_hooks] 42 | # post_write_hooks defines scripts or Python functions that are run 43 | # on newly generated revision scripts. 
See the documentation for further 44 | # detail and examples 45 | 46 | # format using "black" - use the console_scripts runner, against the "black" entrypoint 47 | # hooks=black 48 | # black.type=console_scripts 49 | # black.entrypoint=black 50 | # black.options=-l 79 51 | 52 | # Logging configuration 53 | [loggers] 54 | keys = root,sqlalchemy,alembic 55 | 56 | [handlers] 57 | keys = console 58 | 59 | [formatters] 60 | keys = generic 61 | 62 | [logger_root] 63 | level = WARN 64 | handlers = console 65 | qualname = 66 | 67 | [logger_sqlalchemy] 68 | level = WARN 69 | handlers = 70 | qualname = sqlalchemy.engine 71 | 72 | [logger_alembic] 73 | level = INFO 74 | handlers = 75 | qualname = alembic 76 | 77 | [handler_console] 78 | class = StreamHandler 79 | args = (sys.stderr,) 80 | level = NOTSET 81 | formatter = generic 82 | 83 | [formatter_generic] 84 | format = %(levelname)-5.5s [%(name)s] %(message)s 85 | datefmt = %H:%M:%S 86 | -------------------------------------------------------------------------------- /database/archived_databases/stories/alembic.ini: -------------------------------------------------------------------------------- 1 | # A generic, single database configuration. 2 | 3 | [alembic] 4 | # path to migration scripts 5 | script_location = alembic 6 | 7 | # template used to generate migration files 8 | # file_template = %%(rev)s_%%(slug)s 9 | 10 | # timezone to use when rendering the date 11 | # within the migration file as well as the filename. 12 | # string value is passed to dateutil.tz.gettz() 13 | # leave blank for localtime 14 | # timezone = 15 | 16 | # max length of characters to apply to the 17 | # "slug" field 18 | # truncate_slug_length = 40 19 | 20 | # set to 'true' to run the environment during 21 | # the 'revision' command, regardless of autogenerate 22 | # revision_environment = false 23 | 24 | # set to 'true' to allow .pyc and .pyo files without 25 | # a source .py file to be detected as revisions in the 26 | # versions/ directory 27 | # sourceless = false 28 | 29 | # version location specification; this defaults 30 | # to alembic/versions. When using multiple version 31 | # directories, initial revisions must be specified with --version-path 32 | # version_locations = %(here)s/bar %(here)s/bat alembic/versions 33 | 34 | # the output encoding used when revision files 35 | # are written from script.py.mako 36 | # output_encoding = utf-8 37 | 38 | sqlalchemy.url = driver://user:pass@localhost/dbname 39 | 40 | 41 | [post_write_hooks] 42 | # post_write_hooks defines scripts or Python functions that are run 43 | # on newly generated revision scripts. 
See the documentation for further 44 | # detail and examples 45 | 46 | # format using "black" - use the console_scripts runner, against the "black" entrypoint 47 | # hooks=black 48 | # black.type=console_scripts 49 | # black.entrypoint=black 50 | # black.options=-l 79 51 | 52 | # Logging configuration 53 | [loggers] 54 | keys = root,sqlalchemy,alembic 55 | 56 | [handlers] 57 | keys = console 58 | 59 | [formatters] 60 | keys = generic 61 | 62 | [logger_root] 63 | level = WARN 64 | handlers = console 65 | qualname = 66 | 67 | [logger_sqlalchemy] 68 | level = WARN 69 | handlers = 70 | qualname = sqlalchemy.engine 71 | 72 | [logger_alembic] 73 | level = INFO 74 | handlers = 75 | qualname = alembic 76 | 77 | [handler_console] 78 | class = StreamHandler 79 | args = (sys.stderr,) 80 | level = NOTSET 81 | formatter = generic 82 | 83 | [formatter_generic] 84 | format = %(levelname)-5.5s [%(name)s] %(message)s 85 | datefmt = %H:%M:%S 86 | -------------------------------------------------------------------------------- /database/databases/user_data/alembic/env.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | from logging.config import fileConfig 3 | 4 | from sqlalchemy import engine_from_config 5 | from sqlalchemy import pool 6 | 7 | from alembic import context 8 | from database.databases.user_data import user_database 9 | 10 | # this is the Alembic Config object, which provides 11 | # access to the values within the .ini file in use. 12 | config = context.config 13 | 14 | # Interpret the config file for Python logging. 15 | # This line sets up loggers basically. 16 | fileConfig(config.config_file_name) 17 | 18 | # add your model's MetaData object here 19 | # for 'autogenerate' support 20 | # from myapp import mymodel 21 | # target_metadata = mymodel.Base.metadata 22 | target_metadata = user_database.Base.metadata 23 | 24 | # other values from the config, defined by the needs of env.py, 25 | # can be acquired: 26 | # my_important_option = config.get_main_option("my_important_option") 27 | # ... etc. 28 | 29 | 30 | def run_migrations_offline(): 31 | """Run migrations in 'offline' mode. 32 | 33 | This configures the context with just a URL 34 | and not an Engine, though an Engine is acceptable 35 | here as well. By skipping the Engine creation 36 | we don't even need a DBAPI to be available. 37 | 38 | Calls to context.execute() here emit the given string to the 39 | script output. 40 | 41 | """ 42 | url = config.get_main_option("sqlalchemy.url") 43 | context.configure( 44 | url=url, 45 | target_metadata=target_metadata, 46 | literal_binds=True, 47 | dialect_opts={"paramstyle": "named"}, 48 | ) 49 | 50 | with context.begin_transaction(): 51 | context.run_migrations() 52 | 53 | 54 | def run_migrations_online(): 55 | """Run migrations in 'online' mode. 56 | 57 | In this scenario we need to create an Engine 58 | and associate a connection with the context. 
59 | 60 |     """ 61 |     connectable = engine_from_config( 62 |         config.get_section(config.config_ini_section), 63 |         prefix="sqlalchemy.", 64 |         poolclass=pool.NullPool, 65 |     ) 66 | 67 |     with connectable.connect() as connection: 68 |         context.configure( 69 |             connection=connection, target_metadata=target_metadata 70 |         ) 71 | 72 |         with context.begin_transaction(): 73 |             context.run_migrations() 74 | 75 | 76 | if context.is_offline_mode(): 77 |     run_migrations_offline() 78 | else: 79 |     run_migrations_online() 80 | -------------------------------------------------------------------------------- /start_ofd.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import asyncio 3 | import os 4 | import time 5 | import traceback 6 | 7 | import tests.main_test as main_test 8 | 9 | try: 10 | 11 |     main_test.version_check() 12 |     main_test.check_config() 13 |     main_test.check_profiles() 14 | 15 |     if __name__ == "__main__": 16 |         import datascraper.main_datascraper as main_datascraper 17 |         import helpers.main_helper as main_helper 18 | 19 |         config_path = os.path.join(".settings", "config.json") 20 |         json_config, _updated = main_helper.get_config(config_path) 21 |         json_settings = json_config["settings"] 22 |         exit_on_completion = json_settings["exit_on_completion"] 23 |         infinite_loop = json_settings["infinite_loop"] 24 |         loop_timeout = json_settings["loop_timeout"] 25 |         json_sites = json_config["supported"] 26 |         domain = json_settings["auto_site_choice"] 27 |         string, site_names = main_helper.module_chooser(domain, json_sites) 28 | 29 |         # logging.basicConfig(level=logging.DEBUG, format="%(message)s") 30 |         async def main(): 31 |             while True: 32 |                 if domain: 33 |                     if site_names: 34 |                         site_name = domain 35 |                     else: 36 |                         print(string) 37 |                         continue 38 |                 else: 39 |                     print(string) 40 |                     site_choice = input() 41 |                     site_choice = int(site_choice) 42 |                     site_name = site_names[site_choice] 43 |                 site_name_lower = site_name.lower() 44 |                 api = await main_datascraper.start_datascraper( 45 |                     json_config, site_name_lower 46 |                 ) 47 |                 if api: 48 |                     api.close_pools() 49 |                 if exit_on_completion: 50 |                     print("Now exiting.") 51 |                     exit(0) 52 |                 elif not infinite_loop: 53 |                     print("Input anything to continue") 54 |                     input() 55 |                 elif loop_timeout: 56 |                     print(f"Pausing scraper for {loop_timeout} seconds.") 57 |                     time.sleep(int(loop_timeout)) 58 | 59 |         loop = asyncio.get_event_loop() 60 |         loop.run_until_complete(main()) 61 |         loop.close() 62 | except Exception: 63 |     print(traceback.format_exc()) 64 |     input() 65 | -------------------------------------------------------------------------------- /apis/onlyfans/onlyfans.py: -------------------------------------------------------------------------------- 1 | from multiprocessing.pool import Pool 2 | from typing import Any, Optional, Union 3 | 4 | from apis.onlyfans.classes import create_user 5 | from apis.onlyfans.classes.create_auth import create_auth 6 | from apis.onlyfans.classes.extras import auth_details, endpoint_links 7 | 8 | from ..
import api_helper 9 | 10 | 11 | # def session_retry_rules(response, link: str) -> int: 12 | # """ 13 | # 0 Fine, 1 Continue, 2 Break 14 | # """ 15 | # status_code = 0 16 | # if "https://onlyfans.com/api2/v2/" in link: 17 | # text = response.text 18 | # if "Invalid request sign" in text: 19 | # status_code = 1 20 | # elif "Access Denied" in text: 21 | # status_code = 2 22 | # else: 23 | # if not response.status_code == 200: 24 | # status_code = 1 25 | # return status_code 26 | 27 | 28 | class start: 29 | def __init__( 30 | self, 31 | max_threads: int = -1, 32 | ) -> None: 33 | self.auths: list[create_auth] = [] 34 | self.subscriptions: list[create_user] = [] 35 | self.max_threads = max_threads 36 | self.lists = None 37 | self.endpoint_links = endpoint_links 38 | self.pool: Pool = api_helper.multiprocessing() 39 | self.settings: dict[str, dict[str, Any]] = {} 40 | 41 | def add_auth(self, options: dict[str, str] = {}, only_active: bool = False): 42 | auth = create_auth(pool=self.pool, max_threads=self.max_threads) 43 | if only_active and not options.get("active"): 44 | return auth 45 | temp_auth_details = auth_details(options).upgrade_legacy(options) 46 | auth.auth_details = temp_auth_details 47 | auth.extras["settings"] = self.settings 48 | self.auths.append(auth) 49 | return auth 50 | 51 | def get_auth(self, identifier: Union[str, int]) -> Optional[create_auth]: 52 | final_auth = None 53 | for auth in self.auths: 54 | if auth.id == identifier: 55 | final_auth = auth 56 | elif auth.username == identifier: 57 | final_auth = auth 58 | if final_auth: 59 | break 60 | return final_auth 61 | 62 | def close_pools(self): 63 | self.pool.close() 64 | for auth in self.auths: 65 | auth.session_manager.pool.close() 66 | 67 | def has_active_auths(self): 68 | return bool([x for x in self.auths if x.active]) -------------------------------------------------------------------------------- /database/databases/user_data/alembic.ini: -------------------------------------------------------------------------------- 1 | # A generic, single database configuration. 2 | 3 | [alembic] 4 | # path to migration scripts 5 | script_location = alembic 6 | 7 | # template used to generate migration files 8 | # file_template = %%(rev)s_%%(slug)s 9 | 10 | # sys.path path, will be prepended to sys.path if present. 11 | # defaults to the current working directory. 12 | prepend_sys_path = . 13 | 14 | # timezone to use when rendering the date 15 | # within the migration file as well as the filename. 16 | # string value is passed to dateutil.tz.gettz() 17 | # leave blank for localtime 18 | # timezone = 19 | 20 | # max length of characters to apply to the 21 | # "slug" field 22 | # truncate_slug_length = 40 23 | 24 | # set to 'true' to run the environment during 25 | # the 'revision' command, regardless of autogenerate 26 | # revision_environment = false 27 | 28 | # set to 'true' to allow .pyc and .pyo files without 29 | # a source .py file to be detected as revisions in the 30 | # versions/ directory 31 | # sourceless = false 32 | 33 | # version location specification; this defaults 34 | # to alembic/versions. 
When using multiple version 35 | # directories, initial revisions must be specified with --version-path 36 | # version_locations = %(here)s/bar %(here)s/bat alembic/versions 37 | 38 | # the output encoding used when revision files 39 | # are written from script.py.mako 40 | # output_encoding = utf-8 41 | 42 | sqlalchemy.url = driver://user:pass@localhost/dbname 43 | 44 | 45 | [post_write_hooks] 46 | # post_write_hooks defines scripts or Python functions that are run 47 | # on newly generated revision scripts. See the documentation for further 48 | # detail and examples 49 | 50 | # format using "black" - use the console_scripts runner, against the "black" entrypoint 51 | # hooks = black 52 | # black.type = console_scripts 53 | # black.entrypoint = black 54 | # black.options = -l 79 REVISION_SCRIPT_FILENAME 55 | 56 | # Logging configuration 57 | [loggers] 58 | keys = root,sqlalchemy,alembic 59 | 60 | [handlers] 61 | keys = console 62 | 63 | [formatters] 64 | keys = generic 65 | 66 | [logger_root] 67 | level = WARN 68 | handlers = console 69 | qualname = 70 | 71 | [logger_sqlalchemy] 72 | level = WARN 73 | handlers = 74 | qualname = sqlalchemy.engine 75 | 76 | [logger_alembic] 77 | level = INFO 78 | handlers = 79 | qualname = alembic 80 | 81 | [handler_console] 82 | class = StreamHandler 83 | args = (sys.stderr,) 84 | level = NOTSET 85 | formatter = generic 86 | 87 | [formatter_generic] 88 | format = %(levelname)-5.5s [%(name)s] %(message)s 89 | datefmt = %H:%M:%S 90 | -------------------------------------------------------------------------------- /database/archived_databases/posts/alembic/env.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | from logging.config import fileConfig 3 | 4 | from sqlalchemy import engine_from_config 5 | from sqlalchemy import pool 6 | 7 | from alembic import context 8 | from database.archived_databases.posts import posts 9 | 10 | # this is the Alembic Config object, which provides 11 | # access to the values within the .ini file in use. 12 | config = context.config 13 | 14 | # Interpret the config file for Python logging. 15 | # This line sets up loggers basically. 16 | fileConfig(config.config_file_name, disable_existing_loggers=False) 17 | 18 | # add your model's MetaData object here 19 | # for 'autogenerate' support 20 | # from myapp import mymodel 21 | # target_metadata = mymodel.Base.metadata 22 | target_metadata = posts.Base.metadata 23 | 24 | # other values from the config, defined by the needs of env.py, 25 | # can be acquired: 26 | # my_important_option = config.get_main_option("my_important_option") 27 | # ... etc. 28 | 29 | 30 | def run_migrations_offline(): 31 | """Run migrations in 'offline' mode. 32 | 33 | This configures the context with just a URL 34 | and not an Engine, though an Engine is acceptable 35 | here as well. By skipping the Engine creation 36 | we don't even need a DBAPI to be available. 37 | 38 | Calls to context.execute() here emit the given string to the 39 | script output. 40 | 41 | """ 42 | url = config.get_main_option("sqlalchemy.url") 43 | context.configure( 44 | url=url, 45 | target_metadata=target_metadata, 46 | literal_binds=True, 47 | dialect_opts={"paramstyle": "named"} 48 | ) 49 | 50 | with context.begin_transaction(): 51 | context.run_migrations() 52 | 53 | 54 | def run_migrations_online(): 55 | """Run migrations in 'online' mode. 56 | 57 | In this scenario we need to create an Engine 58 | and associate a connection with the context. 
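    The connection below is configured with render_as_batch=True, so
    autogenerated migrations use batch ("move and copy") mode, which SQLite
    needs for most ALTER TABLE operations, and compare_type=True, so column
    type changes are detected during autogenerate.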
59 | 60 |     """ 61 |     connectable = engine_from_config( 62 |         config.get_section(config.config_ini_section), 63 |         prefix="sqlalchemy.", 64 |         poolclass=pool.NullPool, 65 |     ) 66 | 67 |     with connectable.connect() as connection: 68 |         context.configure( 69 |             connection=connection, target_metadata=target_metadata, 70 |             render_as_batch=True, 71 |             compare_type=True, 72 |         ) 73 | 74 |         with context.begin_transaction(): 75 |             context.run_migrations() 76 | 77 | 78 | if context.is_offline_mode(): 79 |     run_migrations_offline() 80 | else: 81 |     run_migrations_online() 82 | -------------------------------------------------------------------------------- /tests/main_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | from os.path import dirname as up 4 | path = up(up(os.path.realpath(__file__))) 5 | os.chdir(path) 6 | 7 | 8 | def version_check(): 9 |     version_info = sys.version_info 10 |     # Compare the version tuple directly; casting "3.10" to float yields 3.1 11 |     # and would wrongly reject Python 3.10+. 12 |     if version_info < (3, 9): 13 |         string = "Execute the script with Python 3.9 or higher\n" 14 |         string += "Press enter to continue" 15 |         input(string) 16 |         exit(0) 17 | # Updating any outdated config values 18 | 19 | 20 | def check_config(): 21 |     file_name = "config.json" 22 |     path = os.path.join('.settings', file_name) 23 |     import helpers.main_helper as main_helper 24 |     json_config, updated = main_helper.get_config(path) 25 |     if updated: 26 |         input( 27 |             f"The .settings\\{file_name} file has been updated. Fill in whatever you need to fill in and then press enter when done.\n") 28 |     return json_config 29 | 30 | 31 | def check_profiles(): 32 |     file_name = "config.json" 33 |     path = os.path.join('.settings', file_name) 34 |     import helpers.main_helper as main_helper 35 |     from apis.onlyfans.onlyfans import auth_details 36 |     json_config, _updated = main_helper.get_config(path) 37 |     json_settings = json_config["settings"] 38 |     profile_directories = json_settings["profile_directories"] 39 |     profile_directory = profile_directories[0] 40 |     matches = ["OnlyFans"] 41 |     for match in matches: 42 |         q = os.path.abspath(profile_directory) 43 |         profile_site_directory = os.path.join(q, match) 44 |         if os.path.exists(profile_site_directory): 45 |             e = os.listdir(profile_site_directory) 46 |             e = [os.path.join(profile_site_directory, x, "auth.json") 47 |                  for x in e] 48 |             e = [x for x in e if os.path.exists(x)] 49 |             if e: 50 |                 continue 51 |         default_profile_directory = os.path.join( 52 |             profile_site_directory, "default") 53 |         os.makedirs(default_profile_directory, exist_ok=True) 54 |         auth_filepath = os.path.join(default_profile_directory, "auth.json") 55 |         if not os.path.exists(auth_filepath): 56 |             new_item = {} 57 |             new_item["auth"] = auth_details().export() 58 |             main_helper.export_data(new_item, auth_filepath) 59 |             string = f"{auth_filepath} has been created. Fill in the relevant details and then press enter to continue."
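            # Block here so the user can fill in the new auth file before the
            # scraper tries to read it.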
60 | input(string) 61 | print 62 | print 63 | -------------------------------------------------------------------------------- /database/archived_databases/stories/alembic/env.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | from logging.config import fileConfig 3 | 4 | from sqlalchemy import engine_from_config 5 | from sqlalchemy import pool 6 | 7 | from alembic import context 8 | from database.archived_databases.stories import stories 9 | 10 | # this is the Alembic Config object, which provides 11 | # access to the values within the .ini file in use. 12 | config = context.config 13 | 14 | # Interpret the config file for Python logging. 15 | # This line sets up loggers basically. 16 | fileConfig(config.config_file_name, disable_existing_loggers=False) 17 | 18 | # add your model's MetaData object here 19 | # for 'autogenerate' support 20 | # from myapp import mymodel 21 | # target_metadata = mymodel.Base.metadata 22 | target_metadata = stories.Base.metadata 23 | 24 | # other values from the config, defined by the needs of env.py, 25 | # can be acquired: 26 | # my_important_option = config.get_main_option("my_important_option") 27 | # ... etc. 28 | 29 | 30 | def run_migrations_offline(): 31 | """Run migrations in 'offline' mode. 32 | 33 | This configures the context with just a URL 34 | and not an Engine, though an Engine is acceptable 35 | here as well. By skipping the Engine creation 36 | we don't even need a DBAPI to be available. 37 | 38 | Calls to context.execute() here emit the given string to the 39 | script output. 40 | 41 | """ 42 | url = config.get_main_option("sqlalchemy.url") 43 | context.configure( 44 | url=url, 45 | target_metadata=target_metadata, 46 | literal_binds=True, 47 | dialect_opts={"paramstyle": "named"} 48 | ) 49 | 50 | with context.begin_transaction(): 51 | context.run_migrations() 52 | 53 | 54 | def run_migrations_online(): 55 | """Run migrations in 'online' mode. 56 | 57 | In this scenario we need to create an Engine 58 | and associate a connection with the context. 59 | 60 | """ 61 | connectable = engine_from_config( 62 | config.get_section(config.config_ini_section), 63 | prefix="sqlalchemy.", 64 | poolclass=pool.NullPool, 65 | ) 66 | 67 | with connectable.connect() as connection: 68 | context.configure( 69 | connection=connection, target_metadata=target_metadata, 70 | render_as_batch=True, 71 | compare_type=True, 72 | ) 73 | 74 | with context.begin_transaction(): 75 | context.run_migrations() 76 | 77 | 78 | if context.is_offline_mode(): 79 | run_migrations_offline() 80 | else: 81 | run_migrations_online() 82 | -------------------------------------------------------------------------------- /database/archived_databases/messages/alembic/env.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | from logging.config import fileConfig 3 | 4 | from sqlalchemy import engine_from_config 5 | from sqlalchemy import pool 6 | 7 | from alembic import context 8 | from database.archived_databases.messages import messages 9 | 10 | # this is the Alembic Config object, which provides 11 | # access to the values within the .ini file in use. 12 | config = context.config 13 | 14 | # Interpret the config file for Python logging. 15 | # This line sets up loggers basically. 
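# disable_existing_loggers=False keeps any loggers the importing application
# has already configured instead of silencing them.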
16 | fileConfig(config.config_file_name, disable_existing_loggers=False) 17 | 18 | # add your model's MetaData object here 19 | # for 'autogenerate' support 20 | # from myapp import mymodel 21 | # target_metadata = mymodel.Base.metadata 22 | target_metadata = messages.Base.metadata 23 | 24 | # other values from the config, defined by the needs of env.py, 25 | # can be acquired: 26 | # my_important_option = config.get_main_option("my_important_option") 27 | # ... etc. 28 | 29 | 30 | def run_migrations_offline(): 31 |     """Run migrations in 'offline' mode. 32 | 33 |     This configures the context with just a URL 34 |     and not an Engine, though an Engine is acceptable 35 |     here as well.  By skipping the Engine creation 36 |     we don't even need a DBAPI to be available. 37 | 38 |     Calls to context.execute() here emit the given string to the 39 |     script output. 40 | 41 |     """ 42 |     url = config.get_main_option("sqlalchemy.url") 43 |     context.configure( 44 |         url=url, 45 |         target_metadata=target_metadata, 46 |         literal_binds=True, 47 |         dialect_opts={"paramstyle": "named"}, 48 |     ) 49 | 50 |     with context.begin_transaction(): 51 |         context.run_migrations() 52 | 53 | 54 | def run_migrations_online(): 55 |     """Run migrations in 'online' mode. 56 | 57 |     In this scenario we need to create an Engine 58 |     and associate a connection with the context. 59 | 60 |     """ 61 |     connectable = engine_from_config( 62 |         config.get_section(config.config_ini_section), 63 |         prefix="sqlalchemy.", 64 |         poolclass=pool.NullPool, 65 |     ) 66 | 67 |     with connectable.connect() as connection: 68 |         context.configure( 69 |             connection=connection, target_metadata=target_metadata, 70 |             render_as_batch=True, 71 |             compare_type=True, 72 |         ) 73 | 74 |         with context.begin_transaction(): 75 |             context.run_migrations() 76 | 77 | 78 | if context.is_offline_mode(): 79 |     run_migrations_offline() 80 | else: 81 |     run_migrations_online() 82 | -------------------------------------------------------------------------------- /database/databases/user_data/models/media_table.py: -------------------------------------------------------------------------------- 1 | ### media_table.py ### 2 | 3 | from datetime import datetime 4 | from typing import cast 5 | import sqlalchemy 6 | 7 | 8 | class template_media_table: 9 |     __tablename__ = "medias" 10 |     id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True) 11 |     media_id = sqlalchemy.Column(sqlalchemy.Integer, unique=True) 12 |     post_id = sqlalchemy.Column(sqlalchemy.Integer, nullable=False) 13 |     link = cast(str, sqlalchemy.Column(sqlalchemy.String)) 14 |     directory = cast(str, sqlalchemy.Column(sqlalchemy.String)) 15 |     filename = cast(str, sqlalchemy.Column(sqlalchemy.String)) 16 |     size = cast(int, sqlalchemy.Column(sqlalchemy.Integer, default=None)) 17 |     api_type = cast(str, sqlalchemy.Column(sqlalchemy.String)) 18 |     media_type = sqlalchemy.Column(sqlalchemy.String) 19 |     preview = sqlalchemy.Column(sqlalchemy.Integer, default=0) 20 |     linked = sqlalchemy.Column(sqlalchemy.String, default=None) 21 |     downloaded = cast(bool, sqlalchemy.Column(sqlalchemy.Integer, default=0)) 22 |     created_at = cast(datetime, sqlalchemy.Column(sqlalchemy.TIMESTAMP)) 23 | 24 |     def legacy(self, Base): 25 |         class legacy_media_table(Base): 26 |             __tablename__ = "medias" 27 |             id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True) 28 |             post_id = sqlalchemy.Column(sqlalchemy.Integer) 29 |             link = sqlalchemy.Column(sqlalchemy.String) 30 |             directory = sqlalchemy.Column(sqlalchemy.String) 31 |             filename = sqlalchemy.Column(sqlalchemy.String) 32 |             size =
sqlalchemy.Column(sqlalchemy.Integer, default=None) 33 | media_type = sqlalchemy.Column(sqlalchemy.String) 34 | downloaded = sqlalchemy.Column(sqlalchemy.Integer, default=0) 35 | created_at = sqlalchemy.Column(sqlalchemy.DATETIME) 36 | 37 | return legacy_media_table 38 | 39 | def legacy_2(self, Base): 40 | class legacy_media_table(Base): 41 | __tablename__ = "medias" 42 | id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True) 43 | media_id = sqlalchemy.Column(sqlalchemy.Integer, unique=True) 44 | post_id = sqlalchemy.Column(sqlalchemy.Integer, nullable=False) 45 | link = cast(str, sqlalchemy.Column(sqlalchemy.String)) 46 | directory = cast(str, sqlalchemy.Column(sqlalchemy.String)) 47 | filename = cast(str, sqlalchemy.Column(sqlalchemy.String)) 48 | size = cast(int, sqlalchemy.Column(sqlalchemy.Integer, default=None)) 49 | media_type = sqlalchemy.Column(sqlalchemy.String) 50 | preview = sqlalchemy.Column(sqlalchemy.Integer, default=0) 51 | linked = sqlalchemy.Column(sqlalchemy.String, default=None) 52 | downloaded = cast(bool, sqlalchemy.Column(sqlalchemy.Integer, default=0)) 53 | created_at = cast(datetime, sqlalchemy.Column(sqlalchemy.TIMESTAMP)) 54 | 55 | return legacy_media_table 56 | -------------------------------------------------------------------------------- /extras/OFRenamer/.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.gitignore.io/api/code,linux,python,pycharm,windows 2 | # Edit at https://www.gitignore.io/?templates=code,linux,python,pycharm,windows 3 | 4 | ### Code ### 5 | .vscode/* 6 | 7 | ### PyCharm Patch ### 8 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 9 | .idea/* 10 | 11 | ### Python ### 12 | # Byte-compiled / optimized / DLL files 13 | __pycache__/ 14 | *.py[cod] 15 | *$py.class 16 | 17 | # C extensions 18 | *.so 19 | 20 | # Distribution / packaging 21 | .Python 22 | build/ 23 | develop-eggs/ 24 | dist/ 25 | downloads/ 26 | eggs/ 27 | .eggs/ 28 | lib/ 29 | lib64/ 30 | parts/ 31 | sdist/ 32 | var/ 33 | wheels/ 34 | pip-wheel-metadata/ 35 | share/python-wheels/ 36 | *.egg-info/ 37 | .installed.cfg 38 | *.egg 39 | MANIFEST 40 | 41 | ### venv ### 42 | # Virtualenv 43 | # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ 44 | .Python 45 | [Bb]in 46 | [Ii]nclude 47 | [Ll]ib 48 | [Ll]ib64 49 | [Ll]ocal 50 | [Ss]cripts 51 | pyvenv.cfg 52 | .env 53 | .venv 54 | env/ 55 | venv/ 56 | ENV/ 57 | env.bak/ 58 | venv.bak/ 59 | pip-selfcheck.json 60 | 61 | 62 | # PyInstaller 63 | # Usually these files are written by a python script from a template 64 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 65 | *.manifest 66 | *.spec 67 | 68 | # Installer logs 69 | pip-log.txt 70 | pip-delete-this-directory.txt 71 | 72 | # Unit test / coverage reports 73 | htmlcov/ 74 | .tox/ 75 | .nox/ 76 | .coverage 77 | .coverage.* 78 | .cache 79 | nosetests.xml 80 | coverage.xml 81 | *.cover 82 | .hypothesis/ 83 | .pytest_cache/ 84 | 85 | # Translations 86 | *.mo 87 | *.pot 88 | 89 | # Scrapy stuff: 90 | .scrapy 91 | 92 | # Sphinx documentation 93 | docs/_build/ 94 | 95 | # PyBuilder 96 | target/ 97 | 98 | # pyenv 99 | .python-version 100 | 101 | # pipenv 102 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
103 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 104 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 105 | # install all needed dependencies. 106 | #Pipfile.lock 107 | 108 | # mkdocs documentation 109 | /site 110 | 111 | # mypy 112 | .mypy_cache/ 113 | .dmypy.json 114 | dmypy.json 115 | 116 | ### Windows ### 117 | # Windows thumbnail cache files 118 | Thumbs.db 119 | Thumbs.db:encryptable 120 | ehthumbs.db 121 | ehthumbs_vista.db 122 | 123 | # Dump file 124 | *.stackdump 125 | 126 | # Folder config file 127 | [Dd]esktop.ini 128 | 129 | # Recycle Bin used on file shares 130 | $RECYCLE.BIN/ 131 | 132 | # Windows Installer files 133 | *.cab 134 | *.msi 135 | *.msix 136 | *.msm 137 | *.msp 138 | 139 | # Windows shortcuts 140 | *.lnk 141 | 142 | # End of https://www.gitignore.io/api/code,linux,python,pycharm,windows 143 | 144 | # Project Specific 145 | .sites 146 | *.log 147 | .settings/* 148 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.gitignore.io/api/code,linux,python,pycharm,windows 2 | # Edit at https://www.gitignore.io/?templates=code,linux,python,pycharm,windows 3 | 4 | ### Code ### 5 | .vscode/* 6 | 7 | ### PyCharm Patch ### 8 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 9 | .idea/* 10 | 11 | ### Python ### 12 | # Byte-compiled / optimized / DLL files 13 | __pycache__/ 14 | *.py[cod] 15 | *$py.class 16 | 17 | # C extensions 18 | *.so 19 | 20 | # Distribution / packaging 21 | .Python 22 | build/ 23 | develop-eggs/ 24 | dist/ 25 | downloads/ 26 | eggs/ 27 | .eggs/ 28 | lib/ 29 | lib64/ 30 | parts/ 31 | sdist/ 32 | var/ 33 | wheels/ 34 | pip-wheel-metadata/ 35 | share/python-wheels/ 36 | *.egg-info/ 37 | .installed.cfg 38 | *.egg 39 | MANIFEST 40 | 41 | ### venv ### 42 | # Virtualenv 43 | # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ 44 | .Python 45 | [Bb]in 46 | [Ii]nclude 47 | [Ll]ib 48 | [Ll]ib64 49 | [Ll]ocal 50 | [Ss]cripts 51 | pyvenv.cfg 52 | .env 53 | .venv 54 | env/ 55 | venv/ 56 | ENV/ 57 | env.bak/ 58 | venv.bak/ 59 | pip-selfcheck.json 60 | 61 | 62 | # PyInstaller 63 | # Usually these files are written by a python script from a template 64 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 65 | *.manifest 66 | *.spec 67 | 68 | # Installer logs 69 | pip-log.txt 70 | pip-delete-this-directory.txt 71 | 72 | # Unit test / coverage reports 73 | htmlcov/ 74 | .tox/ 75 | .nox/ 76 | .coverage 77 | .coverage.* 78 | .cache 79 | nosetests.xml 80 | coverage.xml 81 | *.cover 82 | .hypothesis/ 83 | .pytest_cache/ 84 | 85 | # Translations 86 | *.mo 87 | *.pot 88 | 89 | # Scrapy stuff: 90 | .scrapy 91 | 92 | # Sphinx documentation 93 | docs/_build/ 94 | 95 | # PyBuilder 96 | target/ 97 | 98 | # pyenv 99 | .python-version 100 | 101 | # pipenv 102 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 103 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 104 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 105 | # install all needed dependencies. 
106 | #Pipfile.lock 107 | 108 | # mkdocs documentation 109 | /site 110 | 111 | # mypy 112 | .mypy_cache/ 113 | .dmypy.json 114 | dmypy.json 115 | 116 | ### Windows ### 117 | # Windows thumbnail cache files 118 | Thumbs.db 119 | Thumbs.db:encryptable 120 | ehthumbs.db 121 | ehthumbs_vista.db 122 | 123 | ### Mac ### 124 | .DS_Store 125 | 126 | # Dump file 127 | *.stackdump 128 | 129 | # Folder config file 130 | [Dd]esktop.ini 131 | 132 | # Recycle Bin used on file shares 133 | $RECYCLE.BIN/ 134 | 135 | # Windows Installer files 136 | *.cab 137 | *.msi 138 | *.msix 139 | *.msm 140 | *.msp 141 | 142 | # Windows shortcuts 143 | *.lnk 144 | 145 | # End of https://www.gitignore.io/api/code,linux,python,pycharm,windows 146 | 147 | # Project Specific 148 | .profiles 149 | .metadatas 150 | .sites 151 | *.log 152 | .settings/* 153 | extras/OFLogin/geckodriver.exe 154 | extras/OFLogin/chromedriver.exe 155 | extras/OFMetadataFixer/* 156 | -------------------------------------------------------------------------------- /database/databases/user_data/alembic/versions/5493253cc03c_content.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """content 3 | 4 | Revision ID: 5493253cc03c 5 | Revises: 6 | Create Date: 2021-06-21 14:22:30.585216 7 | 8 | """ 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '5493253cc03c' 15 | down_revision = None 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | op.create_table('medias', 23 | sa.Column('id', sa.Integer(), nullable=False), 24 | sa.Column('media_id', sa.Integer(), nullable=True), 25 | sa.Column('post_id', sa.Integer(), nullable=False), 26 | sa.Column('link', sa.String(), nullable=True), 27 | sa.Column('directory', sa.String(), nullable=True), 28 | sa.Column('filename', sa.String(), nullable=True), 29 | sa.Column('size', sa.Integer(), nullable=True), 30 | sa.Column('api_type', sa.String(), nullable=True), 31 | sa.Column('media_type', sa.String(), nullable=True), 32 | sa.Column('preview', sa.Integer(), nullable=True), 33 | sa.Column('linked', sa.String(), nullable=True), 34 | sa.Column('downloaded', sa.Integer(), nullable=True), 35 | sa.Column('created_at', sa.TIMESTAMP(), nullable=True), 36 | sa.PrimaryKeyConstraint('id'), 37 | sa.UniqueConstraint('media_id') 38 | ) 39 | op.create_table('messages', 40 | sa.Column('id', sa.Integer(), nullable=False), 41 | sa.Column('post_id', sa.Integer(), nullable=False), 42 | sa.Column('text', sa.String(), nullable=True), 43 | sa.Column('price', sa.Integer(), nullable=True), 44 | sa.Column('paid', sa.Integer(), nullable=True), 45 | sa.Column('archived', sa.Boolean(), nullable=True), 46 | sa.Column('created_at', sa.TIMESTAMP(), nullable=True), 47 | sa.Column('user_id', sa.Integer(), nullable=True), 48 | sa.PrimaryKeyConstraint('id'), 49 | sa.UniqueConstraint('post_id') 50 | ) 51 | op.create_table('posts', 52 | sa.Column('id', sa.Integer(), nullable=False), 53 | sa.Column('post_id', sa.Integer(), nullable=False), 54 | sa.Column('text', sa.String(), nullable=True), 55 | sa.Column('price', sa.Integer(), nullable=True), 56 | sa.Column('paid', sa.Integer(), nullable=True), 57 | sa.Column('archived', sa.Boolean(), nullable=True), 58 | sa.Column('created_at', sa.TIMESTAMP(), nullable=True), 59 | sa.PrimaryKeyConstraint('id'), 60 | sa.UniqueConstraint('post_id') 61 | ) 62 | op.create_table('stories', 63 | 
sa.Column('id', sa.Integer(), nullable=False), 64 | sa.Column('post_id', sa.Integer(), nullable=False), 65 | sa.Column('text', sa.String(), nullable=True), 66 | sa.Column('price', sa.Integer(), nullable=True), 67 | sa.Column('paid', sa.Integer(), nullable=True), 68 | sa.Column('archived', sa.Boolean(), nullable=True), 69 | sa.Column('created_at', sa.TIMESTAMP(), nullable=True), 70 | sa.PrimaryKeyConstraint('id'), 71 | sa.UniqueConstraint('post_id') 72 | ) 73 | # ### end Alembic commands ### 74 | 75 | 76 | def downgrade(): 77 | # ### commands auto generated by Alembic - please adjust! ### 78 | op.drop_table('stories') 79 | op.drop_table('posts') 80 | op.drop_table('messages') 81 | op.drop_table('medias') 82 | # ### end Alembic commands ### 83 | -------------------------------------------------------------------------------- /apis/onlyfans/classes/create_message.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from apis.onlyfans.classes.extras import endpoint_links 4 | 5 | from . import create_user 6 | 7 | 8 | class create_message: 9 | def __init__(self, option: dict, user: create_user) -> None: 10 | self.responseType: Optional[str] = option.get("responseType") 11 | self.text: Optional[str] = option.get("text") 12 | self.lockedText: Optional[bool] = option.get("lockedText") 13 | self.isFree: Optional[bool] = option.get("isFree") 14 | self.price: Optional[float] = option.get("price") 15 | self.isMediaReady: Optional[bool] = option.get("isMediaReady") 16 | self.mediaCount: Optional[int] = option.get("mediaCount") 17 | self.media: list = option.get("media",[]) 18 | self.previews: list = option.get("previews",[]) 19 | self.isTip: Optional[bool] = option.get("isTip") 20 | self.isReportedByMe: Optional[bool] = option.get("isReportedByMe") 21 | self.fromUser = create_user.create_user(option["fromUser"]) 22 | self.isFromQueue: Optional[bool] = option.get("isFromQueue") 23 | self.queueId: Optional[int] = option.get("queueId") 24 | self.canUnsendQueue: Optional[bool] = option.get("canUnsendQueue") 25 | self.unsendSecondsQueue: Optional[int] = option.get("unsendSecondsQueue") 26 | self.id: Optional[int] = option.get("id") 27 | self.isOpened: Optional[bool] = option.get("isOpened") 28 | self.isNew: Optional[bool] = option.get("isNew") 29 | self.createdAt: Optional[str] = option.get("createdAt") 30 | self.changedAt: Optional[str] = option.get("changedAt") 31 | self.cancelSeconds: Optional[int] = option.get("cancelSeconds") 32 | self.isLiked: Optional[bool] = option.get("isLiked") 33 | self.canPurchase: Optional[bool] = option.get("canPurchase") 34 | self.canPurchaseReason: Optional[str] = option.get("canPurchaseReason") 35 | self.canReport: Optional[bool] = option.get("canReport") 36 | # Custom 37 | self.user = user 38 | 39 | async def buy_message(self): 40 | """ 41 | This function will buy a ppv message from a model. 
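        The payload built below mirrors the fields the /payments/pay endpoint
        expects; with hypothetical values it looks like:

            {"amount": 5.0, "messageId": 123, "paymentType": "message",
             "token": "", "unavailablePaymentGates": []}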
42 | """ 43 | message_price = self.price 44 | x = { 45 | "amount": message_price, 46 | "messageId": self.id, 47 | "paymentType": "message", 48 | "token": "", 49 | "unavailablePaymentGates": [], 50 | } 51 | link = endpoint_links().pay 52 | result = await self.user.session_manager.json_request( 53 | link, method="POST", payload=x 54 | ) 55 | return result 56 | 57 | async def link_picker(self,media, video_quality): 58 | link = "" 59 | if "source" in media: 60 | quality_key = "source" 61 | source = media[quality_key] 62 | link = source[quality_key] 63 | if link: 64 | if media["type"] == "video": 65 | qualities = media["videoSources"] 66 | qualities = dict(sorted(qualities.items(), reverse=False)) 67 | qualities[quality_key] = source[quality_key] 68 | for quality, quality_link in qualities.items(): 69 | video_quality = video_quality.removesuffix("p") 70 | if quality == video_quality: 71 | if quality_link: 72 | link = quality_link 73 | break 74 | print 75 | print 76 | print 77 | if "src" in media: 78 | link = media["src"] 79 | return link 80 | -------------------------------------------------------------------------------- /apis/onlyfans/classes/create_post.py: -------------------------------------------------------------------------------- 1 | import apis.onlyfans.classes.create_user as create_user 2 | from apis.onlyfans.classes.extras import endpoint_links 3 | from typing import Any 4 | 5 | 6 | class create_post: 7 | def __init__(self, option, user) -> None: 8 | self.responseType: str = option.get("responseType") 9 | self.id: int = option.get("id") 10 | self.postedAt: str = option.get("postedAt") 11 | self.postedAtPrecise: str = option.get("postedAtPrecise") 12 | self.expiredAt: Any = option.get("expiredAt") 13 | self.author = create_user.create_user(option["author"]) 14 | self.text: str = option.get("text") 15 | self.rawText: str = option.get("rawText") 16 | self.lockedText: bool = option.get("lockedText") 17 | self.isFavorite: bool = option.get("isFavorite") 18 | self.isReportedByMe: bool = option.get("isReportedByMe") 19 | self.canReport: bool = option.get("canReport") 20 | self.canDelete: bool = option.get("canDelete") 21 | self.canComment: bool = option.get("canComment") 22 | self.canEdit: bool = option.get("canEdit") 23 | self.isPinned: bool = option.get("isPinned") 24 | self.favoritesCount: int = option.get("favoritesCount") 25 | self.mediaCount: int = option.get("mediaCount") 26 | self.isMediaReady: bool = option.get("isMediaReady") 27 | self.voting: list = option.get("voting") 28 | self.isOpened: bool = option.get("isOpened") 29 | self.canToggleFavorite: bool = option.get("canToggleFavorite") 30 | self.streamId: Any = option.get("streamId") 31 | self.price: Any = option.get("price") 32 | self.hasVoting: bool = option.get("hasVoting") 33 | self.isAddedToBookmarks: bool = option.get("isAddedToBookmarks") 34 | self.isArchived: bool = option.get("isArchived") 35 | self.isDeleted: bool = option.get("isDeleted") 36 | self.hasUrl: bool = option.get("hasUrl") 37 | self.commentsCount: int = option.get("commentsCount") 38 | self.mentionedUsers: list = option.get("mentionedUsers") 39 | self.linkedUsers: list = option.get("linkedUsers") 40 | self.linkedPosts: list = option.get("linkedPosts") 41 | self.media: list = option.get("media", []) 42 | self.canViewMedia: bool = option.get("canViewMedia") 43 | self.preview: list = option.get("preview") 44 | self.canPurchase: bool = option.get("canPurchase") 45 | self.user: create_user.create_user = user 46 | 47 | async def favorite(self): 48 | link = 
endpoint_links( 49 | identifier=f"{self.responseType}s", 50 | identifier2=self.id, 51 | identifier3=self.author.id, 52 | ).favorite 53 | results = await self.user.session_manager.json_request(link, method="POST") 54 | self.isFavorite = True 55 | return results 56 | 57 | async def link_picker(self, media, video_quality): 58 | link = "" 59 | if "source" in media: 60 | quality_key = "source" 61 | source = media[quality_key] 62 | link = source[quality_key] 63 | if link: 64 | if media["type"] == "video": 65 | qualities = media["videoSources"] 66 | qualities = dict(sorted(qualities.items(), reverse=False)) 67 | qualities[quality_key] = source[quality_key] 68 | for quality, quality_link in qualities.items(): 69 | video_quality = video_quality.removesuffix("p") 70 | if quality == video_quality: 71 | if quality_link: 72 | link = quality_link 73 | break 74 | print 75 | print 76 | print 77 | if "src" in media: 78 | link = media["src"] 79 | return link 80 | -------------------------------------------------------------------------------- /helpers/db_helper.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sqlalchemy 3 | from sqlalchemy.engine.base import Engine 4 | from sqlalchemy.orm.session import Session, sessionmaker 5 | from sqlalchemy.orm import scoped_session 6 | from alembic.config import Config 7 | from alembic import command 8 | from sqlalchemy.exc import IntegrityError 9 | from sqlalchemy.sql.functions import func 10 | from database.databases.user_data import user_database 11 | 12 | 13 | def create_database_session( 14 | connection_info, connection_type="sqlite:///", autocommit=False, pool_size=5 15 | ) -> tuple[scoped_session, Engine]: 16 | kwargs = {} 17 | if connection_type == "mysql+mysqldb://": 18 | kwargs["pool_size"] = pool_size 19 | kwargs["pool_pre_ping"] = True 20 | kwargs["max_overflow"] = -1 21 | kwargs["isolation_level"] = "READ COMMITTED" 22 | 23 | engine = sqlalchemy.create_engine( 24 | f"{connection_type}{connection_info}?charset=utf8mb4", **kwargs 25 | ) 26 | session_factory = sessionmaker(bind=engine, autocommit=autocommit) 27 | Session = scoped_session(session_factory) 28 | return Session, engine 29 | 30 | 31 | def run_revisions(alembic_directory: str, database_path: str = ""): 32 | while True: 33 | try: 34 | ini_path = os.path.join(alembic_directory, "alembic.ini") 35 | script_location = os.path.join(alembic_directory, "alembic") 36 | full_database_path = f"sqlite:///{database_path}" 37 | alembic_cfg = Config(ini_path) 38 | alembic_cfg.set_main_option("script_location", script_location) 39 | alembic_cfg.set_main_option("sqlalchemy.url", full_database_path) 40 | command.upgrade(alembic_cfg, "head") 41 | command.revision(alembic_cfg, autogenerate=True, message="content") 42 | break 43 | except Exception as e: 44 | print(e) 45 | print 46 | 47 | 48 | def run_migrations(alembic_directory: str, database_path: str) -> None: 49 | while True: 50 | try: 51 | ini_path = os.path.join(alembic_directory, "alembic.ini") 52 | script_location = os.path.join(alembic_directory, "alembic") 53 | full_database_path = f"sqlite:///{database_path}" 54 | alembic_cfg = Config(ini_path) 55 | alembic_cfg.set_main_option("script_location", script_location) 56 | alembic_cfg.set_main_option("sqlalchemy.url", full_database_path) 57 | command.upgrade(alembic_cfg, "head") 58 | break 59 | except Exception as e: 60 | print(e) 61 | print 62 | 63 | 64 | class database_collection(object): 65 | def __init__(self) -> None: 66 | self.user_database = 
user_database 67 | 68 |     def database_picker(self, database_name): 69 |         if database_name == "user_data": 70 |             database = self.user_database 71 |         else: 72 |             database = None 73 |             print("Can't find database") 74 |             input() 75 |         return database 76 | 77 | 78 | def create_auth_array(item): 79 |     auth_array = item.__dict__ 80 |     auth_array["support_2fa"] = False 81 |     return auth_array 82 | 83 | 84 | def get_or_create(session: Session, model, defaults=None, fbkwargs: dict = {}): 85 |     fbkwargs2 = fbkwargs.copy() 86 |     instance = session.query(model).filter_by(**fbkwargs2).one_or_none() 87 |     if instance: 88 |         return instance, False  # already existed; the second value is the "created" flag 89 |     else: 90 |         fbkwargs2 |= defaults or {} 91 |         instance = model(**fbkwargs2) 92 |         try: 93 |             session.add(instance) 94 |             session.commit() 95 |         except IntegrityError: 96 |             session.rollback() 97 |             instance = session.query(model).filter_by(**fbkwargs2).one() 98 |             return instance, False 99 |         else: 100 |             return instance, True 101 | 102 | 103 | def get_count(q): 104 |     count_q = q.statement.with_only_columns([func.count()]).order_by(None) 105 |     count = q.session.execute(count_q).scalar() 106 |     return count 107 | -------------------------------------------------------------------------------- /extras/OFLogin/start_ofl.py: -------------------------------------------------------------------------------- 1 | import os 2 | from seleniumwire import webdriver 3 | from selenium.webdriver.common.by import By 4 | from selenium.webdriver.support.ui import WebDriverWait 5 | from selenium.webdriver.support import expected_conditions 6 | from user_agent import generate_user_agent 7 | 8 | 9 | def launch_browser(headers=None, user_agent=None, proxy=None, browser_type="Firefox"): 10 |     options = {} 11 |     if proxy: 12 |         # seleniumwire expects a scheme -> proxy URL mapping; keep `proxy` itself a string 13 |         options["proxy"] = { 14 |             'http': proxy, 15 |             'https': proxy, 16 |         } 17 |     directory = os.path.dirname(__file__) 18 |     driver = None 19 |     if browser_type == "Firefox": 20 |         matches = ["geckodriver.exe","geckodriver"] 21 |         driver_paths = list( 22 |             map(lambda match: os.path.join(directory, match), matches)) 23 |         found_paths = [ 24 |             driver_path for driver_path in driver_paths if os.path.exists(driver_path)] 25 |         if found_paths: 26 |             driver_path = found_paths[0] 27 |             opts = webdriver.FirefoxOptions() 28 |             # opts.add_argument("--headless") 29 |             profile = webdriver.FirefoxProfile() 30 |             if not user_agent: 31 |                 user_agent = generate_user_agent() 32 |             profile.set_preference("general.useragent.override", user_agent) 33 |             driver = webdriver.Firefox(firefox_profile=profile, executable_path=driver_path, 34 |                                        options=opts, seleniumwire_options=options) 35 |         else: 36 |             message = f"Download geckodriver from https://github.com/mozilla/geckodriver/releases/tag/v0.27.0 and paste it in {directory}" 37 |             input(message) 38 |     else: 39 |         driver_path = os.path.join(directory, "chromedriver.exe") 40 |         opts = webdriver.ChromeOptions() 41 |         if proxy: opts.add_argument(f"--proxy-server={proxy}") 42 |         driver = webdriver.Chrome( 43 |             executable_path=driver_path, options=opts, seleniumwire_options=options) 44 |     if not driver: 45 |         input("DRIVER NOT FOUND") 46 |         exit(0) 47 |     driver.set_window_size(1920, 1080) 48 |     browser = driver 49 |     if headers: 50 |         browser._client.set_header_overrides(headers=headers) 51 |     return browser 52 | 53 | 54 | def monitor_cookies(web_browser): 55 |     match = "auth_id" 56 |     status = False 57 |     cookies = None 58 |     while not status: 59 |         cookies = web_browser.get_cookies() 60 |         if any(x for x in cookies if x["name"] == match): 61 |             status = True 62 |     cookies = {v["name"]: v["value"] for
v in cookies} 63 | return cookies 64 | 65 | 66 | def login(authed, domain, proxy=None): 67 | auth_details = authed.auth_details 68 | email = auth_details.email 69 | password = auth_details.password 70 | web_browser = None 71 | cookies = None 72 | status = False 73 | while not status: 74 | print("Opening Browser") 75 | if web_browser: 76 | web_browser.close() 77 | web_browser = launch_browser( 78 | user_agent=auth_details.user_agent, proxy=proxy) 79 | web_browser.get( 80 | domain) 81 | try: 82 | WebDriverWait(web_browser, 60).until(expected_conditions.element_to_be_clickable( 83 | (By.CLASS_NAME, "g-btn.m-rounded.m-flex.m-lg.m-login-btn"))) 84 | except Exception as e: 85 | continue 86 | print 87 | email_input = web_browser.find_element_by_css_selector("input[type='email']") 88 | email_input.click() 89 | email_input.send_keys(email) 90 | password_input = web_browser.find_element_by_css_selector("input[type='password']") 91 | password_input.click() 92 | password_input.send_keys(password) 93 | login_button = web_browser.find_element_by_class_name( 94 | "g-btn.m-rounded.m-flex.m-lg.m-login-btn") 95 | login_button.submit() 96 | cookies = monitor_cookies(web_browser) 97 | if cookies: 98 | auth_details.auth_id = str(cookies["auth_id"]) 99 | auth_details.sess = cookies["sess"] 100 | status = True 101 | web_browser.close() 102 | return cookies 103 | -------------------------------------------------------------------------------- /datascraper/main_datascraper.py: -------------------------------------------------------------------------------- 1 | import os 2 | import timeit 3 | from typing import Optional 4 | 5 | import helpers.main_helper as main_helper 6 | import modules.onlyfans as m_onlyfans 7 | from apis.onlyfans import onlyfans as OnlyFans 8 | from apis.onlyfans.classes.create_user import create_user 9 | from apis.onlyfans.classes.extras import error_details 10 | from helpers.main_helper import choose_option 11 | 12 | api_helper = OnlyFans.api_helper 13 | 14 | 15 | async def start_datascraper( 16 | json_config: dict, 17 | site_name_lower: str, 18 | api: Optional[OnlyFans.start] = None, 19 | webhooks=True, 20 | ) -> Optional[OnlyFans.start]: 21 | json_settings = json_config["settings"] 22 | json_webhooks = json_settings["webhooks"] 23 | json_sites = json_config["supported"] 24 | domain = json_settings["auto_site_choice"] 25 | main_helper.assign_vars(json_config) 26 | 27 | json_site_settings = json_sites[site_name_lower]["settings"] 28 | 29 | auto_model_choice = json_site_settings["auto_model_choice"] 30 | if isinstance(auto_model_choice, str): 31 | temp_identifiers = auto_model_choice.split(",") 32 | identifiers = [x for x in temp_identifiers if x] 33 | else: 34 | identifiers = [] 35 | auto_profile_choice = json_site_settings["auto_profile_choice"] 36 | subscription_array = [] 37 | proxies = await api_helper.test_proxies(json_settings["proxies"]) 38 | if json_settings["proxies"] and not proxies: 39 | print("Unable to create session") 40 | return None 41 | archive_time = timeit.default_timer() 42 | if site_name_lower == "onlyfans": 43 | site_name = "OnlyFans" 44 | module = m_onlyfans 45 | if not api: 46 | api = OnlyFans.start(max_threads=json_settings["max_threads"]) 47 | api.settings = json_config 48 | api = main_helper.process_profiles(json_settings, proxies, site_name, api) 49 | print 50 | 51 | subscription_array = [] 52 | auth_count = 0 53 | jobs = json_site_settings["jobs"] 54 | subscription_list = module.format_options(api.auths, "users") 55 | if not auto_profile_choice: 56 | print("Choose 
Profile") 57 | auths = choose_option(subscription_list, auto_profile_choice, True) 58 | api.auths = [x.pop(0) for x in auths] 59 | for auth in api.auths: 60 | if not auth.auth_details: 61 | continue 62 | module.assign_vars( 63 | auth.auth_details, json_config, json_site_settings, site_name 64 | ) 65 | setup = False 66 | setup, subscriptions = await module.account_setup( 67 | auth, identifiers, jobs, auth_count 68 | ) 69 | if not setup: 70 | if webhooks: 71 | await main_helper.process_webhooks(api, "auth_webhook", "failed") 72 | auth_details = {} 73 | auth_details["auth"] = auth.auth_details.export() 74 | profile_directory = auth.profile_directory 75 | if profile_directory: 76 | user_auth_filepath = os.path.join( 77 | auth.profile_directory, "auth.json" 78 | ) 79 | main_helper.export_data(auth_details, user_auth_filepath) 80 | continue 81 | auth_count += 1 82 | subscription_array += subscriptions 83 | await main_helper.process_webhooks(api, "auth_webhook", "succeeded") 84 | # Do stuff with authed user 85 | subscription_list = module.format_options( 86 | subscription_array, "usernames", api.auths 87 | ) 88 | if jobs["scrape_paid_content"] and api.has_active_auths(): 89 | print("Scraping Paid Content") 90 | await module.paid_content_scraper(api, identifiers) 91 | if jobs["scrape_names"] and api.has_active_auths(): 92 | print("Scraping Subscriptions") 93 | await main_helper.process_names( 94 | module, 95 | subscription_list, 96 | auto_model_choice, 97 | api, 98 | json_config, 99 | site_name_lower, 100 | site_name, 101 | ) 102 | await main_helper.process_downloads(api, module) 103 | if webhooks: 104 | await main_helper.process_webhooks(api, "download_webhook", "succeeded") 105 | elif site_name_lower == "starsavn": 106 | pass 107 | # site_name = "StarsAVN" 108 | # original_api = StarsAVN 109 | # module = m_starsavn 110 | # apis = main_helper.process_profiles( 111 | # json_settings, original_sessions, site_name, original_api) 112 | # auto_profile_choice = json_site_settings["auto_profile_choice"] 113 | # subscription_array = [] 114 | # auth_count = -1 115 | # jobs = json_site_settings["jobs"] 116 | # subscription_list = module.format_options( 117 | # apis, "users") 118 | # apis = choose_option( 119 | # subscription_list, auto_profile_choice) 120 | # apis = [x.pop(0) for x in apis] 121 | # for api in apis: 122 | # module.assign_vars(api.auth.auth_details, json_config, 123 | # json_site_settings, site_name) 124 | # identifier = "" 125 | # setup = False 126 | # setup = module.account_setup(api, identifier=identifier) 127 | # if not setup: 128 | # auth_details = api.auth.auth_details.__dict__ 129 | # user_auth_filepath = os.path.join( 130 | # api.auth.profile_directory, "auth.json") 131 | # main_helper.export_data( 132 | # auth_details, user_auth_filepath) 133 | # continue 134 | # if jobs["scrape_names"]: 135 | # array = module.manage_subscriptions( 136 | # api, auth_count, identifier=identifier) 137 | # subscription_array += array 138 | # subscription_list = module.format_options( 139 | # subscription_array, "usernames") 140 | # if jobs["scrape_paid_content"]: 141 | # print("Scraping Paid Content") 142 | # paid_content = module.paid_content_scraper(apis) 143 | # if jobs["scrape_names"]: 144 | # print("Scraping Subscriptions") 145 | # names = main_helper.process_names( 146 | # module, subscription_list, auto_model_choice, apis, json_config, site_name_lower, site_name) 147 | # x = main_helper.process_downloads(apis, module) 148 | stop_time = str(int(timeit.default_timer() - archive_time) / 60)[:4] 149 
| print("Archive Completed in " + stop_time + " Minutes") 150 | return api 151 | -------------------------------------------------------------------------------- /apis/onlyfans/classes/extras.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from itertools import chain 3 | from typing import Any, Union 4 | 5 | 6 | class auth_details: 7 | def __init__(self, options: dict[str, Any] = {}) -> None: 8 | self.username = options.get("username", "") 9 | self.cookie = cookie_parser(options.get("cookie", "")) 10 | self.user_agent = options.get("user_agent", "") 11 | self.email = options.get("email", "") 12 | self.password = options.get("password", "") 13 | self.hashed = options.get("hashed", False) 14 | self.support_2fa = options.get("support_2fa", True) 15 | self.active = options.get("active", True) 16 | 17 | def upgrade_legacy(self, options: dict[str, Any]): 18 | if "cookie" not in options: 19 | self = legacy_auth_details(options).upgrade(self) 20 | return self 21 | 22 | def export(self): 23 | new_dict = copy.copy(self.__dict__) 24 | if isinstance(self.cookie, cookie_parser): 25 | cookie = self.cookie.convert() 26 | new_dict["cookie"] = cookie 27 | return new_dict 28 | 29 | 30 | class legacy_auth_details: 31 | def __init__(self, option: dict[str, Any] = {}): 32 | self.username = option.get("username", "") 33 | self.auth_id = option.get("auth_id", "") 34 | self.sess = option.get("sess", "") 35 | self.user_agent = option.get("user_agent", "") 36 | self.auth_hash = option.get("auth_hash", "") 37 | self.auth_uniq_ = option.get("auth_uniq_", "") 38 | self.x_bc = option.get("x_bc", "") 39 | self.email = option.get("email", "") 40 | self.password = option.get("password", "") 41 | self.hashed = option.get("hashed", False) 42 | self.support_2fa = option.get("support_2fa", True) 43 | self.active = option.get("active", True) 44 | 45 | def upgrade(self, new_auth_details: auth_details): 46 | new_dict = "" 47 | for key, value in self.__dict__.items(): 48 | value = value if value != None else "" 49 | skippable = ["username", "user_agent"] 50 | if key not in skippable: 51 | new_dict += f"{key}={value}; " 52 | new_dict = new_dict.strip() 53 | new_auth_details.cookie = cookie_parser(new_dict) 54 | return new_auth_details 55 | 56 | 57 | class cookie_parser: 58 | def __init__(self, options: str) -> None: 59 | new_dict = {} 60 | for crumble in options.strip().split(";"): 61 | if crumble: 62 | key, value = crumble.strip().split("=") 63 | new_dict[key] = value 64 | self.auth_id = new_dict.get("auth_id", "") 65 | self.sess = new_dict.get("sess", "") 66 | self.auth_hash = new_dict.get("auth_hash", "") 67 | self.auth_uniq_ = new_dict.get("auth_uniq_", "") 68 | self.auth_uid_ = new_dict.get("auth_uid_", "") 69 | 70 | def format(self): 71 | """ 72 | Typically used for adding cookies to requests 73 | """ 74 | return self.__dict__ 75 | 76 | def convert(self): 77 | new_dict = "" 78 | for key, value in self.__dict__.items(): 79 | key = key.replace("auth_uniq_", f"auth_uniq_{self.auth_id}") 80 | key = key.replace("auth_uid_", f"auth_uid_{self.auth_id}") 81 | new_dict += f"{key}={value}; " 82 | new_dict = new_dict.strip() 83 | return new_dict 84 | 85 | 86 | class content_types: 87 | def __init__(self, option={}) -> None: 88 | class archived_types(content_types): 89 | def __init__(self) -> None: 90 | self.Posts = [] 91 | 92 | self.Stories = [] 93 | self.Posts = [] 94 | self.Archived = archived_types() 95 | self.Chats = [] 96 | self.Messages = [] 97 | self.Highlights = [] 98 | 
self.MassMessages = [] 99 | 100 |     def __iter__(self): 101 |         for attr, value in self.__dict__.items(): 102 |             yield attr, value 103 | 104 | 105 | class endpoint_links(object): 106 |     def __init__( 107 |         self, 108 |         identifier=None, 109 |         identifier2=None, 110 |         identifier3=None, 111 |         text="", 112 |         only_links=True, 113 |         global_limit=10, 114 |         global_offset=0, 115 |     ): 116 |         self.customer = f"https://onlyfans.com/api2/v2/users/me" 117 |         self.users = f"https://onlyfans.com/api2/v2/users/{identifier}" 118 |         self.subscriptions = f"https://onlyfans.com/api2/v2/subscriptions/subscribes?limit={global_limit}&offset={global_offset}&type=active" 119 |         self.lists = f"https://onlyfans.com/api2/v2/lists?limit=100&offset=0" 120 |         self.lists_users = f"https://onlyfans.com/api2/v2/lists/{identifier}/users?limit={global_limit}&offset={global_offset}&query=" 121 |         self.list_chats = f"https://onlyfans.com/api2/v2/chats?limit={global_limit}&offset={global_offset}&order=desc" 122 |         self.post_by_id = f"https://onlyfans.com/api2/v2/posts/{identifier}" 123 |         self.message_by_id = f"https://onlyfans.com/api2/v2/chats/{identifier}/messages?limit=10&offset=0&firstId={identifier2}&order=desc&skip_users=all&skip_users_dups=1" 124 |         self.search_chat = f"https://onlyfans.com/api2/v2/chats/{identifier}/messages/search?query={text}" 125 |         self.message_api = f"https://onlyfans.com/api2/v2/chats/{identifier}/messages?limit={global_limit}&offset={global_offset}&order=desc" 126 |         self.search_messages = f"https://onlyfans.com/api2/v2/chats/{identifier}?limit=10&offset=0&filter=&order=activity&query={text}" 127 |         self.mass_messages_api = f"https://onlyfans.com/api2/v2/messages/queue/stats?limit=100&offset=0&format=infinite" 128 |         self.stories_api = f"https://onlyfans.com/api2/v2/users/{identifier}/stories?limit=100&offset=0&order=desc" 129 |         self.list_highlights = f"https://onlyfans.com/api2/v2/users/{identifier}/stories/highlights?limit=100&offset=0&order=desc" 130 |         self.highlight = f"https://onlyfans.com/api2/v2/stories/highlights/{identifier}" 131 |         self.post_api = f"https://onlyfans.com/api2/v2/users/{identifier}/posts?limit={global_limit}&offset={global_offset}&order=publish_date_desc&skip_users_dups=0" 132 |         self.archived_posts = f"https://onlyfans.com/api2/v2/users/{identifier}/posts/archived?limit={global_limit}&offset={global_offset}&order=publish_date_desc" 133 |         self.archived_stories = f"https://onlyfans.com/api2/v2/stories/archive/?limit=100&offset=0&order=publish_date_desc" 134 |         self.paid_api = f"https://onlyfans.com/api2/v2/posts/paid?limit={global_limit}&offset={global_offset}" 135 |         self.pay = f"https://onlyfans.com/api2/v2/payments/pay" 136 |         self.subscribe = f"https://onlyfans.com/api2/v2/users/{identifier}/subscribe" 137 |         self.like = f"https://onlyfans.com/api2/v2/{identifier}/{identifier2}/like" 138 |         self.favorite = f"https://onlyfans.com/api2/v2/{identifier}/{identifier2}/favorites/{identifier3}" 139 |         self.transactions = ( 140 |             f"https://onlyfans.com/api2/v2/payments/all/transactions?limit=10&offset=0" 141 |         ) 142 |         self.two_factor = f"https://onlyfans.com/api2/v2/users/otp/check" 143 | 144 | 145 | # Lol?
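# A minimal usage sketch for error_details below (hypothetical payload; the
# class accepts both the wrapped {"error": {...}} shape and a bare error dict):
#
#     result = {"error": {"code": 401, "message": "Invalid auth"}}
#     error = error_details(result)
#     print(error.code, error.message)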
146 | class error_details: 147 | def __init__(self, result) -> None: 148 | error = result["error"] if "error" in result else result 149 | self.code = error["code"] 150 | self.message = error["message"] 151 | 152 | 153 | def create_headers( 154 | dynamic_rules: dict[str, Any], 155 | auth_id: Union[str, int], 156 | user_agent: str = "", 157 | link: str = "https://onlyfans.com/", 158 | ): 159 | headers: dict[str, Any] = {} 160 | headers["user-agent"] = user_agent 161 | headers["referer"] = link 162 | headers["user-id"] = str(auth_id) 163 | headers["x-bc"] = "" 164 | for remove_header in dynamic_rules["remove_headers"]: 165 | headers.pop(remove_header) 166 | return headers 167 | 168 | 169 | def handle_refresh(argument, argument2): 170 | argument = argument.get(argument2) 171 | return argument 172 | 173 | 174 | class media_types: 175 | def __init__(self, option={}, assign_states=False) -> None: 176 | self.Images = option.get("Images", []) 177 | self.Videos = option.get("Videos", []) 178 | self.Audios = option.get("Audios", []) 179 | self.Texts = option.get("Texts", []) 180 | if assign_states: 181 | for k, v in self: 182 | setattr(self, k, assign_states()) 183 | 184 | def remove_empty(self): 185 | copied = copy.deepcopy(self) 186 | for k, v in copied: 187 | if not v: 188 | delattr(self, k) 189 | print 190 | return self 191 | 192 | def get_status(self) -> list: 193 | x = [] 194 | for key, item in self: 195 | for key2, item2 in item: 196 | new_status = list(chain.from_iterable(item2)) 197 | x.extend(new_status) 198 | return x 199 | 200 | def extract(self, string: str) -> list: 201 | a = self.get_status() 202 | source_list = [getattr(x, string, None) for x in a] 203 | x = list(set(source_list)) 204 | return x 205 | 206 | def __iter__(self): 207 | for attr, value in self.__dict__.items(): 208 | yield attr, value 209 | -------------------------------------------------------------------------------- /extras/OFRenamer/start_ofr.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import asyncio 3 | import os 4 | import shutil 5 | import traceback 6 | import urllib.parse as urlparse 7 | from itertools import chain 8 | 9 | from apis.onlyfans import onlyfans 10 | from apis.onlyfans.classes.create_user import create_user 11 | from database.databases.user_data.models.api_table import api_table 12 | from database.databases.user_data.models.media_table import template_media_table 13 | from sqlalchemy.orm.scoping import scoped_session 14 | from tqdm.asyncio import tqdm 15 | 16 | 17 | async def fix_directories( 18 | posts, 19 | api: onlyfans.start, 20 | subscription: create_user, 21 | all_files, 22 | database_session: scoped_session, 23 | folder, 24 | site_name, 25 | api_type, 26 | base_directory, 27 | json_settings, 28 | ): 29 | new_directories = [] 30 | 31 | async def fix_directories2(post: api_table, media_db: list[template_media_table]): 32 | delete_rows = [] 33 | final_api_type = ( 34 | os.path.join("Archived", api_type) if post.archived else api_type 35 | ) 36 | post_id = post.post_id 37 | media_db = [x for x in media_db if x.post_id == post_id] 38 | for media in media_db: 39 | media_id = media.media_id 40 | if media.link: 41 | path = urlparse.urlparse(media.link).path 42 | else: 43 | path: str = media.filename 44 | new_filename = os.path.basename(path) 45 | original_filename, ext = os.path.splitext(new_filename) 46 | ext = ext.replace(".", "") 47 | file_directory_format = json_settings["file_directory_format"] 48 | filename_format = 
json_settings["filename_format"] 49 | date_format = json_settings["date_format"] 50 | text_length = json_settings["text_length"] 51 | download_path = base_directory 52 | option = {} 53 | option["site_name"] = site_name 54 | option["post_id"] = post_id 55 | option["media_id"] = media_id 56 | option["profile_username"] = subscription.subscriber.username 57 | option["model_username"] = subscription.username 58 | option["api_type"] = final_api_type 59 | option["media_type"] = media.media_type 60 | option["filename"] = original_filename 61 | option["ext"] = ext 62 | option["text"] = post.text 63 | option["postedAt"] = media.created_at 64 | option["price"] = post.price 65 | option["date_format"] = date_format 66 | option["text_length"] = text_length 67 | option["directory"] = download_path 68 | option["preview"] = media.preview 69 | option["archived"] = post.archived 70 | prepared_format = prepare_reformat(option) 71 | file_directory = await main_helper.reformat( 72 | prepared_format, file_directory_format 73 | ) 74 | prepared_format.directory = file_directory 75 | old_filepath = "" 76 | if media.linked: 77 | filename_format = f"linked_{filename_format}" 78 | old_filepaths = [ 79 | x for x in all_files if original_filename in os.path.basename(x) 80 | ] 81 | if not old_filepaths: 82 | old_filepaths = [ 83 | x for x in all_files if str(media_id) in os.path.basename(x) 84 | ] 85 | print 86 | if not media.linked: 87 | old_filepaths = [x for x in old_filepaths if "linked_" not in x] 88 | if old_filepaths: 89 | old_filepath = old_filepaths[0] 90 | # a = randint(0,1) 91 | # await asyncio.sleep(a) 92 | new_filepath = await main_helper.reformat(prepared_format, filename_format) 93 | if old_filepath and old_filepath != new_filepath: 94 | if os.path.exists(new_filepath): 95 | os.remove(new_filepath) 96 | moved = None 97 | while not moved: 98 | try: 99 | if os.path.exists(old_filepath): 100 | if media.size: 101 | media.downloaded = True 102 | found_dupes = [ 103 | x 104 | for x in media_db 105 | if x.filename == new_filename and x.id != media.id 106 | ] 107 | delete_rows.extend(found_dupes) 108 | os.makedirs(os.path.dirname(new_filepath), exist_ok=True) 109 | if media.linked: 110 | if os.path.dirname(old_filepath) == os.path.dirname( 111 | new_filepath 112 | ): 113 | moved = shutil.move(old_filepath, new_filepath) 114 | else: 115 | moved = shutil.copy(old_filepath, new_filepath) 116 | else: 117 | moved = shutil.move(old_filepath, new_filepath) 118 | else: 119 | break 120 | except OSError as e: 121 | print(traceback.format_exc()) 122 | print 123 | print 124 | 125 | if os.path.exists(new_filepath): 126 | if media.size: 127 | media.downloaded = True 128 | if prepared_format.text: 129 | pass 130 | media.directory = file_directory 131 | media.filename = os.path.basename(new_filepath) 132 | new_directories.append(os.path.dirname(new_filepath)) 133 | return delete_rows 134 | 135 | result = database_session.query(folder.media_table) 136 | media_db = result.all() 137 | pool = api.pool 138 | # tasks = pool.starmap(fix_directories2, product(posts, [media_db])) 139 | tasks = [asyncio.ensure_future(fix_directories2(post,media_db)) for post in posts] 140 | settings = {"colour": "MAGENTA", "disable": False} 141 | delete_rows = await tqdm.gather(*tasks, **settings) 142 | delete_rows = list(chain(*delete_rows)) 143 | for delete_row in delete_rows: 144 | database_session.query(folder.media_table).filter( 145 | folder.media_table.id == delete_row.id 146 | ).delete() 147 | database_session.commit() 148 | new_directories = 
list(set(new_directories)) 149 | return posts, new_directories 150 | 151 | 152 | async def start( 153 | api: onlyfans.start, 154 | Session, 155 | api_type, 156 | site_name, 157 | subscription: create_user, 158 | folder, 159 | json_settings, 160 | ): 161 | api_table = folder.table_picker(api_type) 162 | database_session = Session() 163 | # Slow 164 | result = database_session.query(api_table).all() 165 | metadata = getattr(subscription.temp_scraped, api_type) 166 | download_info = subscription.download_info 167 | root_directory = download_info["directory"] 168 | date_format = json_settings["date_format"] 169 | text_length = json_settings["text_length"] 170 | reformats = {} 171 | reformats["metadata_directory_format"] = json_settings["metadata_directory_format"] 172 | reformats["file_directory_format"] = json_settings["file_directory_format"] 173 | reformats["filename_format"] = json_settings["filename_format"] 174 | model_username = subscription.username 175 | option = {} 176 | option["site_name"] = site_name 177 | option["api_type"] = api_type 178 | option["profile_username"] = subscription.subscriber.username 179 | option["model_username"] = model_username 180 | option["date_format"] = date_format 181 | option["maximum_length"] = text_length 182 | option["directory"] = root_directory 183 | formatted = format_types(reformats).check_unique() 184 | unique = formatted["unique"] 185 | for key, value in reformats.items(): 186 | key2 = getattr(unique, key)[0] 187 | reformats[key] = value.split(key2, 1)[0] + key2 188 | print 189 | print 190 | a, base_directory, c = await prepare_reformat(option, keep_vars=True).reformat(reformats) 191 | download_info["base_directory"] = base_directory 192 | print 193 | all_files = [] 194 | for root, subdirs, files in os.walk(base_directory): 195 | x = [os.path.join(root, x) for x in files] 196 | all_files.extend(x) 197 | 198 | await fix_directories( 199 | result, 200 | api, 201 | subscription, 202 | all_files, 203 | database_session, 204 | folder, 205 | site_name, 206 | api_type, 207 | root_directory, 208 | json_settings, 209 | ) 210 | database_session.close() 211 | return metadata 212 | 213 | 214 | if __name__ == "__main__": 215 | # WORK IN PROGRESS 216 | input("You can't use this manually yet lmao xqcl") 217 | exit() 218 | else: 219 | import helpers.main_helper as main_helper 220 | from classes.prepare_metadata import format_types, prepare_reformat 221 | -------------------------------------------------------------------------------- /classes/make_settings.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from typing import List, Union 3 | from urllib.parse import urlparse 4 | import os 5 | import uuid as uuid 6 | 7 | from yarl import URL 8 | current_version = None 9 | def fix(config={}): 10 | global current_version 11 | if config: 12 | info = config.get("info") 13 | if not info: 14 | print("If you're not using >= v7 release, please download said release so the script can properly update your config. \nIf you're using >= v7 release or you don't care about your current config settings, press enter to continue. 
If script crashes, delete config.")
15 | input()
16 | current_version = info["version"]
17 | return config
18 |
19 |
20 | class config(object):
21 | def __init__(self, info={}, settings={}, supported={}):
22 | class Info(object):
23 | def __init__(self) -> None:
24 | self.version = 7.2
25 |
26 | class Settings(object):
27 | def __init__(self, auto_site_choice="", profile_directories=[".profiles"], export_type="json", max_threads=-1, min_drive_space=0, helpers={}, webhooks={}, exit_on_completion=False, infinite_loop=True, loop_timeout="0", dynamic_rules_link="https://raw.githubusercontent.com/DATAHOARDERS/dynamic-rules/main/onlyfans.json", proxies=[], cert="", random_string=""):
28 | class webhooks_settings:
29 | def __init__(self, option={}) -> None:
30 | class webhook_template:
31 | def __init__(self, option={}) -> None:
32 | self.webhooks = option.get(
33 | 'webhooks', [])
34 | self.status = option.get(
35 | 'status', None)
36 | self.hide_sensitive_info = option.get(
37 | 'hide_sensitive_info', True)
38 | print
39 |
40 | class auth_webhook:
41 | def __init__(self, option={}) -> None:
42 | self.succeeded = webhook_template(
43 | option.get('succeeded', {}))
44 | self.failed = webhook_template(
45 | option.get('failed', {}))
46 |
47 | class download_webhook:
48 | def __init__(self, option={}) -> None:
49 | self.succeeded = webhook_template(
50 | option.get('succeeded', {}))
51 |
52 | self.global_webhooks = option.get(
53 | 'global_webhooks', [])
54 | self.global_status = option.get(
55 | 'global_status', True)
56 | self.auth_webhook = auth_webhook(
57 | option.get('auth_webhook', {}))
58 | self.download_webhook = download_webhook(
59 | option.get('download_webhook', {}))
60 |
61 | class helpers_settings:
62 | def __init__(self, option={}) -> None:
63 | self.renamer = option.get('renamer', True)
64 | self.reformat_media = option.get(
65 | 'reformat_media', True)
66 | self.downloader = option.get(
67 | 'downloader', True)
68 | self.delete_empty_directories = option.get(
69 | 'delete_empty_directories', False)
70 | self.auto_site_choice = auto_site_choice
71 | self.export_type = export_type
72 | self.profile_directories = profile_directories
73 | self.max_threads = max_threads
74 | self.min_drive_space = min_drive_space
75 | self.helpers = helpers_settings(
76 | settings.get("helpers", helpers))
77 | self.webhooks = webhooks_settings(settings.get(
78 | 'webhooks', webhooks))
79 | self.exit_on_completion = exit_on_completion
80 | self.infinite_loop = infinite_loop
81 | self.loop_timeout = loop_timeout
82 | dynamic_rules_link = URL(dynamic_rules_link)
83 | url_host = dynamic_rules_link.host
84 | if "github.com" == url_host:
85 | # rewrite a github.com blob link into its raw.githubusercontent.com form
86 | path = dynamic_rules_link.path.replace("blob/","")
87 | dynamic_rules_link = f"https://raw.githubusercontent.com{path}"
88 | self.dynamic_rules_link = str(dynamic_rules_link)
89 | self.proxies = proxies
90 | self.cert = cert
91 | self.random_string = random_string if random_string else uuid.uuid1().hex
92 |
93 | def update_site_settings(options) -> dict:
94 | new_options = copy.copy(options)
95 | for key, value in options.items():
96 | if "auto_scrape_names" == key:
97 | new_options["auto_model_choice"] = value
98 | elif "auto_scrape_apis" == key:
99 | new_options["auto_api_choice"] = value
100 | if "file_directory_format" == key:
101 | new_options["file_directory_format"] = value.replace("{username}","{model_username}")
102 | if "filename_format" == key:
103 | new_options["filename_format"] = value.replace("{username}","{model_username}")
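# (the replace() calls above and below migrate pre-v7 configs: the old
# {username} placeholder becomes {model_username}, and the legacy
# auto_scrape_names / auto_scrape_apis keys become auto_model_choice /
# auto_api_choice)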
104 | if "metadata_directory_format" == key: 105 | new_options["metadata_directory_format"] = value.replace("{username}","{model_username}") 106 | if "blacklist_name" == key: 107 | new_options["blacklists"] = [value] 108 | return new_options 109 | 110 | class Supported(object): 111 | def __init__(self, onlyfans={}, patreon={}, starsavn={}): 112 | self.onlyfans = self.OnlyFans(onlyfans) 113 | self.starsavn = self.StarsAvn(starsavn) 114 | 115 | class OnlyFans: 116 | def __init__(self, module): 117 | self.settings = self.Settings(module.get('settings', {})) 118 | 119 | class Settings(): 120 | def __init__(self, option={}): 121 | option = update_site_settings(option) 122 | 123 | class jobs: 124 | def __init__(self, option={}) -> None: 125 | self.scrape_names = option.get( 126 | 'scrape_names', True) 127 | self.scrape_paid_content = option.get( 128 | 'scrape_paid_content', True) 129 | 130 | class browser: 131 | def __init__(self, option={}) -> None: 132 | self.auth = option.get( 133 | 'auth', True) 134 | 135 | class database: 136 | def __init__(self, option={}) -> None: 137 | self.posts = option.get( 138 | 'posts', True) 139 | self.comments = option.get( 140 | 'comments', True) 141 | self.auto_profile_choice: Union[List] = option.get( 142 | 'auto_profile_choice', []) 143 | self.auto_model_choice = option.get( 144 | 'auto_model_choice', False) 145 | self.auto_media_choice = option.get( 146 | 'auto_media_choice', "") 147 | self.auto_api_choice = option.get( 148 | 'auto_api_choice', True) 149 | self.browser = browser(option.get( 150 | 'browser', {})) 151 | self.jobs = jobs(option.get( 152 | 'jobs', {})) 153 | self.download_directories = option.get( 154 | 'download_directories', [".sites"]) 155 | normpath = os.path.normpath 156 | self.file_directory_format = normpath(option.get( 157 | 'file_directory_format', "{site_name}/{model_username}/{api_type}/{value}/{media_type}")) 158 | self.filename_format = normpath(option.get( 159 | 'filename_format', "{filename}.{ext}")) 160 | self.metadata_directories = option.get( 161 | 'metadata_directories', [".sites"]) 162 | self.metadata_directory_format = normpath(option.get( 163 | 'metadata_directory_format', "{site_name}/{model_username}/Metadata")) 164 | self.delete_legacy_metadata = option.get( 165 | 'delete_legacy_metadata', False) 166 | self.text_length = option.get('text_length', 255) 167 | self.video_quality = option.get( 168 | 'video_quality', "source") 169 | self.overwrite_files = option.get( 170 | 'overwrite_files', False) 171 | self.date_format = option.get( 172 | 'date_format', "%d-%m-%Y") 173 | self.ignored_keywords = option.get( 174 | 'ignored_keywords', []) 175 | self.ignore_type = option.get( 176 | 'ignore_type', "") 177 | self.blacklists = option.get( 178 | 'blacklists', "") 179 | self.webhook = option.get( 180 | 'webhook', True) 181 | 182 | class StarsAvn: 183 | def __init__(self, module): 184 | self.settings = self.Settings(module.get('settings', {})) 185 | 186 | class Auth: 187 | def __init__(self, option={}): 188 | self.username = option.get('username', "") 189 | self.sess = option.get('sess', "") 190 | self.user_agent = option.get('user_agent', "") 191 | 192 | class Settings(): 193 | def __init__(self, option={}): 194 | option = update_site_settings(option) 195 | 196 | class jobs: 197 | def __init__(self, option={}) -> None: 198 | self.scrape_names = option.get( 199 | 'scrape_names', True) 200 | self.scrape_paid_content = option.get( 201 | 'scrape_paid_content', True) 202 | 203 | class browser: 204 | def __init__(self, option={}) -> None: 
205 | self.auth = option.get(
206 | 'auth', True)
207 | self.auto_profile_choice = option.get(
208 | 'auto_profile_choice', "")
209 | self.auto_model_choice = option.get(
210 | 'auto_model_choice', False)
211 | self.auto_media_choice = option.get(
212 | 'auto_media_choice', "")
213 | self.auto_api_choice = option.get(
214 | 'auto_api_choice', True)
215 | self.browser = browser(option.get(
216 | 'browser', {}))
217 | self.jobs = jobs(option.get(
218 | 'jobs', {}))
219 | self.download_directories = option.get(
220 | 'download_directories', [".sites"])
221 | normpath = os.path.normpath
222 | self.file_directory_format = normpath(option.get(
223 | 'file_directory_format', "{site_name}/{model_username}/{api_type}/{value}/{media_type}"))
224 | self.filename_format = normpath(option.get(
225 | 'filename_format', "{filename}.{ext}"))
226 | self.metadata_directories = option.get(
227 | 'metadata_directories', [".sites"])
228 | self.metadata_directory_format = normpath(option.get(
229 | 'metadata_directory_format', "{site_name}/{model_username}/Metadata"))
230 | self.delete_legacy_metadata = option.get(
231 | 'delete_legacy_metadata', False)
232 | self.text_length = option.get('text_length', 255)
233 | self.video_quality = option.get(
234 | 'video_quality', "source")
235 | self.overwrite_files = option.get(
236 | 'overwrite_files', False)
237 | self.date_format = option.get(
238 | 'date_format', "%d-%m-%Y")
239 | self.ignored_keywords = option.get(
240 | 'ignored_keywords', [])
241 | self.ignore_type = option.get(
242 | 'ignore_type', "")
243 | self.blacklist_name = option.get(
244 | 'blacklist_name', "")
245 | self.webhook = option.get(
246 | 'webhook', True)
247 | self.info = Info()
248 | self.settings = Settings(**settings)
249 | self.supported = Supported(**supported)
250 |
-------------------------------------------------------------------------------- /README.md:
--------------------------------------------------------------------------------
1 | # OnlyFans DataScraper (Python 3.9.X)
2 | [![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/DIGITALCRIMINAL/OnlyFans.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/DIGITALCRIMINAL/OnlyFans/context:python)
3 | # ![app-token](examples/64255399-96a86700-cf21-11e9-8c62-87a483f33701.png)
4 |
5 | # Mandatory Tutorial
6 |
7 | Read the [#FAQ](README.md#faq) at the bottom of this page before submitting an issue.
8 |
9 | ## Running the app via docker
10 |
11 | Build and run the image, mounting the appropriate directories:
12 |
13 | `docker build -t only-fans . && docker run -it --rm --name onlyfans -v ${PWD}/.settings:/usr/src/app/.settings -v ${PWD}/.profiles:/usr/src/app/.profiles -v ${PWD}/.sites:/usr/src/app/.sites only-fans`
14 |
15 | ## Running on Linux
16 |
17 | Edit: These instructions are not necessary in Ubuntu 21.04. Just run ./start_ofd.py!
18 |
19 | FYI, I've got this working in Linux under Ubuntu 20.10. Here's my setup, in case others might find it helpful.
20 |
21 | Disclaimers:
22 | I'm a sysadmin. And I provide no warranties or support for this setup. This is what worked for me; it might not work for you. But hopefully, it will point you in the right direction.
23 |
24 | The main problem is that the default Ubuntu 20.10 python is v2.7.18 and python3 is 3.8.6 (not 3.9, as this script requires). So the solution involves installing python3.9 and then using a venv whenever updating and running this code.
25 |
26 | I am personally running each of these as a script, but here are the core parts.
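Before the setup steps below, you can sanity-check that the interpreter you're about to build the venv from is new enough (a purely illustrative one-liner):

python3.9 -c "import sys; assert sys.version_info >= (3, 9), 'need Python 3.9+'"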
27 |
28 | Setup
29 | These are one-time setup steps. You should only need to do this on new Linux installs.
30 |
31 | Install Python 3.9
32 | sudo add-apt-repository ppa:deadsnakes/ppa
33 | sudo apt update && sudo apt install python3.9 python3.9-venv python3.9-dev
34 | Create the venv in the repo root
35 | We'll call it venv.
36 | Ensure that you are in the root directory of this repository!
37 | cd ~/src/OnlyFans
38 | python3.9 -m venv venv
39 | Updates
40 | Every time we pull the repo, we need to make sure we activate the venv and grab any updates in the possibly updated requirements.txt file.
41 |
42 | cd ~/src/OnlyFans
43 | git pull
44 | source venv/bin/activate
45 | pip3 install -r requirements.txt
46 | Run
47 | Every time we run, we need to activate the venv before starting the script.
48 | (technically, the source command only needs to be done once per session, but it hurts nothing to do it repeatedly)
49 |
50 | cd ~/src/OnlyFans
51 | source venv/bin/activate
52 | python start_ofd.py
53 | Resources
54 | I used the following links to help me.
55 |
56 | http://ubuntuhandbook.org/index.php/2020/10/python-3-9-0-released-install-ppa-ubuntu/
57 | https://docs.python.org/3/tutorial/venv.html
58 |
59 | ## Running the app locally
60 |
61 | From the project folder open CMD/Terminal and run the command below:
62 |
63 | `pip install --upgrade --user -r requirements.txt`
64 |
65 | Start:
66 |
67 | `python start_ofd.py` | `python3 start_ofd.py` | double click `start_ofd.py`
68 |
69 | ---
70 |
71 | Open and edit:
72 |
73 | `.profiles/default/auth.json`
74 |
75 | [auth]
76 |
77 | You have to fill in the following:
78 |
79 | - `{"cookie":"your_cookie"}`
80 | - `{"user-agent":"your_user-agent"}`
81 |
82 | Go to www.onlyfans.com and log in, open the network debugger, then check the images below for how to get the cookie and user agent values
83 |
84 | ![app-token](examples/3.png)
85 | ![app-token](examples/4.png)
86 |
87 | Your auth config should look similar to this
88 |
89 | ![app-token](examples/5.png)
90 |
91 | If you want to auth via browser, add your email and password.
92 |
93 | If you get auth attempt errors, only YOU can fix it unless you're willing to let me into your account so I can see if it's working or not.
94 | All issues about auth errors will be closed automatically. It's spam at this point, there's like 1000s of them and I don't care for anyone who can't use the search function lmao.
95 |
96 | Note: If active is set to False, the script will ignore the profile.
97 |
98 | # USAGE
99 |
100 | `python start_ofd.py` | `python3 start_ofd.py` | double click `start_ofd.py`
101 |
102 | Enter inputs as prompted by the console.
103 |
104 | # OPTIONAL
105 |
106 | Open:
107 |
108 | `config.json` (Open with a text editor)
109 |
110 | [settings]
111 |
112 | ### profile_directories:
113 |
114 | Where your account information is stored (auth.json).
115 |
116 | Default = [".profiles"]
117 |
118 | If you're going to fill, please remember to use forward ("/") slashes only.
119 |
120 | ### download_directories:
121 |
122 | Where downloaded content is stored.
123 |
124 | Default = [".sites"]
125 |
126 | If you're going to fill, please remember to use forward ("/") slashes only.
127 |
128 | You can add multiple directories and the script will automatically roll over to the next directory if the current one is full.
129 |
130 | ### metadata_directories:
131 |
132 | Where metadata content is stored.
133 |
134 | Default = [".sites"]
135 |
136 | If you're going to fill, please remember to use forward ("/") slashes only.
137 |
138 | Automatic rollover is not supported yet.
139 |
140 | ### path_formatting:
141 |
142 | Overview for [file_directory_format](#file_directory_format), [filename_format](#filename_format) and [metadata_directory_format](#metadata_directory_format)
143 |
144 | {site_name} = The site you're scraping.
145 |
146 | {first_letter} = First letter of the model you're scraping.
147 |
148 | {post_id} = The post's ID.
149 |
150 | {media_id} = The media's ID.
151 |
152 | {profile_username} = Your account's username.
153 |
154 | {model_username} = The model's username.
155 |
156 | {api_type} = Posts, Messages, etc.
157 |
158 | {media_type} = Images, Videos, etc.
159 |
160 | {filename} = The media's filename.
161 |
162 | {value} = Value of the content. Paid or Free.
163 |
164 | {text} = The media's text.
165 |
166 | {date} = The post's creation date.
167 |
168 | {ext} = The media's file extension.
169 |
170 | Don't use the text variable. If you do, enjoy emojis in your filepaths and errors lmao.
171 |
172 | ### file_directory_format:
173 |
174 | This puts each media file into a folder.
175 |
176 | The list below contains unique identifiers that you must include.
177 |
178 | You can choose one or more.
179 |
180 | Default = "{site_name}/{model_username}/{api_type}/{value}/{media_type}"
181 | Default Translated = "OnlyFans/belledelphine/Posts/Free/Images"
182 |
183 | {model_username} = belledelphine
184 |
185 | ### filename_format:
186 |
187 | Usage: Format for a filename
188 |
189 | The list below contains unique identifiers that you must include.
190 |
191 | You must choose one or more.
192 |
193 | Default = "{filename}.{ext}"
194 | Default Translated = "5fb5a5e4b4ce6c47ce2b4_source.mp4"
195 |
196 | {filename} = 5fb5a5e4b4ce6c47ce2b4_source
197 | {media_id} = 133742069
198 |
199 | ### metadata_directory_format:
200 |
201 | Usage: Filepath for metadata. It's tied to download_directories, so ignore metadata_directories in the config.
202 |
203 | The list below contains unique identifiers that you must include.
204 |
205 | You must choose one or more.
206 |
207 | Default = "{site_name}/{model_username}/Metadata"
208 | Default Translated = "OnlyFans/belledelphine/Metadata"
209 |
210 | {model_username} = belledelphine
211 |
212 | ### text_length:
213 |
214 | Usage: When you use {text} in filename_format, you can limit how many characters are used by inputting a number.
215 |
216 | Default = ""
217 | Ideal = "50"
218 | Max = "255"
219 |
220 | The ideal is actually 0.
221 |
222 | ### video_quality:
223 |
224 | Usage: Select the resolution of the video.
225 |
226 | Default = "source"
227 | 720p = "720" | "720p"
228 | 240p = "240" | "240p"
229 |
230 | ### auto_site_choice:
231 | Types: str|int
232 |
233 | Usage: You can automatically choose which site you want to scrape.
234 |
235 | Default = ""
236 |
237 | OnlyFans = "onlyfans"
238 |
239 | ### auto_media_choice:
240 | Types: list|int|str|bool
241 |
242 | Usage: You can automatically choose which media type you want to scrape.
243 |
244 | Default = ""
245 |
246 | Inputs: Images, Videos, etc
247 | Inputs: 0,1,etc
248 |
249 |
250 |
251 | ### auto_model_choice:
252 | Types: list|int|str|bool
253 |
254 | Default = false
255 |
256 | If set to true, the script will scrape all the names.
257 |
258 | ### auto_api_choice:
259 |
260 | Default = true
261 |
262 | If set to false, you'll be given the option to scrape individual apis.
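Putting several of the settings above together, a config.json fragment might look something like this (the exact nesting differs between releases; some of these keys live under each site's own settings block, so treat this as a sketch rather than a template):

    "settings": {
        "auto_site_choice": "onlyfans",
        "profile_directories": [".profiles"],
        "download_directories": [".sites"],
        "file_directory_format": "{site_name}/{model_username}/{api_type}/{value}/{media_type}",
        "filename_format": "{filename}.{ext}",
        "auto_model_choice": false,
        "auto_api_choice": true
    }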
263 |
264 | ### jobs:
265 |
266 | "scrape_names" - This will scrape your standard content
267 | "scrape_paid_content" - This will scrape paid content
268 |
269 | If set to false, it won't do the job.
270 |
271 | ### export_type:
272 |
273 | Default = "json"
274 |
275 | JSON = "json"
276 |
277 | You can export an archive to different formats (not anymore lol).
278 |
279 | ### overwrite_files:
280 |
281 | Default = false
282 |
283 | If set to true, any file with the same name will be redownloaded.
284 |
285 | ### date_format:
286 |
287 | Default = "%d-%m-%Y"
288 |
289 | If you live in the USA and you want to use the incorrect format, use the following:
290 |
291 | "%m-%d-%Y"
292 |
293 | ### max_threads:
294 |
295 | Default = -1
296 |
297 | When the number is set below 1, it will use all threads.
298 | Set a number higher than 0 to limit threads.
299 |
300 | ### min_drive_space:
301 |
302 | Default = 0
303 | Type: Float
304 |
305 | Space is calculated in GBs.
306 | 0.5 is 500MB, 1 is 1GB, etc.
307 | When a drive goes below the minimum drive space, the script will move onto the next drive or go into an infinite loop until the drive is above the minimum space.
308 |
309 | ### webhooks:
310 |
311 | Default = []
312 |
313 | Supported webhooks:
314 | Discord
315 |
316 | Data is sent whenever you've completely downloaded a model.
317 | You can also put in your own custom url and parse the data.
318 | Need another webhook? Open an issue.
319 |
320 | ### exit_on_completion:
321 |
322 | Default = false
323 |
324 | If set to true, the scraper will run once and exit upon completion; otherwise the scraper will give you the option to run again. This is useful if the scraper is being executed by a cron job or another script.
325 |
326 | ### infinite_loop:
327 |
328 | Default = true
329 |
330 | If set to false, the script will run once and ask you to input anything to continue.
331 |
332 | ### loop_timeout:
333 |
334 | Default = 0
335 |
336 | When infinite_loop is set to true, this sets the time in seconds to pause the loop between runs.
337 |
338 | ### boards:
339 |
340 | Default = []
341 | Example = ["s", "gif"]
342 |
343 | Input board names that you want to automatically scrape.
344 |
345 | ### ignored_keywords:
346 |
347 | Default = []
348 | Example = ["ignore", "me"]
349 |
350 | The script will ignore any content that contains the words you input.
351 |
352 | ### ignore_type:
353 |
354 | Default = ""
355 | a = "paid"
356 | b = "free"
357 |
358 | This setting excludes paid or free accounts from your subscription list.
359 |
360 | Example: "ignore_type": "paid"
361 |
362 | This choice will not include any accounts that you've paid for.
363 |
364 | ### export_metadata:
365 |
366 | Default = true
367 |
368 | Set to false if you don't want to save metadata.
369 |
370 | ### blacklist_name:
371 |
372 | Default = ""
373 |
374 | This setting will exclude any blacklisted usernames when you choose the "scrape all" option.
375 |
376 | Go to https://onlyfans.com/my/lists and create a new list; you can name it whatever you want but I called mine "Blacklisted".
377 |
378 | Add the list's name to the config.
379 |
380 | Example: "blacklist_name": "Blacklisted"
381 |
382 | You can create as many lists as you want.
383 |
384 | # FAQ
385 |
386 | Before troubleshooting, make sure you're using Python 3.9 and the latest commit of the script.
387 |
388 | Error: Access Denied / Auth Loop
389 |
390 | > Quadruple check that the cookies and user agent are correct.
391 | > Remove 2FA.
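> For reference, the cookie value in auth.json is the raw semicolon-separated string from your browser, along the lines of (placeholder values only):
>
> `auth_id=123456; sess=0abc123def456; auth_hash=0a1b2c3d4e5f;`
>
> cookie_parser in apis/onlyfans/classes/extras.py shows exactly which keys get read (auth_id, sess, auth_hash, auth_uniq_, auth_uid_).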
392 |
393 | AttributeError: type object 'datetime.datetime' has no attribute 'fromisoformat'
394 |
395 | > fromisoformat only exists in Python 3.7 and above.
396 |
397 | I can't see the ".settings" folder
398 |
399 | > Make sure you can see hidden files
400 |
401 | > [Windows Tutorial](https://support.microsoft.com/en-gb/help/4028316/windows-view-hidden-files-and-folders-in-windows-10)
402 |
403 | > [Mac Tutorial](https://setapp.com/how-to/show-hidden-files-on-mac)
404 |
405 | > [Linux](https://www.google.com)
406 |
407 | I'm getting authed into the wrong account
408 |
409 | > Enjoy the free content. | This has been patched lol.
410 |
411 | I'm using Linux OS and something isn't working.
412 |
413 | > The script was built on Windows 10. If you're using Linux you can still submit an issue and I'll try my best to fix it.
414 |
415 | Am I able to bypass paywalls with this script?
416 |
417 | > Hell yeah! My open source script can bypass paywalls for free.
418 | > [Tutorial](https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcS5hTXe02DNlsktpFfkrr--OQ0ScILMibxmpQ&usqp=CAU)
419 |
420 | Do OnlyFans or OnlyFans models know I'm using this script?
421 |
422 | > OnlyFans may know that you're using this script, but I try to keep it as anon as possible.
423 |
424 | > Generally, models will not know unless OnlyFans tells them, but there is identifiable information in the metadata folder, which contains your IP address, so don't share it unless you're using a proxy/vpn or just don't care.
425 |
426 | Do you collect session information?
427 |
428 | > No. The code is on GitHub, which allows you to audit the codebase yourself. You can use Wireshark or any other network analysis program to verify that the outgoing connections correspond to the modules you chose.
429 |
430 | Disclaimer (lmao):
431 |
432 | > OnlyFans is a registered trademark of Fenix International Limited.
433 |
434 | > The contributors of this script aren't in any way affiliated with, sponsored by, or endorsed by Fenix International Limited.
435 |
436 | > The contributors of this script are not responsible for the end users' actions... lmao.
437 | -------------------------------------------------------------------------------- /apis/api_helper.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import copy 3 | import hashlib 4 | import json 5 | import os 6 | import re 7 | import threading 8 | import time 9 | from itertools import chain 10 | from multiprocessing import cpu_count 11 | from multiprocessing.dummy import Pool as ThreadPool 12 | from multiprocessing.pool import Pool 13 | from os.path import dirname as up 14 | from random import randint 15 | from typing import Any, Optional 16 | from urllib.parse import urlparse 17 | 18 | import python_socks 19 | import requests 20 | from aiohttp import ClientSession 21 | from aiohttp.client_exceptions import ( 22 | ClientConnectorError, 23 | ClientOSError, 24 | ClientPayloadError, 25 | ContentTypeError, 26 | ServerDisconnectedError, 27 | ) 28 | from aiohttp.client_reqrep import ClientResponse 29 | from aiohttp_socks import ProxyConnectionError, ProxyConnector, ProxyError 30 | from database.databases.user_data.models.media_table import template_media_table 31 | 32 | from apis.onlyfans.classes import create_auth, create_user 33 | from apis.onlyfans.classes.extras import error_details 34 | 35 | path = up(up(os.path.realpath(__file__))) 36 | os.chdir(path) 37 | 38 | 39 | global_settings: dict[str, Any] = {} 40 | global_settings[ 41 | "dynamic_rules_link" 42 | ] = "https://raw.githubusercontent.com/DATAHOARDERS/dynamic-rules/main/onlyfans.json" 43 | 44 | 45 | class set_settings: 46 | def __init__(self, option={}): 47 | global global_settings 48 | self.proxies = option.get("proxies") 49 | self.cert = option.get("cert") 50 | self.json_global_settings = option 51 | global_settings = self.json_global_settings 52 | 53 | 54 | async def remove_errors(results: list): 55 | wrapped = False 56 | if not isinstance(results, list): 57 | wrapped = True 58 | results = [results] 59 | results = [x for x in results if not isinstance(x, error_details)] 60 | if wrapped and results: 61 | results = results[0] 62 | return results 63 | 64 | 65 | def chunks(l, n): 66 | final = [l[i * n : (i + 1) * n] for i in range((len(l) + n - 1) // n)] 67 | return final 68 | 69 | 70 | def calculate_max_threads(max_threads=None): 71 | if not max_threads: 72 | max_threads = -1 73 | max_threads2 = cpu_count() 74 | if max_threads < 1 or max_threads >= max_threads2: 75 | max_threads = max_threads2 76 | return max_threads 77 | 78 | 79 | def multiprocessing(max_threads: Optional[int] = None): 80 | max_threads = calculate_max_threads(max_threads) 81 | pool = ThreadPool(max_threads) 82 | return pool 83 | 84 | 85 | class session_manager: 86 | def __init__( 87 | self, 88 | auth: create_auth, 89 | headers: dict[str, Any] = {}, 90 | proxies: list[str] = [], 91 | max_threads: int = -1, 92 | ) -> None: 93 | self.pool: Pool = auth.pool if auth.pool else multiprocessing() 94 | self.max_threads = max_threads 95 | self.kill = False 96 | self.headers = headers 97 | self.proxies: list[str] = proxies 98 | dr_link = global_settings["dynamic_rules_link"] 99 | dynamic_rules = requests.get(dr_link).json() # type: ignore 100 | self.dynamic_rules = dynamic_rules 101 | self.auth = auth 102 | 103 | def create_client_session(self): 104 | proxy = self.get_proxy() 105 | connector = ProxyConnector.from_url(proxy) if proxy else None 106 | 107 | final_cookies = self.auth.auth_details.cookie.format() 108 | client_session = ClientSession( 109 | connector=connector, cookies=final_cookies, read_timeout=None 110 
| )
111 | return client_session
112 |
113 | def get_proxy(self) -> str:
114 | proxies = self.proxies
115 | proxy = self.proxies[randint(0, len(proxies) - 1)] if proxies else ""
116 | return proxy
117 |
118 | def stimulate_sessions(self):
119 | # Some proxies switch IP addresses if no requests have been made for x amount of seconds
120 | def do(session_manager):
121 | while not session_manager.kill:
122 | for session in session_manager.sessions:
123 |
124 | def process_links(link, session):
125 | response = session.get(link)
126 | text = response.text.strip("\n")
127 | if text == session.ip:
128 | print
129 | else:
130 | found_dupe = [
131 | x for x in session_manager.sessions if x.ip == text
132 | ]
133 | if found_dupe:
134 | return
135 | cloned_session = copy.deepcopy(session)
136 | cloned_session.ip = text
137 | cloned_session.links = []
138 | session_manager.sessions.append(cloned_session)
139 | print(text)
140 | print
141 | return text
142 |
143 | time.sleep(62)
144 | link = "https://checkip.amazonaws.com"
145 | ip = process_links(link, session)
146 | print
147 |
148 | t1 = threading.Thread(target=do, args=[self])
149 | t1.start()
150 |
151 | async def json_request(
152 | self,
153 | link: str,
154 | session: Optional[ClientSession] = None,
155 | method: str = "GET",
156 | stream: bool = False,
157 | json_format: bool = True,
158 | payload: dict[str, str] = {},
159 | ) -> Any:
160 | headers = {}
161 | custom_session = False
162 | if not session:
163 | custom_session = True
164 | session = self.create_client_session()
165 | headers = self.session_rules(link)
166 | headers["accept"] = "application/json, text/plain, */*"
167 | headers["Connection"] = "keep-alive"
168 | temp_payload = payload.copy()
169 |
170 | request_method = None
171 | result = None
172 | if method == "HEAD":
173 | request_method = session.head
174 | elif method == "GET":
175 | request_method = session.get
176 | elif method == "POST":
177 | request_method = session.post
178 | headers["content-type"] = "application/json"
179 | temp_payload = json.dumps(payload)
180 | elif method == "DELETE":
181 | request_method = session.delete
182 | else:
183 | return None
184 | while True:
185 | try:
186 | response = await request_method(link, headers=headers, data=temp_payload)
187 | if method == "HEAD":
188 | result = response
189 | else:
190 | if json_format and not stream:
191 | result = await response.json()
192 | if "error" in result:
193 | result = error_details(result)
194 | elif stream and not json_format:
195 | result = response
196 | else:
197 | result = await response.read()
198 | break
199 | except (ClientConnectorError, ProxyError):
200 | break
201 | except (
202 | ClientPayloadError,
203 | ContentTypeError,
204 | ClientOSError,
205 | ServerDisconnectedError,
206 | ProxyConnectionError,
207 | ConnectionResetError,
208 | ):
209 | continue
210 | if custom_session:
211 | await session.close()
212 | return result
213 |
214 | async def async_requests(self, items: list[str]) -> list:
215 | tasks = []
216 |
217 | async def run(links) -> list:
218 | proxies = self.proxies
219 | proxy = self.proxies[randint(0, len(proxies) - 1)] if proxies else ""
220 | connector = ProxyConnector.from_url(proxy) if proxy else None
221 | async with ClientSession(
222 | connector=connector,
223 | cookies=self.auth.auth_details.cookie.format(),
224 | read_timeout=None,
225 | ) as session:
226 | for link in links:
227 | task = asyncio.ensure_future(self.json_request(link, session))
228 | tasks.append(task)
229 | responses = list(await
asyncio.gather(*tasks)) 230 | return responses 231 | 232 | results = await asyncio.ensure_future(run(items)) 233 | return results 234 | 235 | async def download_content( 236 | self, 237 | download_item: template_media_table, 238 | session: ClientSession, 239 | progress_bar, 240 | subscription: create_user, 241 | ): 242 | attempt_count = 1 243 | new_task = {} 244 | while attempt_count <= 3: 245 | attempt_count += 1 246 | if not download_item.link: 247 | continue 248 | response: ClientResponse 249 | response = await asyncio.ensure_future( 250 | self.json_request( 251 | download_item.link, 252 | session, 253 | json_format=False, 254 | stream=True, 255 | ) 256 | ) 257 | if response and response.status != 200: 258 | if response.content_length: 259 | progress_bar.update_total_size(-response.content_length) 260 | api_type = download_item.__module__.split(".")[-1] 261 | post_id = download_item.post_id 262 | new_result = None 263 | if api_type == "messages": 264 | new_result = await subscription.get_message_by_id( 265 | message_id=post_id 266 | ) 267 | elif api_type == "posts": 268 | new_result = await subscription.get_post(post_id) 269 | if isinstance(new_result, error_details): 270 | continue 271 | if new_result and new_result.media: 272 | media_list = [ 273 | x for x in new_result.media if x["id"] == download_item.media_id 274 | ] 275 | if media_list: 276 | media = media_list[0] 277 | quality = subscription.subscriber.extras["settings"][ 278 | "supported" 279 | ]["onlyfans"]["settings"]["video_quality"] 280 | link = await new_result.link_picker(media, quality) 281 | download_item.link = link 282 | continue 283 | new_task["response"] = response 284 | new_task["download_item"] = download_item 285 | break 286 | return new_task 287 | 288 | def session_rules(self, link: str) -> dict[str, Any]: 289 | headers = self.headers 290 | if "https://onlyfans.com/api2/v2/" in link: 291 | dynamic_rules = self.dynamic_rules 292 | headers["app-token"] = dynamic_rules["app_token"] 293 | # auth_id = headers["user-id"] 294 | a = [link, 0, dynamic_rules] 295 | headers2 = self.create_signed_headers(*a) 296 | headers |= headers2 297 | return headers 298 | 299 | def create_signed_headers(self, link: str, auth_id: int, dynamic_rules: dict): 300 | # Users: 300000 | Creators: 301000 301 | final_time = str(int(round(time.time()))) 302 | path = urlparse(link).path 303 | query = urlparse(link).query 304 | path = path if not query else f"{path}?{query}" 305 | a = [dynamic_rules["static_param"], final_time, path, str(auth_id)] 306 | msg = "\n".join(a) 307 | message = msg.encode("utf-8") 308 | hash_object = hashlib.sha1(message) 309 | sha_1_sign = hash_object.hexdigest() 310 | sha_1_b = sha_1_sign.encode("ascii") 311 | checksum = ( 312 | sum([sha_1_b[number] for number in dynamic_rules["checksum_indexes"]]) 313 | + dynamic_rules["checksum_constant"] 314 | ) 315 | headers = {} 316 | headers["sign"] = dynamic_rules["format"].format(sha_1_sign, abs(checksum)) 317 | headers["time"] = final_time 318 | return headers 319 | 320 | 321 | async def test_proxies(proxies: list[str]): 322 | final_proxies = [] 323 | for proxy in proxies: 324 | connector = ProxyConnector.from_url(proxy) if proxy else None 325 | async with ClientSession(connector=connector) as session: 326 | link = "https://checkip.amazonaws.com" 327 | try: 328 | response = await session.get(link) 329 | ip = await response.text() 330 | ip = ip.strip() 331 | print("Session IP: " + ip + "\n") 332 | final_proxies.append(proxy) 333 | except python_socks._errors.ProxyConnectionError 
as e: 334 | print(f"Proxy Not Set: {proxy}\n") 335 | continue 336 | return final_proxies 337 | 338 | 339 | def restore_missing_data(master_set2, media_set, split_by): 340 | count = 0 341 | new_set = [] 342 | for item in media_set: 343 | if not item: 344 | link = master_set2[count] 345 | offset = int(link.split("?")[-1].split("&")[1].split("=")[1]) 346 | limit = int(link.split("?")[-1].split("&")[0].split("=")[1]) 347 | if limit == split_by + 1: 348 | break 349 | offset2 = offset 350 | limit2 = int(limit / split_by) 351 | for item in range(1, split_by + 1): 352 | link2 = link.replace("limit=" + str(limit), "limit=" + str(limit2)) 353 | link2 = link2.replace("offset=" + str(offset), "offset=" + str(offset2)) 354 | offset2 += limit2 355 | new_set.append(link2) 356 | count += 1 357 | new_set = new_set if new_set else master_set2 358 | return new_set 359 | 360 | 361 | async def scrape_endpoint_links(links, session_manager: session_manager, api_type): 362 | media_set = [] 363 | max_attempts = 100 364 | api_type = api_type.capitalize() 365 | for attempt in list(range(max_attempts)): 366 | if not links: 367 | continue 368 | print("Scrape Attempt: " + str(attempt + 1) + "/" + str(max_attempts)) 369 | results = await session_manager.async_requests(links) 370 | results = await remove_errors(results) 371 | not_faulty = [x for x in results if x] 372 | faulty = [ 373 | {"key": k, "value": v, "link": links[k]} 374 | for k, v in enumerate(results) 375 | if not v 376 | ] 377 | last_number = len(results) - 1 378 | if faulty: 379 | positives = [x for x in faulty if x["key"] != last_number] 380 | false_positive = [x for x in faulty if x["key"] == last_number] 381 | if positives: 382 | attempt = attempt if attempt > 1 else attempt + 1 383 | num = int(len(faulty) * (100 / attempt)) 384 | split_by = 2 385 | print("Missing " + str(num) + " Posts... 
Retrying...") 386 | links = restore_missing_data(links, results, split_by) 387 | media_set.extend(not_faulty) 388 | if not positives and false_positive: 389 | media_set.extend(not_faulty) 390 | break 391 | print 392 | else: 393 | media_set.extend(not_faulty) 394 | break 395 | media_set = list(chain(*media_set)) 396 | return media_set 397 | 398 | 399 | def calculate_the_unpredictable(link, limit, multiplier=1): 400 | final_links = [] 401 | a = list(range(1, multiplier + 1)) 402 | for b in a: 403 | parsed_link = urlparse(link) 404 | q = parsed_link.query.split("&") 405 | offset = q[1] 406 | old_offset_num = int(re.findall("\\d+", offset)[0]) 407 | new_offset_num = old_offset_num + (limit * b) 408 | new_link = link.replace(offset, f"offset={new_offset_num}") 409 | final_links.append(new_link) 410 | return final_links 411 | -------------------------------------------------------------------------------- /classes/prepare_metadata.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import os 3 | from itertools import chain, groupby 4 | from typing import MutableMapping, Union 5 | 6 | import jsonpickle 7 | from apis.onlyfans.classes.create_auth import create_auth 8 | from apis.onlyfans.classes.extras import media_types 9 | from helpers import main_helper 10 | 11 | global_version = 2 12 | 13 | 14 | class create_metadata(object): 15 | def __init__( 16 | self, 17 | authed: create_auth = None, 18 | metadata: Union[list, dict, MutableMapping] = {}, 19 | standard_format=False, 20 | api_type: str = "", 21 | ) -> None: 22 | self.version = global_version 23 | fixed_metadata = self.fix_metadata(metadata, standard_format, api_type) 24 | self.content = format_content( 25 | authed, fixed_metadata["version"], fixed_metadata["content"] 26 | ).content 27 | 28 | def fix_metadata(self, metadata, standard_format=False, api_type: str = "") -> dict: 29 | new_format = {} 30 | new_format["version"] = 1 31 | new_format["content"] = {} 32 | if isinstance(metadata, list): 33 | version = 0.3 34 | for m in metadata: 35 | new_format["content"] |= self.fix_metadata(m)["content"] 36 | print 37 | metadata = new_format 38 | else: 39 | version = metadata.get("version", None) 40 | if any(x for x in metadata if x in media_types().__dict__.keys()): 41 | standard_format = True 42 | print 43 | if not version and not standard_format and metadata: 44 | legacy_metadata = metadata 45 | media_type = legacy_metadata.get("type", None) 46 | if not media_type: 47 | version = 0.1 48 | media_type = api_type if api_type else media_type 49 | else: 50 | version = 0.2 51 | if version == 0.2: 52 | legacy_metadata.pop("type") 53 | new_format["content"][media_type] = {} 54 | for key, posts in legacy_metadata.items(): 55 | if all(isinstance(x, list) for x in posts): 56 | posts = list(chain(*posts)) 57 | new_format["content"][media_type][key] = posts 58 | print 59 | print 60 | elif standard_format: 61 | if any(x for x in metadata if x in media_types().__dict__.keys()): 62 | metadata.pop("directories", None) 63 | for key, status in metadata.items(): 64 | if isinstance(status, int): 65 | continue 66 | for key2, posts in status.items(): 67 | if all(x and isinstance(x, list) for x in posts): 68 | posts = list(chain(*posts)) 69 | metadata[key][key2] = posts 70 | print 71 | print 72 | print 73 | new_format["content"] = metadata 74 | print 75 | else: 76 | if global_version == version: 77 | new_format = metadata 78 | else: 79 | print 80 | print 81 | if "content" not in new_format: 82 | print 83 | return 
new_format 84 | 85 | def export(self, convert_type="json", keep_empty_items=False) -> dict: 86 | if not keep_empty_items: 87 | self.remove_empty() 88 | value = {} 89 | if convert_type == "json": 90 | new_format_copied = copy.deepcopy(self) 91 | for key, status in new_format_copied.content: 92 | for key2, posts in status: 93 | for post in posts: 94 | for media in post.medias: 95 | delattr(media, "session") 96 | if getattr(media, "old_filepath", None) != None: 97 | delattr(media, "old_filepath") 98 | if getattr(media, "new_filepath", None) != None: 99 | delattr(media, "new_filepath") 100 | print 101 | print 102 | print 103 | value = jsonpickle.encode(new_format_copied, unpicklable=False) 104 | value = jsonpickle.decode(value) 105 | if not isinstance(value, dict): 106 | return {} 107 | return value 108 | 109 | def convert(self, convert_type="json", keep_empty_items=False) -> dict: 110 | if not keep_empty_items: 111 | self.remove_empty() 112 | value = {} 113 | if convert_type == "json": 114 | new_format_copied = copy.deepcopy(self) 115 | value = jsonpickle.encode(new_format_copied, unpicklable=False) 116 | value = jsonpickle.decode(value) 117 | if not isinstance(value, dict): 118 | return {} 119 | return value 120 | 121 | def remove_empty(self): 122 | copied = copy.deepcopy(self) 123 | for k, v in copied: 124 | if not v: 125 | delattr(self, k) 126 | print 127 | return self 128 | 129 | def __iter__(self): 130 | for attr, value in self.__dict__.items(): 131 | yield attr, value 132 | 133 | 134 | class format_content(object): 135 | def __init__( 136 | self, 137 | authed=None, 138 | version=None, 139 | temp_old_content: dict = {}, 140 | export=False, 141 | reformat=False, 142 | args={}, 143 | ): 144 | class assign_state(object): 145 | def __init__(self) -> None: 146 | self.valid = [] 147 | self.invalid = [] 148 | 149 | def __iter__(self): 150 | for attr, value in self.__dict__.items(): 151 | yield attr, value 152 | old_content = temp_old_content.copy() 153 | old_content.pop("directories", None) 154 | new_content = media_types(assign_states=assign_state) 155 | for key, new_item in new_content: 156 | old_item = old_content.get(key) 157 | if not old_item: 158 | continue 159 | for old_key, old_item2 in old_item.items(): 160 | new_posts = [] 161 | if global_version == version: 162 | posts = old_item2 163 | for old_post in posts: 164 | post = self.post_item(old_post) 165 | new_medias = [] 166 | for media in post.medias: 167 | media["media_type"] = key 168 | media2 = self.media_item(media) 169 | new_medias.append(media2) 170 | post.medias = new_medias 171 | new_posts.append(post) 172 | print 173 | 174 | elif version == 1: 175 | old_item2.sort(key=lambda x: x["post_id"]) 176 | media_list = [ 177 | list(g) 178 | for k, g in groupby(old_item2, key=lambda x: x["post_id"]) 179 | ] 180 | for media_list2 in media_list: 181 | old_post = media_list2[0] 182 | post = self.post_item(old_post) 183 | for item in media_list2: 184 | item["media_type"] = key 185 | media = self.media_item(item) 186 | post.medias.append(media) 187 | new_posts.append(post) 188 | else: 189 | media_list = [] 190 | input("METADATA VERSION: INVALID") 191 | setattr(new_item, old_key, new_posts) 192 | self.content = new_content 193 | 194 | class post_item(create_metadata, object): 195 | def __init__(self, option={}): 196 | create_metadata.__init__(self,option) 197 | self.post_id = option.get("post_id", None) 198 | self.text = option.get("text", "") 199 | self.price = option.get("price", 0) 200 | self.paid = option.get("paid", False) 201 | 
self.medias = option.get("medias", []) 202 | self.postedAt = option.get("postedAt", "") 203 | 204 | def convert(self, convert_type="json", keep_empty_items=False) -> dict: 205 | if not keep_empty_items: 206 | self.remove_empty() 207 | value = {} 208 | if convert_type == "json": 209 | new_format_copied = copy.deepcopy(self) 210 | for media in new_format_copied.medias: 211 | media.convert() 212 | value = jsonpickle.encode(new_format_copied, unpicklable=False) 213 | value = jsonpickle.decode(value) 214 | if not isinstance(value, dict): 215 | return {} 216 | return value 217 | 218 | class media_item(create_metadata): 219 | def __init__(self, option={}): 220 | create_metadata.__init__(self,option) 221 | self.media_id = option.get("media_id", None) 222 | link = option.get("link", []) 223 | if link: 224 | link = [link] 225 | self.links = option.get("links", link) 226 | self.directory = option.get("directory", "") 227 | self.filename = option.get("filename", "") 228 | self.size = option.get("size", None) 229 | self.media_type = option.get("media_type", None) 230 | self.session = option.get("session", None) 231 | self.downloaded = option.get("downloaded", False) 232 | 233 | def convert(self, convert_type="json", keep_empty_items=False) -> dict: 234 | if not keep_empty_items: 235 | self.remove_empty() 236 | value = {} 237 | if convert_type == "json": 238 | value.pop("session", None) 239 | new_format_copied = copy.deepcopy(self) 240 | value = jsonpickle.encode(new_format_copied, unpicklable=False) 241 | value = jsonpickle.decode(value) 242 | if not isinstance(value, dict): 243 | return {} 244 | return value 245 | 246 | def __iter__(self): 247 | for attr, value in self.__dict__.items(): 248 | yield attr, value 249 | 250 | 251 | class format_variables(object): 252 | def __init__(self): 253 | self.site_name = "{site_name}" 254 | self.first_letter = "{first_letter}" 255 | self.post_id = "{post_id}" 256 | self.media_id = "{media_id}" 257 | self.profile_username = "{profile_username}" 258 | self.model_username = "{model_username}" 259 | self.api_type = "{api_type}" 260 | self.media_type = "{media_type}" 261 | self.filename = "{filename}" 262 | self.value = "{value}" 263 | self.text = "{text}" 264 | self.date = "{date}" 265 | self.ext = "{ext}" 266 | 267 | def whitelist(self, wl): 268 | new_wl = [] 269 | new_format_copied = copy.deepcopy(self) 270 | for key, value in new_format_copied: 271 | if value not in wl: 272 | new_wl.append(value) 273 | return new_wl 274 | 275 | def __iter__(self): 276 | for attr, value in self.__dict__.items(): 277 | yield attr, value 278 | 279 | 280 | class format_types: 281 | def __init__(self, options) -> None: 282 | self.file_directory_format = options.get("file_directory_format") 283 | self.filename_format = options.get("filename_format") 284 | self.metadata_directory_format = options.get("metadata_directory_format") 285 | 286 | def check_rules(self): 287 | bool_status = True 288 | wl = [] 289 | invalid_list = [] 290 | string = "" 291 | for key, value in self: 292 | if key == "file_directory_format": 293 | bl = format_variables() 294 | wl = [v for k, v in bl.__dict__.items()] 295 | bl = bl.whitelist(wl) 296 | invalid_list = [] 297 | for b in bl: 298 | if b in self.file_directory_format: 299 | invalid_list.append(b) 300 | if key == "filename_format": 301 | bl = format_variables() 302 | wl = [v for k, v in bl.__dict__.items()] 303 | bl = bl.whitelist(wl) 304 | invalid_list = [] 305 | for b in bl: 306 | if b in self.filename_format: 307 | invalid_list.append(b) 308 | if key == 
"metadata_directory_format": 309 | wl = [ 310 | "{site_name}", 311 | "{first_letter}", 312 | "{model_id}", 313 | "{profile_username}", 314 | "{model_username}", 315 | ] 316 | bl = format_variables().whitelist(wl) 317 | invalid_list = [] 318 | for b in bl: 319 | if b in self.metadata_directory_format: 320 | invalid_list.append(b) 321 | bool_status = True 322 | if invalid_list: 323 | string += f"You cannot use {','.join(invalid_list)} in {key}. Use any from this list {','.join(wl)}" 324 | bool_status = False 325 | 326 | return string, bool_status 327 | 328 | def check_unique(self, return_unique=True): 329 | string = "" 330 | values = [] 331 | unique = [] 332 | new_format_copied = copy.deepcopy(self) 333 | option = {} 334 | option["string"] = "" 335 | option["bool_status"] = True 336 | option["unique"] = new_format_copied 337 | f = format_variables() 338 | for key, value in self: 339 | if key == "file_directory_format": 340 | unique = ["{media_id}", "{model_username}"] 341 | value = os.path.normpath(value) 342 | values = value.split(os.sep) 343 | option["unique"].file_directory_format = unique 344 | elif key == "filename_format": 345 | values = [] 346 | unique = ["{media_id}", "{filename}"] 347 | value = os.path.normpath(value) 348 | for key2, value2 in f: 349 | if value2 in value: 350 | values.append(value2) 351 | option["unique"].filename_format = unique 352 | elif key == "metadata_directory_format": 353 | unique = ["{model_username}"] 354 | value = os.path.normpath(value) 355 | values = value.split(os.sep) 356 | option["unique"].metadata_directory_format = unique 357 | if key != "filename_format": 358 | e = [x for x in values if x in unique] 359 | else: 360 | e = [x for x in unique if x in values] 361 | if e: 362 | setattr(option["unique"], key, e) 363 | else: 364 | option[ 365 | "string" 366 | ] += f"{key} is a invalid format since it has no unique identifiers. 

class prepare_reformat(object):
    def __init__(self, option, keep_vars=False):
        format_variables2 = format_variables()
        self.site_name = option.get("site_name", format_variables2.site_name)
        self.post_id = option.get("post_id", format_variables2.post_id)
        self.media_id = option.get("media_id", format_variables2.media_id)
        self.profile_username = option.get(
            "profile_username", format_variables2.profile_username
        )
        self.model_username = option.get(
            "model_username", format_variables2.model_username
        )
        self.api_type = option.get("api_type", format_variables2.api_type)
        self.media_type = option.get("media_type", format_variables2.media_type)
        self.filename = option.get("filename", format_variables2.filename)
        self.ext = option.get("ext", format_variables2.ext)
        self.text = option.get("text", format_variables2.text)
        self.date = option.get("postedAt", format_variables2.date)
        self.price = option.get("price", 0)
        self.archived = option.get("archived", False)
        self.date_format = option.get("date_format")
        self.maximum_length = 255
        self.text_length = option.get("text_length", self.maximum_length)
        self.directory = option.get("directory")
        self.preview = option.get("preview")
        if not keep_vars:
            # Clear any attribute that is still an unfilled "{placeholder}".
            for key, value in self:
                if isinstance(value, str):
                    key = main_helper.find_between(value, "{", "}")
                    e = getattr(format_variables2, key, None)
                    if e:
                        setattr(self, key, "")

    def __iter__(self):
        for attr, value in self.__dict__.items():
            yield attr, value

    async def reformat(self, unformatted_list) -> list[str]:
        x = []
        format_variables2 = format_variables()
        for key, unformatted_item in unformatted_list.items():
            if "filename_format" == key:
                # Filenames are resolved relative to the directory produced
                # in the previous iteration. (Indexing x[1] here could raise
                # IndexError when only one path had been formatted so far.)
                unformatted_item = os.path.join(x[-1], unformatted_item)
            string = await main_helper.reformat(self, unformatted_item)
            final_path = []
            paths = string.split(os.sep)
            for path in paths:
                key = main_helper.find_between(path, "{", "}")
                e = getattr(format_variables2, key, None)
                if path == e:
                    break
                final_path.append(path)
            final_path = os.sep.join(final_path)
            x.append(final_path)
        return x

    def convert(self, convert_type="json", keep_empty_items=False) -> dict:
        if not keep_empty_items:
            self.remove_empty()
        value = {}
        if convert_type == "json":
            new_format_copied = copy.deepcopy(self)
            # prepare_reformat does not always carry a session attribute, so
            # guard the delete instead of letting delattr raise.
            if hasattr(new_format_copied, "session"):
                delattr(new_format_copied, "session")
            value = jsonpickle.encode(new_format_copied, unpicklable=False)
            value = jsonpickle.decode(value)
            if not isinstance(value, dict):
                return {}
        return value

    def remove_empty(self):
        copied = copy.deepcopy(self)
        for k, v in copied:
            if not v:
                delattr(self, k)
        return self
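
# End-to-end sketch (hypothetical values; `settings` stands in for the
# caller's loaded config and main_helper.reformat fills the placeholders at
# runtime): builds the final directory and filename for one media item.
# Keys are resolved in order, so filename_format is joined onto the
# directory produced by file_directory_format.
async def _example_prepare_paths(settings: dict):
    prepared = prepare_reformat(
        {
            "site_name": "OnlyFans",
            "model_username": "example_model",
            "api_type": "Posts",
            "media_type": "Images",
            "filename": "file",
            "ext": "jpg",
            "postedAt": "2021-01-01",
        }
    )
    return await prepared.reformat(
        {
            "file_directory_format": settings["file_directory_format"],
            "filename_format": settings["filename_format"],
        }
    )
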
--------------------------------------------------------------------------------
/apis/onlyfans/classes/create_auth.py:
--------------------------------------------------------------------------------
import asyncio
import math
from datetime import datetime
from itertools import chain, product
from multiprocessing.pool import Pool
from typing import Any, Dict, List, Optional, Union

import jsonpickle
from apis import api_helper
from apis.onlyfans.classes.create_message import create_message
from apis.onlyfans.classes.create_post import create_post
from apis.onlyfans.classes.create_user import create_user
from apis.onlyfans.classes.extras import (
    auth_details,
    content_types,
    create_headers,
    endpoint_links,
    error_details,
    handle_refresh,
)
from dateutil.relativedelta import relativedelta
from user_agent import generate_user_agent


class create_auth(create_user):
    def __init__(
        self,
        option: Optional[dict[str, Any]] = None,
        pool: Optional[Pool] = None,
        max_threads: int = -1,
    ) -> None:
        # Avoid the shared mutable-default pitfall ({} as a default argument).
        option = option if option is not None else {}
        create_user.__init__(self, option)
        if not self.username:
            self.username = f"u{self.id}"
        self.lists = {}
        self.links = content_types()
        self.subscriptions: list[create_user] = []
        self.chats = None
        self.archived_stories = {}
        self.mass_messages = []
        self.paid_content = []
        temp_pool = pool if pool else api_helper.multiprocessing()
        self.pool = temp_pool
        self.session_manager = api_helper.session_manager(self, max_threads=max_threads)
        self.auth_details: auth_details = auth_details()
        self.profile_directory = option.get("profile_directory", "")
        self.guest = False
        self.active: bool = False
        self.errors: list[error_details] = []
        self.extras: dict[str, Any] = {}

    def update(self, data: Dict[str, Any]):
        if not data["username"]:
            data["username"] = f"u{data['id']}"
        for key, value in data.items():
            if hasattr(self, key):
                setattr(self, key, value)
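
    # Usage sketch (hypothetical driver code; how auth_details is populated
    # depends on the caller's config loading, which lives outside this file):
    #
    #     auth = create_auth(max_threads=4)
    #     auth.auth_details = loaded_auth_details
    #     auth = await auth.login()            # interactive 2FA if required
    #     guest = await create_auth().login(guest=True)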

    async def login(self, max_attempts: int = 10, guest: bool = False):
        auth_version = "(V1)"
        auth_items = self.auth_details
        if not auth_items:
            return self
        if guest and auth_items:
            auth_items.cookie.auth_id = "0"
            auth_items.user_agent = generate_user_agent()  # type: ignore
        link = endpoint_links().customer
        user_agent = auth_items.user_agent  # type: ignore
        auth_id = str(auth_items.cookie.auth_id)
        # expected string error is fixed by auth_id
        dynamic_rules = self.session_manager.dynamic_rules
        a: List[Any] = [dynamic_rules, auth_id, user_agent, link]
        self.session_manager.headers = create_headers(*a)
        if guest:
            print("Guest Authentication")
            return self

        async def resolve_auth(auth: create_auth):
            if self.errors:
                error = self.errors[-1]
                print(error.message)
                if error.code == 101:
                    if auth_items.support_2fa:
                        link = "https://onlyfans.com/api2/v2/users/otp/check"
                        count = 1
                        max_count = 3
                        while count < max_count + 1:
                            print(f"2FA Attempt {count}/{max_count}")
                            code = input("Enter 2FA Code\n")
                            data = {"code": code, "rememberMe": True}
                            response = await self.session_manager.json_request(
                                link, method="POST", payload=data
                            )
                            if isinstance(response, error_details):
                                error.message = response.message
                                count += 1
                            else:
                                print("Success")
                                auth.active = False
                                auth.errors.remove(error)
                                await self.get_authed()
                                break

        count = 1
        while count < max_attempts + 1:
            string = f"Auth {auth_version} Attempt {count}/{max_attempts}"
            print(string)
            await self.get_authed()
            count += 1
            await resolve_auth(self)
            if not self.active:
                if self.errors:
                    error = self.errors[-1]
                    error_message = error.message
                    if "token" in error_message:
                        break
                    if "Code wrong" in error_message:
                        break
                    if "Please refresh" in error_message:
                        break
                else:
                    print("Auth 404'ed")
                continue
            else:
                print(f"Welcome {self.name} | {self.username}")
                break
        if not self.active:
            user = await self.get_user(auth_id)
            if isinstance(user, create_user):
                self.update(user.__dict__)
        return self

    async def get_authed(self):
        if not self.active:
            link = endpoint_links().customer
            response = await self.session_manager.json_request(link)
            if response:
                self.resolve_auth_errors(response)
                if not self.errors:
                    # merged = self.__dict__ | response
                    # self = create_auth(merged,self.pool,self.session_manager.max_threads)
                    self.active = True
                    self.update(response)
            else:
                # 404'ed
                self.active = False
        return self

    def resolve_auth_errors(self, response: Union[dict[str, Any], error_details]):
        # Adds an error object to self.errors
        if isinstance(response, error_details):
            error = response
        elif isinstance(response, dict) and "error" in response:
            error = response["error"]
            error = error_details(error)
        else:
            self.errors.clear()
            return
        error_message = error.message
        error_code = error.code
        if error_code == 0:
            pass
        elif error_code == 101:
            error_message = "Blocked by 2FA."
        elif error_code == 401:
            # Session/Refresh
            pass
        error.code = error_code
        error.message = error_message
        self.errors.append(error)

    async def get_lists(self, refresh=True, limit=100, offset=0):
        api_type = "lists"
        if not self.active:
            return
        if not refresh:
            subscriptions = handle_refresh(self, api_type)
            return subscriptions
        link = endpoint_links(global_limit=limit, global_offset=offset).lists
        results = await self.session_manager.json_request(link)
        self.lists = results
        return results

    async def get_user(
        self, identifier: Union[str, int]
    ) -> Union[create_user, error_details]:
        link = endpoint_links(identifier).users
        response = await self.session_manager.json_request(link)
        if not isinstance(response, error_details):
            response["session_manager"] = self.session_manager
            response = create_user(response, self)
        return response

    async def get_lists_users(
        self, identifier, check: bool = False, refresh=True, limit=100, offset=0
    ):
        if not self.active:
            return
        link = endpoint_links(
            identifier, global_limit=limit, global_offset=offset
        ).lists_users
        results = await self.session_manager.json_request(link)
        if len(results) >= limit and not check:
            results2 = await self.get_lists_users(
                identifier, limit=limit, offset=limit + offset
            )
            results.extend(results2)
        return results

    async def get_subscription(
        self, check: bool = False, identifier="", limit=100, offset=0
    ) -> Union[create_user, None]:
        subscriptions = await self.get_subscriptions(refresh=False)
        valid = None
        for subscription in subscriptions:
            if identifier == subscription.username or identifier == subscription.id:
                valid = subscription
                break
        return valid
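
    # Usage sketch (hypothetical): get_subscription only searches the cached
    # self.subscriptions list (it calls get_subscriptions(refresh=False)), so
    # a full get_subscriptions() must have run at least once beforehand.
    #
    #     await authed.get_subscriptions()
    #     model = await authed.get_subscription(identifier="example_model")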

    async def get_subscriptions(
        self,
        resume=None,
        refresh=True,
        identifiers: Optional[list] = None,
        extra_info=True,
        limit=20,
        offset=0,
    ) -> list[create_user]:
        identifiers = identifiers if identifiers is not None else []
        if not self.active:
            return []
        if not refresh:
            return self.subscriptions
        ceil = math.ceil(self.subscribesCount / limit)
        offset_array = []
        for page in range(ceil):
            page_offset = page * limit
            link = endpoint_links(
                global_limit=limit, global_offset=page_offset
            ).subscriptions
            offset_array.append(link)

        # The following logic is unique to creators only
        results = []
        if self.isPerformer:
            # jsonpickle cannot encode the session manager, pool or paid
            # content, so detach them while serializing and reattach after.
            temp_session_manager = self.session_manager
            temp_pool = self.pool
            temp_paid_content = self.paid_content
            delattr(self, "session_manager")
            delattr(self, "pool")
            delattr(self, "paid_content")
            json_authed = jsonpickle.encode(self, unpicklable=False)
            json_authed = jsonpickle.decode(json_authed)
            self.session_manager = temp_session_manager
            self.pool = temp_pool
            self.paid_content = temp_paid_content
            temp_auth = await self.get_user(self.username)
            if isinstance(json_authed, dict):
                json_authed = json_authed | temp_auth.__dict__

            subscription = create_user(json_authed, self)
            subscription.subscriber = self
            subscription.subscribedByData = {}
            new_date = datetime.now() + relativedelta(years=1)
            subscription.subscribedByData["expiredAt"] = new_date.isoformat()
            results.append([subscription])
        if not identifiers:

            async def multi(item):
                link = item
                subscriptions = await self.session_manager.json_request(link)
                valid_subscriptions = []
                extras = {}
                extras["auth_check"] = ""
                if isinstance(subscriptions, error_details):
                    return
                subscriptions = [
                    subscription
                    for subscription in subscriptions
                    if "error" != subscription
                ]
                tasks = []
                for subscription in subscriptions:
                    subscription["session_manager"] = self.session_manager
                    if extra_info:
                        task = self.get_user(subscription["username"])
                        tasks.append(task)
                tasks = await asyncio.gather(*tasks)
                for task in tasks:
                    if isinstance(task, error_details):
                        continue
                    subscription2: create_user = task
                    for subscription in subscriptions:
                        if subscription["id"] != subscription2.id:
                            continue
                        subscription = subscription | subscription2.__dict__
                        subscription = create_user(subscription, self)
                        if subscription.isBlocked:
                            continue
                        subscription.session_manager = self.session_manager
                        subscription.subscriber = self
                        valid_subscriptions.append(subscription)
                return valid_subscriptions

            # starmap fans the coroutine calls out over the pool; product()
            # wraps each offset link in a 1-tuple so starmap can unpack it.
            pool = self.pool
            tasks = pool.starmap(multi, product(offset_array))
            results += await asyncio.gather(*tasks)
        else:
            for identifier in identifiers:
                if self.id == identifier or self.username == identifier:
                    continue
                link = endpoint_links(identifier=identifier).users
                result = await self.session_manager.json_request(link)
                if isinstance(result, error_details) or not result["subscribedBy"]:
                    continue
                subscription = create_user(result, self)
                if subscription.isBlocked:
                    continue
                subscription.session_manager = self.session_manager
                subscription.subscriber = self
                results.append([subscription])
        results = [x for x in results if x is not None]
        results = list(chain(*results))
        self.subscriptions = results
        return results
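
    # Pagination sketch: with subscribesCount == 45 and limit == 20,
    # math.ceil(45 / 20) == 3 pages are requested at offsets 0, 20 and 40;
    # the per-page lists are fetched concurrently through the pool and then
    # flattened into a single list with itertools.chain at the end.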

    async def get_chats(
        self,
        links: Optional[list] = None,
        limit=100,
        offset=0,
        refresh=True,
        inside_loop=False,
    ) -> list:
        api_type = "chats"
        if not self.active:
            return []
        if not refresh:
            result = handle_refresh(self, api_type)
            if result:
                return result
        if links is None:
            links = []
        api_count = self.chatMessagesCount
        if api_count and not links:
            link = endpoint_links(
                identifier=self.id, global_limit=limit, global_offset=offset
            ).list_chats
            ceil = math.ceil(api_count / limit)
            for num in range(ceil):
                num = num * limit
                new_link = link.replace("offset=0", f"offset={num}")
                links.append(new_link)
        multiplier = getattr(self.session_manager.pool, "_processes")
        if links:
            link = links[-1]
        else:
            link = endpoint_links(
                identifier=self.id, global_limit=limit, global_offset=offset
            ).list_chats
        links2 = api_helper.calculate_the_unpredictable(link, limit, multiplier)
        if not inside_loop:
            links += links2
        else:
            links = links2
        results = await self.session_manager.async_requests(links)
        has_more = results[-1]["hasMore"]
        final_results = [x["list"] for x in results]
        final_results = list(chain.from_iterable(final_results))

        if has_more:
            results2 = await self.get_chats(
                links=[links[-1]], limit=limit, offset=limit + offset, inside_loop=True
            )
            final_results.extend(results2)

        final_results.sort(key=lambda x: x["withUser"]["id"], reverse=True)
        self.chats = final_results
        return final_results

    async def get_mass_messages(
        self, resume=None, refresh=True, limit=10, offset=0
    ) -> list:
        api_type = "mass_messages"
        if not self.active:
            return []
        if not refresh:
            result = handle_refresh(self, api_type)
            if result:
                return result
        link = endpoint_links(
            global_limit=limit, global_offset=offset
        ).mass_messages_api
        results = await self.session_manager.json_request(link)
        items = results.get("list", [])
        if not items:
            return items
        if resume:
            for item in items:
                if any(x["id"] == item["id"] for x in resume):
                    resume.sort(key=lambda x: x["id"], reverse=True)
                    self.mass_messages = resume
                    return resume
                else:
                    resume.append(item)

        if results["hasMore"]:
            # The recursive call is a coroutine and must be awaited;
            # without await, a coroutine object was extended into items.
            results2 = await self.get_mass_messages(
                resume=resume, limit=limit, offset=limit + offset
            )
            items.extend(results2)
        if resume:
            items = resume

        items.sort(key=lambda x: x["id"], reverse=True)
        self.mass_messages = items
        return items
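
    # Resume sketch (hypothetical; `cached` is whatever list the caller
    # stored from a previous run): paging stops as soon as a known message
    # id reappears in the fetched page.
    #
    #     latest = await authed.get_mass_messages(resume=cached)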

    async def get_paid_content(
        self,
        check: bool = False,
        refresh: bool = True,
        limit: int = 99,
        offset: int = 0,
        inside_loop: bool = False,
    ) -> list[Union[create_message, create_post]]:
        api_type = "paid_content"
        if not self.active:
            return []
        if not refresh:
            result = handle_refresh(self, api_type)
            if result:
                return result
        link = endpoint_links(global_limit=limit, global_offset=offset).paid_api
        final_results = await self.session_manager.json_request(link)
        if not isinstance(final_results, error_details):
            if len(final_results) >= limit and not check:
                # The recursive call is a coroutine and must be awaited.
                results2 = await self.get_paid_content(
                    limit=limit, offset=limit + offset, inside_loop=True
                )
                final_results.extend(results2)
            if not inside_loop:
                temp = []
                for final_result in final_results:
                    content = None
                    if final_result["responseType"] == "message":
                        user = create_user(final_result["fromUser"], self)
                        content = create_message(final_result, user)
                    elif final_result["responseType"] == "post":
                        user = create_user(final_result["author"], self)
                        content = create_post(final_result, user)
                    if content:
                        temp.append(content)
                final_results = temp
            self.paid_content = final_results
        return final_results
--------------------------------------------------------------------------------