├── models
├── magnetModel.js
├── elasticsearch
│ └── index.js
└── db.js
├── docs
└── screenshots
│ ├── 0.png
│ └── 1.png
├── postcss.config.js
├── public
├── favicon
│ ├── apple-touch-icon.png
│ ├── android-chrome-192x192.png
│ ├── android-chrome-512x512.png
│ ├── site.webmanifest
│ ├── favicon.svg
│ └── favicon-dark.svg
├── static
│ ├── fonts
│ │ ├── google
│ │ │ ├── inter
│ │ │ │ ├── Inter-Bold.ttf
│ │ │ │ ├── Inter-Light.ttf
│ │ │ │ ├── Inter-Medium.ttf
│ │ │ │ ├── Inter-Regular.ttf
│ │ │ │ └── Inter-SemiBold.ttf
│ │ │ └── manrope
│ │ │ │ ├── Manrope-Bold.ttf
│ │ │ │ ├── Manrope-Medium.ttf
│ │ │ │ └── Manrope-SemiBold.ttf
│ │ └── fontawesome
│ │ │ ├── fa-brands-400.woff2
│ │ │ ├── fa-solid-900.woff2
│ │ │ └── fa-regular-400.woff2
│ └── css
│ │ └── google-fonts.css
└── css
│ ├── core.css
│ └── directory-tree.css
├── .gitignore
├── ecosystem.json
├── lib
├── ktable.js
├── utils.js
├── database.js
├── peer-queue.js
├── index.js
├── btclient.js
├── redis.js
├── p2p.js
├── dhtspider.js
└── wire.js
├── routes
└── index.js
├── utils
├── ensureDataDir.js
├── bulkIndexToElasticsearch.js
└── fileTreeUtils.js
├── .env.sample
├── LICENSE
├── src
└── input.css
├── package.json
├── tailwind.config.js
├── config
├── env.js
└── express.js
├── views
├── includes
│ ├── footer.ejs
│ ├── header.ejs
│ └── navbar.ejs
├── error.ejs
├── searchform.ejs
├── infohash.ejs
├── index.ejs
├── search.ejs
└── statistics.ejs
├── app.js
├── reset_data.sh
├── services
└── websocket.js
├── README.md
├── CHANGELOG.md
└── RELEASE_NOTES.md
/models/magnetModel.js:
--------------------------------------------------------------------------------
// Thin re-export of the Magnet model defined in ./db, so callers can
// require('../models/magnetModel') without knowing the database layout.
const { Magnet } = require('./db');

module.exports = Magnet;
4 |
--------------------------------------------------------------------------------
/docs/screenshots/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/docs/screenshots/0.png
--------------------------------------------------------------------------------
/docs/screenshots/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/docs/screenshots/1.png
--------------------------------------------------------------------------------
/postcss.config.js:
--------------------------------------------------------------------------------
// PostCSS pipeline: run Tailwind first to expand utilities, then
// Autoprefixer to add vendor prefixes for the targeted browsers.
module.exports = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  }
}
--------------------------------------------------------------------------------
/public/favicon/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/favicon/apple-touch-icon.png
--------------------------------------------------------------------------------
/public/favicon/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/favicon/android-chrome-192x192.png
--------------------------------------------------------------------------------
/public/favicon/android-chrome-512x512.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/favicon/android-chrome-512x512.png
--------------------------------------------------------------------------------
/public/static/fonts/google/inter/Inter-Bold.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/google/inter/Inter-Bold.ttf
--------------------------------------------------------------------------------
/public/static/fonts/google/inter/Inter-Light.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/google/inter/Inter-Light.ttf
--------------------------------------------------------------------------------
/public/static/fonts/google/inter/Inter-Medium.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/google/inter/Inter-Medium.ttf
--------------------------------------------------------------------------------
/public/static/fonts/fontawesome/fa-brands-400.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/fontawesome/fa-brands-400.woff2
--------------------------------------------------------------------------------
/public/static/fonts/fontawesome/fa-solid-900.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/fontawesome/fa-solid-900.woff2
--------------------------------------------------------------------------------
/public/static/fonts/google/inter/Inter-Regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/google/inter/Inter-Regular.ttf
--------------------------------------------------------------------------------
/public/static/fonts/google/inter/Inter-SemiBold.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/google/inter/Inter-SemiBold.ttf
--------------------------------------------------------------------------------
/public/static/fonts/google/manrope/Manrope-Bold.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/google/manrope/Manrope-Bold.ttf
--------------------------------------------------------------------------------
/public/static/fonts/fontawesome/fa-regular-400.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/fontawesome/fa-regular-400.woff2
--------------------------------------------------------------------------------
/public/static/fonts/google/manrope/Manrope-Medium.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/google/manrope/Manrope-Medium.ttf
--------------------------------------------------------------------------------
/public/static/fonts/google/manrope/Manrope-SemiBold.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thejordanprice/p2pspider/HEAD/public/static/fonts/google/manrope/Manrope-SemiBold.ttf
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Dependencies
2 | node_modules/*
3 |
4 | # Environment variables
5 | .env
6 |
7 | # Database files
8 | data/magnet.db
9 | data/magnet.db-shm
10 | data/magnet.db-wal
11 |
12 | # System files
13 | .DS_Store
14 | docs/.DS_Store
15 |
--------------------------------------------------------------------------------
/ecosystem.json:
--------------------------------------------------------------------------------
1 | {
2 | "apps": [
3 | {
4 | "name": "p2pspider",
5 | "script": "app.js",
6 | "instances": 1,
7 | "exec_mode": "fork",
8 | "env": {
9 | "NODE_ENV": "production"
10 | },
11 | "node_args": "--max-old-space-size=2048"
12 | }
13 | ]
14 | }
15 |
--------------------------------------------------------------------------------
/lib/ktable.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const utils = require('./utils');
4 |
/**
 * Minimal routing table for the DHT crawler: holds up to `maxsize`
 * contact nodes alongside this node's own randomly generated ID.
 */
class KTable {
  constructor(maxsize) {
    // Our node ID, fixed for the lifetime of the table.
    this.nid = utils.randomID();
    this.nodes = [];
    this.maxsize = maxsize;
  }

  /** Record a contact node; silently dropped once the table is full. */
  push(node) {
    if (this.nodes.length >= this.maxsize) {
      return;
    }
    this.nodes.push(node);
  }
}
18 |
19 | module.exports = KTable;
20 |
--------------------------------------------------------------------------------
/routes/index.js:
--------------------------------------------------------------------------------
const express = require('express');
const router = express.Router();
const magnetController = require('../controllers/magnetController');

// HTML page routes, all rendered by the magnet controller.
router.get('/', magnetController.index);
router.get('/latest', magnetController.latest);
router.get('/statistics', magnetController.statistics);
router.get('/infohash', magnetController.infohash);
router.get('/search', magnetController.search);
// JSON API endpoint: total number of indexed magnets.
router.get('/api/count', magnetController.count);

module.exports = router;
13 |
--------------------------------------------------------------------------------
/public/favicon/site.webmanifest:
--------------------------------------------------------------------------------
1 | {
2 | "name": "P2P Magnet Search",
3 | "short_name": "P2P Magnet",
4 | "icons": [
5 | {
6 | "src": "/public/favicon/android-chrome-192x192.png",
7 | "sizes": "192x192",
8 | "type": "image/png"
9 | },
10 | {
11 | "src": "/public/favicon/android-chrome-512x512.png",
12 | "sizes": "512x512",
13 | "type": "image/png"
14 | }
15 | ],
16 | "theme_color": "#0ea5e9",
17 | "background_color": "#ffffff",
18 | "display": "standalone"
19 | }
--------------------------------------------------------------------------------
/utils/ensureDataDir.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const fs = require('fs');
4 | const path = require('path');
5 |
/**
 * Make sure the directory that will hold the SQLite database exists,
 * creating it (and any missing parent directories) on first run.
 * @param {string} dbPath - Path to the SQLite database file
 */
function ensureDataDir(dbPath) {
  const dataDir = path.dirname(dbPath);
  if (fs.existsSync(dataDir)) {
    return; // already present — nothing to do
  }
  console.log(`Creating directory: ${dataDir}`);
  fs.mkdirSync(dataDir, { recursive: true });
}
17 |
18 | module.exports = ensureDataDir;
--------------------------------------------------------------------------------
/public/favicon/favicon.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/public/favicon/favicon-dark.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/lib/utils.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const crypto = require('crypto');
4 |
5 | const randomID = () => crypto.createHash('sha1').update(crypto.randomBytes(20)).digest();
6 |
/**
 * Decode a DHT "compact node info" buffer into node descriptors.
 * Each record is 26 bytes: 20-byte node ID, 4-byte IPv4 address,
 * 2-byte big-endian port. Trailing partial records are ignored.
 */
const decodeNodes = (data) => {
  const RECORD_LEN = 26;
  const nodes = [];
  let offset = 0;
  while (offset + RECORD_LEN <= data.length) {
    const octets = [
      data[offset + 20],
      data[offset + 21],
      data[offset + 22],
      data[offset + 23],
    ];
    nodes.push({
      nid: data.slice(offset, offset + 20),
      address: octets.join('.'),
      port: data.readUInt16BE(offset + 24),
    });
    offset += RECORD_LEN;
  }
  return nodes;
};
18 |
19 | const genNeighborID = (target, nid) => Buffer.concat([target.slice(0, 10), nid.slice(10)]);
20 |
21 | module.exports = { randomID, decodeNodes, genNeighborID };
22 |
--------------------------------------------------------------------------------
/.env.sample:
--------------------------------------------------------------------------------
1 | REDIS_URI=redis://127.0.0.1:6379
2 | MONGO_URI=mongodb://127.0.0.1/magnetdb
3 |
4 | # Include protocol and port in SITE_HOSTNAME for WebSocket connections
5 | SITE_HOSTNAME=http://127.0.0.1:8080
6 | SITE_NAME=DHT Spider
7 | SITE_PORT=8080
8 |
9 | # Database options: "mongodb" or "sqlite"
10 | DB_TYPE=sqlite
11 |
12 | # Redis options: "true" or "false"
13 | USE_REDIS=false
14 |
15 | # SQLite database file location (only used if DB_TYPE=sqlite)
16 | SQLITE_PATH=./data/magnet.db
17 |
18 | # Elasticsearch options: "true" or "false"
19 | USE_ELASTICSEARCH=false
20 |
21 | # Elasticsearch connection - using localhost for direct connection
22 | ELASTICSEARCH_NODE=http://localhost:9200
23 | ELASTICSEARCH_INDEX=magnets
24 |
25 | # Run options: "true" or "false"
26 | RUN_DAEMON=true
27 | RUN_WEBSERVER=true
--------------------------------------------------------------------------------
/lib/database.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { Database, getDatabase } = require('../models/db');
4 |
/**
 * Initialize the application database connection.
 *
 * Reuses a connection the controller layer may have already established;
 * otherwise connects a fresh Database instance. The database is critical,
 * so a connection failure logs the error and exits the process (letting a
 * supervisor such as pm2 restart the app).
 *
 * @returns {Promise<Database>} a connected database instance
 */
async function initializeDatabase() {
  // Check if database is already initialized by controller.
  const existingDb = getDatabase();

  if (existingDb && existingDb.connected) {
    console.log(`Using existing database (${existingDb.type}) connection.`);
    return existingDb;
  }

  const db = new Database();
  try {
    // Plain try/catch instead of mixing `await` with .then()/.catch().
    await db.connect();
    console.log(`Database (${db.type}) is connected.`);
  } catch (err) {
    console.error(`Database connection error:`, err);
    // Fail fast: the rest of the app cannot run without a database.
    process.exit(1);
  }
  return db;
}
27 |
28 | module.exports = { initializeDatabase };
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2017 thejordanprice
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/src/input.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
5 | @layer components {
6 | .search-input {
7 | @apply w-full px-10 py-3 border-2 border-gray-300 rounded-lg bg-white transition-colors duration-200 ease-in-out focus:outline-none focus:ring-2 focus:ring-primary-500 focus:border-primary-500;
8 | }
9 |
10 | .btn-primary {
11 | @apply px-4 py-2 bg-primary-600 text-white rounded-lg shadow-sm hover:bg-primary-700 focus:outline-none focus:ring-2 focus:ring-primary-500 focus:ring-offset-2 transition-colors duration-150;
12 | }
13 |
14 | .btn-secondary {
15 | @apply px-4 py-2 bg-white text-primary-700 border border-primary-500 rounded-lg shadow-sm hover:bg-primary-50 focus:outline-none focus:ring-2 focus:ring-primary-500 focus:ring-offset-2 transition-colors duration-150;
16 | }
17 |
18 | .btn-outline {
19 | @apply px-4 py-2 bg-white text-dark-700 border border-dark-300 rounded-lg shadow-sm hover:bg-dark-50 focus:outline-none focus:ring-2 focus:ring-dark-500 focus:ring-offset-2 transition-colors duration-150;
20 | }
21 |
22 | .card {
23 | @apply bg-white rounded-xl shadow-elegant p-6 transition-all duration-200 hover:shadow-lg;
24 | }
25 |
26 | .page-link {
27 | @apply inline-flex items-center px-4 py-2 text-sm font-medium text-primary-700 bg-white border border-gray-300 rounded-lg hover:bg-primary-50 transition-colors duration-150 focus:z-10 focus:outline-none focus:ring-2 focus:ring-primary-500;
28 | }
29 | }
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "p2pspider",
3 | "description": "DHT Spider + BitTorrent Client + Web Front-end = P2P Magnet Search",
4 | "version": "0.1.0",
5 | "author": {
6 | "name": "thejordanprice"
7 | },
8 | "repository": "https://github.com/thejordanprice/p2pspider",
9 | "main": "lib/index.js",
10 | "bugs": {
11 | "url": "https://github.com/thejordanprice/p2pspider/issues"
12 | },
13 | "scripts": {
14 | "build:css": "tailwindcss -i ./src/input.css -o ./public/css/tailwind.css --minify",
15 | "watch:css": "tailwindcss -i ./src/input.css -o ./public/css/tailwind.css --watch",
16 | "start": "node app.js",
17 | "start:prod": "NODE_ENV=production node app.js",
18 | "start:pm2": "pm2 start ecosystem.json",
19 | "index:elastic": "node utils/bulkIndexToElasticsearch.js"
20 | },
21 | "dependencies": {
22 | "@elastic/elasticsearch": "^8.12.1",
23 | "axios": "^1.8.4",
24 | "bencode": "^0.7.0",
25 | "bitfield": "^1.1.2",
26 | "compression": "^1.7.4",
27 | "dotenv": "^16.4.5",
28 | "ejs": "^3.1.10",
29 | "express": "^4.15.3",
30 | "mongoose": "^8.5.1",
31 | "path": "^0.12.7",
32 | "pm2": "^5.4.2",
33 | "redis": "^4.6.15",
34 | "sqlite3": "^5.1.7",
35 | "ws": "^8.18.0"
36 | },
37 | "devDependencies": {
38 | "autoprefixer": "^10.4.14",
39 | "postcss": "^8.4.24",
40 | "tailwindcss": "^3.3.2"
41 | },
42 | "keywords": [
43 | "torrent",
44 | "bittorrent",
45 | "p2p",
46 | "dht"
47 | ],
48 | "license": "MIT"
49 | }
50 |
--------------------------------------------------------------------------------
/lib/peer-queue.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | class PeerQueue {
4 | constructor(maxSize = 200, perLimit = 10) {
5 | this.maxSize = maxSize;
6 | this.perLimit = perLimit;
7 | this.peers = {};
8 | this.reqs = [];
9 | }
10 |
11 | _shift() {
12 | if (this.length() > 0) {
13 | const req = this.reqs.shift();
14 | this.peers[req.infohash.toString('hex')] = [];
15 | return req;
16 | }
17 | }
18 |
19 | push(peer) {
20 | const infohashHex = peer.infohash.toString('hex');
21 | const peers = this.peers[infohashHex];
22 |
23 | if (peers && peers.length < this.perLimit) {
24 | peers.push(peer);
25 | } else if (this.length() < this.maxSize) {
26 | this.reqs.push(peer);
27 | }
28 | }
29 |
30 | shift(infohash, successful) {
31 | if (infohash) {
32 | const infohashHex = infohash.toString('hex');
33 | if (successful === true) {
34 | delete this.peers[infohashHex];
35 | } else {
36 | const peers = this.peers[infohashHex];
37 | if (peers) {
38 | if (peers.length > 0) {
39 | return peers.shift();
40 | } else {
41 | delete this.peers[infohashHex];
42 | }
43 | }
44 | }
45 | }
46 | return this._shift();
47 | }
48 |
49 | length() {
50 | return this.reqs.length;
51 | }
52 | }
53 |
54 | module.exports = PeerQueue;
55 |
--------------------------------------------------------------------------------
/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | module.exports = {
3 | content: [
4 | './views/**/*.ejs',
5 | './public/**/*.js',
6 | ],
7 | theme: {
8 | extend: {
9 | colors: {
10 | primary: {
11 | 50: '#f0f9ff',
12 | 100: '#e0f2fe',
13 | 200: '#bae6fd',
14 | 300: '#7dd3fc',
15 | 400: '#38bdf8',
16 | 500: '#0ea5e9',
17 | 600: '#0284c7',
18 | 700: '#0369a1',
19 | 800: '#075985',
20 | 900: '#0c4a6e',
21 | 950: '#082f49',
22 | },
23 | secondary: {
24 | 50: '#f5f3ff',
25 | 100: '#ede9fe',
26 | 200: '#ddd6fe',
27 | 300: '#c4b5fd',
28 | 400: '#a78bfa',
29 | 500: '#8b5cf6',
30 | 600: '#7c3aed',
31 | 700: '#6d28d9',
32 | 800: '#5b21b6',
33 | 900: '#4c1d95',
34 | 950: '#2e1065',
35 | },
36 | dark: {
37 | 50: '#f8fafc',
38 | 100: '#f1f5f9',
39 | 200: '#e2e8f0',
40 | 300: '#cbd5e1',
41 | 400: '#94a3b8',
42 | 500: '#64748b',
43 | 600: '#475569',
44 | 700: '#334155',
45 | 800: '#1e293b',
46 | 900: '#0f172a',
47 | 950: '#020617',
48 | },
49 | },
50 | fontFamily: {
51 | sans: ['Inter', 'sans-serif'],
52 | display: ['Manrope', 'Inter', 'sans-serif'],
53 | },
54 | boxShadow: {
55 | elegant: '0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05)',
56 | },
57 | },
58 | },
59 | plugins: [],
60 | };
--------------------------------------------------------------------------------
/public/static/css/google-fonts.css:
--------------------------------------------------------------------------------
1 | @font-face {
2 | font-family: 'Inter';
3 | font-style: normal;
4 | font-weight: 300;
5 | font-display: swap;
6 | src: url(../fonts/google/inter/Inter-Light.ttf) format('truetype');
7 | }
8 | @font-face {
9 | font-family: 'Inter';
10 | font-style: normal;
11 | font-weight: 400;
12 | font-display: swap;
13 | src: url(../fonts/google/inter/Inter-Regular.ttf) format('truetype');
14 | }
15 | @font-face {
16 | font-family: 'Inter';
17 | font-style: normal;
18 | font-weight: 500;
19 | font-display: swap;
20 | src: url(../fonts/google/inter/Inter-Medium.ttf) format('truetype');
21 | }
22 | @font-face {
23 | font-family: 'Inter';
24 | font-style: normal;
25 | font-weight: 600;
26 | font-display: swap;
27 | src: url(../fonts/google/inter/Inter-SemiBold.ttf) format('truetype');
28 | }
29 | @font-face {
30 | font-family: 'Inter';
31 | font-style: normal;
32 | font-weight: 700;
33 | font-display: swap;
34 | src: url(../fonts/google/inter/Inter-Bold.ttf) format('truetype');
35 | }
36 | @font-face {
37 | font-family: 'Manrope';
38 | font-style: normal;
39 | font-weight: 500;
40 | font-display: swap;
41 | src: url(../fonts/google/manrope/Manrope-Medium.ttf) format('truetype');
42 | }
43 | @font-face {
44 | font-family: 'Manrope';
45 | font-style: normal;
46 | font-weight: 600;
47 | font-display: swap;
48 | src: url(../fonts/google/manrope/Manrope-SemiBold.ttf) format('truetype');
49 | }
50 | @font-face {
51 | font-family: 'Manrope';
52 | font-style: normal;
53 | font-weight: 700;
54 | font-display: swap;
55 | src: url(../fonts/google/manrope/Manrope-Bold.ttf) format('truetype');
56 | }
--------------------------------------------------------------------------------
/config/env.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | require('dotenv').config();
4 |
5 | // Environment configuration constants
6 | const USE_REDIS = process.env.USE_REDIS === 'true';
7 | const REDIS_URI = process.env.REDIS_URI;
8 | const REDIS_HASH_TTL = 60 * 60 * 24; // 24 hours in seconds
9 | const P2P_PORT = parseInt(process.env.P2P_PORT || '6881', 10);
10 | const P2P_HOST = process.env.P2P_HOST || '0.0.0.0';
11 | const SITE_HOSTNAME = process.env.SITE_HOSTNAME;
12 | const SITE_NAME = process.env.SITE_NAME || 'P2P Spider';
13 | const SITE_PORT = parseInt(process.env.SITE_PORT || '3000', 10);
14 | const PRODUCTION = process.env.NODE_ENV === 'production';
15 | const RUN_DAEMON = process.env.RUN_DAEMON !== 'false'; // Default to true if not set
16 | const RUN_WEBSERVER = process.env.RUN_WEBSERVER !== 'false'; // Default to true if not set
17 |
/**
 * Validate environment configuration.
 *
 * Warns when SITE_HOSTNAME lacks a protocol (WebSocket clients need a
 * full origin) and exits the process when either port variable did not
 * parse as a number.
 */
function validateEnvironment() {
  // Check SITE_HOSTNAME format — must include http:// or https://.
  if (SITE_HOSTNAME && !SITE_HOSTNAME.startsWith('http')) {
    console.warn(`
⚠️ WARNING: SITE_HOSTNAME "${SITE_HOSTNAME}" does not include protocol (http:// or https://)
For WebSocket communication to work properly, update your .env file:
SITE_HOSTNAME=http://${SITE_HOSTNAME}:${SITE_PORT || '3000'}
`);
  }

  // Ports were parsed with parseInt above; NaN means a malformed value.
  if (isNaN(P2P_PORT)) {
    console.error('Invalid P2P_PORT defined in environment');
    process.exit(1);
  }
  if (isNaN(SITE_PORT)) {
    console.error('Invalid SITE_PORT defined in environment');
    process.exit(1);
  }
}
40 |
41 | module.exports = {
42 | USE_REDIS,
43 | REDIS_URI,
44 | REDIS_HASH_TTL,
45 | P2P_PORT,
46 | P2P_HOST,
47 | SITE_HOSTNAME,
48 | SITE_NAME,
49 | SITE_PORT,
50 | PRODUCTION,
51 | RUN_DAEMON,
52 | RUN_WEBSERVER,
53 | validateEnvironment
54 | };
--------------------------------------------------------------------------------
/views/includes/footer.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 | <%= site_name || 'P2P Magnet Search' %>
11 |
12 |
13 |
14 |
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/lib/index.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const EventEmitter = require('events');
4 | const util = require('util');
5 | const DHTSpider = require('./dhtspider');
6 | const BTClient = require('./btclient');
7 |
/**
 * Facade tying the DHT crawler (DHTSpider) to the metadata downloader
 * (BTClient). Emits 'metadata' events for every torrent whose metadata
 * was successfully fetched from a peer.
 */
class P2PSpider extends EventEmitter {
  /**
   * @param {object} [options] - supports `timeout`, `maxConnections`
   *   (forwarded to BTClient) and `nodesMaxSize` (forwarded to DHTSpider).
   */
  constructor(options = {}) {
    super();
    this.options = options;
    this._ignore = undefined;  // optional filter installed via ignore()
    this.udpServer = null;     // DHT UDP socket, kept so close() can stop it
    this.btclient = null;
    this.intervalId = null;
  }

  /**
   * Install a filter callback consulted before each metadata download;
   * forwarded to BTClient as its `ignore` option.
   */
  ignore(ignore) {
    this._ignore = ignore;
  }

  /**
   * Start the BitTorrent client and DHT spider on the given UDP endpoint.
   * Re-emits BTClient 'complete' events as 'metadata', augmenting the
   * metadata with the peer's address/port, hex infohash and a magnet URI.
   */
  listen(port = 6881, address = '0.0.0.0') {
    this.port = port;
    this.address = address;

    this.btclient = new BTClient({
      timeout: this.options.timeout || 10000,
      ignore: this._ignore,
      maxConnections: this.options.maxConnections
    });

    this.btclient.on('complete', (metadata, infohash, rinfo) => {
      // Copy so the original metadata object from the wire is untouched.
      const _metadata = { ...metadata, address: rinfo.address, port: rinfo.port, infohash: infohash.toString('hex') };
      _metadata.magnet = `magnet:?xt=urn:btih:${_metadata.infohash}`;
      this.emit('metadata', _metadata);
    });

    const dhtSpider = DHTSpider.start({
      btclient: this.btclient,
      address: this.address,
      port: this.port,
      nodesMaxSize: this.options.nodesMaxSize || 4000
    });

    // Store reference to UDP server for cleanup
    if (dhtSpider && dhtSpider.udp) {
      this.udpServer = dhtSpider.udp;
    }
  }

  /**
   * Stop the DHT UDP server if one was started. `callback` (optional) is
   * invoked when the socket has closed, or with an error if closing threw.
   */
  close(callback) {
    if (this.udpServer) {
      try {
        this.udpServer.close(() => {
          console.log('UDP server closed');
          if (callback && typeof callback === 'function') {
            callback();
          }
        });
      } catch (err) {
        console.error('Error closing UDP server:', err);
        if (callback && typeof callback === 'function') {
          callback(err);
        }
      }
    } else {
      console.log('No UDP server to close');
      if (callback && typeof callback === 'function') {
        callback();
      }
    }
  }
}
74 |
75 | module.exports = P2PSpider;
76 |
--------------------------------------------------------------------------------
/views/error.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 | <%- include('includes/header') %>
4 |
5 | <%- include('includes/navbar') %>
6 |
7 |
8 |
9 |
10 |
11 |
12 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
Error Occurred
23 |
24 |
25 |
28 |
29 |
42 |
43 |
44 |
45 |
46 |
47 |
48 | <%- include('includes/footer') %>
49 |
50 |
--------------------------------------------------------------------------------
/views/includes/header.ejs:
--------------------------------------------------------------------------------
1 |
2 | <%= typeof title !== 'undefined' ? title : 'P2P Magnet Search' %>
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/public/css/core.css:
--------------------------------------------------------------------------------
1 | /* Enhanced Search Input Styling */
2 | input[id=search] {
3 | width: 100%;
4 | box-sizing: border-box;
5 | border: 1px solid rgba(203, 213, 225, 0.5);
6 | border-radius: 9999px;
7 | font-size: 16px;
8 | background-color: white;
9 | padding: 16px 56px;
10 | box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
11 | transition: all 0.3s ease;
12 | }
13 |
14 | input[id=search]:focus {
15 | box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05), 0 0 0 3px rgba(14, 165, 233, 0.2);
16 | border-color: rgba(14, 165, 233, 0.5);
17 | outline: none;
18 | }
19 |
20 | /* Button Styles */
21 | .btn-primary {
22 | display: inline-flex;
23 | align-items: center;
24 | justify-content: center;
25 | padding: 0.5rem 1.25rem;
26 | font-size: 0.875rem;
27 | font-weight: 500;
28 | border-radius: 0.5rem;
29 | background-color: #0ea5e9;
30 | color: white;
31 | transition: all 0.2s ease;
32 | box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);
33 | }
34 |
35 | .btn-primary:hover {
36 | background-color: #0284c7;
37 | transform: translateY(-1px);
38 | box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
39 | }
40 |
41 | .btn-secondary {
42 | display: inline-flex;
43 | align-items: center;
44 | justify-content: center;
45 | padding: 0.5rem 1.25rem;
46 | font-size: 0.875rem;
47 | font-weight: 500;
48 | border-radius: 0.5rem;
49 | background-color: #1e293b;
50 | color: white;
51 | transition: all 0.2s ease;
52 | box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);
53 | }
54 |
55 | .btn-secondary:hover {
56 | background-color: #334155;
57 | transform: translateY(-1px);
58 | box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
59 | }
60 |
61 | /* Card Styles */
62 | .card {
63 | background-color: white;
64 | border-radius: 0.75rem;
65 | overflow: hidden;
66 | box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
67 | }
68 |
69 | /* Shadow Styles */
70 | .shadow-elegant {
71 | box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
72 | }
73 |
74 | .shadow-elegant-lg {
75 | box-shadow: 0 20px 25px -5px rgba(0, 0, 0, 0.1), 0 10px 10px -5px rgba(0, 0, 0, 0.04);
76 | }
77 |
78 | /* Typography Styles */
79 | body {
80 | font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Helvetica, Arial, sans-serif;
81 | }
82 |
83 | h1, h2, h3, h4, h5, h6 {
84 | font-family: 'Manrope', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Helvetica, Arial, sans-serif;
85 | }
86 |
87 |
--------------------------------------------------------------------------------
/lib/btclient.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const EventEmitter = require('events');
4 | const net = require('net');
5 |
6 | const PeerQueue = require('./peer-queue');
7 | const Wire = require('./wire');
8 |
/**
 * Downloads torrent metadata from peers discovered by the DHT spider.
 * Peers are funneled through a PeerQueue; at most `maxConnections`
 * TCP connections are active at once. Emits:
 *   'complete' (metadata, infohash, rinfo) — metadata fetched
 *   'download' (rinfo, infohash)          — internal: start one attempt
 */
class BTClient extends EventEmitter {
  /**
   * @param {object} options - `timeout` (per-socket ms), `maxConnections`
   *   (default 200), and optional `ignore(infohashHex, rinfo, cb)` filter.
   */
  constructor(options) {
    super();

    this.timeout = options.timeout;
    this.maxConnections = options.maxConnections || 200;
    this.activeConnections = 0;
    this.peers = new PeerQueue(this.maxConnections);
    this.on('download', this._download.bind(this));

    if (typeof options.ignore === 'function') {
      this.ignore = options.ignore;
    } else {
      // Default filter: never drop — always attempt the download.
      this.ignore = (infohash, rinfo, ignore) => {
        ignore(false);
      };
    }
  }

  // Pull the next queued peer and, unless the ignore filter drops it,
  // schedule a metadata download via the 'download' event.
  _next(infohash, successful) {
    const req = this.peers.shift(infohash, successful);
    if (req) {
      this.ignore(req.infohash.toString('hex'), req.rinfo, (drop) => {
        if (!drop) {
          this.emit('download', req.rinfo, req.infohash);
        }
      });
    }
  }

  // Connect to one peer over TCP and run the metadata-exchange wire
  // protocol. The 'close' handler always fires (success, error or
  // timeout all destroy the socket), which keeps the queue draining.
  _download(rinfo, infohash) {
    this.activeConnections++;

    let successful = false;
    const socket = new net.Socket();

    // NOTE(review): falls back to 5000ms when options.timeout is unset,
    // while P2PSpider passes 10000 by default — confirm which is intended.
    socket.setTimeout(this.timeout || 5000);

    socket.connect(rinfo.port, rinfo.address, () => {
      const wire = new Wire(infohash);
      // Full duplex: peer bytes flow into the wire parser, and the
      // wire's protocol messages flow back out to the peer.
      socket.pipe(wire).pipe(socket);

      wire.on('metadata', (metadata, infoHash) => {
        successful = true;
        this.emit('complete', metadata, infoHash, rinfo);
        socket.destroy();
      });

      wire.on('fail', () => {
        socket.destroy();
      });

      wire.sendHandshake();
    });

    // Errors and timeouts are expected in bulk crawling; just drop the
    // connection — the 'close' handler advances the queue.
    socket.on('error', () => {
      socket.destroy();
    });

    socket.on('timeout', () => {
      socket.destroy();
    });

    socket.once('close', () => {
      this.activeConnections--;
      this._next(infohash, successful);
    });
  }

  /** Queue a peer for download, kicking off work if capacity allows. */
  add(rinfo, infohash) {
    this.peers.push({ infohash, rinfo });
    if (this.activeConnections < this.maxConnections && this.peers.length() > 0) {
      this._next();
    }
  }

  /** True when no requests are pending in the queue. */
  isIdle() {
    return this.peers.length() === 0;
  }
}
89 |
90 | module.exports = BTClient;
91 |
--------------------------------------------------------------------------------
/lib/redis.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const redis = require('redis');
4 | const { USE_REDIS, REDIS_URI, REDIS_HASH_TTL } = require('../config/env');
5 |
6 | let redisClient = null;
7 |
/**
 * Attempt to reconnect to Redis after a disconnection.
 *
 * Retries indefinitely with a fixed 5 second delay between attempts. On a
 * successful reconnect the module-level `redisClient` reference is
 * restored, so getRedisClient() returns the live client again (previously
 * the reference stayed null forever after an automatic reconnect).
 *
 * @param {object} client - The node-redis client to reconnect.
 */
function reconnectRedisClient(client) {
  console.log('Attempting to reconnect to Redis...');
  setTimeout(() => {
    client.connect()
      .then(() => {
        console.log('Redis client reconnected');
        redisClient = client; // Restore the shared reference for getRedisClient()
      })
      .catch(err => {
        console.error('Redis reconnection error:', err);
        reconnectRedisClient(client); // Retry reconnection
      });
  }, 5000); // Retry after 5 seconds
}
20 |
/**
 * Redis client initialization and configuration.
 *
 * Returns the shared client, reusing an already-open connection when one
 * exists. Returns null when Redis is disabled (USE_REDIS=false) or when
 * the initial connection attempt fails; in the failure case a background
 * reconnect loop is started, so a connection may still appear later.
 *
 * @returns {Promise<object|null>} Connected client, or null.
 */
async function initializeRedis() {
  // Redis is optional; all callers must tolerate a null client.
  if (!USE_REDIS) {
    console.log('Redis is disabled');
    return null;
  }

  // Reuse the existing connection instead of opening a second one.
  if (redisClient && redisClient.isOpen) {
    console.log('Using existing Redis connection.');
    return redisClient;
  }

  const client = redis.createClient({ url: REDIS_URI });

  client.on('error', err => console.error('Redis error:', err));
  // On unexpected disconnect: drop the shared reference and start the
  // background reconnect loop.
  client.on('end', () => {
    console.log('Redis client disconnected');
    redisClient = null; // Clear the client reference on disconnect
    reconnectRedisClient(client);
  });

  try {
    await client.connect();
    console.log('Redis client connected');
    redisClient = client; // Store the connected client
    return client;
  } catch (err) {
    console.error('Error connecting to Redis:', err);
    reconnectRedisClient(client); // Attempt reconnect even on initial failure
    // Caller gets null now; the reconnect loop may succeed later.
    return null;
  }
}
56 |
/**
 * Check whether an infohash was seen recently (i.e. exists in Redis).
 *
 * @param {string} infohash - Torrent infohash (hex string).
 * @param {object|null} client - Connected Redis client, or null.
 * @returns {Promise<boolean>} true when the key exists; false when Redis
 *   is disabled, the client is unavailable, or the lookup fails.
 */
async function isInfohashInRedis(infohash, client) {
  if (!USE_REDIS || !client || !client.isOpen) {
    return false;
  }

  try {
    // EXISTS returns a numeric count (0 or 1 here); coerce to a real
    // boolean so the is* contract also holds under strict comparison,
    // not just truthiness.
    return (await client.exists(`hashes:${infohash}`)) > 0;
  } catch (err) {
    console.error('Error checking Redis for infohash:', err);
    return false; // Fail open: treat errors as "not seen"
  }
}
72 |
/**
 * Record an infohash in Redis so repeated announcements can be skipped.
 *
 * The key expires after REDIS_HASH_TTL seconds. This cache is best-effort:
 * failures are logged and otherwise ignored.
 *
 * @param {string} infohash - Torrent infohash (hex string).
 * @param {object|null} client - Connected Redis client, or null.
 */
async function storeInfohashInRedis(infohash, client) {
  const usable = USE_REDIS && client && client.isOpen;
  if (!usable) {
    return;
  }

  const key = `hashes:${infohash}`;
  try {
    await client.set(key, infohash, { EX: REDIS_HASH_TTL });
  } catch (err) {
    console.error('Error storing infohash in Redis:', err);
  }
}
87 |
// Accessor for the shared module-level Redis client.
// Returns null when Redis is disabled, not yet connected, or disconnected.
function getRedisClient() {
  return redisClient;
}
91 |
92 | async function closeRedis() {
93 | if (redisClient && redisClient.isOpen) {
94 | await redisClient.quit();
95 | console.log('Redis client closed');
96 | redisClient = null;
97 | }
98 | }
99 |
100 | module.exports = {
101 | initializeRedis,
102 | isInfohashInRedis,
103 | storeInfohashInRedis,
104 | getRedisClient,
105 | closeRedis
106 | };
--------------------------------------------------------------------------------
/app.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const http = require('http');
4 | const {
5 | validateEnvironment,
6 | SITE_PORT,
7 | RUN_DAEMON,
8 | RUN_WEBSERVER,
9 | USE_REDIS
10 | } = require('./config/env');
11 | const { initializeDatabase } = require('./lib/database');
12 | const { initializeRedis, closeRedis } = require('./lib/redis');
13 | const { initializeP2PSpider, startP2PSpider, closeP2PSpider } = require('./lib/p2p');
14 | const { initializeWebSocket } = require('./services/websocket');
15 | const { configureExpressApp } = require('./config/express');
16 |
17 | let server = null;
18 |
/**
 * Graceful shutdown handler for SIGINT/SIGTERM.
 *
 * Tears services down in dependency order: the HTTP server first (stop
 * accepting requests), then the P2P spider (if the daemon is enabled),
 * then the Redis connection (if enabled), and finally exits with code 0.
 */
async function gracefulShutdown() {
  console.log('\nReceived shutdown signal. Shutting down gracefully...');

  // Close HTTP server so no new work arrives during the rest of teardown.
  // server.close passes any error to its callback; it is intentionally
  // ignored so shutdown always proceeds.
  if (server) {
    await new Promise((resolve) => server.close(resolve));
    console.log('HTTP server closed.');
  }

  // Close P2P Spider (callback-style API wrapped in a Promise).
  if (RUN_DAEMON) {
    await new Promise((resolve) => closeP2PSpider(resolve));
    console.log('P2P Spider closed.');
  }

  // Close Redis connection
  if (USE_REDIS) {
    await closeRedis();
    // No log here, closeRedis logs internally
  }

  // Database connection is likely managed by mongoose/sqlite driver,
  // typically doesn't require explicit close on SIGINT unless specified.

  console.log('Graceful shutdown complete.');
  process.exit(0);
}
49 |
/**
 * Main application startup.
 *
 * Order matters: env validation, database, Redis, then the P2P spider is
 * constructed (needing db + redis), the webserver is started, and only
 * then does the spider begin listening. Signal handlers for graceful
 * shutdown are registered last. Any startup failure exits with code 1.
 */
async function main() {
  try {
    // Validate environment variables
    validateEnvironment();

    // Initialize Database
    const db = await initializeDatabase();

    // Initialize Redis (if enabled); may be null when disabled or unreachable.
    const redisClient = await initializeRedis();

    // Initialize P2P Spider (if enabled)
    if (RUN_DAEMON) {
      initializeP2PSpider(db, redisClient);
    }

    // Initialize and start Webserver (if enabled)
    if (RUN_WEBSERVER) {
      const app = configureExpressApp(db); // Pass db if needed by routes
      server = http.createServer(app);
      initializeWebSocket(server, db); // Pass db to WebSocket service

      server.listen(SITE_PORT, () => {
        console.log(`Web server listening on port ${SITE_PORT}`);
      });

      // Fatal listen errors (e.g. EADDRINUSE) terminate the process.
      server.on('error', (err) => {
        console.error('HTTP Server error:', err);
        process.exit(1);
      });
    } else {
      console.log('Webserver is disabled (RUN_WEBSERVER=false)');
    }

    // Start P2P Spider listening (if enabled)
    if (RUN_DAEMON) {
      startP2PSpider();
    } else {
      console.log('P2P Spider daemon is disabled (RUN_DAEMON=false)');
    }

    // Setup signal handlers for graceful shutdown
    process.on('SIGINT', gracefulShutdown);
    process.on('SIGTERM', gracefulShutdown);

    console.log('Application started successfully.');

  } catch (err) {
    console.error('Application startup failed:', err);
    process.exit(1);
  }
}

// Start the application
main();
--------------------------------------------------------------------------------
/utils/bulkIndexToElasticsearch.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | require('dotenv').config();
4 | const { getDatabase, Database } = require('../models/db');
5 | const elasticsearch = require('../models/elasticsearch');
6 |
const BATCH_SIZE = 1000; // Process in batches to avoid memory issues

/**
 * Index all existing magnets from the database into Elasticsearch.
 *
 * Pages through the database in BATCH_SIZE chunks (newest first by
 * fetchedAt) and indexes each document, tallying per-document successes
 * and failures. Exits the process: 0 on completion, 1 on fatal error.
 */
async function bulkIndexAll() {
  try {
    console.log('Starting Elasticsearch bulk indexing process...');

    // Initialize Elasticsearch
    const esInitialized = await elasticsearch.initialize();
    if (!esInitialized) {
      console.error('Failed to initialize Elasticsearch. Please check your connection settings.');
      process.exit(1);
    }

    // Get database instance or create a new one if needed
    let db = getDatabase();
    if (!db) {
      console.log('No existing database instance found, creating a new one...');
      db = new Database();
    }

    // Connect to the database
    await db.connect();
    console.log(`Connected to ${db.type} database.`);

    // Cached count used only for progress reporting. Coalesce to 0 so a
    // missing count cannot crash toLocaleString() or poison progress math.
    const totalCount = db.totalCount || 0;
    console.log(`Found ${totalCount.toLocaleString()} documents to index`);

    let indexed = 0;
    let skip = 0;
    let successCount = 0;
    let errorCount = 0;

    // Process in batches until the database runs out of documents.
    // NOTE(review): offset pagination assumes no concurrent inserts while
    // the script runs; rows added mid-run may be skipped or re-read.
    while (true) {
      console.log(`Processing batch starting at offset ${skip}...`);

      // Fetch batch from database
      const batch = await db.find({}, {
        skip: skip,
        limit: BATCH_SIZE,
        sort: { fetchedAt: -1 }
      });

      if (!batch || batch.length === 0) {
        console.log('No more documents found. Indexing complete.');
        break;
      }

      console.log(`Indexing batch of ${batch.length} documents...`);

      // Index the batch concurrently; failures are captured per document
      // instead of aborting the whole batch.
      const results = await Promise.all(batch.map(async (doc) => {
        try {
          await elasticsearch.indexDocument(doc);
          return { success: true };
        } catch (err) {
          return {
            success: false,
            error: err.message,
            infohash: doc.infohash
          };
        }
      }));

      // Count results
      const batchSuccess = results.filter(r => r.success).length;
      const batchError = results.length - batchSuccess;

      successCount += batchSuccess;
      errorCount += batchError;
      indexed += batch.length;

      // Guard the division: with an empty/unknown total the old code
      // printed "NaN%". Report 100% once documents stop mattering to a
      // zero denominator.
      const progress = totalCount > 0 ? Math.round((indexed / totalCount) * 100) : 100;
      console.log(`Progress: ${progress}% (${indexed.toLocaleString()}/${totalCount.toLocaleString()}) - Success: ${successCount}, Errors: ${errorCount}`);

      // Prepare for next batch
      skip += BATCH_SIZE;
    }

    console.log('Bulk indexing complete!');
    console.log(`Total documents processed: ${indexed.toLocaleString()}`);
    console.log(`Successfully indexed: ${successCount.toLocaleString()}`);
    console.log(`Failed to index: ${errorCount.toLocaleString()}`);

    // Exit the process
    process.exit(0);
  } catch (error) {
    console.error('Bulk indexing failed:', error);
    process.exit(1);
  }
}

// Run the bulk indexing function
bulkIndexAll();
--------------------------------------------------------------------------------
/config/express.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const express = require('express');
4 | const path = require('path');
5 | const compression = require('compression');
6 | const routes = require('../routes/index');
7 | const { PRODUCTION, SITE_NAME } = require('./env');
8 | const { getWebSocketServerAddress } = require('../services/websocket');
9 |
/**
 * Cache control middleware.
 *
 * Static assets (matched by extension) get a 1-day public cache in
 * production and no cache in development; other GET responses get a short
 * 60-second public cache. Non-GET dynamic requests are left untouched.
 */
function cacheControl(req, res, next) {
  const isStaticAsset = /\.(css|js|ico|jpg|jpeg|png|gif|woff|woff2|ttf|svg|eot)$/.test(req.url);

  if (isStaticAsset) {
    res.set('Cache-Control', `public, max-age=${PRODUCTION ? 86400 : 0}`); // 1 day in prod, no cache in dev
    if (PRODUCTION) {
      res.set('Expires', new Date(Date.now() + 86400000).toUTCString());
    }
  } else if (req.method === 'GET') {
    // Dynamic pages: allow a brief shared cache.
    res.set('Cache-Control', 'public, max-age=60'); // 1 minute for dynamic content
  }

  next();
}
28 |
/**
 * Request logging middleware.
 *
 * Resolves the client IP (honouring X-Forwarded-For behind a proxy),
 * exposes it on res.locals.ip, and logs the request line in cyan.
 * Static-asset requests are not logged in production to keep logs clean.
 */
function loggingMiddleware(req, res, next) {
  const ip = req.headers['x-forwarded-for'] || req.socket.remoteAddress;
  res.locals.ip = ip; // Make IP available to routes if needed

  const isStaticRequest = req.originalUrl.startsWith('/public/');
  if (!(PRODUCTION && isStaticRequest)) {
    console.log('\x1b[36m%s\x1b[0m', `REQ FROM: ${ip} ON: ${req.method} ${req.originalUrl}`);
  }
  next();
}
44 |
/**
 * Express app configuration: compression, caching, body parsing, static
 * assets, EJS views, logging, template locals, routes, and error handling.
 *
 * @param {object} db - Database instance; currently unused here but kept
 *   in the signature so routes/middleware can receive it later without an
 *   interface change.
 * @returns {object} Configured Express application.
 */
function configureExpressApp(db) {
  const app = express();

  // Use Helmet for basic security headers (recommended)
  // const helmet = require('helmet');
  // app.use(helmet());

  // Compression middleware
  app.use(compression());

  // Cache control middleware
  app.use(cacheControl);

  // Body parser middleware
  app.use(express.json());
  app.use(express.urlencoded({ extended: true }));

  // Static files serving
  app.use('/public', express.static(path.join(__dirname, '..', 'public'), {
    maxAge: PRODUCTION ? '1d' : 0, // Match cacheControl duration
    etag: true // Enable ETag generation
  }));

  // View engine setup
  app.set('views', path.join(__dirname, '..', 'views'));
  app.set('view engine', 'ejs');

  // Logging middleware
  app.use(loggingMiddleware);

  // Development settings
  if (!PRODUCTION) {
    app.locals.pretty = true; // Pretty print HTML in development
  }

  // Global template variables available to every view.
  app.use((req, res, next) => {
    res.locals.wsServerAddress = getWebSocketServerAddress();
    res.locals.site_name = SITE_NAME;
    next();
  });

  // Mount main application routes
  app.use('/', routes);

  // 404 handler, rendered through the shared error view: the views
  // directory ships error.ejs but no 404.ejs, so rendering '404' here
  // would itself throw and surface every missing page as a 500.
  app.use((req, res, next) => {
    res.status(404).render('error', {
      title: 'Not Found',
      message: 'The page you requested could not be found',
      error: {}
    });
  });

  // Basic error handler (error.ejs); hide internals in production.
  app.use((err, req, res, next) => {
    console.error('Express Error:', err.stack);
    res.status(500).render('error', {
      title: 'Server Error',
      message: PRODUCTION ? 'An unexpected error occurred' : err.message,
      error: PRODUCTION ? {} : err // Only show stack trace in development
    });
  });

  return app;
}

module.exports = { configureExpressApp };
--------------------------------------------------------------------------------
/views/searchform.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 | <%- include('includes/header') %>
4 |
5 | <%- include('includes/navbar') %>
6 |
7 |
8 |
9 |
10 |
11 | Search Magnets
12 |
14 |
15 |
16 |
17 |
18 | Search through millions of magnet links discovered from the BitTorrent DHT network
19 |
20 |
21 |
42 |
43 |
44 |
51 |
52 |
59 |
60 |
67 |
68 |
69 |
70 |
71 |
72 | <%- include('includes/footer') %>
73 |
74 |
--------------------------------------------------------------------------------
/reset_data.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# WARNING: This script deletes data from the database, Redis, and Elasticsearch.
# Make sure you understand what it does before running.

# --- Configuration ---
# Load .env file if it exists in the current directory.
# `set -a` exports every variable assigned while sourcing the file; this
# preserves quoted values and values containing spaces, which the previous
# `export $(grep -v '^#' .env | xargs)` approach word-split incorrectly.
if [ -f .env ]; then
    echo "Loading environment variables from .env file..."
    set -a
    # shellcheck source=/dev/null
    . ./.env
    set +a
fi

# Use environment variables or defaults from project files
DB_TYPE=${DB_TYPE:-sqlite} # Get DB type (though we only handle sqlite deletion here)
SQLITE_DB_PATH=${SQLITE_PATH:-./data/magnet.db}
REDIS_URI=${REDIS_URI:-redis://127.0.0.1:6379} # Default if not in .env
ELASTICSEARCH_NODE=${ELASTICSEARCH_NODE:-http://localhost:9200} # Default if not in .env
ELASTICSEARCH_INDEX=${ELASTICSEARCH_INDEX:-magnets} # Default if not in .env

# Elasticsearch index mapping (copied from models/elasticsearch/index.js)
ES_MAPPING='{
  "mappings": {
    "properties": {
      "name": {
        "type": "text",
        "analyzer": "standard",
        "fields": {
          "keyword": {
            "type": "keyword",
            "ignore_above": 256
          }
        }
      },
      "infohash": {
        "type": "keyword"
      },
      "magnet": {
        "type": "keyword"
      },
      "files": {
        "type": "text",
        "analyzer": "standard"
      },
      "fetchedAt": {
        "type": "date",
        "format": "epoch_millis"
      }
    }
  }
}'

# --- Safety Check ---
read -p "This script will DELETE ALL data in SQLite DB (${SQLITE_DB_PATH}), Redis (${REDIS_URI}), and ES Index (${ELASTICSEARCH_NODE}/${ELASTICSEARCH_INDEX}). Are you sure? (y/N): " confirm && [[ $confirm == [yY] || $confirm == [yY][eE][sS] ]] || exit 1

# --- Actions ---

# 1. Delete Database Files
if [ "$DB_TYPE" = "sqlite" ]; then
    echo "1. Deleting SQLite database files (${SQLITE_DB_PATH}*)..."
    # Extract directory from path
    DB_DIR=$(dirname "$SQLITE_DB_PATH")
    DB_BASE=$(basename "$SQLITE_DB_PATH")
    # Remove base file and potential journal/wal files (-v for verbose, -f to ignore non-existent)
    rm -vf "$DB_DIR/$DB_BASE"*
    echo "SQLite files removed."
else
    echo "1. Skipping database file deletion (DB_TYPE is not sqlite)."
fi
echo "---"

# 2. Clear Redis
echo "2. Clearing Redis database (FLUSHALL on ${REDIS_URI})..."
# Use -u URI if available
# Note: Requires redis-cli that supports -u flag. Might need adjustment based on version.
if redis-cli -u "$REDIS_URI" PING > /dev/null 2>&1; then
    redis-cli -u "$REDIS_URI" FLUSHALL
    if [ $? -eq 0 ]; then
        echo "Redis flushed successfully using URI: $REDIS_URI"
    else
        echo "Error: Failed to flush Redis using URI: $REDIS_URI"
    fi
elif redis-cli PING > /dev/null 2>&1; then
    echo "Warning: Could not ping Redis with URI, trying default connection (127.0.0.1:6379)..."
    redis-cli FLUSHALL
    if [ $? -eq 0 ]; then
        echo "Redis flushed successfully using default connection."
    else
        echo "Error: Failed to flush Redis using default connection."
    fi
else
    echo "Error: Failed to connect to Redis (tried URI and default). Skipping flush."
fi
echo "---"

# 3. Delete Elasticsearch Index
echo "3. Deleting Elasticsearch index: ${ELASTICSEARCH_NODE}/${ELASTICSEARCH_INDEX}..."
# Define the simple delete URL
DELETE_URL="${ELASTICSEARCH_NODE}/${ELASTICSEARCH_INDEX}"
# Pass the variable with double quotes (no -g needed now)
DELETE_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -XDELETE "$DELETE_URL")
# Check only for 200 (success) or allow 404 (already deleted) as acceptable
if [ "$DELETE_STATUS" = "200" ]; then
    echo "Elasticsearch index deleted successfully. Status: $DELETE_STATUS"
elif [ "$DELETE_STATUS" = "404" ]; then
    echo "Elasticsearch index did not exist. Status: $DELETE_STATUS"
else
    echo "Error deleting Elasticsearch index. Status: $DELETE_STATUS"
    # Use double quotes here too
    curl -XDELETE "$DELETE_URL" # Show error output
fi
echo "---"

# 4. Recreate Elasticsearch Index
echo "4. Recreating Elasticsearch index: ${ELASTICSEARCH_NODE}/${ELASTICSEARCH_INDEX}..."
CREATE_URL="${ELASTICSEARCH_NODE}/${ELASTICSEARCH_INDEX}"
CREATE_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -XPUT "$CREATE_URL" -H 'Content-Type: application/json' -d "$ES_MAPPING")
if [ "$CREATE_STATUS" = "200" ]; then
    echo "Elasticsearch index recreated successfully. Status: $CREATE_STATUS"
else
    echo "Error recreating Elasticsearch index. Status: $CREATE_STATUS"
    curl -XPUT "$CREATE_URL" -H 'Content-Type: application/json' -d "$ES_MAPPING"
fi
echo "---"

echo "Reset script finished."
--------------------------------------------------------------------------------
/lib/p2p.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const P2PSpider = require('./index'); // Assuming lib/index.js exports the P2PSpider class
4 | const { isInfohashInRedis, storeInfohashInRedis } = require('./redis');
5 | const { broadcastNewMagnet } = require('../services/websocket');
6 | const { P2P_PORT, P2P_HOST } = require('../config/env');
7 |
8 | let p2pInstance = null;
9 |
/**
 * Process torrent metadata and persist it if not already stored.
 *
 * Extracts the name and file list (with sizes) from the info dictionary,
 * saves a new magnet record, caches the infohash in Redis (with TTL), and
 * broadcasts the new magnet over WebSocket. Errors are logged, never
 * thrown, so one bad torrent cannot crash the spider loop.
 *
 * @param {object} metadata - { infohash, info, magnet } from the spider.
 * @param {object} db - Database wrapper (findOne / saveMagnet).
 * @param {object|null} redisClient - Connected Redis client, or null.
 */
async function processMetadata(metadata, db, redisClient) {
  const { infohash, info, magnet } = metadata;

  // Extract data from metadata
  const name = info.name ? info.name.toString() : '';

  // Extract files with their sizes
  let files = [];
  let totalSize = 0;

  if (info.files && Array.isArray(info.files)) {
    // Multi-file torrent. Per BEP 3 each file's `path` is a LIST of path
    // segments (typically Buffers); join with '/' so nested paths don't
    // collapse into comma-separated strings via Array.prototype.toString().
    files = info.files.map(file => {
      let filePath = '';
      if (Array.isArray(file.path)) {
        filePath = file.path.map(part => part.toString()).join('/');
      } else if (file.path) {
        filePath = file.path.toString(); // tolerate pre-flattened paths
      }
      const fileSize = file.length || 0;
      totalSize += fileSize;
      return {
        path: filePath,
        size: fileSize
      };
    }).sort((a, b) => a.path.localeCompare(b.path));
  } else if (info.name) {
    // Single file torrent
    const fileSize = info.length || 0;
    totalSize += fileSize;
    files = [{
      path: info.name.toString(),
      size: fileSize
    }];
  }

  const fetchedAt = Date.now();

  try {
    // Check if metadata exists in database
    const existingMagnet = await db.findOne({ infohash });

    if (!existingMagnet) {
      // Save to database
      await db.saveMagnet({
        name,
        infohash,
        magnet,
        files,
        totalSize,
        fetchedAt
      });
      console.log('Added to database:', name);

      // Cache the infohash in Redis (with TTL) to short-circuit repeats.
      await storeInfohashInRedis(infohash, redisClient);

      // Broadcast new magnet via WebSocket; the websocket service reads
      // the running count from its own db instance.
      await broadcastNewMagnet({
        name,
        infohash,
        files,
        totalSize,
        fetchedAt
      });
    }
    // else: already stored; nothing to do.
  } catch (err) {
    // Log and swallow: a failed save must not crash the spider loop.
    console.error('Error processing metadata:', err);
  }
}
85 |
/**
 * Entry point for 'metadata' events from the spider: skip infohashes seen
 * recently (Redis dedup cache), otherwise process and store the metadata.
 * All errors are caught and logged so the event loop keeps running.
 */
async function handleMetadata(metadata, rinfo, db, redisClient) {
  const { infohash } = metadata;

  try {
    const seenRecently = await isInfohashInRedis(infohash, redisClient);
    if (seenRecently) {
      return; // Already handled within the Redis TTL window.
    }

    await processMetadata(metadata, db, redisClient);
  } catch (err) {
    console.error(`Error handling metadata for ${infohash}:`, err);
  }
}
106 |
/**
 * Initialize and configure the P2P spider singleton.
 * Subsequent calls return the already-created instance.
 */
function initializeP2PSpider(db, redisClient) {
  if (p2pInstance) {
    console.log('P2P Spider already initialized.');
    return p2pInstance;
  }

  const spider = new P2PSpider({
    nodesMaxSize: 250, // Consider making these configurable via env
    maxConnections: 500,
    timeout: 1000
  });

  // Persist every piece of metadata the spider fetches (handler catches
  // its own errors internally).
  spider.on('metadata', (metadata, rinfo) => handleMetadata(metadata, rinfo, db, redisClient));

  // Surface spider errors in the logs rather than crashing.
  spider.on('error', (err) => {
    console.error('P2P Spider Error:', err);
  });

  p2pInstance = spider;
  console.log('P2P Spider Initialized');
  return p2pInstance;
}
135 |
/**
 * Start the P2P spider listening on the configured host/port.
 * Logs an error and does nothing if initializeP2PSpider() was never called.
 */
function startP2PSpider() {
  if (!p2pInstance) {
    console.error('P2P Spider not initialized before starting.');
    return;
  }

  const onListening = () => {
    console.log(`P2P Spider is listening on ${P2P_HOST}:${P2P_PORT}!`);
  };
  p2pInstance.listen(P2P_PORT, P2P_HOST, onListening);
}
148 |
149 | /**
150 | * Close the P2P Spider
151 | */
152 | function closeP2PSpider(callback) {
153 | if (p2pInstance) {
154 | p2pInstance.close(callback);
155 | p2pInstance = null; // Clear the instance
156 | } else if (callback) {
157 | callback(); // Call callback even if not initialized
158 | }
159 | }
160 |
161 | module.exports = {
162 | initializeP2PSpider,
163 | startP2PSpider,
164 | closeP2PSpider
165 | };
--------------------------------------------------------------------------------
/public/css/directory-tree.css:
--------------------------------------------------------------------------------
/* Directory tree view styles.
   Renders the per-torrent file listing as a collapsible tree (rows are
   Tailwind flex containers). Collapse/expand is driven by JS toggling the
   .collapsed class; transitions are deliberately disabled throughout for
   reliability. */
.directory-tree .flex.items-start,
.directory-tree .flex.items-center {
  transition: background-color 0.2s ease;
  border-radius: 0.25rem;
  margin-bottom: 0.125rem;
  position: relative; /* anchor for the ::before/::after tree lines below */
}
.directory-tree .flex.items-start:hover,
.directory-tree .flex.items-center:hover {
  background-color: rgba(156, 163, 175, 0.1);
}
.directory-tree .folder-toggle {
  cursor: pointer;
  z-index: 1;
  /* Improve click area */
  padding: 2px;
  margin: -2px;
  user-select: none;
}
.directory-tree .folder-icon {
  transition: transform 0.2s ease, color 0.2s ease;
  pointer-events: none; /* Prevent icon from capturing clicks */
}
.directory-tree .folder-toggle:hover .folder-icon {
  transform: scale(1.1);
}
.directory-tree .folder-toggle[data-processing="true"] {
  pointer-events: none; /* Prevent clicks during processing */
}
/* A single row (file or folder) in the tree. */
.directory-item {
  transition: none !important; /* Disable transitions for better reliability */
  transform-origin: top left;
  max-height: 50px; /* Default height for items */
  overflow: hidden;
  will-change: transform, opacity, max-height;
  pointer-events: auto; /* Ensure items are clickable when visible */
}
/* Hidden state: belt-and-braces stack of hiding techniques so collapsed
   rows can never flash, take layout space, or intercept clicks. */
.directory-item.collapsed {
  max-height: 0 !important;
  opacity: 0 !important;
  margin-top: 0 !important;
  margin-bottom: 0 !important;
  padding-top: 0 !important;
  padding-bottom: 0 !important;
  transform: translateY(-5px) !important;
  pointer-events: none !important;
  transition: none !important; /* Use no transition when collapsing for instant effect */
  display: none !important;
  visibility: hidden !important;
  height: 0 !important;
  position: absolute !important;
  z-index: -1 !important;
  clip: rect(0, 0, 0, 0) !important; /* Additional way to hide content */
  overflow: hidden !important;
}

/* Define a group container for folder contents */
.folder-contents {
  overflow: hidden;
  transition: none !important; /* Disable transitions for better reliability */
  max-height: none; /* Don't limit height */
  opacity: 1;
  position: relative;
  display: block;
  will-change: opacity, display;
}
/* Hidden state for a whole folder subtree; mirrors .directory-item.collapsed. */
.folder-contents.collapsed {
  max-height: 0 !important;
  opacity: 0 !important;
  margin: 0 !important;
  padding: 0 !important;
  overflow: hidden !important;
  transition: none !important; /* Use no transition when collapsing for instant effect */
  display: none !important;
  visibility: hidden !important;
  height: 0 !important;
  position: absolute !important;
  z-index: -1 !important;
  clip: rect(0, 0, 0, 0) !important; /* Additional way to hide content */
  pointer-events: none !important;
}

/* Add visual feedback effect when expanding/collapsing */
@keyframes pulse {
  0% { background-color: transparent; }
  50% { background-color: rgba(59, 130, 246, 0.08); }
  100% { background-color: transparent; }
}
.folder-toggle.active {
  animation: pulse 0.3s ease; /* Faster feedback */
  pointer-events: none; /* Prevent additional clicks during animation */
}

/* Make folder icons rotate slightly when toggling */
.folder-icon {
  transition: transform 0.2s ease;
}
.fa-folder-open {
  transform: rotateZ(-5deg);
}
.fa-folder {
  transform: rotateZ(0);
}

/* Tree lines for better visual hierarchy.
   --indent-level is set per row (default 0.5) to position the guides. */
.directory-tree .flex.items-start::before,
.directory-tree .flex.items-center::before {
  content: '';
  position: absolute;
  top: 0;
  bottom: 0;
  left: calc(var(--indent-level, 0.5) * 1rem - 0.75rem);
  width: 1px;
  background-color: rgba(209, 213, 219, 0.6);
  pointer-events: none; /* Ensure line doesn't interfere with clicks */
}
/* Horizontal connector from the vertical guide to the row content. */
.directory-tree .flex.items-start::after,
.directory-tree .flex.items-center::after {
  content: '';
  position: absolute;
  top: 1rem;
  left: calc(var(--indent-level, 0.5) * 1rem - 0.75rem);
  width: 0.75rem;
  height: 1px;
  background-color: rgba(209, 213, 219, 0.6);
  pointer-events: none; /* Ensure line doesn't interfere with clicks */
}
/* Don't show vertical line for last items */
.directory-tree .flex.items-start:last-child::before,
.directory-tree .flex.items-start.last-at-level::before,
.directory-tree .flex.items-center:last-child::before,
.directory-tree .flex.items-center.last-at-level::before {
  height: 1rem;
}
/* Don't show lines for root items */
.directory-tree > .flex.items-start::before,
.directory-tree > .flex.items-start::after,
.directory-tree > .flex.items-center::before,
.directory-tree > .flex.items-center::after {
  display: none;
}

/* Ensure file size is aligned with file name */
.directory-tree .text-dark-400.text-xs {
  display: flex;
  align-items: center;
  line-height: 1;
}
--------------------------------------------------------------------------------
/views/includes/navbar.ejs:
--------------------------------------------------------------------------------
1 |
2 |
89 |
--------------------------------------------------------------------------------
/services/websocket.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const WebSocket = require('ws');
4 | const { SITE_HOSTNAME } = require('../config/env');
5 |
6 | let wss = null;
7 | let dbInstance = null; // Store db instance
8 |
/**
 * Initialize WebSocket server (singleton).
 *
 * Attaches a ws server to the given HTTP server, stores the db instance
 * for count lookups, sends each new client the current magnet count, and
 * runs a 30s ping/pong heartbeat that terminates dead connections.
 *
 * @param {object} server - HTTP server to attach to.
 * @param {object} db - Database instance (read for totalCount).
 * @returns {object} The WebSocket.Server instance.
 */
function initializeWebSocket(server, db) {
  // Singleton guard: reuse an existing server rather than double-binding.
  if (wss) {
    console.log('WebSocket server already initialized.');
    return wss;
  }

  dbInstance = db; // Store database instance for later use
  wss = new WebSocket.Server({ server });

  wss.on('connection', async (ws) => {
    console.log('WebSocket connection established');

    // Set client properties used by the heartbeat/throttling logic below.
    ws.isAlive = true;
    ws.lastUpdate = 0;

    // Handle pongs to keep connection alive (marks the client reachable).
    ws.on('pong', () => {
      ws.isAlive = true;
    });

    // Handle messages
    ws.on('message', message => {
      // Process messages if needed (e.g. client requests)
      console.log('Received ws message:', message);
    });

    ws.on('close', () => {
      console.log('WebSocket connection closed');
    });

    ws.on('error', (error) => {
      console.error('WebSocket error:', error);
    });

    try {
      // Send initial count so the UI has data immediately on connect.
      await sendCountToClient(ws);
    } catch (err) {
      console.error('Error in WebSocket connection handler:', err);
    }
  });

  // Heartbeat: any client that failed to pong since the last sweep is
  // considered dead and terminated; survivors are pinged again.
  const pingInterval = setInterval(() => {
    wss.clients.forEach(ws => {
      if (ws.isAlive === false) {
        console.log('Terminating dead WebSocket connection');
        return ws.terminate();
      }

      ws.isAlive = false;
      ws.ping(() => {});
    });
  }, 30000); // 30 seconds

  // Reset module state when the server shuts down so a later
  // initializeWebSocket() call can start fresh.
  wss.on('close', () => {
    console.log('WebSocket server closing, clearing ping interval.');
    clearInterval(pingInterval);
    wss = null; // Clear the instance
    dbInstance = null;
  });

  console.log('WebSocket server initialized');
  return wss;
}
78 |
/**
 * Push the cached magnet count to a single client, if its socket is open.
 * No-op (with an error log) when the DB instance has not been injected yet.
 *
 * @param {WebSocket} ws - Target client connection.
 */
async function sendCountToClient(ws) {
  if (!dbInstance) {
    console.error('Cannot send count, DB instance not available in WebSocket service.');
    return;
  }
  try {
    // The count is read from the DB facade's in-memory cache, not a query.
    const payload = { eventType: 'count_update', data: { count: dbInstance.totalCount } };
    if (ws.readyState === WebSocket.OPEN) {
      ws.send(JSON.stringify(payload));
    }
  } catch (err) {
    console.error('Error sending count to WebSocket client:', err);
  }
}
97 |
/**
 * Serialize a message once and deliver it to every open client, rate-limited
 * per client.
 *
 * @param {Object} message - JSON-serializable payload.
 */
function broadcastToClients(message) {
  if (!wss) return;

  const serialized = JSON.stringify(message);
  const now = Date.now();

  for (const client of wss.clients) {
    if (client.readyState !== WebSocket.OPEN) continue;
    // Throttle: at most one message per client per 200ms window.
    if (now - (client.lastUpdate || 0) > 200) {
      client.send(serialized);
      client.lastUpdate = now;
    }
  }
}
117 |
/**
 * Announce a newly discovered magnet to all connected clients, bundling the
 * current cached total count into the payload.
 *
 * @param {Object} magnetData - Fields describing the discovered magnet.
 */
async function broadcastNewMagnet(magnetData) {
  if (!dbInstance) {
    console.error('Cannot broadcast magnet, DB instance not available.');
    return;
  }
  try {
    broadcastToClients({
      eventType: 'new_magnet',
      // Attach the cached count so clients can update their counter in one message.
      data: { ...magnetData, count: dbInstance.totalCount }
    });
  } catch (err) {
    console.error('Error broadcasting new magnet:', err);
  }
}
137 |
/**
 * Push the latest cached magnet count to every connected client.
 */
async function updateAllClientsCount() {
  if (!dbInstance) {
    console.error('Cannot update client count, DB instance not available.');
    return;
  }
  try {
    broadcastToClients({
      eventType: 'count_update',
      data: { count: dbInstance.totalCount }
    });
  } catch (err) {
    console.error('Error updating WebSocket clients count:', err);
  }
}
153 |
154 |
/**
 * Derive the client-facing WebSocket URL from SITE_HOSTNAME by swapping the
 * scheme (http -> ws, https -> wss). Values that already use a ws scheme, or
 * falsy values, are returned untouched.
 *
 * @returns {String|undefined} WebSocket address derived from SITE_HOSTNAME.
 */
function getWebSocketServerAddress() {
  const hostname = SITE_HOSTNAME;
  if (!hostname || hostname.startsWith('ws')) {
    return hostname;
  }
  return hostname.replace(/^http/, 'ws');
}
166 |
// Accessor for the singleton server instance (null until initializeWebSocket runs).
function getWssInstance() {
  return wss;
}

module.exports = {
  initializeWebSocket,
  broadcastNewMagnet,
  // sendCountToClient, // Likely only needed internally or on connection
  updateAllClientsCount,
  // broadcastToClients, // Might be internal helper
  getWebSocketServerAddress,
  getWssInstance
};
--------------------------------------------------------------------------------
/lib/dhtspider.js:
--------------------------------------------------------------------------------
'use strict'

var dgram = require('dgram');

var bencode = require('bencode');

var utils = require('./utils');
var KTable = require('./ktable');

// Well-known public DHT nodes used to bootstrap the routing table.
var BOOTSTRAP_NODES = [
    ['router.bittorrent.com', 6881],
    ['dht.transmissionbt.com', 6881],
    ['router.utorrent.com', 6881],
    ['router.bitcomet.com', 6881],
    ['dht.aelitis.com', 6881],
    ['bootstrap.jami.net', 6881]
];
var TID_LENGTH = 4;        // bytes of randomID used as the transaction id in queries
var NODES_MAX_SIZE = 4200; // default cap on the k-table size
var TOKEN_LENGTH = 2;      // bytes of the infohash handed back as the get_peers token
21 |
/**
 * DHT crawler: binds a UDP socket, joins the DHT, and harvests infohashes
 * from get_peers / announce_peer traffic.
 * @param {Object} options - { btclient, address, port, nodesMaxSize }
 */
var DHTSpider = function(options) {
    this.btclient = options.btclient;  // consumer of discovered peers (see onAnnouncePeerRequest)
    this.address = options.address;    // local address to bind the UDP socket to
    this.port = options.port;          // local UDP port
    this.udp = dgram.createSocket('udp4');
    this.ktable = new KTable(options.nodesMaxSize || NODES_MAX_SIZE);
}
29 |
/**
 * Bencode a KRPC message and send it over UDP to the given remote.
 * Messages that cannot be encoded are silently dropped (best-effort DHT).
 */
DHTSpider.prototype.sendKRPC = function(msg, rinfo) {
    var encoded;
    try {
        encoded = bencode.encode(msg);
    }
    catch (err) {
        return; // unencodable message: drop it
    }
    this.udp.send(encoded, 0, encoded.length, rinfo.port, rinfo.address);
};
39 |
/**
 * Handle the compact node list from a find_node response: decode it and push
 * every usable node (valid port, not ourselves) into the k-table.
 */
DHTSpider.prototype.onFindNodeResponse = function(nodes) {
    var decoded = utils.decodeNodes(nodes);
    decoded.forEach(function(node) {
        var portOk = node.port > 0 && node.port < 65536;
        if (portOk && node.address != this.address && node.nid != this.ktable.nid) {
            this.ktable.push(node);
        }
    }, this);
};
49 |
/**
 * Send a find_node query to a remote. When a target nid is given, the query
 * id is forged to look like a neighbour of that node (sybil-style crawling);
 * otherwise our own k-table id is used.
 */
DHTSpider.prototype.sendFindNodeRequest = function(rinfo, nid) {
    var selfId = this.ktable.nid;
    var queryId = nid != undefined ? utils.genNeighborID(nid, selfId) : selfId;
    this.sendKRPC({
        t: utils.randomID().slice(0, TID_LENGTH),
        y: 'q',
        q: 'find_node',
        a: {
            id: queryId,
            target: utils.randomID()
        }
    }, rinfo);
};
63 |
/**
 * Seed the crawl by querying each well-known bootstrap node.
 */
DHTSpider.prototype.joinDHTNetwork = function() {
    BOOTSTRAP_NODES.forEach(function(entry) {
        this.sendFindNodeRequest({ address: entry[0], port: entry[1] });
    }, this);
};
69 |
/**
 * Query every node currently in the k-table (posing as its neighbour), then
 * empty the table so the next crawl round starts fresh.
 */
DHTSpider.prototype.makeNeighbours = function() {
    this.ktable.nodes.forEach(function(node) {
        var remote = { address: node.address, port: node.port };
        this.sendFindNodeRequest(remote, node.nid);
    }, this);
    this.ktable.nodes = [];
};
79 |
/**
 * Reply to a get_peers query with an empty node list and a token derived from
 * the infohash, so the querying peer will later send announce_peer back to us.
 * Malformed queries (missing fields, wrong id lengths) are silently ignored.
 */
DHTSpider.prototype.onGetPeersRequest = function(msg, rinfo) {
    var infohash;
    var tid;
    var token;
    try {
        infohash = msg.a.info_hash;
        tid = msg.t;
        var nid = msg.a.id;
        // The token is just the first TOKEN_LENGTH bytes of the infohash,
        // which lets onAnnouncePeerRequest verify it statelessly.
        token = infohash.slice(0, TOKEN_LENGTH);

        if (tid === undefined || infohash.length != 20 || nid.length != 20) {
            throw new Error;
        }
    }
    catch (err) {
        return; // malformed query
    }
    var reply = {
        t: tid,
        y: 'r',
        r: {
            id: utils.genNeighborID(infohash, this.ktable.nid),
            nodes: '',
            token: token
        }
    };
    this.sendKRPC(reply, rinfo);
};
104 |
/**
 * Handle an announce_peer query: validate it, acknowledge it, and hand the
 * announcing peer plus infohash to the BT client for metadata download.
 *
 * Fix: `token` and the infohash/nid shapes are now validated inside the
 * guarded block. Previously a missing `token` made `token.toString()` throw
 * out of this handler (only contained by onMessage's blanket catch), and the
 * infohash length was never checked, unlike in onGetPeersRequest.
 */
DHTSpider.prototype.onAnnouncePeerRequest = function(msg, rinfo) {
    var port;
    var infohash;
    var token;
    var nid;
    var tid;

    try {
        infohash = msg.a.info_hash;
        token = msg.a.token;
        nid = msg.a.id;
        tid = msg.t;

        // Reject queries missing required fields or carrying malformed ids.
        if (tid == undefined || token == undefined
                || infohash.length != 20 || nid.length != 20) {
            throw new Error;
        }
    }
    catch (err) {
        return;
    }

    // The token must match what onGetPeersRequest handed out: the first
    // TOKEN_LENGTH bytes of the infohash.
    if (infohash.slice(0, TOKEN_LENGTH).toString() != token.toString()) {
        return;
    }

    // implied_port != 0 means "use the UDP source port instead of a.port" (BEP 5).
    if (msg.a.implied_port != undefined && msg.a.implied_port != 0) {
        port = rinfo.port;
    }
    else {
        port = msg.a.port || 0;
    }

    if (port >= 65536 || port <= 0) {
        return;
    }

    this.sendKRPC({
        t: tid,
        y: 'r',
        r: {
            id: utils.genNeighborID(nid, this.ktable.nid)
        }
    }, rinfo);

    // Hand the announcing peer to the BitTorrent client for metadata fetching.
    this.btclient.add({address: rinfo.address, port: port}, infohash);
};
147 |
/**
 * Decode an incoming UDP datagram and dispatch it: find_node responses feed
 * the k-table, get_peers / announce_peer queries get their own handlers.
 * Anything malformed or unrecognised is ignored.
 */
DHTSpider.prototype.onMessage = function(msg, rinfo) {
    try {
        var decoded = bencode.decode(msg);
        if (decoded.y == 'r' && decoded.r.nodes) {
            this.onFindNodeResponse(decoded.r.nodes);
        }
        else if (decoded.y == 'q' && decoded.q == 'get_peers') {
            this.onGetPeersRequest(decoded, rinfo);
        }
        else if (decoded.y == 'q' && decoded.q == 'announce_peer') {
            this.onAnnouncePeerRequest(decoded, rinfo);
        }
    }
    catch (err) {
        // Best-effort: the DHT is full of malformed traffic; drop it silently.
    }
};
164 |
/**
 * Bind the UDP socket and start the crawl loop: once per second, while the
 * BT client is idle, re-join via bootstrap nodes and sweep the k-table.
 */
DHTSpider.prototype.start = function() {
    var self = this;

    this.udp.bind(this.port, this.address);

    this.udp.on('listening', function() {
        console.log('UDP Server listening on %s:%s', self.address, self.port);
    });

    this.udp.on('message', function(msg, rinfo) {
        self.onMessage(msg, rinfo);
    });

    this.udp.on('error', function() {
        // do nothing
    });

    setInterval(function() {
        if (self.btclient.isIdle()) {
            self.joinDHTNetwork();
            self.makeNeighbours();
        }
    }, 1000);
};
187 |
/**
 * Create a DHTSpider from the given options, start it, and return it.
 * @param {Object} options - { btclient, address, port, nodesMaxSize }
 * @returns {DHTSpider} the running spider instance
 */
exports.start = function(options) {
  const spider = new DHTSpider(options);
  spider.start();
  return spider;
};
193 |
--------------------------------------------------------------------------------
/views/infohash.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 | <%- include('includes/header') %>
4 |
5 | <%- include('includes/navbar') %>
6 |
7 |
8 |
103 |
104 |
105 | <%- include('includes/footer') %>
106 |
107 |
108 |
--------------------------------------------------------------------------------
/lib/wire.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { Duplex } = require('stream');
4 | const crypto = require('crypto');
5 | const BitField = require('bitfield');
6 | const bencode = require('bencode');
7 | const utils = require('./utils');
8 |
// Reserved-bytes field for the BT handshake; bit 0x10 in byte 5 advertises
// extension-protocol support (checked the same way on the remote handshake).
const BT_RESERVED = Buffer.from([0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x01]);
const BT_PROTOCOL = Buffer.from('BitTorrent protocol');
const PIECE_LENGTH = 2 ** 14;          // metadata piece size (16 KiB)
const MAX_METADATA_SIZE = 10000000;    // reject metadata larger than ~10 MB
const BITFIELD_GROW = 1000;            // auto-grow step for the received-pieces bitfield
const EXT_HANDSHAKE_ID = 0;            // extended-message id of the extension handshake
const BT_MSG_ID = 20;                  // peer-wire message id for extended messages
16 |
/**
 * Wire implements just enough of the BitTorrent peer wire protocol plus the
 * extension protocol to fetch torrent metadata (ut_metadata) from one peer.
 *
 * It is a Duplex stream: pipe the peer's socket into it and pipe its output
 * back to the socket. On success it emits 'metadata' with the decoded info
 * dictionary; on any protocol problem it emits 'fail'.
 */
class Wire extends Duplex {
  /**
   * @param {Buffer} infohash - 20-byte infohash; also used to verify the
   *   SHA-1 of the assembled metadata before emitting it.
   */
  constructor(infohash) {
    super();
    // Tracks which metadata pieces have been received.
    this._bitfield = new BitField(0, { grow: BITFIELD_GROW });
    this._infohash = infohash;

    // Raw inbound bytes accumulated until the registered handler's size is met.
    this._buffer = [];
    this._bufferSize = 0;

    // Parser state machine: handler to invoke next and how many bytes it needs.
    this._next = null;
    this._nextSize = 0;

    // Metadata assembly state, filled in after the extension handshake.
    this._metadata = null;
    this._metadataSize = null;
    this._numPieces = 0;
    this._ut_metadata = null;

    this._onHandshake();
  }

  // Parse a 4-byte big-endian length prefix and register the message handler
  // for that many bytes (zero-length keep-alives are skipped).
  _onMessageLength(buffer) {
    if (buffer.length >= 4) {
      const length = buffer.readUInt32BE(0);
      if (length > 0) {
        this._register(length, this._onMessage);
      }
    }
  }

  // Handle one complete peer-wire message. Only extended messages (id 20)
  // are of interest; everything else is ignored.
  _onMessage(buffer) {
    this._register(4, this._onMessageLength);
    if (buffer[0] === BT_MSG_ID) {
      this._onExtended(buffer.readUInt8(1), buffer.slice(2));
    }
  }

  // Dispatch an extended message: id 0 is the extension handshake (bencoded),
  // any other id is treated as a ut_metadata piece. A throw from decoding or
  // from _onExtHandshake itself is converted into a 'fail'.
  _onExtended(ext, buf) {
    if (ext === 0) {
      try {
        this._onExtHandshake(bencode.decode(buf));
      } catch (err) {
        this._fail();
      }
    } else {
      this._onPiece(buf);
    }
  }

  // Arm the parser: call `next` once `size` bytes have accumulated in _write.
  _register(size, next) {
    this._nextSize = size;
    this._next = next;
  }

  end(...args) {
    super.end(...args);
  }

  // Parse the initial BT handshake: read pstrlen, then the rest of the
  // handshake (pstr + 8 reserved + 20 infohash + 20 peer id = pstrlen + 48).
  // Fails unless the peer speaks "BitTorrent protocol" and advertises
  // extension support (bit 0x10 of reserved byte 5).
  _onHandshake() {
    this._register(1, buffer => {
      if (buffer.length === 0) {
        this.end();
        return this._fail();
      }
      const pstrlen = buffer.readUInt8(0);
      this._register(pstrlen + 48, handshake => {
        const protocol = handshake.slice(0, pstrlen);
        if (protocol.toString() !== BT_PROTOCOL.toString()) {
          this.end();
          this._fail();
          return;
        }
        handshake = handshake.slice(pstrlen);
        if (handshake[5] & 0x10) {
          // Peer supports extensions: start reading length-prefixed messages
          // and send our extension handshake.
          this._register(4, this._onMessageLength);
          this._sendExtHandshake();
        } else {
          this._fail();
        }
      });
    });
  }

  // Handle the peer's extension handshake: require a sane metadata_size and a
  // ut_metadata message id, then start requesting all pieces.
  // (A missing `m` dict throws here and is caught in _onExtended.)
  _onExtHandshake(extHandshake) {
    if (!extHandshake.metadata_size || !extHandshake.m.ut_metadata
        || extHandshake.metadata_size > MAX_METADATA_SIZE) {
      this._fail();
      return;
    }

    this._metadataSize = extHandshake.metadata_size;
    this._numPieces = Math.ceil(this._metadataSize / PIECE_LENGTH);
    this._ut_metadata = extHandshake.m.ut_metadata;

    this._requestPieces();
  }

  // Allocate the metadata buffer and request every piece at once.
  _requestPieces() {
    this._metadata = Buffer.alloc(this._metadataSize);
    for (let piece = 0; piece < this._numPieces; piece++) {
      this._requestPiece(piece);
    }
  }

  // Send one ut_metadata request (msg_type 0) for the given piece index.
  _requestPiece(piece) {
    const msg = Buffer.concat([
      Buffer.from([BT_MSG_ID]),
      Buffer.from([this._ut_metadata]),
      bencode.encode({ msg_type: 0, piece })
    ]);
    this._sendMessage(msg);
  }

  // Push raw bytes to the outbound (readable) side of the stream.
  _sendPacket(packet) {
    this.push(packet);
  }

  // Frame a message with its 4-byte big-endian length prefix and send it.
  _sendMessage(msg) {
    const buf = Buffer.alloc(4);
    buf.writeUInt32BE(msg.length, 0);
    this._sendPacket(Buffer.concat([buf, msg]));
  }

  // Send our side of the BT handshake with a fresh random peer id.
  sendHandshake() {
    const peerID = utils.randomID();
    const packet = Buffer.concat([
      Buffer.from([BT_PROTOCOL.length]),
      BT_PROTOCOL, BT_RESERVED, this._infohash, peerID
    ]);
    this._sendPacket(packet);
  }

  // Send our extension handshake advertising ut_metadata with local id 1.
  _sendExtHandshake() {
    const msg = Buffer.concat([
      Buffer.from([BT_MSG_ID]),
      Buffer.from([EXT_HANDSHAKE_ID]),
      bencode.encode({ m: { ut_metadata: 1 } })
    ]);
    this._sendMessage(msg);
  }

  // Handle one ut_metadata message: split the bencoded header (everything up
  // to and including the first 'ee') from the raw piece data that follows,
  // require msg_type 1 (data), copy the data into place, and mark the piece.
  _onPiece(piece) {
    let dict, trailer;
    try {
      const str = piece.toString();
      const trailerIndex = str.indexOf('ee') + 2;
      dict = bencode.decode(str.substring(0, trailerIndex));
      trailer = piece.slice(trailerIndex);
    } catch (err) {
      this._fail();
      return;
    }
    if (dict.msg_type !== 1) {
      this._fail();
      return;
    }
    if (trailer.length > PIECE_LENGTH) {
      this._fail();
      return;
    }
    trailer.copy(this._metadata, dict.piece * PIECE_LENGTH);
    this._bitfield.set(dict.piece);
    this._checkDone();
  }

  // Fire _onDone only once every piece has been received.
  _checkDone() {
    for (let piece = 0; piece < this._numPieces; piece++) {
      if (!this._bitfield.get(piece)) {
        return;
      }
    }
    this._onDone(this._metadata);
  }

  // Re-encode just the 'info' dict (when present), verify its SHA-1 matches
  // the requested infohash, and emit the decoded metadata.
  _onDone(metadata) {
    try {
      const info = bencode.decode(metadata).info;
      if (info) {
        metadata = bencode.encode(info);
      }
    } catch (err) {
      this._fail();
      return;
    }
    const infohash = crypto.createHash('sha1').update(metadata).digest('hex');
    if (this._infohash.toString('hex') !== infohash) {
      this._fail();
      return false;
    }
    this.emit('metadata', { info: bencode.decode(metadata) }, this._infohash);
  }

  // Signal the owner that this peer cannot (or will not) provide metadata.
  _fail() {
    this.emit('fail');
  }

  // Writable side: buffer inbound bytes and feed the registered handler each
  // time the expected number of bytes is available. Handlers may re-register
  // a new size, so the loop re-checks _nextSize after every call.
  _write(buf, encoding, next) {
    this._bufferSize += buf.length;
    this._buffer.push(buf);

    while (this._bufferSize >= this._nextSize) {
      const buffer = Buffer.concat(this._buffer);
      this._bufferSize -= this._nextSize;
      this._buffer = this._bufferSize
        ? [buffer.slice(this._nextSize)]
        : [];
      this._next(buffer.slice(0, this._nextSize));
    }

    next(null);
  }

  _read() {
    // do nothing
  }
}
232 |
233 | module.exports = Wire;
234 |
--------------------------------------------------------------------------------
/utils/fileTreeUtils.js:
--------------------------------------------------------------------------------
1 | /**
2 | * File Tree Utilities
3 | * Server-side utilities for processing file path data
4 | */
5 |
/**
 * Build a nested file-tree object from an array of file paths or a
 * comma/slash separated string.
 *
 * Each tree node may have `dirs` (name -> subtree) and `files`
 * ([{ name, size }]). Comma-only inputs are treated as slash-separated paths.
 *
 * @param {Array|String} filePaths - Array of path strings / {path, size}
 *   objects, or a separator-delimited string.
 * @return {Object} Structured file tree object.
 */
function buildFileTree(filePaths) {
  const tree = {};

  // Normalize a string argument into an array of path segments.
  if (typeof filePaths === 'string') {
    let raw = filePaths;
    // Comma-only strings are really slash paths with the wrong separator.
    if (raw.includes(',') && !raw.includes('/')) {
      raw = raw.replace(/,/g, '/');
    }
    const splitter = raw.includes('/') ? /[\/\\]/ : ',';
    filePaths = raw.split(splitter).map((s) => s.trim()).filter(Boolean);
  }

  if (!Array.isArray(filePaths)) {
    return tree;
  }

  for (const item of filePaths) {
    // Accept both plain path strings and { path, size } objects.
    const isObj = typeof item === 'object' && item !== null;
    let filePath = isObj ? (item.path || '') : item;
    const fileSize = isObj ? (item.size || 0) : 0;

    // Same comma-to-slash normalization on each individual path.
    if (filePath.includes(',') && !filePath.includes('/')) {
      filePath = filePath.replace(/,/g, '/');
    }

    // Prefer slashes as separator; fall back to commas only when no slash exists.
    const separator = filePath.includes('/')
      ? '/'
      : (filePath.includes(',') ? ',' : '/');
    const parts = filePath.split(separator);

    // Walk/create intermediate directory nodes; the last part is the file.
    let level = tree;
    parts.forEach((rawPart, idx) => {
      const part = rawPart.trim();
      if (part === '') return;

      if (idx === parts.length - 1) {
        (level.files = level.files || []).push({ name: part, size: fileSize });
      } else {
        level.dirs = level.dirs || {};
        level.dirs[part] = level.dirs[part] || {};
        level = level.dirs[part];
      }
    });
  }

  return tree;
}
79 |
/**
 * Get appropriate Font Awesome icon class and Tailwind color class for a
 * file, based on its extension.
 *
 * @param {String} fileName - Name of the file.
 * @return {Object} Object with fileIcon and iconColor properties.
 */
function getFileIconInfo(fileName) {
  // Extension -> icon/color dispatch table (first matching rule wins).
  const rules = [
    { exts: ['mp4', 'mkv', 'avi', 'mov', 'wmv', 'flv', 'webm'],
      icon: 'fa-file-video', color: 'text-red-500' },
    { exts: ['mp3', 'wav', 'ogg', 'flac', 'm4a', 'aac'],
      icon: 'fa-file-audio', color: 'text-blue-500' },
    { exts: ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'svg', 'webp'],
      icon: 'fa-file-image', color: 'text-green-500' },
    { exts: ['zip', 'rar', '7z', 'tar', 'gz', 'bz2'],
      icon: 'fa-file-archive', color: 'text-yellow-500' },
    { exts: ['pdf'],
      icon: 'fa-file-pdf', color: 'text-red-600' },
    { exts: ['doc', 'docx', 'txt', 'rtf', 'odt'],
      icon: 'fa-file-alt', color: 'text-blue-600' },
    { exts: ['js', 'py', 'java', 'c', 'cpp', 'h', 'cs', 'php', 'html', 'css',
             'xml', 'json', 'md', 'csv', 'log'],
      icon: 'fa-file-code', color: 'text-purple-600' },
    { exts: ['exe', 'dll', 'bat', 'sh', 'app', 'dmg', 'deb', 'rpm'],
      icon: 'fa-cog', color: 'text-gray-600' }
  ];

  const ext = fileName.split('.').pop().toLowerCase();
  const match = rules.find((rule) => rule.exts.includes(ext));

  // Unknown extensions fall back to the generic file icon.
  return match
    ? { fileIcon: match.icon, iconColor: match.color }
    : { fileIcon: 'fa-file', iconColor: 'text-gray-500' };
}
134 |
/**
 * Format file size in human-readable format.
 *
 * Fixes vs. the previous version: negative/NaN/non-finite sizes now return
 * '0 B' (previously "NaN undefined"), sizes in (0, 1) no longer index
 * units[-1], and the unit index is clamped so sizes beyond the TB range no
 * longer read past the units array (previously "... undefined").
 *
 * @param {Number} size - File size in bytes.
 * @return {String} Formatted size, e.g. "1.50 KB".
 */
function formatFileSize(size) {
  const units = ['B', 'KB', 'MB', 'GB', 'TB'];

  if (!Number.isFinite(size) || size <= 0) return '0 B';

  // Largest unit that keeps the value >= 1, clamped to the table bounds.
  const exponent = Math.min(
    Math.max(Math.floor(Math.log(size) / Math.log(1024)), 0),
    units.length - 1
  );
  const value = size / Math.pow(1024, exponent);
  return value.toFixed(exponent > 0 ? 2 : 0) + ' ' + units[exponent];
}
147 |
148 | /**
149 | * Render file tree as HTML
150 | * @param {Object} node - File tree node
151 | * @param {String} path - Current path
152 | * @param {Number} level - Current indent level
153 | * @return {String} HTML markup
154 | */
155 | function renderFileTree(node, path = '', level = 0) {
156 | let html = '';
157 | const indent = level * 1.5;
158 |
159 | // Render directories first
160 | if (node.dirs) {
161 | Object.keys(node.dirs).sort().forEach(dir => {
162 | const dirPath = path ? `${path}/${dir}` : dir;
163 | html += '' +
164 | '
' +
165 | ' ' +
166 | '
' +
167 | '
' + dir + '/
' +
168 | '
';
169 | html += renderFileTree(node.dirs[dir], dirPath, level + 1);
170 | });
171 | }
172 |
173 | // Then render files
174 | if (node.files) {
175 | node.files.sort((a, b) => {
176 | const nameA = typeof a === 'object' ? a.name : a;
177 | const nameB = typeof b === 'object' ? b.name : b;
178 | return nameA.localeCompare(nameB);
179 | }).forEach(file => {
180 | // Handle both string files and {name, size} objects
181 | let fileName, fileSize = 0;
182 |
183 | if (typeof file === 'object' && file !== null) {
184 | fileName = file.name || '';
185 | fileSize = file.size || 0;
186 | } else {
187 | fileName = file;
188 | }
189 |
190 | const { fileIcon, iconColor } = getFileIconInfo(fileName);
191 | const formattedSize = formatFileSize(fileSize);
192 |
193 | html += '' +
194 | '
' +
195 | ' ' +
196 | '
' +
197 | '
' + fileName + '
' +
198 | '
' + formattedSize + '
' +
199 | '
';
200 | });
201 | }
202 |
203 | return html;
204 | }
205 |
// Public exports: tree construction, icon lookup, HTML rendering, size formatting.
module.exports = {
  buildFileTree,
  getFileIconInfo,
  renderFileTree,
  formatFileSize
};
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # p2pspider - DHT Spider
2 |
3 | [](https://github.com/thejordanprice/p2pspider/issues)
4 | [](https://github.com/thejordanprice/p2pspider/stargazers)
5 | [](https://github.com/thejordanprice/p2pspider/network)
6 | [](https://github.com/thejordanprice/p2pspider/blob/master/LICENSE)
7 | [](https://twitter.com/intent/tweet?text=Wow:&url=https%3A%2F%2Fgithub.com%2Fthejordanprice%2Fp2pspider)
8 |
9 | A daemon that crawls the BitTorrent DHT network and an Express web application that provides a searchable database of magnet links with real-time updates through WebSockets.
10 |
11 | ### Intro
12 |
13 | DHT Spider can index over 1 million magnets per 24 hours on modest hardware (2GB of RAM and around 2MB/s connection). It's resource-intensive and will use available CPU and RAM, which can be controlled via the 'ecosystem.json' file. On 2GB RAM, it's recommended to use 8 instances of the daemon and 2 of the webserver, all limited at 175MB.
14 |
15 | ###### Screenshots
16 |
17 | 
18 |
19 | 
20 |
21 | ### Getting Started
22 |
23 | ```
24 | # Install dependencies
25 | npm install
26 |
27 | # Set up configuration
28 | cp .env.sample .env
29 | # Edit .env file as needed
30 |
31 | # Run the application
32 | npm start # Start the unified application (both crawler and web interface)
33 |
34 | # Alternatively, use PM2 for process management
35 | npm install -g pm2
36 | npm run start:pm2 # Uses the ecosystem.json file
37 | pm2 monit
38 | ```
39 |
40 | ### Configuration
41 |
42 | **You will need to have port 6881 (or your configured port) open to the internet for the DHT crawler to function properly.**
43 |
44 | The application can be configured through the `.env` file:
45 |
46 | ```
47 | # Database and server configuration
48 | REDIS_URI=redis://127.0.0.1:6379
49 | MONGO_URI=mongodb://127.0.0.1/magnetdb
50 | SITE_HOSTNAME=http://127.0.0.1:8080
51 | SITE_NAME=DHT Spider
52 | SITE_PORT=8080
53 |
54 | # Database options: "mongodb" or "sqlite"
55 | DB_TYPE=sqlite
56 |
57 | # Redis options: "true" or "false"
58 | USE_REDIS=false
59 |
60 | # SQLite database file location (only used if DB_TYPE=sqlite)
61 | SQLITE_PATH=./data/magnet.db
62 |
63 | # Elasticsearch options: "true" or "false"
64 | USE_ELASTICSEARCH=false
65 |
66 | # Elasticsearch connection
67 | ELASTICSEARCH_NODE=http://localhost:9200
68 | ELASTICSEARCH_INDEX=magnets
69 |
70 | # Component control options: "true" or "false"
71 | RUN_DAEMON=true
72 | RUN_WEBSERVER=true
73 | ```
74 |
75 | You can also fine-tune the crawler performance in the daemon.js file:
76 |
77 | ```javascript
78 | const p2p = P2PSpider({
79 | nodesMaxSize: 250,
80 | maxConnections: 500,
81 | timeout: 1000
82 | });
83 | ```
84 |
85 | It's not recommended to change the `nodesMaxSize` or `maxConnections`, but adjusting the `timeout` may increase indexing speed. Higher timeout values may require more RAM; the maximum recommended value is 5000ms.
86 |
87 | #### Component Control
88 |
89 | DHT Spider now allows you to run the daemon and webserver components independently:
90 |
91 | - **RUN_DAEMON**: Set to "true" to run the P2P Spider daemon, or "false" to disable it
92 | - **RUN_WEBSERVER**: Set to "true" to run the web server, or "false" to disable it
93 |
94 | This flexibility allows you to:
95 | - Run only the daemon for dedicated crawling
96 | - Run only the webserver for serving existing data
97 | - Run both components together (default behavior)
98 |
99 | Example usage:
100 | ```bash
101 | # Run both components (default)
102 | node app.js
103 |
104 | # Run only the daemon
105 | RUN_WEBSERVER=false node app.js
106 |
107 | # Run only the webserver
108 | RUN_DAEMON=false node app.js
109 | ```
110 |
111 | #### Database and Redis Configuration
112 |
113 | DHT Spider supports both MongoDB and SQLite as database options, and Redis usage can be toggled on/off:
114 |
115 | - **DB_TYPE**: Choose between "mongodb" or "sqlite" as your database
116 | - **USE_REDIS**: Set to "true" to use Redis for caching recent infohashes, or "false" to disable Redis
117 | - **SQLITE_PATH**: Path where the SQLite database file will be created (only used when DB_TYPE=sqlite)
118 |
119 | SQLite is ideal for smaller deployments with reduced dependencies, while MongoDB is better for large-scale operations. Redis provides caching to prevent duplicate processing of recently seen infohashes.
120 |
121 | #### Elasticsearch Configuration
122 |
123 | DHT Spider now includes Elasticsearch integration for powerful full-text search capabilities:
124 |
125 | - **USE_ELASTICSEARCH**: Set to "true" to enable Elasticsearch integration
126 | - **ELASTICSEARCH_NODE**: URL of your Elasticsearch server (default: http://localhost:9200)
127 | - **ELASTICSEARCH_INDEX**: Name of the Elasticsearch index to use (default: magnets)
128 |
129 | To bulk index existing data into Elasticsearch, run:
130 | ```bash
131 | node utils/bulkIndexToElasticsearch.js
132 | ```
133 |
134 | Elasticsearch provides significantly improved search performance and relevance, especially for large datasets. When enabled, search queries will use Elasticsearch instead of database queries.
135 |
136 | ### Features
137 |
138 | - Real-time DHT network crawling and magnet link indexing
139 | - WebSocket-based live updates on the web interface
140 | - Searchable database of discovered magnet links
141 | - Statistics page with database information
142 | - Support for both MongoDB and SQLite databases
143 | - Elasticsearch integration for powerful full-text search
144 | - Redis caching for improved performance
145 | - Responsive web interface with modern design
146 |
147 | ### Protocols
148 |
149 | [bep_0005](http://www.bittorrent.org/beps/bep_0005.html), [bep_0003](http://www.bittorrent.org/beps/bep_0003.html), [bep_0010](http://www.bittorrent.org/beps/bep_0010.html), [bep_0009](http://www.bittorrent.org/beps/bep_0009.html)
150 |
151 | ### Notes
152 |
153 | Cluster mode does not work on Windows. On Linux and other UNIX-like operating systems, multiple instances can listen on the same UDP port, which is not possible on Windows due to operating system limitations.
154 |
155 | ### Notice
156 |
Please don't share the data DHT Spider crawls on the internet, because it sometimes discovers sensitive, copyrighted, or adult material.
158 |
159 | ### Performance Optimization
160 |
161 | To maximize performance, DHT Spider now includes several optimizations:
162 |
163 | #### 1. Redis Caching
164 | Enable Redis by setting `USE_REDIS=true` in your `.env` file to significantly reduce database load:
165 | ```
166 | # Redis options: "true" or "false"
167 | USE_REDIS=true
168 | ```
169 |
170 | #### 2. Production Mode
171 | Run the application in production mode for better performance:
172 | ```bash
173 | npm run start:prod # For the web server
174 | npm run daemon:prod # For the DHT crawler
175 |
176 | # Or with PM2 (recommended for production)
177 | pm2 start ecosystem.json
178 | ```
179 |
180 | #### 3. Optimized PM2 Configuration
181 | The included `ecosystem.json` is configured for optimal performance:
182 | - Web server runs in cluster mode with multiple instances
183 | - DHT crawler runs in a single instance to avoid duplicate crawling
184 | - Memory limits prevent excessive resource usage
185 |
186 | #### 4. WebSocket Optimizations
187 | The WebSocket server includes:
188 | - Message batching to reduce overhead
189 | - Client connection health monitoring
190 | - Throttled broadcasts to prevent excessive updates
191 |
192 | #### 5. Elasticsearch Search Optimization
193 | When dealing with large datasets, enable Elasticsearch for improved search performance:
194 | ```
195 | # Elasticsearch options: "true" or "false"
196 | USE_ELASTICSEARCH=true
197 | ```
198 |
199 | #### Monitoring Performance
200 | Monitor system resources during operation:
201 | ```bash
202 | pm2 monit
203 | ```
204 |
205 | If the application is still slow:
206 | 1. Increase server resources (RAM/CPU)
207 | 2. Use a CDN for static assets
208 | 3. Consider using a dedicated Redis server
209 | 4. Consider using a dedicated Elasticsearch cluster
210 | 5. Scale horizontally with a load balancer
211 |
212 | ## License
213 |
214 | MIT
215 |
--------------------------------------------------------------------------------
/views/index.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 | <%- include('includes/header') %>
4 |
5 | <%- include('includes/navbar') %>
6 |
7 |
8 |
9 |
10 |
11 |
12 | Loading...
13 |
15 |
16 |
17 |
18 |
19 |
40 |
41 |
84 |
85 |
86 |
87 |
88 | <%- include('includes/footer') %>
89 |
90 |
185 |
186 |
--------------------------------------------------------------------------------
/models/elasticsearch/index.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const { Client } = require('@elastic/elasticsearch');
4 | require('dotenv').config();
5 |
// Elasticsearch configuration, read once at module load from the environment.
const USE_ELASTICSEARCH = process.env.USE_ELASTICSEARCH === 'true';
const ELASTICSEARCH_NODE = process.env.ELASTICSEARCH_NODE || 'http://localhost:9200';
const ELASTICSEARCH_INDEX = process.env.ELASTICSEARCH_INDEX || 'magnets';

// Module-level connection state, populated by initialize().
let client = null;
let isConnected = false;
13 |
/**
 * Connect to Elasticsearch and make sure the configured index exists,
 * creating it with the magnet mapping when missing.
 *
 * @returns {Promise<boolean>} true on success; false when Elasticsearch is
 *   disabled or when connecting/creating the index fails.
 */
async function initialize() {
  if (!USE_ELASTICSEARCH) {
    console.log('Elasticsearch is disabled in configuration.');
    return false;
  }

  try {
    console.log(`Connecting to Elasticsearch at ${ELASTICSEARCH_NODE}...`);
    client = new Client({
      node: ELASTICSEARCH_NODE,
      maxRetries: 3,
      requestTimeout: 30000
    });

    // Verify connectivity before touching the index.
    const info = await client.info();
    console.log(`Elasticsearch connected to ${info.name} cluster running on ${info.version.number}`);

    const indexExists = await client.indices.exists({ index: ELASTICSEARCH_INDEX });
    if (indexExists) {
      console.log(`Using existing Elasticsearch index: ${ELASTICSEARCH_INDEX}`);
    } else {
      console.log(`Creating Elasticsearch index: ${ELASTICSEARCH_INDEX}...`);
      await client.indices.create({
        index: ELASTICSEARCH_INDEX,
        body: {
          mappings: {
            properties: {
              // Full-text searchable name, plus an exact-match keyword sub-field.
              name: {
                type: 'text',
                analyzer: 'standard',
                fields: {
                  keyword: { type: 'keyword', ignore_above: 256 }
                }
              },
              infohash: { type: 'keyword' },
              magnet: { type: 'keyword' },
              files: { type: 'text', analyzer: 'standard' },
              fetchedAt: { type: 'date', format: 'epoch_millis' }
            }
          }
        }
      });
      console.log(`Created Elasticsearch index: ${ELASTICSEARCH_INDEX}`);
    }

    isConnected = true;
    return true;
  } catch (error) {
    console.error('Elasticsearch initialization error:', error);
    isConnected = false;
    return false;
  }
}
86 |
87 | /**
88 | * Index a document in Elasticsearch
89 | * @param {Object} document - Document to index
90 | * @returns {Promise} - Elasticsearch response
91 | */
92 | async function indexDocument(document) {
93 | if (!USE_ELASTICSEARCH || !isConnected || !client) return null;
94 |
95 | try {
96 | if (!document || !document.infohash) {
97 | console.error('Invalid document for indexing:', document);
98 | return null;
99 | }
100 |
101 | // Process files to ensure they're in a consistent format for indexing
102 | let processedFiles;
103 |
104 | if (document.files) {
105 | if (Array.isArray(document.files)) {
106 | // If already an array, keep as is
107 | processedFiles = document.files;
108 | } else if (typeof document.files === 'string') {
109 | // If JSON string, try to parse it
110 | try {
111 | const parsed = JSON.parse(document.files);
112 | processedFiles = Array.isArray(parsed) ? parsed : document.files.split(',').map(f => f.trim()).filter(f => f);
113 | } catch (e) {
114 | // If not valid JSON, treat as comma-separated string
115 | processedFiles = document.files.split(',').map(f => f.trim()).filter(f => f);
116 | }
117 | } else {
118 | // Fallback
119 | processedFiles = [String(document.files)];
120 | }
121 | } else {
122 | processedFiles = [];
123 | }
124 |
125 | // Transform processedFiles into a single string of paths for indexing
126 | let filesString = '';
127 | if (Array.isArray(processedFiles)) {
128 | // Check if the first element looks like a file object (has path/size)
129 | if (processedFiles.length > 0 && typeof processedFiles[0] === 'object' && processedFiles[0] !== null && ('path' in processedFiles[0] || 'size' in processedFiles[0])) {
130 | // Extract paths and join with newline
131 | filesString = processedFiles.map(file => (typeof file === 'object' && file !== null && file.path) ? file.path : String(file)).join('\n');
132 | } else {
133 | // Assume it's an array of strings already (or simple values)
134 | filesString = processedFiles.map(String).join('\n');
135 | }
136 | } else if (typeof processedFiles === 'string') {
137 | // It might already be a string from the previous processing steps
138 | filesString = processedFiles;
139 | } // If processedFiles was something else (e.g., empty array), filesString remains ''
140 |
141 | // Make sure infohash is used as the document ID for deduplication
142 | const result = await client.index({
143 | index: ELASTICSEARCH_INDEX,
144 | id: document.infohash,
145 | document: {
146 | name: document.name || '',
147 | infohash: document.infohash,
148 | magnet: document.magnet || '',
149 | files: filesString, // Use the transformed string here
150 | fetchedAt: document.fetchedAt || Date.now()
151 | },
152 | refresh: true // Make document immediately searchable
153 | });
154 |
155 | return result;
156 | } catch (error) {
157 | console.error('Elasticsearch indexing error:', error);
158 | return null;
159 | }
160 | }
161 |
162 | /**
163 | * Search documents in Elasticsearch
164 | * @param {String} query - Search query
165 | * @param {Number} page - Page number (0-based)
166 | * @param {Number} size - Number of results per page
167 | * @returns {Promise} - Search results with count and items
168 | */
169 | async function search(query, page = 0, size = 10) {
170 | if (!USE_ELASTICSEARCH || !isConnected || !client) return null;
171 |
172 | try {
173 | // Check if query is an infohash (40 hex chars)
174 | const isInfohash = /^[a-f0-9]{40}$/i.test(query);
175 |
176 | let searchQuery;
177 | if (isInfohash) {
178 | // Direct infohash lookup (exact match)
179 | searchQuery = {
180 | term: {
181 | infohash: query.toLowerCase()
182 | }
183 | };
184 | } else {
185 | // Text search with boosting for name field
186 | searchQuery = {
187 | multi_match: {
188 | query: query,
189 | fields: ['name^3', 'files'],
190 | type: 'best_fields',
191 | fuzziness: 'AUTO'
192 | }
193 | };
194 | }
195 |
196 | const result = await client.search({
197 | index: ELASTICSEARCH_INDEX,
198 | body: {
199 | query: searchQuery,
200 | sort: [
201 | { _score: 'desc' },
202 | { fetchedAt: 'desc' }
203 | ]
204 | },
205 | from: page * size,
206 | size: size
207 | });
208 |
209 | // Process results to ensure file data is in the right format
210 | const processedResults = result.hits.hits.map(hit => {
211 | const source = hit._source;
212 |
213 | // Ensure files is always an array
214 | if (source.files) {
215 | if (!Array.isArray(source.files)) {
216 | // If files is not an array, try to parse it or convert it
217 | if (typeof source.files === 'string') {
218 | // If it's a JSON string, try to parse it
219 | try {
220 | const parsed = JSON.parse(source.files);
221 | source.files = Array.isArray(parsed) ? parsed : [source.files];
222 | } catch (e) {
223 | // If parsing fails, treat it as a comma-separated string
224 | source.files = source.files.split(',').map(f => f.trim()).filter(f => f);
225 | }
226 | } else {
227 | // Fallback to a simple array with the original value
228 | source.files = [String(source.files)];
229 | }
230 | }
231 | } else {
232 | // If files is missing or null, initialize as empty array
233 | source.files = [];
234 | }
235 |
236 | return {
237 | ...source,
238 | score: hit._score
239 | };
240 | });
241 |
242 | return {
243 | count: result.hits.total.value,
244 | results: processedResults
245 | };
246 | } catch (error) {
247 | console.error('Elasticsearch search error:', error);
248 | return null;
249 | }
250 | }
251 |
252 | /**
253 | * Get total count of documents in the index
254 | * @returns {Promise} - Total count of documents
255 | */
256 | async function count() {
257 | if (!USE_ELASTICSEARCH || !isConnected || !client) return 0;
258 |
259 | try {
260 | const result = await client.count({
261 | index: ELASTICSEARCH_INDEX
262 | });
263 |
264 | return result.count;
265 | } catch (error) {
266 | console.error('Elasticsearch count error:', error);
267 | return 0;
268 | }
269 | }
270 |
271 | /**
272 | * Check if Elasticsearch is enabled and connected
273 | * @returns {Boolean} - True if Elasticsearch is enabled and connected
274 | */
275 | function isElasticsearchEnabled() {
276 | return USE_ELASTICSEARCH && isConnected;
277 | }
278 |
// Public API of the Elasticsearch integration module. initialize() must
// succeed before the other calls do anything (they no-op otherwise).
module.exports = {
  initialize,
  indexDocument,
  search,
  count,
  isElasticsearchEnabled
};
--------------------------------------------------------------------------------
/views/search.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 | <%- include('includes/header') %>
4 |
5 | <%- include('includes/navbar') %>
6 |
7 |
8 |
9 | <% if (typeof pagesdebug !== 'undefined') { %>
10 |
<%= pagesdebug %>
11 | <% } %>
12 |
13 | <% if (typeof pages !== 'undefined') { %>
14 |
15 | <% if (pages.query) { %>
16 |
17 |
18 |
19 |
20 | Search: <%= pages.query %>
21 |
22 |
23 |
24 |
25 | <%= pages.results.toLocaleString() %>
26 | results found
27 |
28 |
29 |
30 | <% } %>
31 |
32 |
33 | <% if (pages.current != 0) { %>
34 |
36 |
37 | Previous
38 |
39 | <% } %>
40 | <% if (pages.available >= pages.next) { %>
41 |
43 | Next
44 |
45 |
46 | <% } %>
47 |
48 |
49 | <% } %>
50 |
51 | <% if (typeof results !== 'undefined' && results.length > 0) { %>
52 |
53 | <% results.forEach(function(item) { %>
54 |
55 |
56 |
57 |
<%= item.name %>
58 |
59 |
60 |
61 | Infohash:
62 | <%= item.infohash %>
63 | <% if (item.totalSize && item.formattedTotalSize) { %>
64 | Size: <%= item.formattedTotalSize %>
65 | <% } %>
66 |
67 |
68 | <% if (item.fileTree && Object.keys(item.fileTree).length > 0 &&
69 | ((item.files && Array.isArray(item.files) && item.files.length > 0) ||
70 | (typeof item.files === 'string' && item.files.trim() !== ''))) { %>
71 |
72 |
73 |
74 |
75 |
76 |
77 | Files
78 |
79 |
80 |
81 |
82 | <% if (item.treeHtml && item.treeHtml.trim() !== '') { %>
83 | <%- item.treeHtml %>
84 | <% } else { %>
85 |
Processing directory tree...
86 | <% } %>
87 |
88 | <% if (item.hasMoreFiles) { %>
89 |
99 | <% } %>
100 |
101 |
102 | <% } else if (item.files &&
103 | ((Array.isArray(item.files) && item.files.length > 0) ||
104 | (typeof item.files === 'string' && item.files.trim() !== ''))) { %>
105 | <%
106 | // Fallback for when file tree processing failed
107 | const displayFiles = Array.isArray(item.files) ? item.files : item.files.split(',');
108 | const filestring = Array.isArray(displayFiles) ? displayFiles.join('\n') : displayFiles;
109 | %>
110 |
111 |
<%= filestring %>
112 | <% if (item.hasMoreFiles) { %>
113 |
119 | <% } %>
120 |
121 | <% } %>
122 |
123 |
135 |
136 |
137 | <% }); %>
138 |
139 | <% } %>
140 |
141 | <% if (typeof pages !== 'undefined') { %>
142 |
143 | <% if (pages.current != 0) { %>
144 |
146 |
147 | Previous
148 |
149 | <% } %>
150 | <% if (pages.available >= pages.next) { %>
151 |
153 | Next
154 |
155 |
156 | <% } %>
157 |
158 | <% } %>
159 |
160 | <% if (typeof timer !== 'undefined') { %>
161 |
162 |
163 |
164 | Query time:
165 | <%= timer %> ms
166 | <% if (typeof searchSource !== 'undefined') { %>
167 | |
168 |
169 | <% if (searchSource === 'elasticsearch') { %>
170 |
171 | Elasticsearch
172 | <% } else { %>
173 |
174 | Database
175 | <% } %>
176 |
177 | <% } %>
178 |
179 |
180 | <% } %>
181 |
182 |
183 |
184 | <%- include('includes/footer') %>
185 |
186 |
200 |
201 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | ## [1.1.1] - 2024-04-12
6 |
7 | ### Refactoring & Enhancements
8 | - Refactored `reset_data.sh` to simplify Elasticsearch index deletion logic, improve URL handling (escaping and quoting), and enhance status reporting for clearer output.
9 | - Improved file handling logic for Elasticsearch indexing, transforming `processedFiles` into a string of file paths for better data consistency.
10 | - Refactored WebSocket message handling in `index.ejs` to differentiate between event types ('new_magnet', 'count_update') and improve logging.
11 |
12 | ## [1.1.0] - 2024-04-11
13 |
14 | ### Major Refactoring
15 | - Deconstructed monolithic `app.js` into modular components:
16 | - `config/env.js`: Centralized environment variable loading, constants, and validation.
17 | - `lib/database.js`: Handles database initialization logic.
18 | - `lib/redis.js`: Manages Redis client initialization, connection, helpers, and shutdown.
19 | - `lib/p2p.js`: Encapsulates P2P Spider initialization, event handling (`metadata`, `error`), processing, and shutdown.
20 | - `services/websocket.js`: Manages WebSocket server setup, connection handling, broadcasting, and client updates.
21 | - `config/express.js`: Configures the Express application, middleware (compression, cache, logging, static files, body parsing), view engine, and routing setup.
22 | - Refactored `app.js` to serve as the main application bootstrap file, orchestrating the initialization and startup of the different modules (Database, Redis, P2P, WebSocket, Express).
23 | - Improved modularity and separation of concerns across the codebase.
24 | - Enhanced graceful shutdown logic in `app.js` to properly close P2P connections, Redis clients, and the HTTP server.
25 |
26 | ### Improvements
27 | - Enhanced code organization, readability, and maintainability.
28 | - Simplified the main application entry point (`app.js`).
29 | - Clearer responsibility delegation to dedicated modules.
30 |
31 | ## [1.0.13] - 2025-04-10
32 |
33 | ### Feature Enhancements
34 | - Added file size extraction from torrent metadata in processMetadata function
35 | - Updated MongoDB schema and SQLite database to support file size storage
36 | - Added totalSize field to store and display the complete torrent size
37 | - Enhanced fileTreeUtils.js with file size formatting utilities
38 | - Updated UI in all views (search, latest, infohash) to display file sizes
39 | - Added file size formatting helper function for both server and client-side use
40 | - Improved WebSocket handling to support file size information in real-time updates
41 |
42 | ### UI Improvements
43 | - Fixed vertical alignment issues between filenames and file sizes in directory tree
44 | - Updated CSS selectors to target both items-start and items-center classes
45 | - Enhanced file tree rendering for better visual consistency
46 | - Fixed folder toggle functionality to work with both directory and file elements
47 | - Improved client-side JS to handle both string files and object files with name/size
48 |
49 | ## [1.0.12] - 2025-04-10
50 |
51 | ### UI Enhancements
52 | - Improved file display on infohash detail pages to show all files instead of truncated list
53 | - Modified processFilesForDisplay function to conditionally skip file limits on infohash pages
54 | - Enhanced file tree rendering logic for better handling of large file lists
55 |
56 | ## [1.0.11] - 2025-04-09
57 |
58 | ### Directory Tree Improvements
59 | - Fixed directory tree display on search results page to work with all data sources
60 | - Enhanced handling of comma-separated file paths in the file tree processing
61 | - Improved file tree processing with consistent approach for all data formats
62 | - Added a central processFilesForDisplay helper function for code consistency
63 | - Enhanced client-side tree initialization to better handle dynamic content
64 | - Updated Elasticsearch integration to properly process file data for tree display
65 | - Improved error handling in directory tree rendering when data is incomplete
66 |
67 | ## [1.0.10] - 2025-04-09
68 |
69 | ### UI Enhancements
70 | - Refactored directory tree initialization and state management for enhanced reliability
71 | - Implemented promise-based approach for waiting on directory tree initialization
72 | - Added processing state management to prevent user interactions during folder operations
73 | - Enhanced folder state checks to prevent visual glitches and ensure consistency
74 | - Improved DOM event handling for better responsiveness with dynamically loaded content
75 | - Integrated MutationObserver to automatically initialize directory trees added to the DOM
76 | - Updated CSS to disable transitions during operations for improved performance
77 | - Enhanced error handling with detailed logging and retry mechanisms
78 | - Removed unused collapse/expand buttons from infohash.ejs and search.ejs for cleaner UI
79 |
80 | ## [1.0.9] - 2025-04-09
81 |
82 | ### Performance Improvements
83 | - Optimized /latest page loading speed and rendering performance
84 | - Reduced default page size from 25 to 15 items for faster initial load
85 | - Increased cache duration for latest page results from 5 to 15 minutes
86 | - Improved file display with optimized field projection in database queries
87 | - Enhanced client-side WebSocket initialization with delayed loading
88 |
89 | ## [1.0.8] - 2025-04-09
90 |
91 | ### New Features
92 | - Added environment variable controls for independent daemon and webserver operation
93 | - `RUN_DAEMON=true/false` to control P2P Spider daemon
94 | - `RUN_WEBSERVER=true/false` to control web server
95 | - Both components can now run independently or together
96 |
97 | ## [1.0.7] - 2025-04-09
98 |
99 | ### UI Enhancements
100 | - Enhanced file display logic in latest.ejs for improved handling of magnetData.files
101 | - Added file count limiting in directory tree with "more files" link for better UI performance
102 | - Refactored directory tree initialization with IIFE pattern to prevent global scope pollution
103 | - Implemented retry capability for dynamic content loading in directory tree component
104 | - Updated event listeners for directory controls to work within specific tree containers
105 | - Enhanced error handling and folder icon management in file browser components
106 |
107 | ## [1.0.6] - 2025-04-09
108 |
109 | ### Bug Fixes
110 | - Fixed stylesheet inclusion issue on search page where directory-tree.css was loaded outside the head block
111 | - Properly moved directory-tree.css link to the header include file for better HTML structure
112 |
113 | ## [1.0.5] - 2025-04-08
114 |
115 | ### UI Enhancements
116 | - Implemented tree structure for file paths in magnetController.js and associated views
117 | - Added collapsible directory tree with visual feedback for better user interaction
118 | - Enhanced folder interaction with "Collapse All" and "Expand All" buttons
119 | - Improved rendering performance by limiting displayed files
120 | - Added CSS for directory tree styling and JavaScript for dynamic functionality
121 | - Integrated file tree utilities for better file path management
122 |
123 | ### Performance Improvements
124 | - Updated cache durations for improved performance in magnetController.js
125 | - Enhanced rendering logic in views for better performance with large file lists
126 | - Optimized file display with support for both tree view and simple list formats
127 |
128 | ## [1.0.4] - 2025-04-08
129 |
130 | ### Connectivity Improvements
131 | - Updated tracker URLs in magnetController.js to include additional and updated torrent trackers for improved connectivity
132 |
133 | ### Database Configuration
134 | - Updated database configuration to use SQLite as default and set fallback MongoDB URI
135 | - Simplified local development setup with SQLite as the default database
136 | - Enhanced compatibility across different environments
137 |
138 | ### Development Improvements
139 | - Updated .gitignore to include additional database and environment files
140 | - Added entries for database shared memory and write-ahead log files
141 | - Organized system files for clarity
142 |
143 | ## [1.0.3] - 2025-04-08
144 |
145 | ### Performance & Stability
146 | - Fixed deadlock issues occurring during resource-intensive search operations
147 | - Implemented comprehensive timeout handling for database and cache operations
148 | - Enhanced memory management in the caching system:
149 | - Added LRU (Least Recently Used) eviction policy to prevent memory leaks
150 | - Implemented cache size limits with automatic pruning
151 | - Added periodic cleanup of expired cache items
152 | - Improved database handling:
153 | - Enhanced SQLite configuration with WAL mode for better concurrency
154 | - Added query timeouts to prevent long-running operations from blocking
155 | - Optimized connection handling for high-load scenarios
156 | - Redis improvements:
157 | - Added robust connection management with automatic reconnection
158 | - Implemented connection health checks to detect and recover from zombied connections
159 | - Added timeout handling for Redis operations
160 |
161 | ### Search Enhancements
162 | - Optimized search handling for large datasets with better error recovery
163 | - Improved handling of resource-intensive search queries
164 | - Added graceful fallbacks for search operations that exceed timeout thresholds
165 |
166 | ## [1.0.2] - 2025-04-08
167 |
168 | ### Added
169 | - Integrated Elasticsearch for powerful full-text search capabilities
170 | - Added configuration options for Elasticsearch in `.env` file
171 | - Created bulk indexing utility for migrating existing data to Elasticsearch
172 | - Enhanced search functionality to use Elasticsearch when available
173 |
174 | ## [1.0.1] - 2025-03-30
175 |
176 | ### Bug Fixes
177 | - Fixed inconsistent page titles in search and infohash pages that were showing "Tordex" instead of the configured site name
178 |
179 | ## [1.0.0] - 2025-03-30
180 |
181 | ### Major Architectural Changes
182 | - Migrated from PUG templates to EJS templates
183 | - Consolidated architecture from separate `daemon.js` and `webserver.js` into a unified `app.js`
184 | - Added support for SQLite as an alternative to MongoDB
185 | - Implemented Redis for caching recently seen infohashes
186 |
187 | ### Frontend Enhancements
188 | - Implemented Tailwind CSS for modern, responsive design
189 | - Added comprehensive favicon support with light/dark variants
190 | - Improved font system:
191 | - Added Google Fonts (Inter, Manrope) for improved typography
192 | - Integrated FontAwesome icons via local files
193 | - Enhanced real-time updates via WebSocket implementation
194 | - Interface improvements:
195 | - Enhanced search functionality
196 | - Improved stats visualization
197 | - Better mobile responsiveness
198 |
199 | ### Backend Improvements
200 | - Performance optimizations:
201 | - WebSocket message batching to reduce overhead
202 | - Connection health monitoring
203 | - Throttled broadcasts to prevent excessive updates
204 | - DHT Spider enhancements:
205 | - Improved error handling in `dhtspider.js`
206 | - Optimized index.js for better resource usage
207 | - Database layer enhancements:
208 | - Abstracted database operations through unified interface
209 | - Added SQLite support alongside MongoDB
210 | - Enhanced query performance
211 |
212 | ### Configuration & Deployment
213 | - Environment configuration:
214 | - Added `.env` support with `.env.sample` template
215 | - Improved configuration validation
216 | - Process management:
217 | - Enhanced PM2 integration with optimized ecosystem.json
218 | - Added production mode configuration
219 | - Documentation:
220 | - Added performance optimization guidelines
221 | - Improved setup instructions
222 | - Added screenshots
223 |
224 | ### Security & Stability
225 | - Improved error handling throughout the application
226 | - Better handling of connection failures and reconnection logic
227 | - Added memory limits and improved resource allocation
228 |
229 | ### Removed Features
230 | - Removed separate daemon.js and webserver.js in favor of unified app.js
231 | - Cleaned up temporary font files and generator templates
--------------------------------------------------------------------------------
/views/statistics.ejs:
--------------------------------------------------------------------------------
1 |
2 |
3 | <%- include('includes/header') %>
4 |
5 | <%- include('includes/navbar') %>
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 | Statistics
14 |
15 |
16 | Our database is constantly growing as we discover more magnets from the DHT swarm.
17 |
18 |
19 |
20 |
129 |
130 |
131 |
132 |
133 | <%- include('includes/footer') %>
134 |
135 |
240 |
241 |
--------------------------------------------------------------------------------
/models/db.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const mongoose = require('mongoose');
4 | const sqlite3 = require('sqlite3').verbose();
5 | const fs = require('fs');
6 | const path = require('path');
7 | const ensureDataDir = require('../utils/ensureDataDir');
8 |
// Database configuration (read once at module load)
const DB_TYPE = process.env.DB_TYPE || 'sqlite';
const MONGO_URI = process.env.MONGO_URI || 'mongodb://127.0.0.1/magnetdb';
const SQLITE_PATH = process.env.SQLITE_PATH || './data/magnet.db';

// Ensure data directory exists for SQLite before the database file is opened
if (DB_TYPE === 'sqlite') {
  ensureDataDir(SQLITE_PATH);
}

// Define Mongoose Schema for magnet records.
// `fetchedAt` is an epoch-milliseconds Number, not a Date.
const magnetSchema = new mongoose.Schema({
  name: { type: String, index: true },
  infohash: { type: String, index: true },
  magnet: String,
  // Per-file metadata; the SQLite backend stores this as a JSON string instead.
  files: [{
    path: String,
    size: Number
  }],
  totalSize: { type: Number, default: 0 },
  fetchedAt: { type: Number, default: Date.now }
});

const Magnet = mongoose.model('Magnet', magnetSchema);

// Global database instance (module-level singleton, claimed by the first
// Database constructed)
let dbInstance = null;
36 |
37 | // Database interface class
38 | class Database {
  /**
   * Create the Database wrapper.
   *
   * Fields: `type` (backend name from DB_TYPE), `db` (SQLite handle, set in
   * connect()), `connected` (readiness flag), and `totalCount` /
   * `lastCountUpdate` (cached document counter plus its refresh timestamp).
   */
  constructor() {
    this.type = DB_TYPE;
    this.db = null;
    this.connected = false;
    this.totalCount = 0;
    this.lastCountUpdate = 0;

    // Only set as global instance if none exists — the first instance wins
    // and becomes the module-level singleton (`dbInstance`).
    if (!dbInstance) {
      dbInstance = this;
    }
  }
51 |
  /**
   * Open the configured database backend (MongoDB or SQLite).
   *
   * Deliberately sets `this.connected = true` BEFORE the backend finishes
   * connecting so HTTP routes can start serving immediately; the flag is
   * reset to false if the connection or setup fails. The document counter
   * and Elasticsearch initialization are started in the background
   * (fire-and-forget) rather than awaited.
   *
   * @returns {Promise<void>} Resolves once the backend handle is usable.
   * @throws When the MongoDB connection or the SQLite open / table setup fails.
   */
  async connect() {
    // Required lazily — presumably to avoid a circular dependency at module
    // load time; confirm before hoisting to a top-level require.
    const elasticsearch = require('./elasticsearch');

    if (this.type === 'mongodb') {
      try {
        // Set connected first so routes can work immediately
        this.connected = true;

        // Connect to MongoDB
        await mongoose.connect(MONGO_URI);
        console.log('MongoDB has connected.');

        // Initialize counter in background
        this.initializeCounter().then(() => {
          console.log('MongoDB counter initialized');
        }).catch(err => {
          console.error('Error initializing MongoDB counter:', err);
        });

        // Initialize Elasticsearch in background if enabled
        elasticsearch.initialize().catch(err => {
          console.error('Error initializing Elasticsearch:', err);
        });

        return;
      } catch (err) {
        console.error('MongoDB connection error:', err);
        this.connected = false; // Reset if connection fails
        throw err;
      }
    } else if (this.type === 'sqlite') {
      return new Promise((resolve, reject) => {
        // First set connected so routes can work immediately
        this.connected = true;

        // Configure SQLite for better concurrency
        this.db = new sqlite3.Database(SQLITE_PATH,
          // Use WAL mode for better concurrency
          sqlite3.OPEN_READWRITE | sqlite3.OPEN_CREATE | sqlite3.OPEN_FULLMUTEX,
          (err) => {
            if (err) {
              console.error('SQLite connection error:', err);
              this.connected = false; // Reset if connection fails
              reject(err);
              return;
            }

            console.log('SQLite has connected.');

            // Set pragmas for better performance
            this.db.serialize(() => {
              // Enable WAL mode for better concurrency
              this.db.run('PRAGMA journal_mode = WAL;');
              // Set a reasonable busy timeout
              this.db.run('PRAGMA busy_timeout = 5000;');
              // Increase cache size for better performance
              this.db.run('PRAGMA cache_size = -20000;'); // ~20MB cache
              // Set synchronous mode to NORMAL for better performance
              this.db.run('PRAGMA synchronous = NORMAL;');

              // Run table creation
              this.setupSQLiteTables().then(() => {
                // Initialize counter in background without waiting for result
                this.initializeCounter().then(() => {
                  console.log('SQLite counter initialized');
                }).catch(err => {
                  console.error('Error initializing SQLite counter:', err);
                });

                // Initialize Elasticsearch in background if enabled
                elasticsearch.initialize().catch(err => {
                  console.error('Error initializing Elasticsearch:', err);
                });

                resolve();
              }).catch(err => {
                this.connected = false; // Reset if setup fails
                reject(err);
              });
            });
          }
        );
      });
    } else {
      throw new Error(`Unsupported database type: ${this.type}`);
    }
  }
139 |
140 | // Helper method to set up SQLite tables without blocking main initialization
141 | async setupSQLiteTables() {
142 | return new Promise((resolve, reject) => {
143 | this.db.run(`
144 | CREATE TABLE IF NOT EXISTS magnets (
145 | id INTEGER PRIMARY KEY AUTOINCREMENT,
146 | name TEXT,
147 | infohash TEXT UNIQUE,
148 | magnet TEXT,
149 | files TEXT,
150 | totalSize INTEGER DEFAULT 0,
151 | fetchedAt INTEGER
152 | )
153 | `, (err) => {
154 | if (err) {
155 | console.error('SQLite table creation error:', err);
156 | reject(err);
157 | return;
158 | }
159 |
160 | this.db.run('CREATE INDEX IF NOT EXISTS idx_infohash ON magnets(infohash)', (err) => {
161 | if (err) {
162 | console.error('SQLite index creation error:', err);
163 | reject(err);
164 | return;
165 | }
166 |
167 | this.db.run('CREATE INDEX IF NOT EXISTS idx_name ON magnets(name)', (err) => {
168 | if (err) {
169 | console.error('SQLite index creation error:', err);
170 | reject(err);
171 | return;
172 | }
173 |
174 | // Add an index for fetchedAt to improve 'latest' page performance
175 | this.db.run('CREATE INDEX IF NOT EXISTS idx_fetchedAt ON magnets(fetchedAt DESC)', (err) => {
176 | if (err) {
177 | console.error('SQLite index creation error:', err);
178 | reject(err);
179 | return;
180 | }
181 |
182 | // Add an index for totalSize
183 | this.db.run('CREATE INDEX IF NOT EXISTS idx_totalSize ON magnets(totalSize)', (err) => {
184 | if (err) {
185 | console.error('SQLite index creation error:', err);
186 | reject(err);
187 | return;
188 | }
189 |
190 | resolve();
191 | });
192 | });
193 | });
194 | });
195 | });
196 | });
197 | }
198 |
199 | // This method initializes the counter without blocking the connection
200 | async initializeCounter() {
201 | try {
202 | if (this.type === 'mongodb') {
203 | this.totalCount = await Magnet.countDocuments({});
204 | } else {
205 | this.totalCount = await new Promise((resolve, reject) => {
206 | this.db.get('SELECT COUNT(*) as count FROM magnets', [], (err, row) => {
207 | if (err) {
208 | console.error('SQLite count error:', err);
209 | reject(err);
210 | } else {
211 | resolve(row ? row.count : 0);
212 | }
213 | });
214 | });
215 | }
216 |
217 | this.lastCountUpdate = Date.now();
218 | console.log(`Initial document count: ${this.totalCount}`);
219 | } catch (err) {
220 | console.error('Error initializing counter:', err);
221 | // Set default count to 0 if there's an error
222 | this.totalCount = 0;
223 | this.lastCountUpdate = Date.now();
224 | }
225 | }
226 |
227 | async findOne(query) {
228 | if (!this.connected) await this.connect();
229 |
230 | if (this.type === 'mongodb') {
231 | return await Magnet.findOne(query).exec();
232 | } else {
233 | return new Promise((resolve, reject) => {
234 | this.db.get('SELECT * FROM magnets WHERE infohash = ?', [query.infohash], (err, row) => {
235 | if (err) {
236 | reject(err);
237 | } else {
238 | if (row) {
239 | // Convert the files string back to array
240 | row.files = row.files ? JSON.parse(row.files) : [];
241 | }
242 | resolve(row);
243 | }
244 | });
245 | });
246 | }
247 | }
248 |
249 | async saveMagnet(magnetData) {
250 | if (!this.connected) await this.connect();
251 |
252 | const elasticsearch = require('./elasticsearch');
253 |
254 | if (this.type === 'mongodb') {
255 | const magnetDoc = new Magnet(magnetData);
256 | const result = await magnetDoc.save();
257 | if (result) {
258 | this.totalCount++; // Increment counter on successful save
259 |
260 | // Index in Elasticsearch if enabled
261 | if (elasticsearch.isElasticsearchEnabled()) {
262 | elasticsearch.indexDocument(magnetData).catch(err => {
263 | console.error('Error indexing in Elasticsearch:', err);
264 | });
265 | }
266 | }
267 | return result;
268 | } else {
269 | return new Promise((resolve, reject) => {
270 | const { name, infohash, magnet, files, totalSize, fetchedAt } = magnetData;
271 | const filesJson = JSON.stringify(files || []);
272 |
273 | this.db.run(
274 | 'INSERT INTO magnets (name, infohash, magnet, files, totalSize, fetchedAt) VALUES (?, ?, ?, ?, ?, ?)',
275 | [name, infohash, magnet, filesJson, totalSize || 0, fetchedAt],
276 | function(err) {
277 | if (err) {
278 | // Handle UNIQUE constraint error
279 | if (err.code === 'SQLITE_CONSTRAINT') {
280 | resolve(null); // Already exists
281 | } else {
282 | reject(err);
283 | }
284 | } else {
285 | this.totalCount++; // Increment counter on successful save
286 |
287 | // Index in Elasticsearch if enabled
288 | if (elasticsearch.isElasticsearchEnabled()) {
289 | elasticsearch.indexDocument(magnetData).catch(err => {
290 | console.error('Error indexing in Elasticsearch:', err);
291 | });
292 | }
293 |
294 | resolve({ id: this.lastID, ...magnetData });
295 | }
296 | }.bind(this) // Bind to access this.totalCount
297 | );
298 | });
299 | }
300 | }
301 |
302 | async countDocuments(query = {}) {
303 | if (!this.connected) await this.connect();
304 |
305 | // If it's a complex query or it's been over an hour since last full count, do a real count
306 | const isComplexQuery = Object.keys(query).length > 0;
307 | const shouldRefreshCount = Date.now() - this.lastCountUpdate > 3600000; // 1 hour
308 |
309 | if (isComplexQuery) {
310 | // For complex queries, we still need to do a full count
311 | if (this.type === 'mongodb') {
312 | return await Magnet.countDocuments(query);
313 | } else {
314 | return new Promise((resolve, reject) => {
315 | const whereClause = this.buildWhereClause(query);
316 | const sql = whereClause
317 | ? `SELECT COUNT(*) as count FROM magnets WHERE ${whereClause}`
318 | : 'SELECT COUNT(*) as count FROM magnets';
319 |
320 | this.db.get(sql, [], (err, row) => {
321 | if (err) {
322 | reject(err);
323 | } else {
324 | resolve(row.count);
325 | }
326 | });
327 | });
328 | }
329 | } else {
330 | // For empty queries requesting total count, use the cached counter
331 | if (shouldRefreshCount) {
332 | // Periodically refresh the total count to ensure accuracy
333 | if (this.type === 'mongodb') {
334 | this.totalCount = await Magnet.countDocuments({});
335 | } else {
336 | this.totalCount = await new Promise((resolve, reject) => {
337 | this.db.get('SELECT COUNT(*) as count FROM magnets', [], (err, row) => {
338 | if (err) reject(err);
339 | else resolve(row.count);
340 | });
341 | });
342 | }
343 | this.lastCountUpdate = Date.now();
344 | console.log(`Refreshed document count: ${this.totalCount}`);
345 | }
346 |
347 | return this.totalCount;
348 | }
349 | }
350 |
351 | async find(query, options = {}) {
352 | if (!this.connected) await this.connect();
353 |
354 | if (this.type === 'mongodb') {
355 | let mongoQuery = Magnet.find(query);
356 |
357 | if (options.sort) mongoQuery = mongoQuery.sort(options.sort);
358 | if (options.limit) mongoQuery = mongoQuery.limit(options.limit);
359 | if (options.skip) mongoQuery = mongoQuery.skip(options.skip);
360 | if (options.projection) mongoQuery = mongoQuery.select(options.projection);
361 |
362 | // Use lean() to get plain JavaScript objects instead of Mongoose documents
363 | // This significantly improves performance by skipping document hydration
364 | return await mongoQuery.lean().exec();
365 | } else {
366 | // Convert MongoDB-style query to SQLite
367 | const whereClause = this.buildWhereClause(query);
368 | const { sort, limit, skip, projection } = options;
369 |
370 | // Optimize the fields selection for SQLite
371 | let fieldSelection = '*';
372 | if (projection) {
373 | // Convert MongoDB-style projection to SQLite column selection
374 | const fields = [];
375 | for (const field in projection) {
376 | if (projection[field] === 1 || projection[field] === true) {
377 | fields.push(field);
378 | }
379 | }
380 | if (fields.length > 0) {
381 | // Always include id to ensure we have a primary key
382 | if (!fields.includes('id')) {
383 | fields.unshift('id');
384 | }
385 | fieldSelection = fields.join(', ');
386 | }
387 | }
388 |
389 | let sql = `SELECT ${fieldSelection} FROM magnets`;
390 | if (whereClause) sql += ` WHERE ${whereClause}`;
391 |
392 | if (sort) {
393 | const sortField = Object.keys(sort)[0];
394 | const sortOrder = sort[sortField] === 1 ? 'ASC' : 'DESC';
395 | sql += ` ORDER BY ${sortField} ${sortOrder}`;
396 | }
397 |
398 | if (limit) sql += ` LIMIT ${limit}`;
399 | if (skip) sql += ` OFFSET ${skip}`;
400 |
401 | try {
402 | // Use our new timeout method
403 | const rows = await this.querySQLiteWithTimeout('all', sql, [], 15000);
404 |
405 | // Convert files string to array for each row
406 | rows.forEach(row => {
407 | if (row.files && typeof row.files === 'string') {
408 | try {
409 | row.files = JSON.parse(row.files);
410 | } catch (e) {
411 | row.files = [];
412 | }
413 | }
414 | });
415 | return rows;
416 | } catch (err) {
417 | console.error('SQLite query error:', err);
418 | // Return empty array rather than crashing
419 | return [];
420 | }
421 | }
422 | }
423 |
424 | buildWhereClause(query) {
425 | if (Object.keys(query).length === 0) return '';
426 |
427 | const clauses = [];
428 | for (const key in query) {
429 | const value = query[key];
430 |
431 | if (typeof value === 'string') {
432 | // Check if value contains % for LIKE queries
433 | if (value.includes('%')) {
434 | clauses.push(`${key} LIKE '${value}'`);
435 | } else {
436 | clauses.push(`${key} = '${value}'`);
437 | }
438 | } else if (typeof value === 'number') {
439 | clauses.push(`${key} = ${value}`);
440 | }
441 | }
442 |
443 | return clauses.join(' AND ');
444 | }
445 |
446 | async removeMagnet(query) {
447 | if (!this.connected) await this.connect();
448 |
449 | if (this.type === 'mongodb') {
450 | const result = await Magnet.deleteOne(query);
451 | if (result.deletedCount > 0) {
452 | this.totalCount--;
453 | }
454 | return result;
455 | } else {
456 | return new Promise((resolve, reject) => {
457 | const whereClause = this.buildWhereClause(query);
458 | if (!whereClause) {
459 | resolve({ deleted: 0 }); // Safety check
460 | return;
461 | }
462 |
463 | this.db.run(
464 | `DELETE FROM magnets WHERE ${whereClause}`,
465 | function(err) {
466 | if (err) {
467 | reject(err);
468 | } else {
469 | const deleted = this.changes;
470 | if (deleted > 0) {
471 | this.totalCount -= deleted;
472 | }
473 | resolve({ deleted });
474 | }
475 | }.bind(this) // Bind to access this.totalCount
476 | );
477 | });
478 | }
479 | }
480 |
481 | // Utility method for forcing a refresh of the counter
482 | async refreshCounter() {
483 | if (!this.connected) await this.connect();
484 |
485 | if (this.type === 'mongodb') {
486 | this.totalCount = await Magnet.countDocuments({});
487 | } else {
488 | this.totalCount = await new Promise((resolve, reject) => {
489 | this.db.get('SELECT COUNT(*) as count FROM magnets', [], (err, row) => {
490 | if (err) reject(err);
491 | else resolve(row.count);
492 | });
493 | });
494 | }
495 |
496 | this.lastCountUpdate = Date.now();
497 | console.log(`Refreshed document count: ${this.totalCount}`);
498 | return this.totalCount;
499 | }
500 |
501 | // Helper method to execute SQLite queries with a timeout
502 | async querySQLiteWithTimeout(method, sql, params = [], timeout = 10000) {
503 | return new Promise((resolve, reject) => {
504 | // Add query start time tracking for performance monitoring
505 | const startTime = Date.now();
506 | const timer = setTimeout(() => {
507 | console.error(`SQLite query timeout after ${timeout}ms: ${sql}`);
508 | reject(new Error(`SQLite query timeout after ${timeout}ms: ${sql}`));
509 | }, timeout);
510 |
511 | this.db[method](sql, params, (err, result) => {
512 | clearTimeout(timer);
513 | // Log slow queries for debugging
514 | const queryTime = Date.now() - startTime;
515 | if (queryTime > 500) { // Log queries taking more than 500ms
516 | console.warn(`Slow SQLite query (${queryTime}ms): ${sql}`);
517 | }
518 |
519 | if (err) {
520 | reject(err);
521 | } else {
522 | resolve(result);
523 | }
524 | });
525 | });
526 | }
527 | }
528 |
// Accessor for the shared Database singleton: create it on first use,
// then hand back the same instance on every subsequent call.
function getDatabase() {
  if (!dbInstance) {
    dbInstance = new Database();
  }
  return dbInstance;
}

// Export the class along with the singleton accessor
module.exports = { Database, getDatabase };
--------------------------------------------------------------------------------
/RELEASE_NOTES.md:
--------------------------------------------------------------------------------
1 | # P2P Spider v1.1.1 Release Notes
2 |
3 | This release includes several refactoring efforts and enhancements focused on improving script robustness, data consistency in indexing, and WebSocket communication clarity.
4 |
5 | ## Key Enhancements
6 |
7 | - **`reset_data.sh` Improvements**:
8 | - Simplified the logic for deleting Elasticsearch indices.
9 | - Improved handling of URLs by properly escaping special characters and using double quotes, enhancing command execution consistency and error handling.
10 | - Enhanced status messages to clearly differentiate between successful index deletions and attempts to delete non-existent indices.
11 | - **Elasticsearch Indexing**:
12 | - Refactored file handling logic to better process file paths.
13 | - Transformed the `processedFiles` data into a structured string of file paths before indexing, ensuring better data consistency within Elasticsearch documents.
14 | - **WebSocket Handling**:
15 | - Updated the client-side logic (`index.ejs`) to more clearly distinguish between 'new_magnet' and 'count_update' message types.
16 | - Added logging for unexpected message types to aid debugging.
17 |
18 | ## Benefits
19 |
20 | - Increased robustness and clarity of the `reset_data.sh` script.
21 | - Improved data consistency for file paths stored in Elasticsearch.
22 | - Clearer and more maintainable WebSocket message handling on the client-side.
23 |
24 | ## Upgrading
25 |
26 | No database schema changes or manual configuration updates are required for this version.
27 |
28 | 1. Pull the latest changes from the repository:
29 | ```bash
30 | git pull origin master # Or your main branch name
31 | ```
32 | 2. Restart the application:
33 | ```bash
34 | npm start # Or your usual start command (e.g., pm2 restart app)
35 | ```
36 |
37 | ---
38 |
39 | # P2P Spider v1.1.0 Release Notes
40 |
41 | This release focuses on a major internal refactoring of the codebase to improve modularity, maintainability, and overall structure. While there are no significant new user-facing features, these changes lay a foundation for future development and enhance the project's robustness.
42 |
43 | ## Major Refactoring
44 |
45 | The core `app.js` file, which previously handled many different responsibilities, has been significantly refactored. Its logic has been broken down and moved into dedicated modules:
46 |
47 | - **Configuration (`config/`)**: Environment variables (`env.js`) and Express app setup (`express.js`) are now centralized in the `config` directory.
48 | - **Core Libraries (`lib/`)**: Database initialization (`database.js`), Redis client management (`redis.js`), and P2P Spider logic (`p2p.js`, `index.js`) are now organized within the `lib` directory.
49 | - **Services (`services/`)**: WebSocket server logic (`websocket.js`) is now handled by a dedicated service module.
50 |
51 | The main `app.js` file now acts as a streamlined orchestrator, responsible for initializing these modules and starting the application services (Web Server, P2P Daemon) based on the configuration.
52 |
53 | ## Key Benefits of the Refactoring
54 |
55 | - **Improved Modularity**: Code is now organized into logical, single-responsibility modules.
56 | - **Enhanced Maintainability**: Easier to understand, modify, and debug specific parts of the application.
57 | - **Better Readability**: The codebase structure is clearer and the main entry point is simplified.
58 | - **Increased Robustness**: Clearer separation of concerns reduces the chance of unintended side effects.
59 | - **Improved Shutdown**: Graceful shutdown process has been enhanced to reliably close all components (HTTP server, P2P Spider, Redis client).
60 |
61 | ## Upgrading
62 |
63 | This update involves significant changes to the internal file structure.
64 |
65 | 1. Pull the latest changes from the repository:
66 | ```bash
67 | git pull origin master # Or your main branch name
68 | ```
2. Install/update dependencies (optional — no package.json changes were made in this refactor):
70 | ```bash
71 | npm install
72 | ```
73 | 3. Restart the application:
74 | ```bash
75 | npm start # Or your usual start command (e.g., pm2 restart app)
76 | ```
77 |
78 | No database schema changes or manual configuration updates are required for this version.
79 |
80 | ---
81 |
82 | # P2P Spider v1.0.13 Release Notes
83 |
84 | We're pleased to announce the release of P2P Spider v1.0.13, which enhances the metadata extraction capabilities with file size tracking and improves the file tree display UI.
85 |
86 | ## What's New
87 |
88 | ### File Size Tracking
89 | - Added file size extraction from torrent metadata
90 | - Updated the database schema to store individual file sizes and total torrent size
91 | - Enhanced the UI to display file sizes in human-readable format (B, KB, MB, GB, TB)
92 | - Improved file tree view to show individual file sizes
93 | - Added total torrent size display on all views (search, latest, infohash)
94 |
95 | ### File Tree UI Improvements
96 | - Fixed vertical alignment issues in file tree display
97 | - Enhanced CSS to ensure consistent alignment between filenames and file sizes
98 | - Updated folder toggle functionality to work with both directory and file elements
99 | - Improved tree rendering for better visual consistency
100 |
101 | ## Benefits
102 | - Better understanding of content size before downloading
103 | - More comprehensive metadata for torrents
104 | - Enhanced file browsing experience with detailed size information
105 | - Improved search experience with at-a-glance size information
106 | - More useful metadata for archival and analysis purposes
107 | - Consistent visual alignment across all UI elements
108 |
109 | ## Upgrading
110 | This update requires:
111 | - Pull the latest changes from the repository
112 | - Database schema updates will be automatically applied
113 | - Restart the application
114 |
115 | ---
116 |
117 | # P2P Spider v1.0.12 Release Notes
118 |
119 | We're pleased to announce the release of P2P Spider v1.0.12, which improves the file listing experience on individual infohash pages.
120 |
121 | ## What's New
122 |
123 | ### Enhanced Infohash File Listings
124 | - Improved file display on infohash detail pages to show all files instead of truncated list
125 | - Modified processFilesForDisplay function to conditionally skip file limits on infohash pages
126 | - Enhanced file tree rendering logic for better handling of large file lists
127 |
128 | ## Benefits
129 | - Complete file listing for better exploration of torrents on dedicated infohash pages
130 | - Improved user experience when examining detailed content of specific torrents
131 | - Better visualization of complex file structures for comprehensive torrent assessment
132 | - Consistent browsing experience across all pages with appropriate file detail level
133 |
134 | ## Upgrading
135 | This update requires:
136 | - Pull the latest changes from the repository
137 | - No database schema changes are required
138 | - Restart the application
139 |
140 | ---
141 |
142 | # P2P Spider v1.0.11 Release Notes
143 |
144 | We're pleased to announce the release of P2P Spider v1.0.11, which improves the directory tree display functionality to work consistently across all data sources.
145 |
146 | ## What's New
147 |
148 | ### Directory Tree Display Improvements
149 | - Fixed directory tree display on search results page to properly show hierarchical file structure
150 | - Enhanced handling of comma-separated file paths by converting them to proper directory structure
151 | - Improved file tree processing to work consistently with both Elasticsearch and database results
152 | - Refactored file processing code for better maintainability and consistency across all pages
153 | - Enhanced client-side tree initialization to handle different data formats seamlessly
154 |
155 | ## Benefits
156 | - Consistent directory tree display across all pages (search, latest, and infohash)
157 | - Improved user experience when browsing search results with complex file structures
158 | - More reliable file navigation regardless of data source (Elasticsearch or database)
159 | - Better visual representation of folder structures
160 | - Enhanced compatibility across different file path formats
161 |
162 | ## Upgrading
163 | This update requires:
164 | - Pull the latest changes from the repository
165 | - No database schema changes are required
166 | - Restart the application
167 |
168 | ---
169 |
170 | # P2P Spider v1.0.10 Release Notes
171 |
172 | We're pleased to announce the release of P2P Spider v1.0.10, which focuses on enhancing the directory tree functionality for a more reliable and responsive user experience.
173 |
174 | ## What's New
175 |
176 | ### Directory Tree Enhancements
177 | - Completely refactored directory tree initialization and state management to prevent duplicate initializations
178 | - Implemented a promise-based approach for waiting on directory trees to ensure proper loading
179 | - Added processing state management to prevent user interactions during folder operations
180 | - Improved folder open/close logic to eliminate visual glitches and ensure consistency
181 | - Enhanced DOM event handling for better responsiveness with dynamically loaded content
182 | - Added MutationObserver to automatically initialize trees added to the DOM
183 | - Updated CSS to disable transitions during operations for better performance
184 | - Improved error handling and retry mechanisms for more reliable initialization
185 |
186 | ## Benefits
187 | - More reliable directory tree functionality across the application
188 | - Better visual consistency when opening and closing folders
189 | - Improved user experience with proper state management during operations
190 | - Enhanced performance with optimized transitions and event handling
191 | - More robust handling of dynamically loaded content
192 | - Prevented duplicate event handlers and initialization issues
193 |
194 | ## Upgrading
195 | This update requires:
196 | - Pull the latest changes from the repository
197 | - No database schema changes are required
198 | - Restart the application
199 |
200 | ---
201 |
202 | # P2P Spider v1.0.9 Release Notes
203 |
204 | We're pleased to announce the release of P2P Spider v1.0.9, which focuses on significant performance improvements to the /latest page for a faster and more responsive user experience.
205 |
206 | ## What's New
207 |
208 | ### Latest Page Performance Optimization
209 | - Reduced default page size from 25 to 15 items for faster initial rendering
210 | - Increased cache duration for latest page results from 5 to 15 minutes for better resource utilization
211 | - Implemented optimized database field projection to minimize data transfer
212 | - Enhanced file display with more efficient handling of large file lists
213 | - Improved client-side WebSocket handling with delayed initialization
214 |
215 | ## Benefits
216 | - Significantly faster loading times for the Latest Discoveries page
217 | - Reduced server load and improved resource utilization
218 | - Better performance on mobile devices and slower connections
219 | - More responsive real-time updates via optimized WebSocket handling
220 | - Enhanced overall user experience when browsing new content
221 |
222 | ## Upgrading
223 | This update requires:
224 | - Pull the latest changes from the repository
225 | - No database schema changes are required
226 | - Restart the application
227 |
228 | ---
229 |
230 | # P2P Spider v1.0.8 Release Notes
231 |
232 | We're pleased to announce the release of P2P Spider v1.0.8, which introduces flexible control over the daemon and webserver components, allowing them to run independently or together.
233 |
234 | ## What's New
235 |
236 | ### Component Independence
237 | - Added environment variable controls for independent operation of daemon and webserver
238 | - New configuration options in .env file:
239 | - `RUN_DAEMON=true/false` - Controls whether the P2P Spider daemon runs
240 | - `RUN_WEBSERVER=true/false` - Controls whether the web server runs
241 | - Both components can now be run independently or together based on your needs
242 |
243 | ## Benefits
244 | - More flexible deployment options
245 | - Reduced resource usage when only one component is needed
246 | - Better control over system resources
247 | - Easier debugging and maintenance of individual components
248 |
249 | ## Usage Examples
250 | 1. Run both daemon and webserver (default):
251 | ```bash
252 | node app.js
253 | ```
254 |
255 | 2. Run only the daemon:
256 | ```bash
257 | RUN_WEBSERVER=false node app.js
258 | ```
259 |
260 | 3. Run only the webserver:
261 | ```bash
262 | RUN_DAEMON=false node app.js
263 | ```
264 |
265 | ---
266 |
267 | # P2P Spider v1.0.7 Release Notes
268 |
269 | We're pleased to announce the release of P2P Spider v1.0.7, which enhances the file browsing experience and improves the robustness of the directory tree component.
270 |
271 | ## What's New
272 |
273 | ### Enhanced File Browsing Experience
274 | - Improved handling of file data in latest.ejs with better validation and error handling
275 | - Added intelligent file count limiting with "more files" link for cleaner UI presentation
276 | - Enhanced directory tree component with better container-specific interactions
277 | - Improved error recovery with retry capability for dynamic content loading
278 |
279 | ### Directory Tree Component Improvements
280 | - Refactored directory tree JavaScript with IIFE pattern for proper encapsulation
281 | - Enhanced folder icon management for more consistent visual experience
282 | - Updated event handling for better component-level isolation
283 | - Improved error handling throughout the file browser components
284 |
285 | ## Benefits
286 | - More reliable file browsing experience with better error handling
287 | - Improved performance when navigating torrents with large file counts
288 | - Enhanced code organization with better encapsulation of component logic
289 | - Consistent behavior across different parts of the application
290 |
291 | ## Upgrading
292 |
293 | This update requires:
294 | - Pull the latest changes from the repository
295 | - No database schema changes are required
296 | - Restart the application
297 |
298 | ---
299 |
300 | # P2P Spider v1.0.6 Release Notes
301 |
302 | We're pleased to announce the release of P2P Spider v1.0.6, which addresses a minor HTML structure issue on the search page.
303 |
304 | ## What's New
305 |
306 | ### HTML Structure Fix
307 | - Fixed an issue where the directory-tree.css stylesheet was being loaded outside the HTML head block
308 | - Properly relocated the stylesheet link to the header include file
309 | - Improved page load reliability and rendering consistency
310 |
311 | ## Benefits
312 | - More efficient HTML structure and proper stylesheet loading
313 | - Ensures consistent styling across different browsers
314 | - Maintains HTML standards compliance
315 |
316 | ## Upgrading
317 |
318 | This update requires:
319 | - Pull the latest changes from the repository
320 | - No database schema changes are required
321 | - Restart the application
322 |
323 | ---
324 |
325 | # P2P Spider v1.0.5 Release Notes
326 |
327 | We're pleased to announce the release of P2P Spider v1.0.5, which delivers significant UI enhancements for file browsing and improvements to rendering performance.
328 |
329 | ## What's New
330 |
331 | ### Enhanced File Navigation
332 | - Implemented a tree structure for file paths across all relevant views:
333 | - Added an interactive, collapsible directory tree for intuitive file navigation
334 | - Introduced "Collapse All" and "Expand All" buttons for improved user control
335 | - Enhanced visual feedback during folder interactions for better user experience
336 | - Improved file path management with dedicated file tree utilities
337 | - Added support for both tree view and simple list formats to accommodate different user preferences
338 |
339 | ### Performance Optimizations
340 | - Updated cache durations in magnetController.js for faster response times
341 | - Enhanced rendering logic to handle large file lists more efficiently
342 | - Optimized file display to limit initially displayed files for faster page loading
343 | - Improved overall rendering performance with better structured file data
344 |
345 | ## Benefits
346 | - More intuitive navigation of torrent file structures
347 | - Significantly improved user experience when browsing torrents with many files
348 | - Faster page loading and smoother interactions
349 | - Better visual hierarchy for complex file structures
350 |
351 | ## Upgrading
352 |
353 | This update requires:
354 | - Pull the latest changes from the repository
355 | - No database schema changes are required
356 | - Restart the application
357 |
358 | ---
359 |
360 | # P2P Spider v1.0.4 Release Notes
361 |
362 | We're pleased to announce the upcoming release of P2P Spider v1.0.4, which focuses on connectivity improvements, configuration enhancements, and better development experience.
363 |
364 | ## What's New
365 |
366 | ### Connectivity Improvements
367 | - Updated tracker URLs in magnetController.js to include additional and updated torrent trackers
368 | - Enhanced torrent discovery by adding more reliable trackers
369 | - Updated outdated tracker URLs to their current endpoints
370 | - Improved connection success rates for DHT operations
371 |
372 | ### Database Configuration
373 | - Updated database configuration to use SQLite as default with fallback MongoDB URI
374 | - Simplified local development setup with SQLite as the default database
375 | - Enhanced compatibility across different environments
376 | - Maintained MongoDB support for production deployments
377 |
378 | ### Development Experience
379 | - Updated .gitignore to include additional database and environment files
380 | - Added entries for SQLite shared memory and write-ahead log files
381 | - Organized system files for clarity
382 | - Improved cleanliness of repository for developers
383 |
384 | ## Upgrading
385 |
386 | This update requires:
387 | - Pull the latest changes from the repository
388 | - No database schema changes are required
389 | - Restart the application
390 |
391 | ---
392 |
393 | # P2P Spider v1.0.3 Release Notes
394 |
395 | We're pleased to announce the release of P2P Spider v1.0.3, which focuses on significant performance improvements and stability enhancements, particularly for resource-intensive operations.
396 |
397 | ## What's New
398 |
399 | ### Performance & Stability Improvements
400 | - Implemented robust timeout mechanisms for database and Redis operations to prevent deadlocks
401 | - Added LRU eviction policy to the memory cache system with size limits to prevent memory leaks
402 | - Enhanced SQLite configuration for better concurrency handling using WAL mode
403 | - Improved Redis connection handling with automatic health checks and recovery
404 | - Added graceful error handling for long-running queries that could cause system instability
405 |
406 | ### Search Optimization
407 | - Optimized search functionality for large datasets with improved query timeouts
408 | - Enhanced pagination handling for better performance with large result sets
409 | - Implemented graceful fallbacks for search operations to prevent system lockups
410 |
411 | ## Benefits
412 | - Significantly improved stability under heavy load and with large datasets
413 | - Better memory management preventing resource exhaustion
414 | - More responsive search experience, even for uncommon search terms
415 | - Reduced likelihood of application deadlocks during resource-intensive operations
416 |
417 | ## Upgrading
418 |
419 | This update requires:
420 | - Pull the latest changes from the repository
421 | - Install any dependencies if needed: `npm install`
422 | - No database schema changes are required
423 | - Restart the application
424 |
425 | ---
426 |
427 | # P2P Spider v1.0.2 Release Notes
428 |
429 | We're excited to announce the release of P2P Spider v1.0.2, which introduces Elasticsearch integration for significantly improved search capabilities.
430 |
431 | ## What's New
432 |
433 | ### Elasticsearch Integration
434 | - Added full Elasticsearch support for powerful and efficient full-text search
435 | - Implemented configuration options in the `.env` file for easy setup
436 | - Created a bulk indexing utility to migrate existing data to Elasticsearch
437 | - Enhanced search functionality to use Elasticsearch when available
438 |
439 | ### Benefits
440 | - Significantly faster search performance, especially for large datasets
441 | - Improved search relevance with better ranking of results
442 | - Support for fuzzy matching and advanced search capabilities
443 | - Scalable search infrastructure for growing magnet collections
444 |
445 | ## How to Use
446 |
447 | 1. Install Elasticsearch on your system or use a managed service
448 | 2. Update your `.env` file with the following settings:
449 | ```
450 | USE_ELASTICSEARCH=true
451 | ELASTICSEARCH_NODE=http://localhost:9200
452 | ELASTICSEARCH_INDEX=magnets
453 | ```
454 | 3. To migrate existing data to Elasticsearch, run:
455 | ```
456 | node utils/bulkIndexToElasticsearch.js
457 | ```
458 |
459 | ## Upgrading
460 |
461 | This update requires:
462 | - Pull the latest changes from the repository
463 | - Install the new dependencies: `npm install`
464 | - Update your `.env` file with Elasticsearch configuration
465 | - Restart the application
466 |
467 | ---
468 |
469 | # P2P Spider v1.0.1 Release Notes
470 |
471 | We're pleased to announce the release of P2P Spider v1.0.1, a maintenance update that fixes UI consistency issues across the application.
472 |
473 | ## What's New
474 |
475 | ### Bug Fixes
476 | - Fixed inconsistent page titles in search and infohash pages that were showing "Tordex" instead of the configured site name
477 | - All pages now correctly display the site name configured in your environment settings
478 | - This provides a more consistent and professional user experience
479 |
480 | ## Upgrading
481 |
482 | This is a minor update focused on UI consistency. Upgrading requires:
483 | - Pull the latest changes from the repository
484 | - Restart the application
485 |
486 | No database changes or configuration updates are required for this release.
487 |
488 | ---
489 |
490 | # P2P Spider v1.0.0 Release Notes
491 |
492 | We're excited to announce the official 1.0.0 release of P2P Spider, a powerful DHT network crawler and magnet link indexer with a modern web interface.
493 |
494 | ## What's New
495 |
496 | ### Unified Architecture
497 | - Consolidated codebase with a single entry point (`app.js`) replacing the separate daemon and web server
498 | - Complete migration from PUG to EJS templates for better maintainability
499 |
500 | ### Enhanced Database Support
501 | - Added SQLite support as an alternative to MongoDB
502 | - Redis integration for caching recently seen infohashes
503 | - Improved database abstraction layer for better performance
504 |
505 | ### Modern User Interface
506 | - Fully responsive design using Tailwind CSS
507 | - Real-time updates through optimized WebSocket implementation
508 | - Enhanced search functionality and statistics visualization
509 | - Improved typography with Google Fonts (Inter, Manrope)
510 |
511 | ### Performance & Stability
512 | - Optimized WebSocket communication with message batching
513 | - Enhanced DHT spider with improved error handling
514 | - Better connection failure recovery
515 | - Resource usage optimizations with memory limits
516 |
517 | ### Deployment Improvements
518 | - Environment configuration via `.env` files
519 | - Enhanced PM2 integration for production deployments
520 | - Comprehensive documentation
521 |
522 | ## Upgrading
523 |
524 | When upgrading from previous versions, please note:
525 | - Configuration has moved to `.env` files (see `.env.sample` for reference)
526 | - The database schema has been updated for better performance
527 | - Process management now uses the included `ecosystem.json` file
528 |
529 | ## Documentation
530 |
531 | See the [README.md](README.md) for complete setup and configuration instructions.
--------------------------------------------------------------------------------