├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── config.yml │ ├── documentation_request.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md ├── docker-hub-setup.md └── workflows │ ├── docker-build.yml │ └── docker-publish.yml ├── .gitignore ├── CLAUDE.md ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── Dockerfile.rootless ├── ISSUE_45_SOLUTION.md ├── LICENSE ├── README.md ├── REPO-DOCUMENTATION.md ├── SECURITY.md ├── TODO-LIST.md ├── check-frontend.py ├── db └── schema.sql ├── doc ├── CARDDAV-IMPLEMENTATION-PLAN.md ├── CARDDAV-TECHNICAL-SPEC.md ├── DATABASE-MIGRATIONS.md ├── DATABASE-TRANSACTIONS.md ├── DAV-CLIENT-SETUP.md ├── DAV-IMPLEMENTATION-PLAN.md ├── DAV-INTEGRATION.md ├── FILE-SYSTEM-SAFETY.md ├── LTO-OPTIMIZATIONS.md ├── OIDC-ARCHITECTURE.md ├── OIDC-CONFIG-EXAMPLES.md ├── OIDC-INTEGRATION.md ├── POSTGRESQL-BEST-PRACTICES.md ├── README-AUTH.md ├── SHARE-INTEGRATION.md ├── TRASH-FEATURE-SUMMARY.md ├── WEBDAV-INTEGRATION-GUIDE.md ├── WEBDAV-TECHNICAL-SPEC.md └── images │ └── Captura de pantalla 2025-03-23 230739.png ├── docker-compose.yml ├── identifier.sh ├── migrations ├── 20250408000000_initial_schema.sql ├── 20250408000001_default_users.sql ├── 20250413000000_caldav_schema.sql └── 20250415000000_carddav_schema.sql ├── rootless-compose.yml ├── scripts └── reset_admin.sql ├── src ├── application │ ├── adapters │ │ ├── caldav_adapter.rs │ │ ├── mod.rs │ │ └── webdav_adapter.rs │ ├── dtos │ │ ├── address_book_dto.rs │ │ ├── calendar_dto.rs │ │ ├── contact_dto.rs │ │ ├── favorites_dto.rs │ │ ├── file_dto.rs │ │ ├── folder_dto.rs │ │ ├── i18n_dto.rs │ │ ├── mod.rs │ │ ├── pagination.rs │ │ ├── recent_dto.rs │ │ ├── search_dto.rs │ │ ├── share_dto.rs │ │ ├── trash_dto.rs │ │ └── user_dto.rs │ ├── mod.rs │ ├── ports │ │ ├── auth_ports.rs │ │ ├── calendar_ports.rs │ │ ├── carddav_ports.rs │ │ ├── favorites_ports.rs │ │ ├── file_ports.rs │ │ ├── inbound.rs │ │ ├── mod.rs │ │ ├── outbound.rs │ │ 
├── recent_ports.rs │ │ ├── share_ports.rs │ │ ├── storage_ports.rs │ │ └── trash_ports.rs │ ├── services │ │ ├── auth_application_service.rs │ │ ├── batch_operations.rs │ │ ├── calendar_service.rs │ │ ├── contact_service.rs │ │ ├── favorites_service.rs │ │ ├── file_management_service.rs │ │ ├── file_retrieval_service.rs │ │ ├── file_service.rs │ │ ├── file_upload_service.rs │ │ ├── file_use_case_factory.rs │ │ ├── folder_service.rs │ │ ├── i18n_application_service.rs │ │ ├── mod.rs │ │ ├── recent_service.rs │ │ ├── search_service.rs │ │ ├── share_service.rs │ │ ├── storage_mediator.rs │ │ ├── storage_usage_service.rs │ │ ├── trash_service.rs │ │ └── trash_service_test.rs │ └── transactions │ │ ├── mod.rs │ │ └── storage_transaction.rs ├── bin │ └── migrate.rs ├── common │ ├── auth_factory.rs │ ├── cache.rs │ ├── config.rs │ ├── db.rs │ ├── di.rs │ ├── errors.rs │ └── mod.rs ├── domain │ ├── entities │ │ ├── calendar.rs │ │ ├── calendar_event.rs │ │ ├── contact.rs │ │ ├── file.rs │ │ ├── folder.rs │ │ ├── mod.rs │ │ ├── session.rs │ │ ├── share.rs │ │ ├── trashed_item.rs │ │ └── user.rs │ ├── mod.rs │ ├── repositories │ │ ├── address_book_repository.rs │ │ ├── calendar_event_repository.rs │ │ ├── calendar_repository.rs │ │ ├── contact_repository.rs │ │ ├── file_repository.rs │ │ ├── folder_repository.rs │ │ ├── mod.rs │ │ ├── session_repository.rs │ │ ├── share_repository.rs │ │ ├── trash_repository.rs │ │ └── user_repository.rs │ └── services │ │ ├── auth_service.rs │ │ ├── i18n_service.rs │ │ ├── mod.rs │ │ └── path_service.rs ├── infrastructure │ ├── mod.rs │ ├── repositories │ │ ├── file_fs_read_repository.rs │ │ ├── file_fs_repository.rs │ │ ├── file_fs_repository_trash.rs │ │ ├── file_fs_write_repository.rs │ │ ├── file_metadata_manager.rs │ │ ├── file_path_resolver.rs │ │ ├── folder_fs_repository.rs │ │ ├── folder_fs_repository_trash.rs │ │ ├── mod.rs │ │ ├── parallel_file_processor.rs │ │ ├── pg │ │ │ ├── address_book_pg_repository.rs │ │ │ ├── 
calendar_event_pg_repository.rs │ │ │ ├── calendar_pg_repository.rs │ │ │ ├── contact_group_pg_repository.rs │ │ │ ├── contact_pg_repository.rs │ │ │ ├── mod.rs │ │ │ ├── session_pg_repository.rs │ │ │ ├── transaction_utils.rs │ │ │ └── user_pg_repository.rs │ │ ├── share_fs_repository.rs │ │ └── trash_fs_repository.rs │ └── services │ │ ├── buffer_pool.rs │ │ ├── cache_manager.rs │ │ ├── compression_service.rs │ │ ├── file_metadata_cache.rs │ │ ├── file_system_i18n_service.rs │ │ ├── file_system_utils.rs │ │ ├── id_mapping_optimizer.rs │ │ ├── id_mapping_service.rs │ │ ├── mod.rs │ │ ├── trash_cleanup_service.rs │ │ └── zip_service.rs ├── interfaces │ ├── api │ │ ├── handlers │ │ │ ├── auth_handler.rs │ │ │ ├── batch_handler.rs │ │ │ ├── caldav_handler.rs │ │ │ ├── caldav_handler.rs.orig │ │ │ ├── caldav_handler.rs.rej │ │ │ ├── carddav_handler.rs │ │ │ ├── favorites_handler.rs │ │ │ ├── file_handler.rs │ │ │ ├── folder_handler.rs │ │ │ ├── i18n_handler.rs │ │ │ ├── mod.rs │ │ │ ├── recent_handler.rs │ │ │ ├── search_handler.rs │ │ │ ├── share_handler.rs │ │ │ ├── trash_handler.rs │ │ │ └── webdav_handler.rs │ │ ├── mod.rs │ │ └── routes.rs │ ├── middleware │ │ ├── auth.rs │ │ ├── cache.rs │ │ ├── mod.rs │ │ ├── redirect.rs │ │ └── test_cache.rs │ ├── mod.rs │ └── web │ │ └── mod.rs ├── lib.rs └── main.rs ├── static ├── css │ ├── auth.css │ ├── favorites.css │ ├── fileViewer.css │ ├── inlineViewer.css │ ├── recent.css │ └── style.css ├── favicon.ico ├── identifier.sh ├── index.html ├── js │ ├── app.js │ ├── auth.js │ ├── components │ │ └── sharedView.js │ ├── contextMenus.js │ ├── favorites.js │ ├── fileOperations.js │ ├── fileRenderer.js │ ├── fileSharing.js │ ├── fileViewer.js │ ├── i18n.js │ ├── inlineViewer.js │ ├── languageSelector.js │ ├── recent.js │ ├── search.js │ ├── shared.js │ └── ui.js ├── locales │ ├── en.json │ ├── es.json │ └── zh.json ├── login.html ├── logo │ └── logo-plain.svg ├── oxicloud-logo.svg ├── shared.html ├── sw.js └── test.html └── 
storage ├── .trash └── trash_index.json ├── file_ids.json └── folder_ids.json /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '[BUG] ' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Description 11 | 12 | A clear and concise description of what the bug is. 13 | 14 | ## To Reproduce 15 | 16 | Steps to reproduce the behavior: 17 | 1. Go to '...' 18 | 2. Click on '....' 19 | 3. Scroll down to '....' 20 | 4. See error 21 | 22 | ## Expected Behavior 23 | 24 | A clear and concise description of what you expected to happen. 25 | 26 | ## Screenshots 27 | 28 | If applicable, add screenshots to help explain your problem. 29 | 30 | ## Environment 31 | 32 | - OS: [e.g. Ubuntu 22.04, Windows 11] 33 | - Browser: [e.g. Chrome 120, Firefox 121] 34 | - OxiCloud Version: [e.g. 0.1.0] 35 | - Rust Version: [e.g. 1.70.0] 36 | 37 | ## Additional Context 38 | 39 | Add any other context about the problem here. 40 | 41 | ## Possible Solution 42 | 43 | If you have ideas on how to fix the issue, please describe them here. 
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: OxiCloud Documentation 4 | url: https://github.com/DioCrafts/oxicloud/wiki 5 | about: Please check the documentation before opening an issue -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/documentation_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Documentation request 3 | about: Request improvements or additions to documentation 4 | title: '[DOCS] ' 5 | labels: documentation 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## What documentation needs to be improved or added? 11 | 12 | Clearly describe which part of the documentation needs to be improved or what new documentation is needed. 13 | 14 | ## Why is this documentation important? 15 | 16 | Explain why this documentation would be valuable to users or contributors. 17 | 18 | ## Suggested content 19 | 20 | If you have ideas about what should be included in this documentation, please share them here. 21 | 22 | ## Additional context 23 | 24 | Add any other context, examples from other projects, or screenshots about the documentation request here. -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '[FEATURE] ' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Problem Statement 11 | 12 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 13 | 14 | ## Proposed Solution 15 | 16 | A clear and concise description of what you want to happen. 
17 | 18 | ## Alternative Solutions 19 | 20 | A clear and concise description of any alternative solutions or features you've considered. 21 | 22 | ## Additional Context 23 | 24 | Add any other context or screenshots about the feature request here. 25 | 26 | ## User Impact 27 | 28 | Describe how this feature would benefit users of OxiCloud. 29 | 30 | ## Implementation Ideas 31 | 32 | If you have ideas about how this might be implemented, please share them here. -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | 4 | 5 | ## Related Issue 6 | 7 | 8 | 9 | ## Type of Change 10 | 11 | Please check the option that best describes your change: 12 | 13 | - [ ] Bug fix (non-breaking change which fixes an issue) 14 | - [ ] New feature (non-breaking change which adds functionality) 15 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 16 | - [ ] This change requires a documentation update 17 | - [ ] Performance improvement 18 | - [ ] Code refactoring 19 | - [ ] Documentation update 20 | 21 | ## How Has This Been Tested? 
22 | 23 | 24 | 25 | ## Checklist: 26 | 27 | - [ ] My code follows the style guidelines of this project 28 | - [ ] I have performed a self-review of my own code 29 | - [ ] I have commented my code, particularly in hard-to-understand areas 30 | - [ ] I have made corresponding changes to the documentation 31 | - [ ] My changes generate no new warnings 32 | - [ ] I have added tests that prove my fix is effective or that my feature works 33 | - [ ] New and existing unit tests pass locally with my changes 34 | - [ ] Any dependent changes have been merged and published in downstream modules -------------------------------------------------------------------------------- /.github/docker-hub-setup.md: -------------------------------------------------------------------------------- 1 | # Configuración para Docker Hub y GitHub Actions 2 | 3 | Este documento explica cómo configurar los secretos necesarios para publicar imágenes de Docker en Docker Hub usando GitHub Actions. 4 | 5 | ## Requisitos previos 6 | 7 | 1. Una cuenta en [Docker Hub](https://hub.docker.com/) 8 | 2. Un repositorio en Docker Hub donde subir la imagen 9 | 3. Un token de acceso personal (PAT) de Docker Hub 10 | 11 | ## Pasos para configurar los secretos en GitHub 12 | 13 | 1. Genera un token de acceso en Docker Hub 14 | - Inicia sesión en [Docker Hub](https://hub.docker.com/) 15 | - Ve a tu perfil (esquina superior derecha) → Account Settings → Security 16 | - Haz clic en "New Access Token" 17 | - Proporciona una descripción como "GitHub Actions" 18 | - Selecciona los permisos apropiados (normalmente "Read, Write, Delete") 19 | - Haz clic en "Generate" 20 | - **IMPORTANTE**: Copia el token generado, ya que no podrás verlo de nuevo 21 | 22 | 2. 
Configura los secretos en tu repositorio de GitHub 23 | - Ve a tu repositorio en GitHub 24 | - Haz clic en "Settings" → "Secrets and variables" → "Actions" 25 | - Haz clic en "New repository secret" 26 | - Añade los siguientes secretos: 27 | - Nombre: `DOCKERHUB_USERNAME` 28 | Valor: Tu nombre de usuario de Docker Hub 29 | - Nombre: `DOCKERHUB_TOKEN` 30 | Valor: El token de acceso que generaste en el paso anterior 31 | 32 | ## Uso 33 | 34 | Una vez configurados los secretos, los flujos de trabajo de GitHub Actions podrán autenticarse con Docker Hub y publicar imágenes. 35 | 36 | Cuando crees una nueva [release en GitHub](https://docs.github.com/es/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release), el flujo de trabajo `docker-publish.yml` se activará automáticamente y: 37 | 38 | 1. Construirá la imagen de Docker 39 | 2. La etiquetará con el número de versión de la release 40 | 3. La subirá a Docker Hub 41 | 42 | ## Verificación 43 | 44 | Para verificar que la configuración está correcta: 45 | 46 | 1. Crea una nueva release en GitHub 47 | 2. Ve a la pestaña "Actions" y observa el progreso del flujo de trabajo 48 | 3. 
Una vez completado, verifica que la imagen aparezca en tu repositorio de Docker Hub 49 | 50 | ## Notas adicionales 51 | 52 | - Para entornos de producción, considera usar un usuario de servicio en Docker Hub en lugar de tu cuenta personal 53 | - Rota regularmente los tokens de acceso para mayor seguridad 54 | - Considera agregar escaneo de vulnerabilidades en las imágenes como parte del flujo de trabajo -------------------------------------------------------------------------------- /.github/workflows/docker-build.yml: -------------------------------------------------------------------------------- 1 | name: Docker Build and Test 2 | 3 | # Trigger on push to main branch or on pull requests 4 | on: 5 | push: 6 | branches: [ "main", "dev" ] 7 | pull_request: 8 | branches: [ "main", "dev" ] 9 | 10 | jobs: 11 | # Build and test Docker image but don't push 12 | build-and-test: 13 | name: Build and Test Docker Image 14 | runs-on: ubuntu-latest 15 | timeout-minutes: 120 # Aumentado a 2 horas para dar más tiempo a la compilación 16 | 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v4 20 | 21 | # Set up Docker Buildx 22 | - name: Set up Docker Buildx 23 | uses: docker/setup-buildx-action@v3 24 | 25 | # Extract metadata (tags, labels) for Docker 26 | - name: Extract metadata (tags, labels) for Docker 27 | id: meta 28 | uses: docker/metadata-action@v5 29 | with: 30 | images: test/oxicloud 31 | tags: | 32 | type=ref,event=branch 33 | type=ref,event=pr 34 | type=sha 35 | 36 | # Build Docker image but don't push 37 | - name: Build Docker image 38 | uses: docker/build-push-action@v5 39 | with: 40 | context: . 
41 | push: false 42 | load: true 43 | tags: test/oxicloud:test 44 | # Caché eliminado para evitar problemas de expiración de token 45 | 46 | # Run some basic tests against the built image 47 | - name: Test Docker image 48 | run: | 49 | docker run --rm test/oxicloud:test --version || true 50 | docker run --rm test/oxicloud:test --help || true 51 | 52 | # Verify the image structure 53 | echo "✅ Checking Docker image layers and size" 54 | docker image inspect test/oxicloud:test 55 | 56 | echo "✅ Docker build and test completed successfully" 57 | -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Docker Hub Release 2 | 3 | on: 4 | release: 5 | types: [published] 6 | workflow_dispatch: 7 | inputs: 8 | version: 9 | description: 'Versión del release' 10 | required: false 11 | default: '0.1.0-rc1' 12 | 13 | jobs: 14 | build-and-push: 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | arch: [amd64, arm64] 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v4 22 | 23 | - name: Set up Docker Buildx 24 | uses: docker/setup-buildx-action@v3 25 | 26 | - name: Login to DockerHub 27 | uses: docker/login-action@v3 28 | with: 29 | username: ${{ secrets.DOCKERHUB_USERNAME }} 30 | password: ${{ secrets.DOCKERHUB_TOKEN }} 31 | 32 | - name: Set version tag 33 | id: version 34 | run: | 35 | if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then 36 | echo "VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV 37 | else 38 | echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV 39 | fi 40 | 41 | - name: Build and Push Docker Image 42 | uses: docker/build-push-action@v5 43 | with: 44 | context: . 
45 | platforms: linux/${{ matrix.arch }} 46 | push: true 47 | tags: | 48 | diocrafts/oxicloud:${{ env.VERSION }} 49 | cache-from: type=gha 50 | cache-to: type=gha,mode=max 51 | build-args: | 52 | VERSION=${{ env.VERSION }} 53 | 54 | # Crear el manifiesto multi-arch una vez todas las imágenes estén listas 55 | manifest: 56 | needs: build-and-push 57 | runs-on: ubuntu-latest 58 | steps: 59 | - name: Set version tag 60 | run: | 61 | if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then 62 | echo "VERSION=${{ github.event.inputs.version }}" >> $GITHUB_ENV 63 | else 64 | echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV 65 | fi 66 | 67 | - name: Set up Docker Buildx 68 | uses: docker/setup-buildx-action@v3 69 | 70 | - name: Login to DockerHub 71 | uses: docker/login-action@v3 72 | with: 73 | username: ${{ secrets.DOCKERHUB_USERNAME }} 74 | password: ${{ secrets.DOCKERHUB_TOKEN }} 75 | 76 | - name: Create and Push Manifest 77 | run: | 78 | docker buildx imagetools create \ 79 | -t diocrafts/oxicloud:${{ env.VERSION }} \ 80 | diocrafts/oxicloud:${{ env.VERSION }}@linux/amd64 \ 81 | diocrafts/oxicloud:${{ env.VERSION }}@linux/arm64 82 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | /target/ 3 | **/target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | # Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | # MSVC Windows builds of rustc generate these, which store debugging information 13 | *.pdb 14 | 15 | # Rust debug symbols 16 | *.dSYM/ 17 | *.su 18 | *.idb 19 | 20 | # Build cache 21 | .cargo/ 22 | 23 | # Visual Studio Code directory 24 | .vscode/ 25 | 26 | # JetBrains IDEs 27 | .idea/ 28 | 29 | # MacOS specific 30 | 
.DS_Store 31 | .AppleDouble 32 | .LSOverride 33 | 34 | # Linux specific 35 | *~ 36 | .directory 37 | .Trash-* 38 | 39 | # Windows specific 40 | Thumbs.db 41 | ehthumbs.db 42 | Desktop.ini 43 | 44 | # Node.js (if used for frontend) 45 | node_modules/ 46 | npm-debug.log 47 | 48 | # Environment variables 49 | .env 50 | .env.local 51 | .env.development.local 52 | .env.test.local 53 | .env.production.local 54 | 55 | # Log files 56 | *.log 57 | logs/ 58 | 59 | # Temporary files 60 | *.tmp 61 | *.bak 62 | *.swp 63 | *.swo 64 | -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | # CLAUDE.md 2 | 3 | This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. 4 | 5 | ## Build/Lint/Test Commands 6 | 7 | ### Building 8 | - Build debug version: `cargo build` 9 | - Build release version: `cargo build --release` 10 | - Run the application: `cargo run` 11 | 12 | ### Testing 13 | - Run all tests: `cargo test` 14 | - Run a specific test: `cargo test test_name` 15 | - Run tests for a specific module: `cargo test module_name` 16 | - Run tests with feature flags: `cargo test --features test_utils` 17 | 18 | ### Linting 19 | - Run clippy linting: `cargo clippy` 20 | - Format code: `cargo fmt` 21 | 22 | ## Code Style Guidelines 23 | 24 | ### Architecture 25 | - This project follows a hexagonal/clean architecture pattern: 26 | - `application`: Contains services (use cases), DTOs, and ports 27 | - `domain`: Contains core entities, repositories (interfaces), and domain services 28 | - `infrastructure`: Contains concrete implementations of repository interfaces 29 | - `interfaces`: Contains HTTP/API handlers and routes 30 | 31 | ### Error Handling 32 | - Use the `DomainError` type for domain-level errors 33 | - Use the `AppError` type for API/HTTP-level errors 34 | - Use the `ErrorContext` trait to add context to errors from 
external crates 35 | - Follow the error factory pattern for creating common error types 36 | 37 | ### Naming Conventions 38 | - Types and structs: PascalCase 39 | - Functions and methods: snake_case 40 | - Constants and statics: SCREAMING_SNAKE_CASE 41 | - Modules and files: snake_case 42 | - Use descriptive names that express intent 43 | 44 | ### Testing 45 | - Use mock objects for dependencies in unit tests 46 | - Use the `#[tokio::test]` attribute for async tests 47 | - Include both positive and negative test cases 48 | - Follow the Arrange-Act-Assert pattern in tests 49 | 50 | ### Imports 51 | - Group imports by source: 52 | 1. Standard library imports 53 | 2. External crate imports 54 | 3. Local crate imports (with `crate::` prefix) 55 | - Use explicit imports (no glob imports except in tests) 56 | 57 | ### Documentation 58 | - Document public API functions and types with doc comments 59 | - Include examples where helpful 60 | - Document error cases and conditions -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These owners will be the default owners for everything in the repo. 2 | # Unless a later match takes precedence, they will be requested for 3 | # review when someone opens a pull request. 
4 | * @DioCrafts 5 | 6 | # Specific file and directory ownership 7 | /src/domain/ @DioCrafts 8 | /src/application/ @DioCrafts 9 | /src/infrastructure/ @DioCrafts 10 | /src/interfaces/ @DioCrafts 11 | /static/ @DioCrafts 12 | /db/ @DioCrafts 13 | *.md @DioCrafts -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any 
instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4, available at [https://www.contributor-covenant.org/version/1/4/code-of-conduct.html](https://www.contributor-covenant.org/version/1/4/code-of-conduct.html) -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to OxiCloud 2 | 3 | Thank you for your interest in contributing to OxiCloud! This document provides guidelines and instructions for contributing to the project. 4 | 5 | ## Code of Conduct 6 | 7 | By participating in this project, you agree to maintain a respectful and inclusive environment for everyone. Please be considerate of other contributors and users. 8 | 9 | ## How to Contribute 10 | 11 | There are many ways to contribute to OxiCloud: 12 | 13 | 1. Reporting bugs 14 | 2. Suggesting enhancements 15 | 3. Writing documentation 16 | 4. Improving code 17 | 5. Adding translations 18 | 19 | ### Reporting Bugs 20 | 21 | If you find a bug, please create an issue with the following information: 22 | 23 | - A clear, descriptive title 24 | - Steps to reproduce the issue 25 | - Expected behavior 26 | - Actual behavior 27 | - Screenshots if applicable 28 | - System information (OS, browser, etc.) 29 | 30 | ### Suggesting Enhancements 31 | 32 | Enhancement suggestions are welcome! Please include: 33 | 34 | - A clear and detailed explanation of the feature 35 | - Why this feature would be useful to most OxiCloud users 36 | - Possible implementation approaches if you have ideas 37 | 38 | ### Pull Request Process 39 | 40 | 1. Fork the repository 41 | 2. Create your feature branch (`git checkout -b feature/something-useful`) 42 | 3. Make your changes 43 | 4. 
Run the tests and linters to ensure your code meets our quality standards: 44 | ```bash 45 | cargo test 46 | cargo clippy 47 | cargo fmt --check 48 | ``` 49 | 5. Commit your changes with a descriptive message 50 | 6. Push to your branch 51 | 7. Create a Pull Request to the `dev` branch 52 | 53 | ### Development Guidelines 54 | 55 | Please follow the development guidelines in the [CLAUDE.md](CLAUDE.md) file, which include: 56 | 57 | - Code style guidelines 58 | - Architecture principles 59 | - Testing requirements 60 | - Documentation standards 61 | 62 | ## Development Environment Setup 63 | 64 | Follow the setup instructions in the [README.md](README.md) to set up your development environment. 65 | 66 | ## Getting Help 67 | 68 | If you need help with the contribution process or have questions, feel free to: 69 | 70 | - Open an issue with your question 71 | - Reach out to the maintainers 72 | 73 | Thank you for contributing to OxiCloud! -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "oxicloud" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | 7 | [dependencies] 8 | axum = { version = "0.8.3", features = ["multipart", "http1", "tokio", "macros"] } 9 | tokio = { version = "1.44.2", features = ["full"] } 10 | tokio-util = { version = "0.7.14", features = ["io", "codec"] } 11 | tokio-stream = { version = "0.1.17", features = ["fs"] } 12 | bytes = "1.10.1" 13 | tempfile = "3.19.1" 14 | tower = "0.5.2" 15 | tower-http = { version = "0.6.2", features = ["fs", "compression-gzip", "trace", "cors", "add-extension", "request-id"] } 16 | flate2 = "1.1.1" 17 | zip = "2.6.1" 18 | tracing = "0.1.41" 19 | tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } 20 | chrono = { version = "0.4.40", features = ["serde"] } 21 | http-body = "1.0.1" 22 | serde = { version = "1.0.219", features = ["derive"] } 23 | 
serde_json = "1.0.140" 24 | futures = "0.3.31" 25 | async-stream = "0.3.6" 26 | mime_guess = "2.0.5" 27 | uuid = { version = "1.16.0", features = ["v4", "serde"] } 28 | async-trait = "0.1.88" 29 | thiserror = "2.0.12" 30 | reqwest = { version = "0.12.15", features = ["json", "multipart"] } 31 | mockall = { version = "0.13.1", optional = true } 32 | rand = "0.9.0" 33 | pin-project-lite = "0.2.16" 34 | sqlx = { version = "0.8.3", features = ["postgres", "runtime-tokio", "tls-rustls", "chrono", "uuid", "json"] } 35 | anyhow = "1.0.97" 36 | jsonwebtoken = "9.3.1" 37 | argon2 = "0.5.3" 38 | rand_core = { version = "0.6.4", features = ["std"] } 39 | time = "0.3.41" 40 | axum-server = "0.7.2" 41 | hyper = { version = "1.6.0", features = ["full"] } 42 | url = "2.5.4" 43 | quick-xml = "0.37.4" 44 | http-body-util = "0.1.3" 45 | openssl = { version = "0.10.72", features = ["vendored"] } 46 | icalendar = "0.16.13" 47 | 48 | [features] 49 | default = [] 50 | test_utils = ["mockall"] 51 | migrations = ["sqlx/migrate"] 52 | 53 | [[bin]] 54 | name = "migrate" 55 | path = "src/bin/migrate.rs" 56 | required-features = ["migrations"] 57 | 58 | [profile.release] 59 | lto = "fat" 60 | codegen-units = 1 61 | opt-level = 3 62 | panic = "abort" 63 | strip = true 64 | 65 | [profile.dev] 66 | opt-level = 1 67 | debug = true 68 | 69 | [profile.bench] 70 | lto = "fat" 71 | codegen-units = 1 72 | opt-level = 3 73 | 74 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Stage 1: Cache dependencies 2 | FROM rust:1.85-alpine AS cacher 3 | WORKDIR /app 4 | RUN apk --no-cache upgrade && \ 5 | apk add --no-cache musl-dev pkgconfig postgresql-dev gcc perl make 6 | COPY Cargo.toml Cargo.lock ./ 7 | # Create a minimal project to download and cache dependencies 8 | RUN mkdir -p src && \ 9 | echo 'fn main() { println!("Dummy build for caching dependencies"); }' > src/main.rs 
&& \ 10 | cargo build --release && \ 11 | rm -rf src 12 | # Stage 2: Build the application 13 | FROM rust:1.85-alpine AS builder 14 | WORKDIR /app 15 | RUN apk --no-cache upgrade && \ 16 | apk add --no-cache musl-dev pkgconfig postgresql-dev gcc perl make 17 | # Copy cached dependencies 18 | COPY --from=cacher /app/target target 19 | COPY --from=cacher /usr/local/cargo /usr/local/cargo 20 | # Copy ALL files needed for compilation, including static files 21 | COPY src src 22 | COPY static static 23 | COPY db db 24 | COPY Cargo.toml Cargo.lock ./ 25 | # Build with all optimizations 26 | ENV DATABASE_URL="postgres://postgres:postgres@postgres/oxicloud" 27 | RUN cargo build --release 28 | 29 | # Stage 3: Create minimal final image 30 | FROM alpine:3.21.3 31 | # Install only necessary runtime dependencies and update packages 32 | RUN apk --no-cache upgrade && \ 33 | apk add --no-cache libgcc ca-certificates libpq tzdata 34 | 35 | # Copy only the compiled binary 36 | COPY --from=builder /app/target/release/oxicloud /usr/local/bin/ 37 | 38 | # Copy static files and other resources needed at runtime 39 | COPY static /app/static 40 | COPY db /app/db 41 | 42 | # Create storage directory with proper permissions 43 | RUN mkdir -p /app/storage && chmod 777 /app/storage 44 | 45 | # Set proper permissions 46 | RUN chmod +x /usr/local/bin/oxicloud 47 | 48 | # Set working directory 49 | WORKDIR /app 50 | 51 | # Run the application 52 | CMD ["oxicloud"] 53 | -------------------------------------------------------------------------------- /Dockerfile.rootless: -------------------------------------------------------------------------------- 1 | # Stage 1: Builder – compile the application 2 | FROM rust:1.82-alpine AS builder 3 | 4 | # Install build dependencies 5 | RUN apk add --no-cache musl-dev pkgconfig openssl-dev postgresql-dev 6 | 7 | # Create a non-root user with UID 10001 (we use the same UID across stages) 8 | RUN adduser -D -u 10001 oxicloud 9 
| 10 | WORKDIR /app 11 | 12 | # Copy dependency files first to leverage Docker cache for dependency compilation 13 | COPY Cargo.toml Cargo.lock ./ 14 | 15 | # Prepare dummy source to build dependencies (improves caching) 16 | RUN mkdir -p src && \ 17 | echo "fn main() {}" > src/main.rs && \ 18 | touch src/lib.rs && \ 19 | cargo build --release && \ 20 | rm -rf src 21 | 22 | # Copy the actual source code and additional files 23 | COPY src ./src 24 | COPY static ./static 25 | COPY db ./db 26 | 27 | # Build the actual application and strip debug symbols for a smaller binary 28 | RUN cargo build --release && \ 29 | strip target/release/oxicloud 30 | 31 | # Stage 2: Runtime – only include what is necessary to run the app 32 | FROM alpine:3.21.3 33 | 34 | # Install runtime dependencies and clean up cache 35 | RUN apk add --no-cache libgcc openssl ca-certificates tzdata && \ 36 | rm -rf /var/cache/apk/* 37 | 38 | # Create a non-root user with the same UID (10001) for consistent file ownership 39 | RUN adduser -D -u 10001 oxicloud 40 | 41 | # Create application directories, assign proper permissions 42 | WORKDIR /app 43 | RUN mkdir -p /app/static /app/storage /app/db && \ 44 | chown -R oxicloud:oxicloud /app 45 | 46 | # Copy the built binary from the builder stage and additional runtime files 47 | COPY --from=builder /app/target/release/oxicloud /app/oxicloud 48 | COPY --from=builder /app/static /app/static 49 | COPY --from=builder /app/db /app/db 50 | 51 | # Ensure all files are owned by the non-root user 52 | RUN chown -R oxicloud:oxicloud /app 53 | 54 | # Set the non-root user for running the application 55 | USER oxicloud 56 | 57 | # Expose the port the application listens on 58 | EXPOSE 8086 8085 59 | 60 | # Run the binary in release mode 61 | CMD ["./oxicloud", "--release"] 62 | -------------------------------------------------------------------------------- /ISSUE_45_SOLUTION.md: -------------------------------------------------------------------------------- 1 | # 
Solución para el error "El usuario 'admin' ya existe" 2 | 3 | ## Problema 4 | 5 | Cuando se intenta crear un usuario administrador en una instalación nueva de OxiCloud, aparece el siguiente error: 6 | 7 | ``` 8 | oxicloud-1 | 2025-04-12T10:47:26.643669Z ERROR oxicloud::interfaces::api::handlers::auth_handler: Registration failed for user admin: Already Exists: El usuario 'admin' ya existe 9 | ``` 10 | 11 | Este error ocurre porque las migraciones de la base de datos ya crean un usuario administrador por defecto como parte del proceso de inicialización. 12 | 13 | ## Solución implementada 14 | 15 | Hemos mejorado el sistema para que maneje mejor el registro de usuarios administradores: 16 | 17 | 1. **En una instalación nueva**: 18 | - Si registras cualquier usuario como administrador (sea cual sea su nombre), el sistema detectará que es una instalación nueva y eliminará automáticamente el usuario admin predeterminado. 19 | - Esto te permite crear tu propio usuario administrador con el nombre que prefieras desde el principio. 20 | 21 | 2. **En un sistema en uso**: 22 | - No se permite crear nuevos usuarios administradores desde la página de registro una vez que ya existe un administrador en el sistema 23 | - Esto previene la creación no autorizada de usuarios con permisos elevados 24 | - El sistema tampoco permite tener múltiples usuarios con el mismo nombre (incluido "admin") 25 | 26 | ### Detalles técnicos 27 | 28 | La solución implementa: 29 | 30 | 1. Detección inteligente de instalaciones nuevas basada en: 31 | - Verificación del número total de usuarios en el sistema 32 | - Verificación del número de usuarios administradores 33 | 34 | 2. Reconocimiento de usuarios administradores: 35 | - Un usuario es administrador si su nombre es "admin" 36 | - Un usuario es administrador si se proporciona un rol "admin" explícitamente 37 | - Los administradores reciben automáticamente una cuota de 100GB 38 | 39 | 3. 
Eliminación segura del usuario admin predeterminado: 40 | - Se detecta al inicio del registro si es una instalación nueva 41 | - Se elimina el admin predeterminado antes de continuar con el registro 42 | 43 | 4. Prevención de creación de múltiples administradores: 44 | - Una vez que existe un usuario administrador en el sistema, no se permite crear más administradores desde la página de registro 45 | - Solo se puede crear un administrador desde la página de registro durante la instalación inicial 46 | - Esto protege el sistema contra la creación no autorizada de usuarios con privilegios elevados 47 | 48 | ## Cómo usar esta funcionalidad 49 | 50 | ### En una instalación nueva: 51 | 52 | 1. Inicia OxiCloud por primera vez (las migraciones crearán automáticamente un usuario admin predeterminado) 53 | 2. Ve a la pantalla de registro y crea un usuario con: 54 | - **Nombre de usuario**: Cualquier nombre que prefieras (por ejemplo, "torrefacto") 55 | - **Contraseña**: La que tú quieras 56 | - **Email**: Tu correo electrónico 57 | 3. El sistema detectará automáticamente que se trata de una instalación nueva 58 | 4. Si es un usuario administrador (porque el nombre es "admin" o porque explícitamente quieres que sea admin), el sistema eliminará el admin predeterminado antes de continuar 59 | 5. 
Tu nuevo usuario se creará y podrás iniciar sesión con él 60 | 61 | ### Si necesitas restablecer el usuario administrador: 62 | 63 | Si ya tienes un sistema en uso y necesitas restablecer el usuario administrador: 64 | 65 | #### Opción 1: Usar el script proporcionado 66 | ```bash 67 | cat scripts/reset_admin.sql | docker exec -i oxicloud-postgres-1 psql -U postgres -d oxicloud 68 | ``` 69 | 70 | #### Opción 2: Hacerlo manualmente 71 | ```bash 72 | docker exec -it oxicloud-postgres-1 psql -U postgres -d oxicloud 73 | ``` 74 | 75 | ```sql 76 | SET search_path TO auth; 77 | DELETE FROM auth.users WHERE username = 'admin'; 78 | SELECT username, email, role FROM auth.users; 79 | ``` 80 | 81 | Luego registra un nuevo usuario admin a través de la interfaz web. 82 | 83 | ## Nota técnica 84 | 85 | El usuario administrador predeterminado se crea durante las migraciones con estos valores: 86 | 87 | ```sql 88 | INSERT INTO auth.users ( 89 | id, 90 | username, 91 | email, 92 | password_hash, 93 | role, 94 | storage_quota_bytes 95 | ) VALUES ( 96 | '00000000-0000-0000-0000-000000000000', 97 | 'admin', 98 | 'admin@oxicloud.local', 99 | '$argon2id$v=19$m=65536,t=3,p=4$c2FsdHNhbHRzYWx0c2FsdA$H3VxE8LL2qPT31DM3loTg6D+O4MSc2sD7GjlQ5h7Jkw', -- Admin123! 
100 | 'admin', 101 | 107374182400 -- 100GB for admin 102 | ); 103 | ``` 104 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 OxiCloud Contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | The following versions of OxiCloud are currently supported with security updates: 6 | 7 | | Version | Supported | 8 | | ------- | ------------------ | 9 | | Latest | :white_check_mark: | 10 | 11 | ## Reporting a Vulnerability 12 | 13 | The OxiCloud team takes security issues seriously. 
We appreciate your efforts to responsibly disclose your findings and will make every effort to acknowledge your contributions. 14 | 15 | To report a security vulnerability, please follow these steps: 16 | 17 | 1. **DO NOT** disclose the vulnerability publicly (e.g., in GitHub issues) 18 | 2. Email details of the vulnerability to the project maintainers 19 | 3. Include as much information as possible, such as: 20 | - A clear description of the vulnerability 21 | - Steps to reproduce the issue 22 | - Potential impact 23 | - Suggested fixes if available 24 | 25 | ## What to Expect 26 | 27 | After submitting a vulnerability report, you can expect the following: 28 | 29 | 1. **Acknowledgment**: The team will acknowledge receipt of your report within 3 business days 30 | 2. **Assessment**: We'll evaluate the vulnerability and determine its impact 31 | 3. **Plan**: We'll develop a plan to address the vulnerability 32 | 4. **Fix & Release**: Once fixed, we'll release an update 33 | 5. **Recognition**: With your permission, we'll acknowledge your contribution in the release notes 34 | 35 | ## Security Best Practices for OxiCloud Users 36 | 37 | - Keep your OxiCloud installation updated to the latest version 38 | - Use strong, unique passwords for all user accounts 39 | - Configure proper file permissions 40 | - Regularly back up your data 41 | - Consider running OxiCloud behind a reverse proxy with HTTPS 42 | - Implement IP restrictions where appropriate 43 | 44 | Thank you for helping keep OxiCloud and its users secure! 
-------------------------------------------------------------------------------- /check-frontend.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import requests 3 | import json 4 | 5 | SERVER_URL = "http://localhost:8086" 6 | 7 | print("Checking which files are visible from the frontend...") 8 | 9 | def make_request(url, method="GET", params=None): 10 | print(f"\n[{method}] {url}") 11 | 12 | try: 13 | if method == "GET": 14 | response = requests.get(url, params=params) 15 | else: 16 | return None 17 | 18 | if response.status_code == 200: 19 | if response.headers.get('Content-Type', '').startswith('application/json'): 20 | return response.json() 21 | else: 22 | return response.text[:100] + "..." 23 | else: 24 | return f"Error: {response.status_code} - {response.text}" 25 | except Exception as e: 26 | return f"Exception: {str(e)}" 27 | 28 | # List files at root 29 | print("Files at root level:") 30 | root_files = make_request(f"{SERVER_URL}/api/files") 31 | if isinstance(root_files, list): 32 | for file in root_files: 33 | print(f"- {file.get('name')} (ID: {file.get('id')})") 34 | else: 35 | print(f"Error: {root_files}") 36 | 37 | # List files in folder-storage:1 38 | print("\nFiles in folder-storage:1:") 39 | folder_files = make_request(f"{SERVER_URL}/api/files", params={"folder_id": "folder-storage:1"}) 40 | if isinstance(folder_files, list): 41 | for file in folder_files: 42 | print(f"- {file.get('name')} (ID: {file.get('id')})") 43 | else: 44 | print(f"Error: {folder_files}") 45 | 46 | # Check file_ids.json and folder_ids.json 47 | print("\nContents of file_ids.json:") 48 | try: 49 | with open("./storage/file_ids.json", "r") as f: 50 | file_ids = json.load(f) 51 | print(json.dumps(file_ids, indent=2)) 52 | except Exception as e: 53 | print(f"Error reading file_ids.json: {e}") 54 | 55 | print("\nContents of folder_ids.json:") 56 | try: 57 | with open("./storage/folder_ids.json", "r") as f: 58 | 
folder_ids = json.load(f) 59 | print(json.dumps(folder_ids, indent=2)) 60 | except Exception as e: 61 | print(f"Error reading folder_ids.json: {e}") -------------------------------------------------------------------------------- /doc/LTO-OPTIMIZATIONS.md: -------------------------------------------------------------------------------- 1 | # Link Time Optimization (LTO) in OxiCloud 2 | 3 | ## Overview 4 | 5 | OxiCloud uses Link Time Optimization (LTO) to significantly improve runtime performance. LTO is a technique that allows the compiler to perform optimizations across module boundaries during the linking phase, which can lead to better inlining, dead code elimination, and overall more efficient binaries. 6 | 7 | ## Implemented Optimizations 8 | 9 | This project uses the following optimization settings: 10 | 11 | ### Release Profile 12 | ```toml 13 | [profile.release] 14 | lto = "fat" # Full cross-module optimization 15 | codegen-units = 1 # Maximum optimization but slower compile time 16 | opt-level = 3 # Maximum optimization level 17 | panic = "abort" # Smaller binary size by removing panic unwinding 18 | strip = true # Removes debug symbols for smaller binary 19 | ``` 20 | 21 | ### Development Profile 22 | ```toml 23 | [profile.dev] 24 | opt-level = 1 # Light optimization for faster build time 25 | debug = true # Keep debug information for development 26 | ``` 27 | 28 | ### Benchmark Profile 29 | ```toml 30 | [profile.bench] 31 | lto = "fat" # Full optimization for benchmarks 32 | codegen-units = 1 # Maximum optimization 33 | opt-level = 3 # Maximum optimization level 34 | ``` 35 | 36 | ## Performance Improvements 37 | 38 | The optimizations typically result in: 39 | 40 | 1. **Smaller binary size**: Removing unused code and metadata 41 | 2. **Faster execution**: Better inlining and code optimizations 42 | 3. 
**Reduced memory usage**: More efficient code layout and execution 43 | 44 | ## LTO Options Explained 45 | 46 | - **fat**: Also known as "full" LTO, performs optimizations across all crate boundaries. Maximum optimization but longest compile time. 47 | - **thin**: A faster version of LTO that trades some optimization for compile speed. Good for development. 48 | - **off**: No cross-module optimization. 49 | 50 | ## Build Time Impact 51 | 52 | While LTO provides runtime performance benefits, it increases compilation time. For OxiCloud, we chose: 53 | 54 | - Development builds: Minimal LTO (`opt-level = 1`) for faster iteration 55 | - Release builds: Full LTO for maximum end-user performance 56 | - Benchmark builds: Full LTO to measure actual optimized performance 57 | 58 | ## Measuring the Impact 59 | 60 | To measure the impact of these optimizations, run our benchmarks: 61 | 62 | ```bash 63 | # Run benchmarks with all optimizations 64 | cargo bench 65 | 66 | # Compare with non-optimized build (remove for comparison only) 67 | RUSTFLAGS="-C lto=off" cargo bench 68 | ``` 69 | 70 | ## When to Adjust Settings 71 | 72 | Consider adjusting these settings if: 73 | 74 | 1. You need faster compile times during development 75 | 2. You're experiencing unexpected runtime behavior 76 | 3. You want to experiment with optimization/binary size tradeoffs 77 | 78 | For most users, the default settings should provide a good balance of performance and usability. -------------------------------------------------------------------------------- /doc/README-AUTH.md: -------------------------------------------------------------------------------- 1 | # OxiCloud Authentication System 2 | 3 | This document describes the authentication system for OxiCloud, a file storage system built with Rust and PostgreSQL. 
4 | 5 | ## Overview 6 | 7 | OxiCloud uses a standard JWT (JSON Web Token) authentication system with the following features: 8 | 9 | - User registration and login 10 | - Role-based access control (Admin/User) 11 | - JWT token with refresh capabilities 12 | - Secure password hashing with Argon2id 13 | - User storage quotas 14 | - File and folder ownership 15 | 16 | ## API Endpoints 17 | 18 | The authentication API is available at the `/api/auth` endpoint: 19 | 20 | - **POST /api/auth/register** - Register a new user 21 | - **POST /api/auth/login** - Login and get tokens 22 | - **POST /api/auth/refresh** - Refresh access token 23 | - **GET /api/auth/me** - Get current user information 24 | - **PUT /api/auth/change-password** - Change user password 25 | - **POST /api/auth/logout** - Logout and invalidate refresh token 26 | 27 | ## Request/Response Examples 28 | 29 | ### Register 30 | 31 | **Request:** 32 | ```json 33 | POST /api/auth/register 34 | { 35 | "username": "testuser", 36 | "email": "test@example.com", 37 | "password": "SecurePassword123" 38 | } 39 | ``` 40 | 41 | **Response:** 42 | ```json 43 | 201 Created 44 | { 45 | "userId": "d290f1ee-6c54-4b01-90e6-d701748f0851", 46 | "username": "testuser", 47 | "email": "test@example.com" 48 | } 49 | ``` 50 | 51 | ### Login 52 | 53 | **Request:** 54 | ```json 55 | POST /api/auth/login 56 | { 57 | "username": "testuser", 58 | "password": "SecurePassword123" 59 | } 60 | ``` 61 | 62 | **Response:** 63 | ```json 64 | 200 OK 65 | { 66 | "accessToken": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", 67 | "refreshToken": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", 68 | "expiresIn": 3600 69 | } 70 | ``` 71 | 72 | ### Refresh Token 73 | 74 | **Request:** 75 | ```json 76 | POST /api/auth/refresh 77 | { 78 | "refreshToken": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." 
79 | } 80 | ``` 81 | 82 | **Response:** 83 | ```json 84 | 200 OK 85 | { 86 | "accessToken": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", 87 | "refreshToken": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", 88 | "expiresIn": 3600 89 | } 90 | ``` 91 | 92 | ### Get Current User 93 | 94 | **Request:** 95 | ``` 96 | GET /api/auth/me 97 | Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... 98 | ``` 99 | 100 | **Response:** 101 | ```json 102 | 200 OK 103 | { 104 | "id": "d290f1ee-6c54-4b01-90e6-d701748f0851", 105 | "username": "testuser", 106 | "email": "test@example.com", 107 | "role": "user", 108 | "storageQuota": 10737418240, 109 | "storageUsed": 1048576, 110 | "createdAt": "2023-01-01T12:00:00Z" 111 | } 112 | ``` 113 | 114 | ### Change Password 115 | 116 | **Request:** 117 | ```json 118 | PUT /api/auth/change-password 119 | Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... 120 | { 121 | "oldPassword": "SecurePassword123", 122 | "newPassword": "NewSecurePassword456" 123 | } 124 | ``` 125 | 126 | **Response:** 127 | ``` 128 | 200 OK 129 | ``` 130 | 131 | ### Logout 132 | 133 | **Request:** 134 | ``` 135 | POST /api/auth/logout 136 | Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... 137 | ``` 138 | 139 | **Response:** 140 | ``` 141 | 200 OK 142 | ``` 143 | 144 | ## Testing the Authentication System 145 | 146 | 1. Start PostgreSQL and create the database: 147 | ```bash 148 | createdb oxicloud 149 | psql -d oxicloud -f db/schema.sql 150 | ``` 151 | 152 | 2. Set environment variables for authentication: 153 | ```bash 154 | source test-auth-env.sh 155 | ``` 156 | 157 | 3. Start the OxiCloud server: 158 | ```bash 159 | cargo run 160 | ``` 161 | 162 | 4. 
Run the authentication test script: 163 | ```bash 164 | ./test-auth-api.sh 165 | ``` 166 | 167 | ## Database Schema 168 | 169 | The authentication system uses the following tables: 170 | 171 | - `users` - Store user information 172 | - `sessions` - Store refresh token sessions 173 | - `file_ownership` - Track file ownership 174 | - `folder_ownership` - Track folder ownership 175 | 176 | ## Implementation Details 177 | 178 | - **Password Hashing**: Argon2id with memory cost of 65536 (64MB), time cost of 3, and 4 parallelism 179 | - **JWT Secret**: Configured via environment variable `OXICLOUD_JWT_SECRET` 180 | - **Token Expiry**: Access token expires in 1 hour, refresh token in 30 days (configurable) 181 | - **Database Connection**: PostgreSQL with connection pooling 182 | - **Middleware**: Auth middleware for protected routes 183 | 184 | ## Security Considerations 185 | 186 | - Passwords are never stored in plain text, only as Argon2id hashes 187 | - JWT tokens are signed with a secret key 188 | - Refresh tokens can be revoked to force logout 189 | - Rate limiting should be implemented for login attempts 190 | - Password policy requires at least 8 characters 191 | - Regular security audits recommended 192 | 193 | ## Future Improvements 194 | 195 | - Email verification for new registrations 196 | - Password reset functionality 197 | - Enhanced password policy 198 | - Two-factor authentication 199 | - OAuth integration for social logins 200 | - Session management UI -------------------------------------------------------------------------------- /doc/TRASH-FEATURE-SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Trash Feature Implementation Summary 2 | 3 | This document summarizes the implementation of the trash/recycle bin feature in OxiCloud. 4 | 5 | ## Architecture Overview 6 | 7 | The trash feature is implemented following the hexagonal architecture (clean architecture) principles of OxiCloud: 8 | 9 | 1. 
**Domain Layer** (`/src/domain/`): 10 | - Entities: `TrashedItem` representing files and folders in the trash bin 11 | - Repository interfaces: `TrashRepository` defining operations for trash management 12 | 13 | 2. **Application Layer** (`/src/application/`): 14 | - DTOs: `TrashedItemDto` for data transfer between layers 15 | - Ports: `TrashUseCase` defining the operations available to clients 16 | - Services: `TrashService` implementing the trash use cases 17 | 18 | 3. **Infrastructure Layer** (`/src/infrastructure/`): 19 | - Repositories: `TrashFsRepository` for filesystem-based trash storage 20 | - Extensions to existing repositories: `FileRepositoryTrash` and `FolderRepositoryTrash` 21 | - Services: `TrashCleanupService` for automatic cleanup of expired trash items 22 | 23 | 4. **Interface Layer** (`/src/interfaces/`): 24 | - API handlers: `trash_handler.rs` providing HTTP endpoints for trash operations 25 | - Routes: Updated `routes.rs` to include trash-related endpoints 26 | 27 | ## Key Features 28 | 29 | 1. **Soft Deletion**: Moving files and folders to trash instead of immediate permanent deletion 30 | 2. **Per-User Trash**: Each user has their own isolated trash bin 31 | 3. **Retention Policy**: Items are automatically deleted after a configurable time period 32 | 4. **Restoration**: Items can be restored to their original location 33 | 5. **Permanent Deletion**: Items can be permanently deleted before the retention period expires 34 | 6. 
**Empty Trash**: All items in the trash can be permanently deleted at once 35 | 36 | ## API Endpoints 37 | 38 | The trash feature exposes the following REST API endpoints: 39 | 40 | - `GET /api/trash`: List all items in the user's trash bin 41 | - `DELETE /api/files/trash/:file_id`: Move a file to trash 42 | - `DELETE /api/folders/trash/:folder_id`: Move a folder to trash 43 | - `POST /api/trash/:trash_id/restore`: Restore an item from trash to its original location 44 | - `DELETE /api/trash/:trash_id`: Permanently delete an item from trash 45 | - `DELETE /api/trash/empty`: Empty the entire trash bin 46 | 47 | ## Testing 48 | 49 | The trash feature includes comprehensive testing: 50 | 51 | 1. **Unit Tests**: Testing the `TrashService` application service 52 | - Test moving files and folders to trash 53 | - Test restoring items from trash 54 | - Test permanent deletion 55 | - Test empty trash operation 56 | 57 | 2. **Integration Tests**: Python script to test the API endpoints 58 | - End-to-end testing of all trash operations 59 | - Verification of proper behavior for moving, listing, restoring, and deleting 60 | 61 | 3. **Shell Script**: For manual testing and demonstration 62 | - Individual tests for each operation 63 | - Visual feedback of successful operations 64 | 65 | ## Configuration 66 | 67 | The trash feature can be configured via environment variables: 68 | 69 | - `TRASH_ENABLED`: Enable/disable the trash feature (default: true) 70 | - `TRASH_RETENTION_DAYS`: Number of days to keep items in trash before automatic deletion (default: 30) 71 | 72 | ## Implementation Details 73 | 74 | 1. **Physical File Storage**: When items are moved to trash, they are physically moved to a `.trash` directory 75 | 2. **Metadata Storage**: Information about trashed items is stored in a separate database table or file 76 | 3. **User Isolation**: Trash items are isolated by user ID to prevent access to other users' trash 77 | 4. 
**Automatic Cleanup**: A background job runs periodically to clean up expired trash items 78 | 5. **Transaction Safety**: Operations are designed to be atomic and safe, with proper error handling 79 | 80 | ## Future Enhancements 81 | 82 | Potential improvements for the trash feature: 83 | 84 | 1. **Trash Quotas**: Limit the amount of storage a user can use for trash 85 | 2. **Batch Operations**: Add support for trashing, restoring, or deleting multiple items at once 86 | 3. **Storage Optimization**: Implement deduplication for trashed items to save storage space 87 | 4. **Version Control**: Keep track of file versions when moving to trash 88 | 5. **Scheduled Cleanup**: Allow users to configure custom retention periods 89 | 6. **Trash Monitoring**: Add metrics and alerts for trash usage and cleanup operations -------------------------------------------------------------------------------- /doc/images/Captura de pantalla 2025-03-23 230739.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DioCrafts/OxiCloud/de60474c6cc8ebc3396e93303faa895c7a35630a/doc/images/Captura de pantalla 2025-03-23 230739.png -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | postgres: 3 | image: postgres:17.4-alpine 4 | restart: always 5 | environment: 6 | POSTGRES_USER: postgres 7 | POSTGRES_PASSWORD: postgres 8 | POSTGRES_DB: oxicloud 9 | # ports: 10 | # - "5432:5432" 11 | networks: 12 | - oxicloud 13 | volumes: 14 | - pg_data:/var/lib/postgresql/data 15 | - ./db/schema.sql:/docker-entrypoint-initdb.d/10-schema.sql 16 | healthcheck: 17 | test: ["CMD-SHELL", "pg_isready -U postgres"] 18 | interval: 5s 19 | timeout: 5s 20 | retries: 5 21 | 22 | oxicloud: 23 | image: oxicloud 24 | restart: always 25 | build: 26 | context: . 
27 | dockerfile: Dockerfile 28 | ports: 29 | - "8086:8086" 30 | - "8085:8085" 31 | networks: 32 | - oxicloud 33 | depends_on: 34 | - postgres 35 | environment: 36 | - "OXICLOUD_DB_CONNECTION_STRING=postgres://postgres:postgres@postgres/oxicloud" 37 | - "DATABASE_URL=postgres://postgres:postgres@postgres/oxicloud" 38 | volumes: 39 | - storage_data:/app/storage 40 | 41 | networks: 42 | oxicloud: 43 | driver: bridge 44 | 45 | volumes: 46 | pg_data: 47 | storage_data: 48 | -------------------------------------------------------------------------------- /identifier.sh: -------------------------------------------------------------------------------- 1 | while IFS= read -r -d '' file; do 2 | if grep -Iq . "$file"; then 3 | echo "===== $file =====" 4 | cat "$file" 5 | echo -e "\n" 6 | fi 7 | done < <(find . -type f -print0) 8 | 9 | -------------------------------------------------------------------------------- /migrations/20250408000001_default_users.sql: -------------------------------------------------------------------------------- 1 | -- Migration 002: Default Users 2 | 3 | -- Check if admin user already exists before creating it 4 | DO $$ 5 | BEGIN 6 | IF NOT EXISTS (SELECT 1 FROM auth.users WHERE username = 'admin') THEN 7 | -- Create admin user (password: Admin123!) 8 | INSERT INTO auth.users ( 9 | id, 10 | username, 11 | email, 12 | password_hash, 13 | role, 14 | storage_quota_bytes 15 | ) VALUES ( 16 | '00000000-0000-0000-0000-000000000000', 17 | 'admin', 18 | 'admin@oxicloud.local', 19 | '$argon2id$v=19$m=65536,t=3,p=4$c2FsdHNhbHRzYWx0c2FsdA$H3VxE8LL2qPT31DM3loTg6D+O4MSc2sD7GjlQ5h7Jkw', -- Admin123! 
20 | 'admin', 21 | 107374182400 -- 100GB for admin 22 | ); 23 | END IF; 24 | END; 25 | $$; 26 | 27 | -- Check if test user already exists before creating it 28 | DO $$ 29 | BEGIN 30 | IF NOT EXISTS (SELECT 1 FROM auth.users WHERE username = 'test') THEN 31 | -- Create test user (password: test123) 32 | INSERT INTO auth.users ( 33 | id, 34 | username, 35 | email, 36 | password_hash, 37 | role, 38 | storage_quota_bytes 39 | ) VALUES ( 40 | '11111111-1111-1111-1111-111111111111', 41 | 'test', 42 | 'test@oxicloud.local', 43 | '$argon2id$v=19$m=65536,t=3,p=4$c2FsdHNhbHRzYWx0c2FsdA$ZG17Z7SFKhs9zWYbuk08CkHpyiznnZapYnxN5Vi62R4', -- test123 44 | 'user', 45 | 10737418240 -- 10GB for test user 46 | ); 47 | END IF; 48 | END; 49 | $$; -------------------------------------------------------------------------------- /migrations/20250413000000_caldav_schema.sql: -------------------------------------------------------------------------------- 1 | -- OxiCloud CalDAV Schema Migration 2 | -- Migration 003: CalDAV Schema 3 | 4 | -- Create schema for CalDAV-related tables 5 | CREATE SCHEMA IF NOT EXISTS caldav; 6 | 7 | -- Calendar table 8 | CREATE TABLE IF NOT EXISTS caldav.calendars ( 9 | id UUID PRIMARY KEY, 10 | name VARCHAR(255) NOT NULL, 11 | owner_id VARCHAR(36) NOT NULL REFERENCES auth.users(id) ON DELETE CASCADE, 12 | description TEXT, 13 | color VARCHAR(50), 14 | is_public BOOLEAN NOT NULL DEFAULT FALSE, 15 | created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 16 | updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 17 | UNIQUE(owner_id, name) 18 | ); 19 | 20 | -- Calendar properties for custom properties (for extended CalDAV support) 21 | CREATE TABLE IF NOT EXISTS caldav.calendar_properties ( 22 | id SERIAL PRIMARY KEY, 23 | calendar_id UUID NOT NULL REFERENCES caldav.calendars(id) ON DELETE CASCADE, 24 | name VARCHAR(255) NOT NULL, 25 | value TEXT NOT NULL, 26 | created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 27 
| UNIQUE(calendar_id, name) 28 | ); 29 | 30 | -- Calendar events table 31 | CREATE TABLE IF NOT EXISTS caldav.calendar_events ( 32 | id UUID PRIMARY KEY, 33 | calendar_id UUID NOT NULL REFERENCES caldav.calendars(id) ON DELETE CASCADE, 34 | summary VARCHAR(255) NOT NULL, 35 | description TEXT, 36 | location TEXT, 37 | start_time TIMESTAMP WITH TIME ZONE NOT NULL, 38 | end_time TIMESTAMP WITH TIME ZONE NOT NULL, 39 | all_day BOOLEAN NOT NULL DEFAULT FALSE, 40 | rrule TEXT, -- Recurrence rule 41 | ical_uid VARCHAR(255) NOT NULL, -- UID from iCalendar format 42 | ical_data TEXT NOT NULL, -- Complete iCalendar data 43 | created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 44 | updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 45 | UNIQUE(calendar_id, ical_uid) 46 | ); 47 | 48 | -- Calendar sharing table 49 | CREATE TABLE IF NOT EXISTS caldav.calendar_shares ( 50 | id SERIAL PRIMARY KEY, 51 | calendar_id UUID NOT NULL REFERENCES caldav.calendars(id) ON DELETE CASCADE, 52 | user_id VARCHAR(36) NOT NULL REFERENCES auth.users(id) ON DELETE CASCADE, 53 | access_level VARCHAR(50) NOT NULL, -- 'read', 'write', 'owner' 54 | created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 55 | updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 56 | UNIQUE(calendar_id, user_id) 57 | ); 58 | 59 | -- Create indexes for efficient querying 60 | CREATE INDEX IF NOT EXISTS idx_calendar_owner ON caldav.calendars(owner_id); 61 | CREATE INDEX IF NOT EXISTS idx_calendar_public ON caldav.calendars(is_public); 62 | CREATE INDEX IF NOT EXISTS idx_calendar_properties_calendar ON caldav.calendar_properties(calendar_id); 63 | CREATE INDEX IF NOT EXISTS idx_calendar_event_calendar ON caldav.calendar_events(calendar_id); 64 | CREATE INDEX IF NOT EXISTS idx_calendar_event_time_range ON caldav.calendar_events(start_time, end_time); 65 | CREATE INDEX IF NOT EXISTS idx_calendar_event_uid ON caldav.calendar_events(ical_uid); 66 | 
CREATE INDEX IF NOT EXISTS idx_calendar_shares_calendar ON caldav.calendar_shares(calendar_id); 67 | CREATE INDEX IF NOT EXISTS idx_calendar_shares_user ON caldav.calendar_shares(user_id); 68 | 69 | COMMENT ON TABLE caldav.calendars IS 'Stores calendar information for CalDAV support'; 70 | COMMENT ON TABLE caldav.calendar_properties IS 'Stores custom properties for calendars'; 71 | COMMENT ON TABLE caldav.calendar_events IS 'Stores calendar events with iCalendar data'; 72 | COMMENT ON TABLE caldav.calendar_shares IS 'Tracks calendar sharing between users'; -------------------------------------------------------------------------------- /migrations/20250415000000_carddav_schema.sql: -------------------------------------------------------------------------------- 1 | -- Create the carddav schema if it doesn't exist 2 | CREATE SCHEMA IF NOT EXISTS carddav; 3 | 4 | -- Address books table 5 | CREATE TABLE IF NOT EXISTS carddav.address_books ( 6 | id UUID PRIMARY KEY, 7 | name VARCHAR(255) NOT NULL, 8 | owner_id VARCHAR(36) NOT NULL REFERENCES auth.users(id) ON DELETE CASCADE, 9 | description TEXT, 10 | color VARCHAR(50), 11 | is_public BOOLEAN NOT NULL DEFAULT FALSE, 12 | created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 13 | updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 14 | UNIQUE(owner_id, name) 15 | ); 16 | 17 | -- Address book sharing 18 | CREATE TABLE IF NOT EXISTS carddav.address_book_shares ( 19 | address_book_id UUID NOT NULL REFERENCES carddav.address_books(id) ON DELETE CASCADE, 20 | user_id VARCHAR(36) NOT NULL REFERENCES auth.users(id) ON DELETE CASCADE, 21 | can_write BOOLEAN NOT NULL DEFAULT FALSE, 22 | PRIMARY KEY(address_book_id, user_id) 23 | ); 24 | 25 | -- Contacts table 26 | CREATE TABLE IF NOT EXISTS carddav.contacts ( 27 | id UUID PRIMARY KEY, 28 | address_book_id UUID NOT NULL REFERENCES carddav.address_books(id) ON DELETE CASCADE, 29 | uid VARCHAR(255) NOT NULL, 30 | full_name VARCHAR(255), 31 | 
first_name VARCHAR(255), 32 | last_name VARCHAR(255), 33 | nickname VARCHAR(255), 34 | email JSONB, 35 | phone JSONB, 36 | address JSONB, 37 | organization VARCHAR(255), 38 | title VARCHAR(255), 39 | notes TEXT, 40 | photo_url TEXT, 41 | birthday DATE, 42 | anniversary DATE, 43 | vcard TEXT NOT NULL, 44 | etag VARCHAR(255) NOT NULL, 45 | created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 46 | updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 47 | UNIQUE(address_book_id, uid) 48 | ); 49 | 50 | -- Contact groups 51 | CREATE TABLE IF NOT EXISTS carddav.contact_groups ( 52 | id UUID PRIMARY KEY, 53 | address_book_id UUID NOT NULL REFERENCES carddav.address_books(id) ON DELETE CASCADE, 54 | name VARCHAR(255) NOT NULL, 55 | created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 56 | updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP 57 | ); 58 | 59 | -- Group memberships 60 | CREATE TABLE IF NOT EXISTS carddav.group_memberships ( 61 | group_id UUID NOT NULL REFERENCES carddav.contact_groups(id) ON DELETE CASCADE, 62 | contact_id UUID NOT NULL REFERENCES carddav.contacts(id) ON DELETE CASCADE, 63 | PRIMARY KEY(group_id, contact_id) 64 | ); 65 | 66 | -- Create indexes for better performance 67 | CREATE INDEX IF NOT EXISTS idx_contacts_address_book_id ON carddav.contacts(address_book_id); 68 | CREATE INDEX IF NOT EXISTS idx_contacts_uid ON carddav.contacts(uid); 69 | CREATE INDEX IF NOT EXISTS idx_contacts_updated_at ON carddav.contacts(updated_at); 70 | CREATE INDEX IF NOT EXISTS idx_address_books_owner_id ON carddav.address_books(owner_id); 71 | CREATE INDEX IF NOT EXISTS idx_group_memberships_group_id ON carddav.group_memberships(group_id); 72 | CREATE INDEX IF NOT EXISTS idx_group_memberships_contact_id ON carddav.group_memberships(contact_id); -------------------------------------------------------------------------------- /rootless-compose.yml: 
-------------------------------------------------------------------------------- 1 | services: 2 | postgres: 3 | image: postgres:17.4-alpine 4 | restart: always 5 | environment: 6 | POSTGRES_USER: postgres 7 | POSTGRES_PASSWORD: postgres 8 | POSTGRES_DB: oxicloud 9 | # ports: 10 | # - "5432:5432" 11 | networks: 12 | - oxicloud 13 | volumes: 14 | - pg_data:/var/lib/postgresql/data 15 | - ./db/schema.sql:/docker-entrypoint-initdb.d/10-schema.sql 16 | healthcheck: 17 | test: ["CMD-SHELL", "pg_isready -U postgres"] 18 | interval: 5s 19 | timeout: 5s 20 | retries: 5 21 | 22 | oxicloud: 23 | image: oxicloud-rootless 24 | restart: always 25 | build: 26 | context: . 27 | dockerfile: Dockerfile.rootless 28 | ports: 29 | - "8086:8086" 30 | - "8085:8085" 31 | networks: 32 | - oxicloud 33 | depends_on: 34 | - postgres 35 | environment: 36 | - "OXICLOUD_DB_CONNECTION_STRING=postgres://postgres:postgres@postgres/oxicloud" 37 | # Ensure the container runs with the non-root user (UID:GID 10001:10001) 38 | user: "10001:10001" 39 | volumes: 40 | - storage_data:/app/storage 41 | 42 | networks: 43 | oxicloud: 44 | driver: bridge 45 | 46 | volumes: 47 | pg_data: 48 | storage_data: 49 | -------------------------------------------------------------------------------- /scripts/reset_admin.sql: -------------------------------------------------------------------------------- 1 | -- Script to safely reset the admin user in OxiCloud 2 | -- Run this script to delete the existing admin user if you're having issues creating one 3 | 4 | -- Set the correct schema 5 | SET search_path TO auth; 6 | 7 | -- Delete the admin user if it exists 8 | DELETE FROM auth.users WHERE username = 'admin'; 9 | 10 | -- Check if the user was deleted 11 | SELECT 'Admin user has been removed successfully. You can now create a new admin user.' 
AS message 12 | WHERE NOT EXISTS (SELECT 1 FROM auth.users WHERE username = 'admin'); 13 | 14 | -- Check if there are still users in the system 15 | SELECT 'Warning: No users remain in the system. You should register a new admin user.' AS warning 16 | WHERE NOT EXISTS (SELECT 1 FROM auth.users LIMIT 1); 17 | 18 | -- Output remaining users for verification 19 | SELECT username, email, role FROM auth.users ORDER BY role, username; -------------------------------------------------------------------------------- /src/application/adapters/mod.rs: -------------------------------------------------------------------------------- 1 | //! Adapters module for translating between external protocols and internal models 2 | 3 | pub mod webdav_adapter; 4 | pub mod caldav_adapter; 5 | -------------------------------------------------------------------------------- /src/application/dtos/address_book_dto.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | use serde::{Deserialize, Serialize}; 3 | use crate::domain::entities::contact::AddressBook; 4 | 5 | #[derive(Debug, Clone, Serialize, Deserialize)] 6 | pub struct AddressBookDto { 7 | pub id: String, 8 | pub name: String, 9 | pub owner_id: String, 10 | pub description: Option, 11 | pub color: Option, 12 | pub is_public: bool, 13 | pub created_at: DateTime, 14 | pub updated_at: DateTime, 15 | } 16 | 17 | impl Default for AddressBookDto { 18 | fn default() -> Self { 19 | Self { 20 | id: uuid::Uuid::new_v4().to_string(), 21 | name: "Default Address Book".to_string(), 22 | owner_id: "default".to_string(), 23 | description: None, 24 | color: None, 25 | is_public: false, 26 | created_at: Utc::now(), 27 | updated_at: Utc::now(), 28 | } 29 | } 30 | } 31 | 32 | impl From for AddressBookDto { 33 | fn from(book: AddressBook) -> Self { 34 | Self { 35 | id: book.id.to_string(), 36 | name: book.name, 37 | owner_id: book.owner_id, 38 | description: book.description, 39 | color: 
book.color, 40 | is_public: book.is_public, 41 | created_at: book.created_at, 42 | updated_at: book.updated_at, 43 | } 44 | } 45 | } 46 | 47 | #[derive(Debug, Clone, Serialize, Deserialize)] 48 | pub struct CreateAddressBookDto { 49 | pub name: String, 50 | pub owner_id: String, 51 | pub description: Option, 52 | pub color: Option, 53 | pub is_public: Option, 54 | } 55 | 56 | #[derive(Debug, Clone, Serialize, Deserialize)] 57 | pub struct UpdateAddressBookDto { 58 | pub name: Option, 59 | pub description: Option, 60 | pub color: Option, 61 | pub is_public: Option, 62 | pub user_id: String, // Current user making the update 63 | } 64 | 65 | #[derive(Debug, Clone, Serialize, Deserialize)] 66 | pub struct ShareAddressBookDto { 67 | pub address_book_id: String, 68 | pub user_id: String, 69 | pub can_write: bool, 70 | } 71 | 72 | #[derive(Debug, Clone, Serialize, Deserialize)] 73 | pub struct UnshareAddressBookDto { 74 | pub address_book_id: String, 75 | pub user_id: String, 76 | } -------------------------------------------------------------------------------- /src/application/dtos/favorites_dto.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use chrono::{DateTime, Utc}; 3 | 4 | /// DTO for favorites item 5 | #[derive(Debug, Clone, Serialize, Deserialize)] 6 | pub struct FavoriteItemDto { 7 | /// Unique identifier for the favorite entry 8 | pub id: String, 9 | 10 | /// User ID who owns this favorite 11 | pub user_id: String, 12 | 13 | /// ID of the favorited item (file or folder) 14 | pub item_id: String, 15 | 16 | /// Type of the item ('file' or 'folder') 17 | pub item_type: String, 18 | 19 | /// When the item was added to favorites 20 | pub created_at: DateTime, 21 | } -------------------------------------------------------------------------------- /src/application/dtos/file_dto.rs: -------------------------------------------------------------------------------- 1 | use 
serde::{Serialize, Deserialize}; 2 | use crate::domain::entities::file::File; 3 | 4 | /// DTO for file responses 5 | #[derive(Debug, Clone, Serialize, Deserialize)] 6 | pub struct FileDto { 7 | /// File ID 8 | pub id: String, 9 | 10 | /// File name 11 | pub name: String, 12 | 13 | /// Path to the file (relative) 14 | pub path: String, 15 | 16 | /// Size in bytes 17 | pub size: u64, 18 | 19 | /// MIME type 20 | pub mime_type: String, 21 | 22 | /// Parent folder ID 23 | pub folder_id: Option, 24 | 25 | /// Creation timestamp 26 | pub created_at: u64, 27 | 28 | /// Last modification timestamp 29 | pub modified_at: u64, 30 | } 31 | 32 | impl From for FileDto { 33 | fn from(file: File) -> Self { 34 | Self { 35 | id: file.id().to_string(), 36 | name: file.name().to_string(), 37 | path: file.path_string().to_string(), 38 | size: file.size(), 39 | mime_type: file.mime_type().to_string(), 40 | folder_id: file.folder_id().map(String::from), 41 | created_at: file.created_at(), 42 | modified_at: file.modified_at(), 43 | } 44 | } 45 | } 46 | 47 | // Para convertir de FileDto a File para los batch handlers 48 | impl From for File { 49 | fn from(dto: FileDto) -> Self { 50 | // Usar constructor para crear una entidad desde DTO 51 | // Nota: esto debe simplificarse si File tiene un constructor adecuado 52 | // Si no, deberías hacer la conversión de la mejor manera posible 53 | File::from_dto( 54 | dto.id, 55 | dto.name, 56 | dto.path, 57 | dto.size, 58 | dto.mime_type, 59 | dto.folder_id, 60 | dto.created_at, 61 | dto.modified_at 62 | ) 63 | } 64 | } 65 | 66 | impl FileDto { 67 | /// Creates an empty file DTO for stub implementations 68 | pub fn empty() -> Self { 69 | Self { 70 | id: "stub-id".to_string(), 71 | name: "stub-file".to_string(), 72 | path: "/stub/path".to_string(), 73 | size: 0, 74 | mime_type: "application/octet-stream".to_string(), 75 | folder_id: None, 76 | created_at: 0, 77 | modified_at: 0, 78 | } 79 | } 80 | } 81 | 82 | impl Default for FileDto { 83 | fn 
default() -> Self { 84 | Self::empty() 85 | } 86 | } -------------------------------------------------------------------------------- /src/application/dtos/folder_dto.rs: -------------------------------------------------------------------------------- 1 | use serde::{Serialize, Deserialize}; 2 | use crate::domain::entities::folder::Folder; 3 | 4 | /// DTO for folder creation requests 5 | #[derive(Debug, Deserialize)] 6 | pub struct CreateFolderDto { 7 | /// Name of the folder to create 8 | pub name: String, 9 | 10 | /// Parent folder ID (None for root level) 11 | pub parent_id: Option, 12 | } 13 | 14 | /// DTO for folder rename requests 15 | #[derive(Debug, Deserialize)] 16 | pub struct RenameFolderDto { 17 | /// New name for the folder 18 | pub name: String, 19 | } 20 | 21 | /// DTO for folder move requests 22 | #[derive(Debug, Deserialize)] 23 | pub struct MoveFolderDto { 24 | /// New parent folder ID (None for root level) 25 | pub parent_id: Option, 26 | } 27 | 28 | /// DTO for folder responses 29 | #[derive(Debug, Clone, Serialize, Deserialize)] 30 | pub struct FolderDto { 31 | /// Folder ID 32 | pub id: String, 33 | 34 | /// Folder name 35 | pub name: String, 36 | 37 | /// Path to the folder (relative) 38 | pub path: String, 39 | 40 | /// Parent folder ID 41 | pub parent_id: Option, 42 | 43 | /// Creation timestamp 44 | pub created_at: u64, 45 | 46 | /// Last modification timestamp 47 | pub modified_at: u64, 48 | 49 | /// Whether this is a root folder 50 | pub is_root: bool, 51 | } 52 | 53 | impl From for FolderDto { 54 | fn from(folder: Folder) -> Self { 55 | let is_root = folder.parent_id().is_none(); 56 | 57 | Self { 58 | id: folder.id().to_string(), 59 | name: folder.name().to_string(), 60 | path: folder.path_string().to_string(), 61 | parent_id: folder.parent_id().map(String::from), 62 | created_at: folder.created_at(), 63 | modified_at: folder.modified_at(), 64 | is_root, 65 | } 66 | } 67 | } 68 | 69 | // Para convertir de FolderDto a Folder para los 
batch handlers 70 | impl From for Folder { 71 | fn from(dto: FolderDto) -> Self { 72 | // Usar constructor para crear una entidad desde DTO 73 | // Nota: esto debe simplificarse si Folder tiene un constructor adecuado 74 | Folder::from_dto( 75 | dto.id, 76 | dto.name, 77 | dto.path, 78 | dto.parent_id, 79 | dto.created_at, 80 | dto.modified_at 81 | ) 82 | } 83 | } 84 | 85 | impl FolderDto { 86 | /// Creates an empty folder DTO for stub implementations 87 | pub fn empty() -> Self { 88 | Self { 89 | id: "stub-id".to_string(), 90 | name: "stub-folder".to_string(), 91 | path: "/stub/path".to_string(), 92 | parent_id: None, 93 | created_at: 0, 94 | modified_at: 0, 95 | is_root: true, 96 | } 97 | } 98 | } 99 | 100 | impl Default for FolderDto { 101 | fn default() -> Self { 102 | Self::empty() 103 | } 104 | } -------------------------------------------------------------------------------- /src/application/dtos/i18n_dto.rs: -------------------------------------------------------------------------------- 1 | use serde::{Serialize, Deserialize}; 2 | use crate::domain::services::i18n_service::Locale; 3 | 4 | /// DTO for locale information 5 | #[derive(Debug, Serialize, Deserialize)] 6 | pub struct LocaleDto { 7 | /// Locale code (e.g., "en", "es") 8 | pub code: String, 9 | 10 | /// Locale name in its own language (e.g., "English", "Español") 11 | pub name: String, 12 | } 13 | 14 | impl From for LocaleDto { 15 | fn from(locale: Locale) -> Self { 16 | let (code, name) = match locale { 17 | Locale::English => ("en", "English"), 18 | Locale::Spanish => ("es", "Español"), 19 | }; 20 | 21 | Self { 22 | code: code.to_string(), 23 | name: name.to_string(), 24 | } 25 | } 26 | } 27 | 28 | /// DTO for translation request 29 | #[derive(Debug, Deserialize)] 30 | pub struct TranslationRequestDto { 31 | /// The translation key 32 | pub key: String, 33 | 34 | /// The locale code (optional, defaults to "en") 35 | pub locale: Option, 36 | } 37 | 38 | /// DTO for translation response 39 | 
#[derive(Debug, Serialize)] 40 | pub struct TranslationResponseDto { 41 | /// The translation key 42 | pub key: String, 43 | 44 | /// The locale code used for translation 45 | pub locale: String, 46 | 47 | /// The translated text 48 | pub text: String, 49 | } 50 | 51 | /// DTO for translation error 52 | #[derive(Debug, Serialize)] 53 | pub struct TranslationErrorDto { 54 | /// The translation key that was not found 55 | pub key: String, 56 | 57 | /// The locale code used for translation 58 | pub locale: String, 59 | 60 | /// The error message 61 | pub error: String, 62 | } -------------------------------------------------------------------------------- /src/application/dtos/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod address_book_dto; 2 | pub mod calendar_dto; 3 | pub mod contact_dto; 4 | pub mod favorites_dto; 5 | pub mod file_dto; 6 | pub mod folder_dto; 7 | pub mod i18n_dto; 8 | pub mod pagination; 9 | pub mod recent_dto; 10 | pub mod search_dto; 11 | pub mod share_dto; 12 | pub mod trash_dto; 13 | pub mod user_dto; 14 | 15 | -------------------------------------------------------------------------------- /src/application/dtos/pagination.rs: -------------------------------------------------------------------------------- 1 | use serde::{Serialize, Deserialize}; 2 | 3 | /// Un DTO para representar información de paginación 4 | #[derive(Debug, Clone, Serialize, Deserialize)] 5 | pub struct PaginationDto { 6 | /// Página actual (comienza en 0) 7 | pub page: usize, 8 | /// Tamaño de página 9 | pub page_size: usize, 10 | /// Número total de elementos 11 | pub total_items: usize, 12 | /// Número total de páginas 13 | pub total_pages: usize, 14 | /// Indica si hay una página siguiente 15 | pub has_next: bool, 16 | /// Indica si hay una página anterior 17 | pub has_prev: bool, 18 | } 19 | 20 | /// Un DTO para representar una solicitud de paginación 21 | #[derive(Debug, Clone, Serialize, Deserialize)] 22 | pub 
struct PaginationRequestDto { 23 | /// Página solicitada (comienza en 0) 24 | #[serde(default)] 25 | pub page: usize, 26 | /// Tamaño de página solicitado 27 | #[serde(default = "default_page_size")] 28 | pub page_size: usize, 29 | } 30 | 31 | /// Un DTO para representar una respuesta paginada 32 | #[derive(Debug, Clone, Serialize, Deserialize)] 33 | pub struct PaginatedResponseDto { 34 | /// Datos en la página actual 35 | pub items: Vec, 36 | /// Información de paginación 37 | pub pagination: PaginationDto, 38 | } 39 | 40 | impl Default for PaginationRequestDto { 41 | fn default() -> Self { 42 | Self { 43 | page: 0, 44 | page_size: default_page_size(), 45 | } 46 | } 47 | } 48 | 49 | /// Función para establecer el tamaño de página por defecto 50 | fn default_page_size() -> usize { 51 | 100 // Por defecto, 100 items por página 52 | } 53 | 54 | impl PaginationRequestDto { 55 | /// Calcula el offset para consultas paginadas 56 | pub fn offset(&self) -> usize { 57 | self.page * self.page_size 58 | } 59 | 60 | /// Calcula el límite para consultas paginadas 61 | pub fn limit(&self) -> usize { 62 | self.page_size 63 | } 64 | 65 | /// Valida y ajusta los parámetros de paginación 66 | pub fn validate_and_adjust(&self) -> Self { 67 | let mut page = self.page; 68 | let mut page_size = self.page_size; 69 | 70 | // Asegurar que la página sea al menos 0 71 | if page < 1 { 72 | page = 0; 73 | } 74 | 75 | // Asegurar que el tamaño de página esté entre 10 y 500 76 | if page_size < 10 { 77 | page_size = 10; 78 | } else if page_size > 500 { 79 | page_size = 500; 80 | } 81 | 82 | Self { 83 | page, 84 | page_size, 85 | } 86 | } 87 | } 88 | 89 | impl PaginatedResponseDto { 90 | /// Crea una nueva respuesta paginada a partir de los datos y la información de paginación 91 | pub fn new( 92 | items: Vec, 93 | page: usize, 94 | page_size: usize, 95 | total_items: usize, 96 | ) -> Self { 97 | let total_pages = if total_items == 0 { 98 | 0 99 | } else { 100 | (total_items + page_size - 1) / 
page_size 101 | }; 102 | 103 | let pagination = PaginationDto { 104 | page, 105 | page_size, 106 | total_items, 107 | total_pages, 108 | has_next: page < total_pages - 1, 109 | has_prev: page > 0, 110 | }; 111 | 112 | Self { 113 | items, 114 | pagination, 115 | } 116 | } 117 | } -------------------------------------------------------------------------------- /src/application/dtos/recent_dto.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use chrono::{DateTime, Utc}; 3 | 4 | /// DTO para elementos recientes 5 | #[derive(Debug, Clone, Serialize, Deserialize)] 6 | pub struct RecentItemDto { 7 | /// Identificador único para el elemento reciente 8 | pub id: String, 9 | 10 | /// ID del usuario propietario 11 | pub user_id: String, 12 | 13 | /// ID del elemento (archivo o carpeta) 14 | pub item_id: String, 15 | 16 | /// Tipo del elemento ('file' o 'folder') 17 | pub item_type: String, 18 | 19 | /// Cuándo se accedió al elemento 20 | pub accessed_at: DateTime, 21 | } -------------------------------------------------------------------------------- /src/application/dtos/share_dto.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use crate::domain::entities::share::{Share, SharePermissions}; 4 | 5 | #[derive(Debug, Clone, Serialize, Deserialize)] 6 | pub struct ShareDto { 7 | pub id: String, 8 | pub item_id: String, 9 | pub item_type: String, 10 | pub token: String, 11 | pub url: String, 12 | pub has_password: bool, 13 | pub expires_at: Option, 14 | pub permissions: SharePermissionsDto, 15 | pub created_at: u64, 16 | pub created_by: String, 17 | pub access_count: u64, 18 | } 19 | 20 | #[derive(Debug, Clone, Serialize, Deserialize)] 21 | pub struct SharePermissionsDto { 22 | pub read: bool, 23 | pub write: bool, 24 | pub reshare: bool, 25 | } 26 | 27 | #[derive(Debug, Clone, Serialize, Deserialize)] 28 | pub 
struct CreateShareDto { 29 | pub item_id: String, 30 | pub item_type: String, 31 | pub password: Option, 32 | pub expires_at: Option, 33 | pub permissions: Option, 34 | } 35 | 36 | #[derive(Debug, Clone, Serialize, Deserialize)] 37 | pub struct UpdateShareDto { 38 | pub password: Option, 39 | pub expires_at: Option, 40 | pub permissions: Option, 41 | } 42 | 43 | /// Extension methods to convert between DTOs and domain entities 44 | impl ShareDto { 45 | pub fn from_entity(share: &Share, base_url: &str) -> Self { 46 | let url = format!("{}/s/{}", base_url, share.token); 47 | 48 | Self { 49 | id: share.id.clone(), 50 | item_id: share.item_id.clone(), 51 | item_type: share.item_type.to_string(), 52 | token: share.token.clone(), 53 | url, 54 | has_password: share.password_hash.is_some(), 55 | expires_at: share.expires_at, 56 | permissions: SharePermissionsDto::from_entity(&share.permissions), 57 | created_at: share.created_at, 58 | created_by: share.created_by.clone(), 59 | access_count: share.access_count, 60 | } 61 | } 62 | } 63 | 64 | impl SharePermissionsDto { 65 | pub fn from_entity(permissions: &SharePermissions) -> Self { 66 | Self { 67 | read: permissions.read, 68 | write: permissions.write, 69 | reshare: permissions.reshare, 70 | } 71 | } 72 | 73 | pub fn to_entity(&self) -> SharePermissions { 74 | SharePermissions::new(self.read, self.write, self.reshare) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/application/dtos/trash_dto.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | /// DTO representing an item in the trash 5 | #[derive(Debug, Serialize, Deserialize)] 6 | pub struct TrashedItemDto { 7 | pub id: String, 8 | pub original_id: String, 9 | pub item_type: String, // "file" o "folder" 10 | pub name: String, 11 | pub original_path: String, 12 | pub trashed_at: DateTime, 13 | pub 
days_until_deletion: i64, 14 | } 15 | 16 | /// Request to move an item to trash 17 | #[derive(Debug, Deserialize)] 18 | pub struct MoveToTrashRequest { 19 | pub item_id: String, 20 | pub item_type: String, // "file" o "folder" 21 | } 22 | 23 | /// Request to restore an item from trash 24 | #[derive(Debug, Deserialize)] 25 | pub struct RestoreFromTrashRequest { 26 | pub trash_id: String, 27 | } 28 | 29 | /// Request to permanently delete an item from trash 30 | #[derive(Debug, Deserialize)] 31 | pub struct DeletePermanentlyRequest { 32 | pub trash_id: String, 33 | } -------------------------------------------------------------------------------- /src/application/dtos/user_dto.rs: -------------------------------------------------------------------------------- 1 | use serde::{Serialize, Deserialize}; 2 | use chrono::{DateTime, Utc}; 3 | use crate::domain::entities::user::User; 4 | 5 | #[derive(Debug, Serialize, Deserialize)] 6 | pub struct UserDto { 7 | pub id: String, 8 | pub username: String, 9 | pub email: String, 10 | pub role: String, 11 | pub storage_quota_bytes: i64, 12 | pub storage_used_bytes: i64, 13 | pub created_at: DateTime, 14 | pub updated_at: DateTime, 15 | pub last_login_at: Option>, 16 | pub active: bool, 17 | } 18 | 19 | impl From for UserDto { 20 | fn from(user: User) -> Self { 21 | Self { 22 | id: user.id().to_string(), 23 | username: user.username().to_string(), 24 | email: user.email().to_string(), 25 | role: format!("{}", user.role()), 26 | storage_quota_bytes: user.storage_quota_bytes(), 27 | storage_used_bytes: user.storage_used_bytes(), 28 | created_at: user.created_at(), 29 | updated_at: user.updated_at(), 30 | last_login_at: user.last_login_at(), 31 | active: user.is_active(), 32 | } 33 | } 34 | } 35 | 36 | #[derive(Debug, Serialize, Deserialize, Clone)] 37 | pub struct LoginDto { 38 | pub username: String, 39 | pub password: String, 40 | } 41 | 42 | #[derive(Debug, Serialize, Deserialize, Clone)] 43 | pub struct RegisterDto { 44 | pub 
username: String, 45 | pub email: String, 46 | pub password: String, 47 | pub role: Option, 48 | } 49 | 50 | #[derive(Debug, Serialize, Deserialize)] 51 | pub struct AuthResponseDto { 52 | pub user: UserDto, 53 | pub access_token: String, 54 | pub refresh_token: String, 55 | pub token_type: String, 56 | pub expires_in: i64, 57 | } 58 | 59 | #[derive(Debug, Serialize, Deserialize)] 60 | pub struct ChangePasswordDto { 61 | pub current_password: String, 62 | pub new_password: String, 63 | } 64 | 65 | #[derive(Debug, Serialize, Deserialize)] 66 | pub struct RefreshTokenDto { 67 | pub refresh_token: String, 68 | } -------------------------------------------------------------------------------- /src/application/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod dtos; 2 | pub mod ports; 3 | pub mod services; 4 | pub mod transactions; 5 | pub mod adapters; 6 | 7 | // Re-exportaciones para facilitar el acceso a los principales puertos -------------------------------------------------------------------------------- /src/application/ports/auth_ports.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use crate::domain::entities::user::User; 3 | use crate::domain::entities::session::Session; 4 | use crate::common::errors::DomainError; 5 | 6 | #[async_trait] 7 | pub trait UserStoragePort: Send + Sync + 'static { 8 | /// Crea un nuevo usuario 9 | async fn create_user(&self, user: User) -> Result; 10 | 11 | /// Obtiene un usuario por ID 12 | async fn get_user_by_id(&self, id: &str) -> Result; 13 | 14 | /// Obtiene un usuario por nombre de usuario 15 | async fn get_user_by_username(&self, username: &str) -> Result; 16 | 17 | /// Obtiene un usuario por correo electrónico 18 | async fn get_user_by_email(&self, email: &str) -> Result; 19 | 20 | /// Actualiza un usuario existente 21 | async fn update_user(&self, user: User) -> Result; 22 | 23 | /// Actualiza solo el 
uso de almacenamiento de un usuario 24 | async fn update_storage_usage(&self, user_id: &str, usage_bytes: i64) -> Result<(), DomainError>; 25 | 26 | /// Lista usuarios con paginación 27 | async fn list_users(&self, limit: i64, offset: i64) -> Result, DomainError>; 28 | 29 | /// Lista usuarios por rol (por ejemplo, "admin" o "user") 30 | async fn list_users_by_role(&self, role: &str) -> Result, DomainError>; 31 | 32 | /// Elimina un usuario por su ID 33 | async fn delete_user(&self, user_id: &str) -> Result<(), DomainError>; 34 | 35 | /// Cambia la contraseña de un usuario 36 | async fn change_password(&self, user_id: &str, password_hash: &str) -> Result<(), DomainError>; 37 | } 38 | 39 | #[async_trait] 40 | pub trait SessionStoragePort: Send + Sync + 'static { 41 | /// Crea una nueva sesión 42 | async fn create_session(&self, session: Session) -> Result; 43 | 44 | /// Obtiene una sesión por token de actualización 45 | async fn get_session_by_refresh_token(&self, refresh_token: &str) -> Result; 46 | 47 | /// Revoca una sesión específica 48 | async fn revoke_session(&self, session_id: &str) -> Result<(), DomainError>; 49 | 50 | /// Revoca todas las sesiones de un usuario 51 | async fn revoke_all_user_sessions(&self, user_id: &str) -> Result; 52 | } -------------------------------------------------------------------------------- /src/application/ports/calendar_ports.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use chrono::{DateTime, Utc}; 3 | use crate::application::dtos::calendar_dto::{ 4 | CalendarDto, CalendarEventDto, CreateCalendarDto, UpdateCalendarDto, 5 | CreateEventDto, UpdateEventDto, CreateEventICalDto 6 | }; 7 | use crate::common::errors::DomainError; 8 | 9 | /// Port for external calendar storage mechanisms 10 | #[async_trait] 11 | pub trait CalendarStoragePort: Send + Sync + 'static { 12 | // Calendar operations 13 | async fn create_calendar(&self, calendar: CreateCalendarDto, 
owner_id: &str) -> Result; 14 | async fn update_calendar(&self, calendar_id: &str, update: UpdateCalendarDto) -> Result; 15 | async fn delete_calendar(&self, calendar_id: &str) -> Result<(), DomainError>; 16 | async fn get_calendar(&self, calendar_id: &str) -> Result; 17 | async fn list_calendars_by_owner(&self, owner_id: &str) -> Result, DomainError>; 18 | async fn list_calendars_shared_with_user(&self, user_id: &str) -> Result, DomainError>; 19 | async fn list_public_calendars(&self, limit: i64, offset: i64) -> Result, DomainError>; 20 | async fn check_calendar_access(&self, calendar_id: &str, user_id: &str) -> Result; 21 | 22 | // Calendar sharing 23 | async fn share_calendar(&self, calendar_id: &str, user_id: &str, access_level: &str) -> Result<(), DomainError>; 24 | async fn remove_calendar_sharing(&self, calendar_id: &str, user_id: &str) -> Result<(), DomainError>; 25 | async fn get_calendar_shares(&self, calendar_id: &str) -> Result, DomainError>; 26 | 27 | // Calendar properties 28 | async fn set_calendar_property(&self, calendar_id: &str, property_name: &str, property_value: &str) -> Result<(), DomainError>; 29 | async fn get_calendar_property(&self, calendar_id: &str, property_name: &str) -> Result, DomainError>; 30 | async fn get_calendar_properties(&self, calendar_id: &str) -> Result, DomainError>; 31 | 32 | // Event operations 33 | async fn create_event(&self, event: CreateEventDto) -> Result; 34 | async fn create_event_from_ical(&self, event: CreateEventICalDto) -> Result; 35 | async fn update_event(&self, event_id: &str, update: UpdateEventDto) -> Result; 36 | async fn delete_event(&self, event_id: &str) -> Result<(), DomainError>; 37 | async fn get_event(&self, event_id: &str) -> Result; 38 | async fn list_events_by_calendar(&self, calendar_id: &str) -> Result, DomainError>; 39 | async fn list_events_by_calendar_paginated(&self, calendar_id: &str, limit: i64, offset: i64) -> Result, DomainError>; 40 | async fn get_events_in_time_range( 41 | &self, 
42 | calendar_id: &str, 43 | start: &DateTime, 44 | end: &DateTime 45 | ) -> Result, DomainError>; 46 | } 47 | 48 | /// Port for calendar use cases 49 | #[async_trait] 50 | pub trait CalendarUseCase: Send + Sync + 'static { 51 | // Calendar operations 52 | async fn create_calendar(&self, calendar: CreateCalendarDto) -> Result; 53 | async fn update_calendar(&self, calendar_id: &str, update: UpdateCalendarDto) -> Result; 54 | async fn delete_calendar(&self, calendar_id: &str) -> Result<(), DomainError>; 55 | async fn get_calendar(&self, calendar_id: &str) -> Result; 56 | async fn list_my_calendars(&self) -> Result, DomainError>; 57 | async fn list_shared_calendars(&self) -> Result, DomainError>; 58 | async fn list_public_calendars(&self, limit: Option, offset: Option) -> Result, DomainError>; 59 | 60 | // Calendar sharing 61 | async fn share_calendar(&self, calendar_id: &str, user_id: &str, access_level: &str) -> Result<(), DomainError>; 62 | async fn remove_calendar_sharing(&self, calendar_id: &str, user_id: &str) -> Result<(), DomainError>; 63 | async fn get_calendar_shares(&self, calendar_id: &str) -> Result, DomainError>; 64 | 65 | // Event operations 66 | async fn create_event(&self, event: CreateEventDto) -> Result; 67 | async fn create_event_from_ical(&self, event: CreateEventICalDto) -> Result; 68 | async fn update_event(&self, event_id: &str, update: UpdateEventDto) -> Result; 69 | async fn delete_event(&self, event_id: &str) -> Result<(), DomainError>; 70 | async fn get_event(&self, event_id: &str) -> Result; 71 | async fn list_events(&self, calendar_id: &str, limit: Option, offset: Option) -> Result, DomainError>; 72 | async fn get_events_in_range( 73 | &self, 74 | calendar_id: &str, 75 | start: DateTime, 76 | end: DateTime 77 | ) -> Result, DomainError>; 78 | } -------------------------------------------------------------------------------- /src/application/ports/carddav_ports.rs: 
-------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use crate::common::errors::DomainError; 3 | use crate::application::dtos::address_book_dto::{ 4 | AddressBookDto, CreateAddressBookDto, UpdateAddressBookDto, 5 | ShareAddressBookDto, UnshareAddressBookDto 6 | }; 7 | use crate::application::dtos::contact_dto::{ 8 | ContactDto, CreateContactDto, UpdateContactDto, CreateContactVCardDto, 9 | ContactGroupDto, CreateContactGroupDto, UpdateContactGroupDto, GroupMembershipDto 10 | }; 11 | 12 | pub type CardDavRepositoryError = DomainError; 13 | 14 | #[async_trait] 15 | pub trait AddressBookUseCase: Send + Sync + 'static { 16 | // Address Book operations 17 | async fn create_address_book(&self, dto: CreateAddressBookDto) -> Result; 18 | async fn update_address_book(&self, address_book_id: &str, update: UpdateAddressBookDto) -> Result; 19 | async fn delete_address_book(&self, address_book_id: &str, user_id: &str) -> Result<(), DomainError>; 20 | async fn get_address_book(&self, address_book_id: &str, user_id: &str) -> Result; 21 | async fn list_user_address_books(&self, user_id: &str) -> Result, DomainError>; 22 | async fn list_public_address_books(&self) -> Result, DomainError>; 23 | 24 | // Address Book sharing 25 | async fn share_address_book(&self, dto: ShareAddressBookDto, user_id: &str) -> Result<(), DomainError>; 26 | async fn unshare_address_book(&self, dto: UnshareAddressBookDto, user_id: &str) -> Result<(), DomainError>; 27 | async fn get_address_book_shares(&self, address_book_id: &str, user_id: &str) -> Result, DomainError>; 28 | } 29 | 30 | #[async_trait] 31 | pub trait ContactUseCase: Send + Sync + 'static { 32 | // Contact operations 33 | async fn create_contact(&self, dto: CreateContactDto) -> Result; 34 | async fn create_contact_from_vcard(&self, dto: CreateContactVCardDto) -> Result; 35 | async fn update_contact(&self, contact_id: &str, update: UpdateContactDto) -> Result; 36 | async fn 
delete_contact(&self, contact_id: &str, user_id: &str) -> Result<(), DomainError>; 37 | async fn get_contact(&self, contact_id: &str, user_id: &str) -> Result; 38 | async fn list_contacts(&self, address_book_id: &str, user_id: &str) -> Result, DomainError>; 39 | async fn search_contacts(&self, address_book_id: &str, query: &str, user_id: &str) -> Result, DomainError>; 40 | 41 | // Contact Group operations 42 | async fn create_group(&self, dto: CreateContactGroupDto) -> Result; 43 | async fn update_group(&self, group_id: &str, update: UpdateContactGroupDto) -> Result; 44 | async fn delete_group(&self, group_id: &str, user_id: &str) -> Result<(), DomainError>; 45 | async fn get_group(&self, group_id: &str, user_id: &str) -> Result; 46 | async fn list_groups(&self, address_book_id: &str, user_id: &str) -> Result, DomainError>; 47 | 48 | // Group membership 49 | async fn add_contact_to_group(&self, dto: GroupMembershipDto, user_id: &str) -> Result<(), DomainError>; 50 | async fn remove_contact_from_group(&self, dto: GroupMembershipDto, user_id: &str) -> Result<(), DomainError>; 51 | async fn list_contacts_in_group(&self, group_id: &str, user_id: &str) -> Result, DomainError>; 52 | async fn list_groups_for_contact(&self, contact_id: &str, user_id: &str) -> Result, DomainError>; 53 | 54 | // vCard operations 55 | async fn get_contact_vcard(&self, contact_id: &str, user_id: &str) -> Result; 56 | async fn get_contacts_as_vcards(&self, address_book_id: &str, user_id: &str) -> Result, DomainError>; 57 | } -------------------------------------------------------------------------------- /src/application/ports/favorites_ports.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use crate::common::errors::Result; 3 | use crate::application::dtos::favorites_dto::FavoriteItemDto; 4 | 5 | /// Defines operations for managing user favorites 6 | #[async_trait] 7 | pub trait FavoritesUseCase: Send + Sync { 8 | /// 
Get all favorites for a user 9 | async fn get_favorites(&self, user_id: &str) -> Result>; 10 | 11 | /// Add an item to user's favorites 12 | async fn add_to_favorites(&self, user_id: &str, item_id: &str, item_type: &str) -> Result<()>; 13 | 14 | /// Remove an item from user's favorites 15 | async fn remove_from_favorites(&self, user_id: &str, item_id: &str, item_type: &str) -> Result; 16 | 17 | /// Check if an item is in user's favorites 18 | async fn is_favorite(&self, user_id: &str, item_id: &str, item_type: &str) -> Result; 19 | } -------------------------------------------------------------------------------- /src/application/ports/file_ports.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use async_trait::async_trait; 3 | use bytes::Bytes; 4 | use futures::Stream; 5 | 6 | use crate::application::dtos::file_dto::FileDto; 7 | use crate::common::errors::DomainError; 8 | 9 | /// Puerto primario para operaciones de subida de archivos 10 | #[async_trait] 11 | pub trait FileUploadUseCase: Send + Sync + 'static { 12 | /// Sube un nuevo archivo desde bytes 13 | async fn upload_file( 14 | &self, 15 | name: String, 16 | folder_id: Option, 17 | content_type: String, 18 | content: Vec, 19 | ) -> Result; 20 | } 21 | 22 | /// Puerto primario para operaciones de recuperación de archivos 23 | #[async_trait] 24 | pub trait FileRetrievalUseCase: Send + Sync + 'static { 25 | /// Obtiene un archivo por su ID 26 | async fn get_file(&self, id: &str) -> Result; 27 | 28 | /// Lista archivos en una carpeta 29 | async fn list_files(&self, folder_id: Option<&str>) -> Result, DomainError>; 30 | 31 | /// Obtiene contenido de archivo como bytes (para archivos pequeños) 32 | async fn get_file_content(&self, id: &str) -> Result, DomainError>; 33 | 34 | /// Obtiene contenido de archivo como stream (para archivos grandes) 35 | async fn get_file_stream(&self, id: &str) -> Result> + Send>, DomainError>; 36 | } 37 | 38 | /// Puerto 
primario para operaciones de gestión de archivos 39 | #[async_trait] 40 | pub trait FileManagementUseCase: Send + Sync + 'static { 41 | /// Mueve un archivo a otra carpeta 42 | async fn move_file(&self, file_id: &str, folder_id: Option) -> Result; 43 | 44 | /// Elimina un archivo 45 | async fn delete_file(&self, id: &str) -> Result<(), DomainError>; 46 | } 47 | 48 | /// Factory para crear implementaciones de casos de uso de archivos 49 | pub trait FileUseCaseFactory: Send + Sync + 'static { 50 | fn create_file_upload_use_case(&self) -> Arc; 51 | fn create_file_retrieval_use_case(&self) -> Arc; 52 | fn create_file_management_use_case(&self) -> Arc; 53 | } -------------------------------------------------------------------------------- /src/application/ports/inbound.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use async_trait::async_trait; 3 | use bytes::Bytes; 4 | use futures::Stream; 5 | 6 | use crate::application::dtos::file_dto::FileDto; 7 | use crate::application::dtos::folder_dto::{CreateFolderDto, FolderDto, MoveFolderDto, RenameFolderDto}; 8 | use crate::application::dtos::search_dto::{SearchCriteriaDto, SearchResultsDto}; 9 | use crate::common::errors::DomainError; 10 | 11 | /// Puerto primario para operaciones de archivos 12 | #[async_trait] 13 | pub trait FileUseCase: Send + Sync + 'static { 14 | /// Sube un nuevo archivo desde bytes 15 | async fn upload_file( 16 | &self, 17 | name: String, 18 | folder_id: Option, 19 | content_type: String, 20 | content: Vec, 21 | ) -> Result; 22 | 23 | /// Obtiene un archivo por su ID 24 | async fn get_file(&self, id: &str) -> Result; 25 | 26 | /// Obtiene un archivo por su ruta (para WebDAV) 27 | async fn get_file_by_path(&self, path: &str) -> Result; 28 | 29 | /// Crea un nuevo archivo en la ruta especificada (para WebDAV) 30 | async fn create_file(&self, parent_path: &str, filename: &str, content: &[u8], content_type: &str) -> Result; 31 | 32 | /// 
Actualiza el contenido de un archivo existente (para WebDAV) 33 | async fn update_file(&self, path: &str, content: &[u8]) -> Result<(), DomainError>; 34 | 35 | /// Lista archivos en una carpeta 36 | async fn list_files(&self, folder_id: Option<&str>) -> Result, DomainError>; 37 | 38 | /// Elimina un archivo 39 | async fn delete_file(&self, id: &str) -> Result<(), DomainError>; 40 | 41 | /// Obtiene contenido de archivo como bytes (para archivos pequeños) 42 | async fn get_file_content(&self, id: &str) -> Result, DomainError>; 43 | 44 | /// Obtiene contenido de archivo como stream (para archivos grandes) 45 | async fn get_file_stream(&self, id: &str) -> Result> + Send>, DomainError>; 46 | 47 | /// Mueve un archivo a otra carpeta 48 | async fn move_file(&self, file_id: &str, folder_id: Option) -> Result; 49 | } 50 | 51 | /// Puerto primario para operaciones de carpetas 52 | #[async_trait] 53 | pub trait FolderUseCase: Send + Sync + 'static { 54 | /// Crea una nueva carpeta 55 | async fn create_folder(&self, dto: CreateFolderDto) -> Result; 56 | 57 | /// Obtiene una carpeta por su ID 58 | async fn get_folder(&self, id: &str) -> Result; 59 | 60 | /// Obtiene una carpeta por su ruta 61 | async fn get_folder_by_path(&self, path: &str) -> Result; 62 | 63 | /// Lista carpetas dentro de una carpeta padre 64 | async fn list_folders(&self, parent_id: Option<&str>) -> Result, DomainError>; 65 | 66 | /// Lista carpetas con paginación 67 | async fn list_folders_paginated( 68 | &self, 69 | parent_id: Option<&str>, 70 | pagination: &crate::application::dtos::pagination::PaginationRequestDto 71 | ) -> Result, DomainError>; 72 | 73 | /// Renombra una carpeta 74 | async fn rename_folder(&self, id: &str, dto: RenameFolderDto) -> Result; 75 | 76 | /// Mueve una carpeta a otro padre 77 | async fn move_folder(&self, id: &str, dto: MoveFolderDto) -> Result; 78 | 79 | /// Elimina una carpeta 80 | async fn delete_folder(&self, id: &str) -> Result<(), DomainError>; 81 | } 82 | 83 | /** 84 | 
* Puerto primario para búsqueda de archivos y carpetas 85 | * 86 | * Define las operaciones relacionadas con la búsqueda avanzada de 87 | * archivos y carpetas basándose en diversos criterios. 88 | */ 89 | #[async_trait] 90 | pub trait SearchUseCase: Send + Sync + 'static { 91 | /** 92 | * Realiza una búsqueda basada en los criterios especificados 93 | * 94 | * @param criteria Criterios de búsqueda que incluyen texto, fechas, tamaños, etc. 95 | * @return Resultados de la búsqueda que contienen archivos y carpetas coincidentes 96 | */ 97 | async fn search(&self, criteria: SearchCriteriaDto) -> Result; 98 | 99 | /** 100 | * Limpia la caché de resultados de búsqueda 101 | * 102 | * @return Resultado indicando éxito o error 103 | */ 104 | async fn clear_search_cache(&self) -> Result<(), DomainError>; 105 | } 106 | 107 | /// Factory para crear implementaciones de casos de uso 108 | pub trait UseCaseFactory { 109 | fn create_file_use_case(&self) -> Arc; 110 | fn create_folder_use_case(&self) -> Arc; 111 | fn create_search_use_case(&self) -> Arc; 112 | } -------------------------------------------------------------------------------- /src/application/ports/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod auth_ports; 2 | pub mod calendar_ports; 3 | pub mod carddav_ports; 4 | pub mod favorites_ports; 5 | pub mod file_ports; 6 | pub mod inbound; 7 | pub mod outbound; 8 | pub mod recent_ports; 9 | pub mod share_ports; 10 | pub mod storage_ports; 11 | pub mod trash_ports; -------------------------------------------------------------------------------- /src/application/ports/recent_ports.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use crate::common::errors::Result; 3 | use crate::application::dtos::recent_dto::RecentItemDto; 4 | 5 | /// Define operaciones para gestionar elementos recientes del usuario 6 | #[async_trait] 7 | pub trait 
RecentItemsUseCase: Send + Sync { 8 | /// Obtener todos los elementos recientes de un usuario 9 | async fn get_recent_items(&self, user_id: &str, limit: Option) -> Result>; 10 | 11 | /// Registrar acceso a un elemento 12 | async fn record_item_access(&self, user_id: &str, item_id: &str, item_type: &str) -> Result<()>; 13 | 14 | /// Eliminar un elemento de recientes 15 | async fn remove_from_recent(&self, user_id: &str, item_id: &str, item_type: &str) -> Result; 16 | 17 | /// Limpiar toda la lista de elementos recientes 18 | async fn clear_recent_items(&self, user_id: &str) -> Result<()>; 19 | } -------------------------------------------------------------------------------- /src/application/ports/share_ports.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | 3 | use crate::{ 4 | application::dtos::{ 5 | pagination::PaginatedResponseDto, 6 | share_dto::{CreateShareDto, ShareDto, UpdateShareDto} 7 | }, 8 | common::errors::DomainError, 9 | domain::entities::share::ShareItemType, 10 | }; 11 | 12 | 13 | #[async_trait] 14 | pub trait ShareUseCase: Send + Sync + 'static { 15 | /// Create a new shared link for a file or folder 16 | async fn create_shared_link( 17 | &self, 18 | user_id: &str, 19 | dto: CreateShareDto, 20 | ) -> Result; 21 | 22 | /// Get a shared link by its ID 23 | async fn get_shared_link(&self, id: &str) -> Result; 24 | 25 | /// Get a shared link by its token (for access by non-users) 26 | async fn get_shared_link_by_token(&self, token: &str) -> Result; 27 | 28 | /// Get all shared links for a specific item 29 | async fn get_shared_links_for_item( 30 | &self, 31 | item_id: &str, 32 | item_type: &ShareItemType, 33 | ) -> Result, DomainError>; 34 | 35 | /// Update a shared link 36 | async fn update_shared_link( 37 | &self, 38 | id: &str, 39 | dto: UpdateShareDto, 40 | ) -> Result; 41 | 42 | /// Delete a shared link 43 | async fn delete_shared_link(&self, id: &str) -> Result<(), 
DomainError>; 44 | 45 | /// Get all shared links created by a specific user 46 | async fn get_user_shared_links( 47 | &self, 48 | user_id: &str, 49 | page: usize, 50 | per_page: usize, 51 | ) -> Result, DomainError>; 52 | 53 | /// Verify a password for a password-protected shared link 54 | async fn verify_shared_link_password( 55 | &self, 56 | token: &str, 57 | password: &str, 58 | ) -> Result; 59 | 60 | /// Register an access to a shared link 61 | async fn register_shared_link_access(&self, token: &str) -> Result<(), DomainError>; 62 | } 63 | 64 | #[async_trait] 65 | pub trait ShareStoragePort: Send + Sync + 'static { 66 | async fn save_share(&self, share: &crate::domain::entities::share::Share) 67 | -> Result; 68 | 69 | async fn find_share_by_id(&self, id: &str) 70 | -> Result; 71 | 72 | async fn find_share_by_token(&self, token: &str) 73 | -> Result; 74 | 75 | async fn find_shares_by_item(&self, item_id: &str, item_type: &ShareItemType) 76 | -> Result, DomainError>; 77 | 78 | async fn update_share(&self, share: &crate::domain::entities::share::Share) 79 | -> Result; 80 | 81 | async fn delete_share(&self, id: &str) -> Result<(), DomainError>; 82 | 83 | async fn find_shares_by_user(&self, user_id: &str, offset: usize, limit: usize) 84 | -> Result<(Vec, usize), DomainError>; 85 | } 86 | -------------------------------------------------------------------------------- /src/application/ports/storage_ports.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | use async_trait::async_trait; 3 | use bytes::Bytes; 4 | use futures::Stream; 5 | use serde_json::Value; 6 | 7 | use crate::domain::entities::file::File; 8 | use crate::domain::services::path_service::StoragePath; 9 | use crate::common::errors::DomainError; 10 | 11 | /// Puerto secundario para lectura de archivos 12 | #[async_trait] 13 | pub trait FileReadPort: Send + Sync + 'static { 14 | /// Obtiene un archivo por su ID 15 | async fn get_file(&self, 
id: &str) -> Result; 16 | 17 | /// Lista archivos en una carpeta 18 | async fn list_files(&self, folder_id: Option<&str>) -> Result, DomainError>; 19 | 20 | /// Obtiene contenido de archivo como bytes 21 | async fn get_file_content(&self, id: &str) -> Result, DomainError>; 22 | 23 | /// Obtiene contenido de archivo como stream 24 | async fn get_file_stream(&self, id: &str) -> Result> + Send>, DomainError>; 25 | } 26 | 27 | /// Puerto secundario para escritura de archivos 28 | #[async_trait] 29 | pub trait FileWritePort: Send + Sync + 'static { 30 | /// Guarda un nuevo archivo desde bytes 31 | async fn save_file( 32 | &self, 33 | name: String, 34 | folder_id: Option, 35 | content_type: String, 36 | content: Vec, 37 | ) -> Result; 38 | 39 | /// Mueve un archivo a otra carpeta 40 | async fn move_file(&self, file_id: &str, target_folder_id: Option) -> Result; 41 | 42 | /// Elimina un archivo 43 | async fn delete_file(&self, id: &str) -> Result<(), DomainError>; 44 | 45 | /// Obtiene detalles de una carpeta 46 | async fn get_folder_details(&self, folder_id: &str) -> Result; 47 | 48 | /// Obtiene la ruta de una carpeta como string 49 | async fn get_folder_path_str(&self, folder_id: &str) -> Result; 50 | } 51 | 52 | /// Puerto secundario para resolución de rutas de archivos 53 | #[async_trait] 54 | pub trait FilePathResolutionPort: Send + Sync + 'static { 55 | /// Obtiene la ruta de almacenamiento de un archivo 56 | async fn get_file_path(&self, id: &str) -> Result; 57 | 58 | /// Resuelve una ruta de dominio a una ruta física 59 | fn resolve_path(&self, storage_path: &StoragePath) -> PathBuf; 60 | } 61 | 62 | /// Puerto secundario para verificación de existencia de archivos/directorios 63 | #[async_trait] 64 | pub trait StorageVerificationPort: Send + Sync + 'static { 65 | /// Verifica si existe un archivo en la ruta dada 66 | async fn file_exists(&self, storage_path: &StoragePath) -> Result; 67 | 68 | /// Verifica si existe un directorio en la ruta dada 69 | async fn 
directory_exists(&self, storage_path: &StoragePath) -> Result; 70 | } 71 | 72 | /// Puerto secundario para gestión de directorios 73 | #[async_trait] 74 | pub trait DirectoryManagementPort: Send + Sync + 'static { 75 | /// Crea directorios si no existen 76 | async fn ensure_directory(&self, storage_path: &StoragePath) -> Result<(), DomainError>; 77 | } 78 | 79 | /// Puerto secundario para gestión de uso de almacenamiento 80 | #[async_trait] 81 | pub trait StorageUsagePort: Send + Sync + 'static { 82 | /// Actualiza estadísticas de uso de almacenamiento para un usuario 83 | async fn update_user_storage_usage(&self, user_id: &str) -> Result; 84 | 85 | /// Actualiza estadísticas de uso de almacenamiento para todos los usuarios 86 | async fn update_all_users_storage_usage(&self) -> Result<(), DomainError>; 87 | } 88 | 89 | /// Generic storage service interface for calendar and contact services 90 | #[async_trait] 91 | pub trait StorageUseCase: Send + Sync + 'static { 92 | /// Handle a request with the specified action and parameters 93 | async fn handle_request(&self, action: &str, params: Value) -> Result; 94 | } -------------------------------------------------------------------------------- /src/application/ports/trash_ports.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | 3 | use crate::application::dtos::trash_dto::TrashedItemDto; 4 | use crate::common::errors::Result; 5 | 6 | /// Port for trash-related use cases 7 | #[async_trait] 8 | pub trait TrashUseCase: Send + Sync { 9 | /// List items in the user's trash 10 | async fn get_trash_items(&self, user_id: &str) -> Result>; 11 | 12 | /// Move a file or folder to trash 13 | async fn move_to_trash(&self, item_id: &str, item_type: &str, user_id: &str) -> Result<()>; 14 | 15 | /// Restore an item from trash to its original location 16 | async fn restore_item(&self, trash_id: &str, user_id: &str) -> Result<()>; 17 | 18 | /// Permanently delete an 
item from trash 19 | async fn delete_permanently(&self, trash_id: &str, user_id: &str) -> Result<()>; 20 | 21 | /// Empty the trash for a specific user 22 | async fn empty_trash(&self, user_id: &str) -> Result<()>; 23 | } -------------------------------------------------------------------------------- /src/application/services/file_management_service.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use async_trait::async_trait; 3 | 4 | use crate::application::dtos::file_dto::FileDto; 5 | use crate::application::ports::file_ports::FileManagementUseCase; 6 | use crate::application::ports::storage_ports::FileWritePort; 7 | use crate::common::errors::DomainError; 8 | 9 | /// Service for file management operations 10 | pub struct FileManagementService { 11 | file_repository: Arc, 12 | } 13 | 14 | impl FileManagementService { 15 | /// Creates a new file management service 16 | pub fn new(file_repository: Arc) -> Self { 17 | Self { file_repository } 18 | } 19 | 20 | /// Creates a stub for testing 21 | pub fn default_stub() -> Self { 22 | Self { 23 | file_repository: Arc::new(crate::infrastructure::repositories::FileFsWriteRepository::default_stub()) 24 | } 25 | } 26 | } 27 | 28 | #[async_trait] 29 | impl FileManagementUseCase for FileManagementService { 30 | async fn move_file(&self, file_id: &str, folder_id: Option) -> Result { 31 | tracing::info!("Moving file with ID: {} to folder: {:?}", file_id, folder_id); 32 | 33 | let moved_file = self.file_repository.move_file(file_id, folder_id).await 34 | .map_err(|e| { 35 | tracing::error!("Error moving file (ID: {}): {}", file_id, e); 36 | e 37 | })?; 38 | 39 | tracing::info!("File moved successfully: {} (ID: {}) to folder: {:?}", 40 | moved_file.name(), moved_file.id(), moved_file.folder_id()); 41 | 42 | Ok(FileDto::from(moved_file)) 43 | } 44 | 45 | async fn delete_file(&self, id: &str) -> Result<(), DomainError> { 46 | self.file_repository.delete_file(id).await 
47 | } 48 | } -------------------------------------------------------------------------------- /src/application/services/file_retrieval_service.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use async_trait::async_trait; 3 | use bytes::Bytes; 4 | use futures::Stream; 5 | 6 | use crate::application::dtos::file_dto::FileDto; 7 | use crate::application::ports::file_ports::FileRetrievalUseCase; 8 | use crate::application::ports::storage_ports::FileReadPort; 9 | use crate::common::errors::DomainError; 10 | 11 | /// Servicio para operaciones de recuperación de archivos 12 | pub struct FileRetrievalService { 13 | file_repository: Arc, 14 | } 15 | 16 | impl FileRetrievalService { 17 | /// Crea un nuevo servicio de recuperación de archivos 18 | pub fn new(file_repository: Arc) -> Self { 19 | Self { file_repository } 20 | } 21 | 22 | /// Crea un stub para pruebas 23 | pub fn default_stub() -> Self { 24 | Self { 25 | file_repository: Arc::new(crate::infrastructure::repositories::FileFsReadRepository::default_stub()) 26 | } 27 | } 28 | } 29 | 30 | #[async_trait] 31 | impl FileRetrievalUseCase for FileRetrievalService { 32 | async fn get_file(&self, id: &str) -> Result { 33 | let file = self.file_repository.get_file(id).await?; 34 | Ok(FileDto::from(file)) 35 | } 36 | 37 | async fn list_files(&self, folder_id: Option<&str>) -> Result, DomainError> { 38 | let files = self.file_repository.list_files(folder_id).await?; 39 | Ok(files.into_iter().map(FileDto::from).collect()) 40 | } 41 | 42 | async fn get_file_content(&self, id: &str) -> Result, DomainError> { 43 | self.file_repository.get_file_content(id).await 44 | } 45 | 46 | async fn get_file_stream(&self, id: &str) -> Result> + Send>, DomainError> { 47 | self.file_repository.get_file_stream(id).await 48 | } 49 | } -------------------------------------------------------------------------------- /src/application/services/file_upload_service.rs: 
-------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use async_trait::async_trait; 3 | 4 | use crate::application::dtos::file_dto::FileDto; 5 | use crate::application::ports::file_ports::FileUploadUseCase; 6 | use crate::application::ports::storage_ports::FileWritePort; 7 | use crate::common::errors::DomainError; 8 | use crate::application::ports::storage_ports::StorageUsagePort; 9 | use tracing::{debug, warn}; 10 | 11 | /// Helper function to extract username from folder path string 12 | fn extract_username_from_path(path: &str) -> Option { 13 | // Check if path contains the folder pattern 14 | if !path.contains("Mi Carpeta - ") { 15 | return None; 16 | } 17 | 18 | // Split by the pattern and get the second part 19 | let parts: Vec<&str> = path.split("Mi Carpeta - ").collect(); 20 | if parts.len() <= 1 { 21 | return None; 22 | } 23 | 24 | // Trim and return as owned String 25 | Some(parts[1].trim().to_string()) 26 | } 27 | 28 | /// Servicio para operaciones de subida de archivos 29 | pub struct FileUploadService { 30 | file_repository: Arc, 31 | storage_usage_service: Option>, 32 | } 33 | 34 | impl FileUploadService { 35 | /// Crea un nuevo servicio de subida de archivos 36 | pub fn new(file_repository: Arc) -> Self { 37 | Self { 38 | file_repository, 39 | storage_usage_service: None, 40 | } 41 | } 42 | 43 | /// Configura el servicio de uso de almacenamiento 44 | pub fn with_storage_usage_service( 45 | mut self, 46 | storage_usage_service: Arc 47 | ) -> Self { 48 | self.storage_usage_service = Some(storage_usage_service); 49 | self 50 | } 51 | 52 | /// Crea un stub para pruebas 53 | pub fn default_stub() -> Self { 54 | Self { 55 | file_repository: Arc::new(crate::infrastructure::repositories::FileFsWriteRepository::default_stub()), 56 | storage_usage_service: None, 57 | } 58 | } 59 | } 60 | 61 | #[async_trait] 62 | impl FileUploadUseCase for FileUploadService { 63 | async fn upload_file( 64 | &self, 65 | 
name: String, 66 | folder_id: Option, 67 | content_type: String, 68 | content: Vec, 69 | ) -> Result { 70 | // Upload the file 71 | let file = self.file_repository.save_file(name, folder_id, content_type, content).await?; 72 | 73 | // Extract the owner's user ID if available 74 | // We could make this more explicit by adding a user_id parameter 75 | if let Some(storage_service) = &self.storage_usage_service { 76 | // Extract user ID from folder pattern 'Mi Carpeta - {username}' 77 | if let Some(folder_id) = file.folder_id() { 78 | // Since we don't have direct access to folder details, 79 | // we'll use pattern matching on the folder ID 80 | // In a more complete implementation, we would use a folder repository 81 | let folder_id_str = folder_id; 82 | 83 | // Check if we can extract a username from context 84 | if let Ok(folder_path) = self.file_repository.get_folder_path_str(folder_id_str).await { 85 | // Process the string to extract username without creating borrowing issues 86 | if let Some(username) = extract_username_from_path(&folder_path) { 87 | // Find user by username and update their storage usage 88 | // We do this asynchronously to avoid blocking the upload response 89 | let service_clone = Arc::clone(storage_service); 90 | tokio::spawn(async move { 91 | match service_clone.update_user_storage_usage(&username).await { 92 | Ok(usage) => { 93 | debug!("Updated storage usage for user {} to {} bytes", username, usage); 94 | }, 95 | Err(e) => { 96 | warn!("Failed to update storage usage for {}: {}", username, e); 97 | } 98 | } 99 | }); 100 | } 101 | } else { 102 | warn!("Could not get folder path for ID: {}", folder_id_str); 103 | } 104 | } 105 | } 106 | 107 | Ok(FileDto::from(file)) 108 | } 109 | } -------------------------------------------------------------------------------- /src/application/services/file_use_case_factory.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use 
crate::application::ports::file_ports::{FileUploadUseCase, FileRetrievalUseCase, FileManagementUseCase, FileUseCaseFactory}; 4 | use crate::application::services::file_upload_service::FileUploadService; 5 | use crate::application::services::file_retrieval_service::FileRetrievalService; 6 | use crate::application::services::file_management_service::FileManagementService; 7 | use crate::application::ports::storage_ports::{FileReadPort, FileWritePort}; 8 | 9 | /// Factory para crear implementaciones de casos de uso de archivos 10 | pub struct AppFileUseCaseFactory { 11 | file_read_repository: Arc, 12 | file_write_repository: Arc, 13 | } 14 | 15 | impl AppFileUseCaseFactory { 16 | /// Crea una nueva factory para casos de uso de archivos 17 | pub fn new( 18 | file_read_repository: Arc, 19 | file_write_repository: Arc 20 | ) -> Self { 21 | Self { 22 | file_read_repository, 23 | file_write_repository, 24 | } 25 | } 26 | 27 | /// Crea un stub para pruebas 28 | pub fn default_stub() -> Self { 29 | Self { 30 | file_read_repository: Arc::new(crate::infrastructure::repositories::FileFsReadRepository::default_stub()), 31 | file_write_repository: Arc::new(crate::infrastructure::repositories::FileFsWriteRepository::default_stub()), 32 | } 33 | } 34 | } 35 | 36 | impl FileUseCaseFactory for AppFileUseCaseFactory { 37 | fn create_file_upload_use_case(&self) -> Arc { 38 | Arc::new(FileUploadService::new(self.file_write_repository.clone())) 39 | } 40 | 41 | fn create_file_retrieval_use_case(&self) -> Arc { 42 | Arc::new(FileRetrievalService::new(self.file_read_repository.clone())) 43 | } 44 | 45 | fn create_file_management_use_case(&self) -> Arc { 46 | Arc::new(FileManagementService::new(self.file_write_repository.clone())) 47 | } 48 | } -------------------------------------------------------------------------------- /src/application/services/i18n_application_service.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | 
use crate::domain::services::i18n_service::{I18nService, I18nResult, Locale}; 4 | 5 | /// Service for i18n operations 6 | pub struct I18nApplicationService { 7 | i18n_service: Arc, 8 | } 9 | 10 | impl I18nApplicationService { 11 | /// Creates a dummy service for testing 12 | pub fn dummy() -> Self { 13 | struct DummyI18nService; 14 | 15 | #[async_trait::async_trait] 16 | impl I18nService for DummyI18nService { 17 | async fn translate(&self, _key: &str, _locale: Locale) -> I18nResult { 18 | Ok("DUMMY_TRANSLATION".to_string()) 19 | } 20 | 21 | async fn load_translations(&self, _locale: Locale) -> I18nResult<()> { 22 | Ok(()) 23 | } 24 | 25 | async fn available_locales(&self) -> Vec { 26 | vec![Locale::English, Locale::Spanish] 27 | } 28 | 29 | async fn is_supported(&self, _locale: Locale) -> bool { 30 | true 31 | } 32 | } 33 | 34 | Self { i18n_service: Arc::new(DummyI18nService) } 35 | } 36 | 37 | /// Creates a new i18n application service 38 | pub fn new(i18n_service: Arc) -> Self { 39 | Self { i18n_service } 40 | } 41 | 42 | /// Get a translation for a key and locale 43 | pub async fn translate(&self, key: &str, locale: Option) -> I18nResult { 44 | let locale = locale.unwrap_or(Locale::default()); 45 | self.i18n_service.translate(key, locale).await 46 | } 47 | 48 | /// Load translations for a locale 49 | pub async fn load_translations(&self, locale: Locale) -> I18nResult<()> { 50 | self.i18n_service.load_translations(locale).await 51 | } 52 | 53 | /// Load translations for all available locales 54 | #[allow(dead_code)] 55 | pub async fn load_all_translations(&self) -> Vec<(Locale, I18nResult<()>)> { 56 | let locales = self.i18n_service.available_locales().await; 57 | let mut results = Vec::new(); 58 | 59 | for locale in locales { 60 | let result = self.i18n_service.load_translations(locale).await; 61 | results.push((locale, result)); 62 | } 63 | 64 | results 65 | } 66 | 67 | /// Get available locales 68 | pub async fn available_locales(&self) -> Vec { 69 | 
self.i18n_service.available_locales().await 70 | } 71 | 72 | /// Check if a locale is supported 73 | #[allow(dead_code)] 74 | pub async fn is_supported(&self, locale: Locale) -> bool { 75 | self.i18n_service.is_supported(locale).await 76 | } 77 | } -------------------------------------------------------------------------------- /src/application/services/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod auth_application_service; 2 | pub mod batch_operations; 3 | pub mod calendar_service; 4 | pub mod contact_service; 5 | pub mod favorites_service; 6 | pub mod file_management_service; 7 | pub mod file_retrieval_service; 8 | pub mod file_service; 9 | pub mod file_upload_service; 10 | pub mod file_use_case_factory; 11 | pub mod folder_service; 12 | pub mod i18n_application_service; 13 | pub mod recent_service; 14 | pub mod search_service; 15 | pub mod share_service; 16 | pub mod storage_mediator; 17 | pub mod storage_usage_service; 18 | pub mod trash_service; 19 | 20 | #[cfg(test)] 21 | mod trash_service_test; 22 | 23 | // Re-exportar para facilitar acceso 24 | pub use file_upload_service::FileUploadService; 25 | pub use file_retrieval_service::FileRetrievalService; 26 | pub use file_management_service::FileManagementService; 27 | pub use file_use_case_factory::AppFileUseCaseFactory; 28 | -------------------------------------------------------------------------------- /src/application/transactions/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod storage_transaction; -------------------------------------------------------------------------------- /src/bin/migrate.rs: -------------------------------------------------------------------------------- 1 | use sqlx::postgres::PgPoolOptions; 2 | use std::env; 3 | use std::path::Path; 4 | use std::time::Duration; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), Box> { 8 | // Configurar logging 9 | 
tracing_subscriber::fmt::init(); 10 | 11 | // Cargar variables de entorno (primero .env.local, luego .env) 12 | if let Ok(path) = env::var("DOTENV_PATH") { 13 | dotenv::from_path(Path::new(&path)).ok(); 14 | } else { 15 | dotenv::from_filename(".env.local").ok(); 16 | dotenv::dotenv().ok(); 17 | } 18 | 19 | // Obtener DATABASE_URL desde variables de entorno 20 | let database_url = env::var("DATABASE_URL").expect("DATABASE_URL debe estar configurada"); 21 | 22 | println!("Conectando a la base de datos..."); 23 | 24 | // Crear pool de conexiones 25 | let pool = PgPoolOptions::new() 26 | .max_connections(5) 27 | .acquire_timeout(Duration::from_secs(10)) 28 | .connect(&database_url) 29 | .await?; 30 | 31 | // Ejecutar migraciones 32 | println!("Ejecutando migraciones..."); 33 | 34 | // Obtenemos el directorio desde una variable de entorno o usamos un valor por defecto 35 | let migrations_dir = env::var("MIGRATIONS_DIR").unwrap_or_else(|_| "./migrations".to_string()); 36 | println!("Directorio de migraciones: {}", migrations_dir); 37 | 38 | // Crear un migrator 39 | let migrator = sqlx::migrate::Migrator::new(Path::new(&migrations_dir)) 40 | .await 41 | .expect("No se pudo crear el migrator"); 42 | 43 | // Ejecutar todas las migraciones pendientes 44 | migrator.run(&pool).await?; 45 | 46 | println!("Migraciones aplicadas correctamente"); 47 | 48 | Ok(()) 49 | } -------------------------------------------------------------------------------- /src/common/auth_factory.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use anyhow::Result; 3 | use sqlx::PgPool; 4 | 5 | use crate::domain::services::auth_service::AuthService; 6 | use crate::application::services::auth_application_service::AuthApplicationService; 7 | use crate::application::services::folder_service::FolderService; 8 | use crate::infrastructure::repositories::{UserPgRepository, SessionPgRepository}; 9 | use crate::common::config::AppConfig; 10 | use 
crate::common::di::AuthServices; 11 | 12 | pub async fn create_auth_services( 13 | config: &AppConfig, 14 | pool: Arc, 15 | folder_service: Option> 16 | ) -> Result { 17 | // Crear servicio de dominio de autenticación 18 | let auth_service = Arc::new(AuthService::new( 19 | config.auth.jwt_secret.clone(), 20 | config.auth.access_token_expiry_secs, 21 | config.auth.refresh_token_expiry_secs, 22 | )); 23 | 24 | // Crear repositorios PostgreSQL 25 | let user_repository = Arc::new(UserPgRepository::new(pool.clone())); 26 | let session_repository = Arc::new(SessionPgRepository::new(pool.clone())); 27 | 28 | // Crear servicio de aplicación de autenticación 29 | let mut auth_app_service = AuthApplicationService::new( 30 | user_repository, 31 | session_repository, 32 | auth_service.clone(), 33 | ); 34 | 35 | // Configurar servicio de carpetas si está disponible 36 | if let Some(folder_svc) = folder_service { 37 | auth_app_service = auth_app_service.with_folder_service(folder_svc); 38 | } 39 | 40 | // Empaquetar servicio en Arc 41 | let auth_application_service = Arc::new(auth_app_service); 42 | 43 | Ok(AuthServices { 44 | auth_service, 45 | auth_application_service, 46 | }) 47 | } -------------------------------------------------------------------------------- /src/common/db.rs: -------------------------------------------------------------------------------- 1 | use sqlx::{postgres::PgPoolOptions, PgPool, Row}; 2 | use anyhow::Result; 3 | use std::time::Duration; 4 | use crate::common::config::AppConfig; 5 | 6 | pub async fn create_database_pool(config: &AppConfig) -> Result { 7 | tracing::info!("Inicializando conexión a PostgreSQL con URL: {}", 8 | config.database.connection_string.replace("postgres://", "postgres://[user]:[pass]@")); 9 | 10 | // Add a more robust connection attempt with retries 11 | let mut attempt = 0; 12 | const MAX_ATTEMPTS: usize = 3; 13 | 14 | while attempt < MAX_ATTEMPTS { 15 | attempt += 1; 16 | tracing::info!("Intento de conexión a PostgreSQL 
#{}", attempt); 17 | 18 | // Crear el pool de conexiones con las opciones de configuración 19 | match PgPoolOptions::new() 20 | .max_connections(config.database.max_connections) 21 | .min_connections(config.database.min_connections) 22 | .acquire_timeout(Duration::from_secs(config.database.connect_timeout_secs)) 23 | .idle_timeout(Duration::from_secs(config.database.idle_timeout_secs)) 24 | .max_lifetime(Duration::from_secs(config.database.max_lifetime_secs)) 25 | .connect(&config.database.connection_string) 26 | .await { 27 | Ok(pool) => { 28 | // Verificar la conexión 29 | match sqlx::query("SELECT 1").execute(&pool).await { 30 | Ok(_) => { 31 | tracing::info!("Conexión a PostgreSQL establecida correctamente"); 32 | 33 | // Verify if migrations have been applied 34 | let migration_check = sqlx::query("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'auth' AND tablename = 'users')") 35 | .fetch_one(&pool) 36 | .await; 37 | 38 | match migration_check { 39 | Ok(row) => { 40 | let tables_exist: bool = row.get(0); 41 | if !tables_exist { 42 | tracing::warn!("Las tablas de la base de datos no existen. Por favor, ejecuta las migraciones con: cargo run --bin migrate --features migrations"); 43 | } 44 | }, 45 | Err(_) => { 46 | tracing::warn!("No se pudo verificar el estado de las migraciones. Por favor, ejecuta las migraciones con: cargo run --bin migrate --features migrations"); 47 | } 48 | } 49 | 50 | return Ok(pool); 51 | }, 52 | Err(e) => { 53 | tracing::error!("Error al verificar conexión: {}", e); 54 | tracing::warn!("La base de datos parece no estar configurada. 
Por favor, ejecuta las migraciones con: cargo run --bin migrate --features migrations"); 55 | if attempt >= MAX_ATTEMPTS { 56 | return Err(anyhow::anyhow!("Error al verificar la conexión a PostgreSQL: {}", e)); 57 | } 58 | } 59 | } 60 | }, 61 | Err(e) => { 62 | tracing::error!("Error al conectar a PostgreSQL: {}", e); 63 | if attempt >= MAX_ATTEMPTS { 64 | return Err(anyhow::anyhow!("Error en la conexión a PostgreSQL: {}", e)); 65 | } 66 | tokio::time::sleep(Duration::from_secs(1)).await; 67 | } 68 | } 69 | } 70 | 71 | Err(anyhow::anyhow!("No se pudo establecer la conexión a PostgreSQL después de {} intentos", MAX_ATTEMPTS)) 72 | } -------------------------------------------------------------------------------- /src/common/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod errors; 2 | pub mod config; 3 | pub mod cache; 4 | pub mod di; 5 | pub mod db; 6 | pub mod auth_factory; -------------------------------------------------------------------------------- /src/domain/entities/contact.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, NaiveDate, Utc}; 2 | use serde::{Deserialize, Serialize}; 3 | use sqlx::types::Uuid; 4 | 5 | #[derive(Debug, Clone, Serialize, Deserialize)] 6 | pub struct AddressBook { 7 | pub id: Uuid, 8 | pub name: String, 9 | pub owner_id: String, 10 | pub description: Option, 11 | pub color: Option, 12 | pub is_public: bool, 13 | pub created_at: DateTime, 14 | pub updated_at: DateTime, 15 | } 16 | 17 | impl Default for AddressBook { 18 | fn default() -> Self { 19 | Self { 20 | id: Uuid::new_v4(), 21 | name: "Default Address Book".to_string(), 22 | owner_id: "default".to_string(), 23 | description: None, 24 | color: None, 25 | is_public: false, 26 | created_at: Utc::now(), 27 | updated_at: Utc::now(), 28 | } 29 | } 30 | } 31 | 32 | #[derive(Debug, Clone, Serialize, Deserialize)] 33 | pub struct Email { 34 | pub email: String, 35 | pub 
r#type: String, // home, work, other
    pub is_primary: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Phone {
    pub number: String,
    pub r#type: String, // mobile, home, work, fax, other
    pub is_primary: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Address {
    pub street: Option<String>,
    pub city: Option<String>,
    pub state: Option<String>,
    pub postal_code: Option<String>,
    pub country: Option<String>,
    pub r#type: String, // home, work, other
    pub is_primary: bool,
}

/// A contact entry, stored both structured and as its raw vCard text.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Contact {
    pub id: Uuid,
    pub address_book_id: Uuid,
    pub uid: String,
    pub full_name: Option<String>,
    pub first_name: Option<String>,
    pub last_name: Option<String>,
    pub nickname: Option<String>,
    pub email: Vec<Email>,
    pub phone: Vec<Phone>,
    pub address: Vec<Address>,
    pub organization: Option<String>,
    pub title: Option<String>,
    pub notes: Option<String>,
    pub photo_url: Option<String>,
    pub birthday: Option<NaiveDate>,
    pub anniversary: Option<NaiveDate>,
    pub vcard: String,
    pub etag: String,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

impl Default for Contact {
    fn default() -> Self {
        Self {
            id: Uuid::new_v4(),
            address_book_id: Uuid::new_v4(),
            uid: format!("{}@oxicloud", Uuid::new_v4()),
            full_name: None,
            first_name: None,
            last_name: None,
            nickname: None,
            email: Vec::new(),
            phone: Vec::new(),
            address: Vec::new(),
            organization: None,
            title: None,
            notes: None,
            photo_url: None,
            birthday: None,
            anniversary: None,
            vcard: "BEGIN:VCARD\nVERSION:3.0\nEND:VCARD".to_string(),
            etag: Uuid::new_v4().to_string(),
            created_at: Utc::now(),
            updated_at: Utc::now(),
        }
    }
}

/// A named group of contacts within an address book.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContactGroup {
    pub id: Uuid,
    pub address_book_id: Uuid,
    pub name: String,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

impl Default for ContactGroup {
    fn default() -> Self {
        Self {
            id: Uuid::new_v4(),
            address_book_id: Uuid::new_v4(),
            name: "New Group".to_string(),
            created_at: Utc::now(),
            updated_at: Utc::now(),
        }
    }
}
--------------------------------------------------------------------------------
/src/domain/entities/mod.rs:
--------------------------------------------------------------------------------
pub mod calendar;
pub mod calendar_event;
pub mod contact;
pub mod file;
pub mod folder;
pub mod user;
pub mod session;
pub mod share;
pub mod trashed_item;
--------------------------------------------------------------------------------
/src/domain/entities/session.rs:
--------------------------------------------------------------------------------
use serde::{Serialize, Deserialize};
use uuid::Uuid;
use chrono::{DateTime, Utc, Duration};

/// A refresh-token session for an authenticated user.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    pub id: String,
    pub user_id: String,
    pub refresh_token: String,
    pub expires_at: DateTime<Utc>,
    pub ip_address: Option<String>,
    pub user_agent: Option<String>,
    pub created_at: DateTime<Utc>,
    pub revoked: bool,
}

impl Session {
    /// Creates a new session expiring `expires_in_days` days from now.
    pub fn new(
        user_id: String,
        refresh_token: String,
        ip_address: Option<String>,
        user_agent: Option<String>,
        expires_in_days: i64,
    ) -> Self {
        let now = Utc::now();
        Self {
            id: Uuid::new_v4().to_string(),
            user_id,
            refresh_token,
            expires_at: now + Duration::days(expires_in_days),
            ip_address,
            user_agent,
            created_at: now,
            revoked: false,
        }
    }

    // Getters
    pub fn id(&self) -> &str {
        &self.id
    }

    pub fn user_id(&self) -> &str {
        &self.user_id
    }

    pub fn refresh_token(&self) -> &str {
        &self.refresh_token
    }

    pub fn expires_at(&self) -> DateTime<Utc> {
        self.expires_at
    }

    pub fn created_at(&self) -> DateTime<Utc> {
        self.created_at
    }

    /// True once the current time is past `expires_at`.
    pub fn is_expired(&self) -> bool {
        Utc::now() > self.expires_at
    }

    pub fn is_revoked(&self) -> bool {
        self.revoked
    }

    /// Marks the session as revoked (it is never un-revoked).
    pub fn revoke(&mut self) {
        self.revoked = true;
    }
}
--------------------------------------------------------------------------------
/src/domain/entities/trashed_item.rs:
--------------------------------------------------------------------------------
use chrono::{DateTime, Utc};
use uuid::Uuid;

#[derive(Debug, Clone, PartialEq)]
pub enum TrashedItemType {
    File,
    Folder,
}

/// A file or folder that has been moved to the trash.
#[derive(Debug, Clone)]
pub struct TrashedItem {
    pub id: Uuid,
    pub
original_id: Uuid, 14 | pub user_id: Uuid, 15 | pub item_type: TrashedItemType, 16 | pub name: String, 17 | pub original_path: String, 18 | pub trashed_at: DateTime, 19 | pub deletion_date: DateTime, // Fecha de eliminación permanente automática 20 | } 21 | 22 | impl TrashedItem { 23 | pub fn new( 24 | original_id: Uuid, 25 | user_id: Uuid, 26 | item_type: TrashedItemType, 27 | name: String, 28 | original_path: String, 29 | retention_days: u32, 30 | ) -> Self { 31 | let now = Utc::now(); 32 | Self { 33 | id: Uuid::new_v4(), 34 | original_id, 35 | user_id, 36 | item_type, 37 | name, 38 | original_path, 39 | trashed_at: now, 40 | deletion_date: now + chrono::Duration::days(retention_days as i64), 41 | } 42 | } 43 | 44 | pub fn days_until_deletion(&self) -> i64 { 45 | let now = Utc::now(); 46 | (self.deletion_date - now).num_days().max(0) 47 | } 48 | } -------------------------------------------------------------------------------- /src/domain/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod entities; 2 | pub mod repositories; 3 | pub mod services; -------------------------------------------------------------------------------- /src/domain/repositories/address_book_repository.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use sqlx::types::Uuid; 3 | use std::result::Result; 4 | 5 | use crate::common::errors::DomainError; 6 | use crate::domain::entities::contact::AddressBook; 7 | 8 | pub type AddressBookRepositoryResult = Result; 9 | 10 | #[async_trait] 11 | pub trait AddressBookRepository: Send + Sync + 'static { 12 | async fn create_address_book(&self, address_book: AddressBook) -> AddressBookRepositoryResult; 13 | async fn update_address_book(&self, address_book: AddressBook) -> AddressBookRepositoryResult; 14 | async fn delete_address_book(&self, id: &Uuid) -> AddressBookRepositoryResult<()>; 15 | async fn get_address_book_by_id(&self, id: 
&Uuid) -> AddressBookRepositoryResult<Option<AddressBook>>;
    async fn get_address_books_by_owner(&self, owner_id: &str) -> AddressBookRepositoryResult<Vec<AddressBook>>;
    async fn get_shared_address_books(&self, user_id: &str) -> AddressBookRepositoryResult<Vec<AddressBook>>;
    async fn get_public_address_books(&self) -> AddressBookRepositoryResult<Vec<AddressBook>>;
    async fn share_address_book(&self, address_book_id: &Uuid, user_id: &str, can_write: bool) -> AddressBookRepositoryResult<()>;
    async fn unshare_address_book(&self, address_book_id: &Uuid, user_id: &str) -> AddressBookRepositoryResult<()>;
    // NOTE(review): element type of the shares list was lost in extraction — user ids assumed; confirm against the implementation.
    async fn get_address_book_shares(&self, address_book_id: &Uuid) -> AddressBookRepositoryResult<Vec<String>>;
}
--------------------------------------------------------------------------------
/src/domain/repositories/calendar_event_repository.rs:
--------------------------------------------------------------------------------
use async_trait::async_trait;
use uuid::Uuid;
use chrono::{DateTime, Utc};
use crate::common::errors::DomainError;
use crate::domain::entities::calendar_event::CalendarEvent;

pub type CalendarEventRepositoryResult<T> = Result<T, DomainError>;

/// Repository interface for CalendarEvent entity operations
#[async_trait]
pub trait CalendarEventRepository: Send + Sync + 'static {
    /// Creates a new calendar event
    async fn create_event(&self, event: CalendarEvent) -> CalendarEventRepositoryResult<CalendarEvent>;

    /// Updates an existing calendar event
    async fn update_event(&self, event: CalendarEvent) -> CalendarEventRepositoryResult<CalendarEvent>;

    /// Deletes a calendar event by ID
    async fn delete_event(&self, id: &Uuid) -> CalendarEventRepositoryResult<()>;

    /// Finds a calendar event by its ID
    async fn find_event_by_id(&self, id: &Uuid) -> CalendarEventRepositoryResult<CalendarEvent>;

    /// Lists all events in a specific calendar
    async fn list_events_by_calendar(&self, calendar_id: &Uuid) -> CalendarEventRepositoryResult<Vec<CalendarEvent>>;

    /// Finds events in a calendar by their summary/title (partial match)
    async fn find_events_by_summary(&self, calendar_id: &Uuid, summary: &str) -> CalendarEventRepositoryResult<Vec<CalendarEvent>>;

    /// Gets events in a specific time range for a calendar
    async fn get_events_in_time_range(
        &self,
        calendar_id: &Uuid,
        start: &DateTime<Utc>,
        end: &DateTime<Utc>
    ) -> CalendarEventRepositoryResult<Vec<CalendarEvent>>;

    /// Finds an event by its iCalendar UID in a specific calendar
    async fn find_event_by_ical_uid(&self, calendar_id: &Uuid, ical_uid: &str) -> CalendarEventRepositoryResult<Option<CalendarEvent>>;

    /// Counts events in a calendar
    // NOTE(review): count type was lost in extraction — i64 assumed (matches sqlx COUNT results); confirm.
    async fn count_events_in_calendar(&self, calendar_id: &Uuid) -> CalendarEventRepositoryResult<i64>;

    /// Deletes all events in a calendar
    // NOTE(review): return type was lost in extraction — number of deleted rows assumed; confirm.
    async fn delete_all_events_in_calendar(&self, calendar_id: &Uuid) -> CalendarEventRepositoryResult<i64>;

    /// Lists events by calendar with pagination
    async fn list_events_by_calendar_paginated(
        &self,
        calendar_id: &Uuid,
        limit: i64,
        offset: i64
    ) -> CalendarEventRepositoryResult<Vec<CalendarEvent>>;

    /// Finds events with recurrence rules that might occur in a time range
    async fn find_recurring_events_in_range(
        &self,
        calendar_id: &Uuid,
        start: &DateTime<Utc>,
        end: &DateTime<Utc>
    ) -> CalendarEventRepositoryResult<Vec<CalendarEvent>>;
}
--------------------------------------------------------------------------------
/src/domain/repositories/calendar_repository.rs:
--------------------------------------------------------------------------------
use async_trait::async_trait;
use uuid::Uuid;
use crate::common::errors::DomainError;
use crate::domain::entities::calendar::Calendar;

pub type CalendarRepositoryResult<T> = Result<T, DomainError>;

/// Repository interface for Calendar entity operations
#[async_trait]
pub trait CalendarRepository: Send + Sync + 'static {
    /// Creates a new calendar
    async fn create_calendar(&self, calendar: Calendar) ->
CalendarRepositoryResult; 13 | 14 | /// Updates an existing calendar 15 | async fn update_calendar(&self, calendar: Calendar) -> CalendarRepositoryResult; 16 | 17 | /// Deletes a calendar by ID 18 | async fn delete_calendar(&self, id: &Uuid) -> CalendarRepositoryResult<()>; 19 | 20 | /// Finds a calendar by its ID 21 | async fn find_calendar_by_id(&self, id: &Uuid) -> CalendarRepositoryResult; 22 | 23 | /// Lists all calendars for a specific user 24 | async fn list_calendars_by_owner(&self, owner_id: &str) -> CalendarRepositoryResult>; 25 | 26 | /// Finds a calendar by name and owner 27 | async fn find_calendar_by_name_and_owner(&self, name: &str, owner_id: &str) -> CalendarRepositoryResult; 28 | 29 | /// Lists calendars shared with a specific user 30 | async fn list_calendars_shared_with_user(&self, user_id: &str) -> CalendarRepositoryResult>; 31 | 32 | /// List public calendars 33 | async fn list_public_calendars(&self, limit: i64, offset: i64) -> CalendarRepositoryResult>; 34 | 35 | /// Checks if a user has access to a calendar 36 | async fn user_has_calendar_access(&self, calendar_id: &Uuid, user_id: &str) -> CalendarRepositoryResult; 37 | 38 | /// Gets a custom property for a calendar 39 | async fn get_calendar_property(&self, calendar_id: &Uuid, property_name: &str) -> CalendarRepositoryResult>; 40 | 41 | /// Sets a custom property for a calendar 42 | async fn set_calendar_property(&self, calendar_id: &Uuid, property_name: &str, property_value: &str) -> CalendarRepositoryResult<()>; 43 | 44 | /// Removes a custom property from a calendar 45 | async fn remove_calendar_property(&self, calendar_id: &Uuid, property_name: &str) -> CalendarRepositoryResult<()>; 46 | 47 | /// Gets all custom properties for a calendar 48 | async fn get_calendar_properties(&self, calendar_id: &Uuid) -> CalendarRepositoryResult>; 49 | 50 | /// Share calendar with another user 51 | async fn share_calendar(&self, calendar_id: &Uuid, user_id: &str, access_level: &str) -> 
CalendarRepositoryResult<()>; 52 | 53 | /// Remove calendar sharing for a user 54 | async fn remove_calendar_sharing(&self, calendar_id: &Uuid, user_id: &str) -> CalendarRepositoryResult<()>; 55 | 56 | /// Get calendar sharing information (who has access to this calendar) 57 | async fn get_calendar_shares(&self, calendar_id: &Uuid) -> CalendarRepositoryResult>; 58 | } -------------------------------------------------------------------------------- /src/domain/repositories/contact_repository.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use sqlx::types::Uuid; 3 | use std::result::Result; 4 | 5 | use crate::common::errors::DomainError; 6 | use crate::domain::entities::contact::{Contact, ContactGroup}; 7 | 8 | pub type ContactRepositoryResult = Result; 9 | 10 | #[async_trait] 11 | pub trait ContactRepository: Send + Sync + 'static { 12 | async fn create_contact(&self, contact: Contact) -> ContactRepositoryResult; 13 | async fn update_contact(&self, contact: Contact) -> ContactRepositoryResult; 14 | async fn delete_contact(&self, id: &Uuid) -> ContactRepositoryResult<()>; 15 | async fn get_contact_by_id(&self, id: &Uuid) -> ContactRepositoryResult>; 16 | async fn get_contact_by_uid(&self, address_book_id: &Uuid, uid: &str) -> ContactRepositoryResult>; 17 | async fn get_contacts_by_address_book(&self, address_book_id: &Uuid) -> ContactRepositoryResult>; 18 | async fn get_contacts_by_email(&self, email: &str) -> ContactRepositoryResult>; 19 | async fn get_contacts_by_group(&self, group_id: &Uuid) -> ContactRepositoryResult>; 20 | async fn search_contacts(&self, address_book_id: &Uuid, query: &str) -> ContactRepositoryResult>; 21 | } 22 | 23 | #[async_trait] 24 | pub trait ContactGroupRepository: Send + Sync + 'static { 25 | async fn create_group(&self, group: ContactGroup) -> ContactRepositoryResult; 26 | async fn update_group(&self, group: ContactGroup) -> ContactRepositoryResult; 27 | async fn 
delete_group(&self, id: &Uuid) -> ContactRepositoryResult<()>;
    async fn get_group_by_id(&self, id: &Uuid) -> ContactRepositoryResult<Option<ContactGroup>>;
    async fn get_groups_by_address_book(&self, address_book_id: &Uuid) -> ContactRepositoryResult<Vec<ContactGroup>>;
    async fn add_contact_to_group(&self, group_id: &Uuid, contact_id: &Uuid) -> ContactRepositoryResult<()>;
    async fn remove_contact_from_group(&self, group_id: &Uuid, contact_id: &Uuid) -> ContactRepositoryResult<()>;
    async fn get_contacts_in_group(&self, group_id: &Uuid) -> ContactRepositoryResult<Vec<Contact>>;
    async fn get_groups_for_contact(&self, contact_id: &Uuid) -> ContactRepositoryResult<Vec<ContactGroup>>;
}
--------------------------------------------------------------------------------
/src/domain/repositories/folder_repository.rs:
--------------------------------------------------------------------------------
use async_trait::async_trait;
use crate::domain::entities::folder::Folder;
use crate::domain::services::path_service::StoragePath;
use crate::common::errors::DomainError;

/// Error types for folder repository operations
#[derive(Debug, thiserror::Error)]
#[allow(dead_code)]
pub enum FolderRepositoryError {
    #[error("Folder not found: {0}")]
    NotFound(String),

    #[error("Folder already exists: {0}")]
    AlreadyExists(String),

    #[error("Invalid folder path: {0}")]
    InvalidPath(String),

    #[error("Operation not supported: {0}")]
    OperationNotSupported(String),

    #[error("IO Error: {0}")]
    IoError(#[from] std::io::Error),

    #[error("Mapping error: {0}")]
    MappingError(String),

    #[error("Validation error: {0}")]
    ValidationError(String),

    #[error("Domain error: {0}")]
    DomainError(#[from] DomainError),

    #[error("Other error: {0}")]
    Other(String),
}

/// Result type for folder repository operations
pub type FolderRepositoryResult<T> = Result<T, FolderRepositoryError>;

/// Repository interface for folder operations (primary port)
#[async_trait]
pub trait FolderRepository: Send + Sync + 'static {
    /// Creates a new folder
    // NOTE(review): parent_id's inner type was lost in extraction — String assumed, matching the &str-based methods below; confirm.
    async fn create_folder(&self, name: String, parent_id: Option<String>) -> FolderRepositoryResult<Folder>;

    /// Gets a folder by its ID
    async fn get_folder_by_id(&self, id: &str) -> FolderRepositoryResult<Folder>;

    /// Gets a folder by its path
    async fn get_folder_by_storage_path(&self, storage_path: &StoragePath) -> FolderRepositoryResult<Folder>;

    /// Lists all folders in a parent folder (use with caution for large directories)
    async fn list_folders(&self, parent_id: Option<&str>) -> FolderRepositoryResult<Vec<Folder>>;

    /// Lists folders in a parent folder with pagination support
    ///
    /// * `parent_id` - Optional parent folder ID
    /// * `offset` - Number of folders to skip
    /// * `limit` - Maximum number of folders to return
    /// * `include_total` - If true, returns the total count of folders as well
    async fn list_folders_paginated(
        &self,
        parent_id: Option<&str>,
        offset: usize,
        limit: usize,
        include_total: bool
    ) -> FolderRepositoryResult<(Vec<Folder>, Option<usize>)>;

    /// Renames a folder
    async fn rename_folder(&self, id: &str, new_name: String) -> FolderRepositoryResult<Folder>;

    /// Moves a folder to a new parent
    async fn move_folder(&self, id: &str, new_parent_id: Option<&str>) -> FolderRepositoryResult<Folder>;

    /// Deletes a folder
    async fn delete_folder(&self, id: &str) -> FolderRepositoryResult<()>;

    /// Checks if a folder exists at the given path
    async fn folder_exists_at_storage_path(&self, storage_path: &StoragePath) -> FolderRepositoryResult<bool>;

    /// Gets the storage path for a folder
    async fn get_folder_storage_path(&self, id: &str) -> FolderRepositoryResult<StoragePath>;

    /// Legacy method - checks if a folder exists at the given PathBuf path
    #[deprecated(note = "Use
folder_exists_at_storage_path instead")] 87 | #[allow(dead_code)] 88 | async fn folder_exists(&self, path: &std::path::PathBuf) -> FolderRepositoryResult; 89 | 90 | /// Legacy method - gets a folder by its PathBuf path 91 | #[deprecated(note = "Use get_folder_by_storage_path instead")] 92 | #[allow(dead_code)] 93 | async fn get_folder_by_path(&self, path: &std::path::PathBuf) -> FolderRepositoryResult; 94 | 95 | /// Moves a folder to trash 96 | async fn move_to_trash(&self, folder_id: &str) -> FolderRepositoryResult<()>; 97 | 98 | /// Restores a folder from trash 99 | async fn restore_from_trash(&self, folder_id: &str, original_path: &str) -> FolderRepositoryResult<()>; 100 | 101 | /// Permanently deletes a folder (used for trash cleanup) 102 | async fn delete_folder_permanently(&self, folder_id: &str) -> FolderRepositoryResult<()>; 103 | } -------------------------------------------------------------------------------- /src/domain/repositories/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod address_book_repository; 2 | pub mod calendar_repository; 3 | pub mod calendar_event_repository; 4 | pub mod contact_repository; 5 | pub mod file_repository; 6 | pub mod folder_repository; 7 | pub mod session_repository; 8 | pub mod share_repository; 9 | pub mod trash_repository; 10 | pub mod user_repository; -------------------------------------------------------------------------------- /src/domain/repositories/session_repository.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use crate::domain::entities::session::Session; 3 | use crate::common::errors::DomainError; 4 | 5 | #[derive(Debug, thiserror::Error)] 6 | pub enum SessionRepositoryError { 7 | #[error("Sesión no encontrada: {0}")] 8 | NotFound(String), 9 | 10 | #[error("Error de base de datos: {0}")] 11 | DatabaseError(String), 12 | 13 | #[error("Error de tiempo de espera: {0}")] 14 | 
Timeout(String), 15 | } 16 | 17 | pub type SessionRepositoryResult = Result; 18 | 19 | // Conversión de SessionRepositoryError a DomainError 20 | impl From for DomainError { 21 | fn from(err: SessionRepositoryError) -> Self { 22 | match err { 23 | SessionRepositoryError::NotFound(msg) => { 24 | DomainError::not_found("Session", msg) 25 | }, 26 | SessionRepositoryError::DatabaseError(msg) => { 27 | DomainError::internal_error("Database", msg) 28 | }, 29 | SessionRepositoryError::Timeout(msg) => { 30 | DomainError::timeout("Database", msg) 31 | }, 32 | } 33 | } 34 | } 35 | 36 | #[async_trait] 37 | pub trait SessionRepository: Send + Sync + 'static { 38 | /// Crea una nueva sesión 39 | async fn create_session(&self, session: Session) -> SessionRepositoryResult; 40 | 41 | /// Obtiene una sesión por ID 42 | async fn get_session_by_id(&self, id: &str) -> SessionRepositoryResult; 43 | 44 | /// Obtiene una sesión por token de actualización 45 | async fn get_session_by_refresh_token(&self, refresh_token: &str) -> SessionRepositoryResult; 46 | 47 | /// Obtiene todas las sesiones de un usuario 48 | async fn get_sessions_by_user_id(&self, user_id: &str) -> SessionRepositoryResult>; 49 | 50 | /// Revoca una sesión específica 51 | async fn revoke_session(&self, session_id: &str) -> SessionRepositoryResult<()>; 52 | 53 | /// Revoca todas las sesiones de un usuario 54 | async fn revoke_all_user_sessions(&self, user_id: &str) -> SessionRepositoryResult; 55 | 56 | /// Elimina sesiones expiradas 57 | async fn delete_expired_sessions(&self) -> SessionRepositoryResult; 58 | } -------------------------------------------------------------------------------- /src/domain/repositories/share_repository.rs: -------------------------------------------------------------------------------- 1 | 2 | use async_trait::async_trait; 3 | use thiserror::Error; 4 | 5 | use crate::domain::{ 6 | entities::share::{Share, ShareItemType}, 7 | repositories::user_repository::UserRepositoryError, 8 | }; 9 | 10 | 
#[derive(Debug, Error)] 11 | pub enum ShareRepositoryError { 12 | #[error("Share not found: {0}")] 13 | NotFound(String), 14 | #[error("Item not found: {0}")] 15 | ItemNotFound(String), 16 | #[error("Storage error: {0}")] 17 | StorageError(String), 18 | #[error("User repository error: {0}")] 19 | UserRepository(#[from] UserRepositoryError), 20 | #[error("Share already exists: {0}")] 21 | AlreadyExists(String), 22 | } 23 | 24 | #[async_trait] 25 | pub trait ShareRepository: Send + Sync + 'static { 26 | /// Save a new share or update an existing one 27 | async fn save(&self, share: &Share) -> Result; 28 | 29 | /// Find a share by its ID 30 | async fn find_by_id(&self, id: &str) -> Result; 31 | 32 | /// Find a share by its token 33 | async fn find_by_token(&self, token: &str) -> Result; 34 | 35 | /// Find all shares for a specific item 36 | async fn find_by_item(&self, item_id: &str, item_type: &ShareItemType) -> Result, ShareRepositoryError>; 37 | 38 | /// Delete a share by its ID 39 | async fn delete(&self, id: &str) -> Result<(), ShareRepositoryError>; 40 | 41 | /// Find all shares created by a specific user 42 | async fn find_by_user(&self, user_id: &str) -> Result, ShareRepositoryError>; 43 | 44 | /// Find all shares (admin operation) 45 | async fn find_all(&self) -> Result, ShareRepositoryError>; 46 | } 47 | -------------------------------------------------------------------------------- /src/domain/repositories/trash_repository.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use uuid::Uuid; 3 | 4 | use crate::domain::entities::trashed_item::TrashedItem; 5 | use crate::common::errors::Result; 6 | 7 | #[async_trait] 8 | pub trait TrashRepository: Send + Sync { 9 | async fn add_to_trash(&self, item: &TrashedItem) -> Result<()>; 10 | async fn get_trash_items(&self, user_id: &Uuid) -> Result>; 11 | async fn get_trash_item(&self, id: &Uuid, user_id: &Uuid) -> Result>; 12 | async fn 
restore_from_trash(&self, id: &Uuid, user_id: &Uuid) -> Result<()>; 13 | async fn delete_permanently(&self, id: &Uuid, user_id: &Uuid) -> Result<()>; 14 | async fn clear_trash(&self, user_id: &Uuid) -> Result<()>; 15 | async fn get_expired_items(&self) -> Result>; 16 | } -------------------------------------------------------------------------------- /src/domain/repositories/user_repository.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use crate::domain::entities::user::{User, UserRole}; 3 | use crate::common::errors::DomainError; 4 | 5 | #[derive(Debug, thiserror::Error)] 6 | pub enum UserRepositoryError { 7 | #[error("Usuario no encontrado: {0}")] 8 | NotFound(String), 9 | 10 | #[error("Usuario ya existe: {0}")] 11 | AlreadyExists(String), 12 | 13 | #[error("Error de base de datos: {0}")] 14 | DatabaseError(String), 15 | 16 | #[error("Error de validación: {0}")] 17 | ValidationError(String), 18 | 19 | #[error("Error de tiempo de espera: {0}")] 20 | Timeout(String), 21 | 22 | #[error("Operación no permitida: {0}")] 23 | OperationNotAllowed(String), 24 | } 25 | 26 | pub type UserRepositoryResult = Result; 27 | 28 | // Conversión de UserRepositoryError a DomainError 29 | impl From for DomainError { 30 | fn from(err: UserRepositoryError) -> Self { 31 | match err { 32 | UserRepositoryError::NotFound(msg) => { 33 | DomainError::not_found("User", msg) 34 | }, 35 | UserRepositoryError::AlreadyExists(msg) => { 36 | DomainError::already_exists("User", msg) 37 | }, 38 | UserRepositoryError::DatabaseError(msg) => { 39 | DomainError::internal_error("Database", msg) 40 | }, 41 | UserRepositoryError::ValidationError(msg) => { 42 | DomainError::validation_error(msg) 43 | }, 44 | UserRepositoryError::Timeout(msg) => { 45 | DomainError::timeout("Database", msg) 46 | }, 47 | UserRepositoryError::OperationNotAllowed(msg) => { 48 | DomainError::access_denied("User", msg) 49 | }, 50 | } 51 | } 52 | } 53 | 54 
| #[async_trait] 55 | pub trait UserRepository: Send + Sync + 'static { 56 | /// Crea un nuevo usuario 57 | async fn create_user(&self, user: User) -> UserRepositoryResult; 58 | 59 | /// Obtiene un usuario por ID 60 | async fn get_user_by_id(&self, id: &str) -> UserRepositoryResult; 61 | 62 | /// Obtiene un usuario por nombre de usuario 63 | async fn get_user_by_username(&self, username: &str) -> UserRepositoryResult; 64 | 65 | /// Obtiene un usuario por correo electrónico 66 | async fn get_user_by_email(&self, email: &str) -> UserRepositoryResult; 67 | 68 | /// Actualiza un usuario existente 69 | async fn update_user(&self, user: User) -> UserRepositoryResult; 70 | 71 | /// Actualiza solo el uso de almacenamiento de un usuario 72 | async fn update_storage_usage(&self, user_id: &str, usage_bytes: i64) -> UserRepositoryResult<()>; 73 | 74 | /// Actualiza la fecha de último inicio de sesión 75 | async fn update_last_login(&self, user_id: &str) -> UserRepositoryResult<()>; 76 | 77 | /// Lista usuarios con paginación 78 | async fn list_users(&self, limit: i64, offset: i64) -> UserRepositoryResult>; 79 | 80 | /// Activa o desactiva un usuario 81 | async fn set_user_active_status(&self, user_id: &str, active: bool) -> UserRepositoryResult<()>; 82 | 83 | /// Cambia la contraseña de un usuario 84 | async fn change_password(&self, user_id: &str, password_hash: &str) -> UserRepositoryResult<()>; 85 | 86 | /// Cambia el rol de un usuario 87 | async fn change_role(&self, user_id: &str, role: UserRole) -> UserRepositoryResult<()>; 88 | 89 | /// Lista usuarios por rol (admin o user) 90 | async fn list_users_by_role(&self, role: &str) -> UserRepositoryResult>; 91 | 92 | /// Elimina un usuario 93 | async fn delete_user(&self, user_id: &str) -> UserRepositoryResult<()>; 94 | } -------------------------------------------------------------------------------- /src/domain/services/i18n_service.rs: -------------------------------------------------------------------------------- 1 | use 
async_trait::async_trait; 2 | use thiserror::Error; 3 | 4 | /// Error types for i18n service operations 5 | #[derive(Debug, Error)] 6 | pub enum I18nError { 7 | #[error("Translation key not found: {0}")] 8 | KeyNotFound(String), 9 | 10 | #[error("Invalid locale: {0}")] 11 | InvalidLocale(String), 12 | 13 | #[error("Error loading translations: {0}")] 14 | LoadError(String), 15 | } 16 | 17 | /// Result type for i18n service operations 18 | pub type I18nResult = Result; 19 | 20 | /// Supported locales 21 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] 22 | pub enum Locale { 23 | English, 24 | Spanish, 25 | } 26 | 27 | impl Locale { 28 | /// Convert locale to code string 29 | pub fn as_str(&self) -> &'static str { 30 | match self { 31 | Locale::English => "en", 32 | Locale::Spanish => "es", 33 | } 34 | } 35 | 36 | /// Create from locale code string 37 | pub fn from_str(code: &str) -> Option { 38 | match code.to_lowercase().as_str() { 39 | "en" => Some(Locale::English), 40 | "es" => Some(Locale::Spanish), 41 | _ => None, 42 | } 43 | } 44 | 45 | /// Get default locale 46 | pub fn default() -> Self { 47 | Locale::English 48 | } 49 | } 50 | 51 | /// Interface for i18n service (primary port) 52 | #[async_trait] 53 | pub trait I18nService: Send + Sync + 'static { 54 | /// Get a translation for a key and locale 55 | async fn translate(&self, key: &str, locale: Locale) -> I18nResult; 56 | 57 | /// Load translations for a locale 58 | async fn load_translations(&self, locale: Locale) -> I18nResult<()>; 59 | 60 | /// Get available locales 61 | async fn available_locales(&self) -> Vec; 62 | 63 | /// Check if a locale is supported 64 | #[allow(dead_code)] 65 | async fn is_supported(&self, locale: Locale) -> bool; 66 | } -------------------------------------------------------------------------------- /src/domain/services/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod i18n_service; 2 | pub mod path_service; 3 | pub mod 
auth_service; -------------------------------------------------------------------------------- /src/infrastructure/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod repositories; 2 | pub mod services; 3 | 4 | -------------------------------------------------------------------------------- /src/infrastructure/repositories/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod file_fs_repository; 2 | pub mod folder_fs_repository; 3 | pub mod parallel_file_processor; 4 | 5 | // Nuevos repositorios refactorizados 6 | pub mod file_metadata_manager; 7 | pub mod file_path_resolver; 8 | pub mod file_fs_read_repository; 9 | pub mod file_fs_write_repository; 10 | pub mod trash_fs_repository; 11 | pub mod file_fs_repository_trash; 12 | pub mod folder_fs_repository_trash; 13 | pub mod share_fs_repository; 14 | 15 | // Repositorios PostgreSQL 16 | pub mod pg; 17 | 18 | // Re-exportar para facilitar acceso 19 | pub use file_metadata_manager::FileMetadataManager; 20 | pub use file_path_resolver::FilePathResolver; 21 | pub use file_fs_read_repository::FileFsReadRepository; 22 | pub use file_fs_write_repository::FileFsWriteRepository; 23 | pub use pg::{UserPgRepository, SessionPgRepository}; 24 | -------------------------------------------------------------------------------- /src/infrastructure/repositories/pg/mod.rs: -------------------------------------------------------------------------------- 1 | mod address_book_pg_repository; 2 | mod calendar_pg_repository; 3 | mod calendar_event_pg_repository; 4 | mod contact_pg_repository; 5 | mod contact_group_pg_repository; 6 | mod session_pg_repository; 7 | mod transaction_utils; 8 | mod user_pg_repository; 9 | 10 | pub use address_book_pg_repository::AddressBookPgRepository; 11 | pub use calendar_pg_repository::CalendarPgRepository; 12 | pub use calendar_event_pg_repository::CalendarEventPgRepository; 13 | pub use 
contact_pg_repository::ContactPgRepository; 14 | pub use contact_group_pg_repository::ContactGroupPgRepository; 15 | pub use session_pg_repository::SessionPgRepository; 16 | pub use user_pg_repository::UserPgRepository; 17 | -------------------------------------------------------------------------------- /src/infrastructure/services/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod file_system_i18n_service; 2 | pub mod file_system_utils; 3 | pub mod id_mapping_service; 4 | pub mod id_mapping_optimizer; 5 | pub mod cache_manager; 6 | pub mod file_metadata_cache; 7 | pub mod compression_service; 8 | pub mod buffer_pool; 9 | pub mod trash_cleanup_service; 10 | pub mod zip_service; -------------------------------------------------------------------------------- /src/infrastructure/services/trash_cleanup_service.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use std::time::Duration; 3 | use tokio::time; 4 | use tracing::{debug, error, info, instrument}; 5 | 6 | use crate::common::errors::Result; 7 | use crate::domain::repositories::trash_repository::TrashRepository; 8 | use crate::application::ports::trash_ports::TrashUseCase; 9 | 10 | /// Servicio para la limpieza automática de elementos expirados en la papelera 11 | pub struct TrashCleanupService { 12 | trash_service: Arc, 13 | trash_repository: Arc, 14 | cleanup_interval_hours: u64, 15 | } 16 | 17 | impl TrashCleanupService { 18 | pub fn new( 19 | trash_service: Arc, 20 | trash_repository: Arc, 21 | cleanup_interval_hours: u64, 22 | ) -> Self { 23 | Self { 24 | trash_service, 25 | trash_repository, 26 | cleanup_interval_hours: cleanup_interval_hours.max(1), // Mínimo 1 hora 27 | } 28 | } 29 | 30 | /// Inicia el trabajo de limpieza periódica 31 | #[instrument(skip(self))] 32 | pub async fn start_cleanup_job(&self) { 33 | let trash_repository = self.trash_repository.clone(); 34 | let trash_service = 
self.trash_service.clone(); 35 | let interval_hours = self.cleanup_interval_hours; 36 | 37 | info!("Iniciando trabajo de limpieza de papelera con intervalo de {} horas", interval_hours); 38 | 39 | tokio::spawn(async move { 40 | let interval_duration = Duration::from_secs(interval_hours * 60 * 60); 41 | let mut interval = time::interval(interval_duration); 42 | 43 | // Primera ejecución inmediata 44 | Self::cleanup_expired_items(trash_repository.clone(), trash_service.clone()).await 45 | .unwrap_or_else(|e| error!("Error en la limpieza inicial de la papelera: {:?}", e)); 46 | 47 | loop { 48 | interval.tick().await; 49 | debug!("Ejecutando tarea programada de limpieza de papelera"); 50 | 51 | if let Err(e) = Self::cleanup_expired_items( 52 | trash_repository.clone(), 53 | trash_service.clone() 54 | ).await { 55 | error!("Error en la limpieza programada de la papelera: {:?}", e); 56 | } 57 | } 58 | }); 59 | } 60 | 61 | /// Limpia los elementos expirados en la papelera 62 | #[instrument(skip(trash_repository, trash_service))] 63 | async fn cleanup_expired_items( 64 | trash_repository: Arc, 65 | trash_service: Arc, 66 | ) -> Result<()> { 67 | debug!("Comenzando limpieza de elementos expirados en la papelera"); 68 | 69 | // Obtener todos los elementos expirados 70 | let expired_items = trash_repository.get_expired_items().await?; 71 | 72 | if expired_items.is_empty() { 73 | debug!("No hay elementos expirados para limpiar"); 74 | return Ok(()); 75 | } 76 | 77 | info!("Encontrados {} elementos expirados para eliminar", expired_items.len()); 78 | 79 | // Eliminar cada elemento expirado 80 | for item in expired_items { 81 | let trash_id = item.id.to_string(); 82 | let user_id = item.user_id.to_string(); 83 | 84 | debug!("Eliminando elemento expirado: id={}, user={}", trash_id, user_id); 85 | 86 | // Si falla una eliminación, continuar con las demás 87 | if let Err(e) = trash_service.delete_permanently(&trash_id, &user_id).await { 88 | error!("Error eliminando elemento 
expirado {}: {:?}", trash_id, e); 89 | } else { 90 | debug!("Elemento expirado eliminado correctamente: {}", trash_id); 91 | } 92 | } 93 | 94 | info!("Limpieza de papelera completada"); 95 | Ok(()) 96 | } 97 | } -------------------------------------------------------------------------------- /src/interfaces/api/handlers/caldav_handler.rs: -------------------------------------------------------------------------------- 1 | use axum::{ 2 | Router, 3 | routing::get, 4 | http::StatusCode, 5 | response::IntoResponse, 6 | Json, 7 | }; 8 | use std::sync::Arc; 9 | use serde_json::json; 10 | 11 | use crate::common::di::AppState; 12 | 13 | // Temporary placeholder implementation 14 | pub fn caldav_routes() -> Router { 15 | Router::new() 16 | .route("/placeholder", get(placeholder_handler)) 17 | } 18 | 19 | async fn placeholder_handler() -> impl IntoResponse { 20 | (StatusCode::OK, Json(json!({ 21 | "message": "CalDAV functionality is not yet implemented" 22 | }))) 23 | } -------------------------------------------------------------------------------- /src/interfaces/api/handlers/caldav_handler.rs.rej: -------------------------------------------------------------------------------- 1 | --- caldav_handler.rs 2 | +++ caldav_handler.rs 3 | @@ -242,9 +242,9 @@ 4 | } 5 | }, 6 | None => { 7 | - (StatusCode::NOT_IMPLEMENTED, Json(json\!({ 8 | - "error": "Calendar service not available" 9 | - }))) 10 | + let error_dto = CalendarEventDto::default(); 11 | + error\!("Calendar service not available"); 12 | + (StatusCode::NOT_IMPLEMENTED, Json(error_dto)) 13 | } 14 | } 15 | } 16 | @@ -277,9 +277,9 @@ 17 | } 18 | }, 19 | None => { 20 | - (StatusCode::NOT_IMPLEMENTED, Json(json\!({ 21 | - "error": "Calendar service not available" 22 | - }))) 23 | + let error_dto = CalendarEventDto::default(); 24 | + error\!("Calendar service not available"); 25 | + (StatusCode::NOT_IMPLEMENTED, Json(error_dto)) 26 | } 27 | } 28 | } 29 | @@ -320,9 +320,9 @@ 30 | } 31 | }, 32 | None => { 33 | - 
(StatusCode::NOT_IMPLEMENTED, Json(json\!({ 34 | - "error": "Calendar service not available" 35 | - }))) 36 | + let error_dto = CalendarEventDto::default(); 37 | + error\!("Calendar service not available"); 38 | + (StatusCode::NOT_IMPLEMENTED, Json(error_dto)) 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /src/interfaces/api/handlers/favorites_handler.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use axum::{ 3 | extract::{Path, State}, 4 | http::StatusCode, 5 | response::IntoResponse, 6 | Json, 7 | }; 8 | use tracing::{error, info}; 9 | 10 | use crate::application::ports::favorites_ports::FavoritesUseCase; 11 | 12 | /// Handler for favorite-related API endpoints 13 | pub async fn get_favorites( 14 | State(favorites_service): State>, 15 | ) -> impl IntoResponse { 16 | // For demo purposes, we're using a fixed user ID 17 | let user_id = "00000000-0000-0000-0000-000000000000"; 18 | 19 | match favorites_service.get_favorites(user_id).await { 20 | Ok(favorites) => { 21 | info!("Retrieved {} favorites for user", favorites.len()); 22 | (StatusCode::OK, Json(serde_json::json!(favorites))).into_response() 23 | }, 24 | Err(err) => { 25 | error!("Error retrieving favorites: {}", err); 26 | ( 27 | StatusCode::INTERNAL_SERVER_ERROR, 28 | Json(serde_json::json!({ 29 | "error": format!("Failed to retrieve favorites: {}", err) 30 | })) 31 | ).into_response() 32 | } 33 | } 34 | } 35 | 36 | /// Add an item to user's favorites 37 | pub async fn add_favorite( 38 | State(favorites_service): State>, 39 | Path((item_type, item_id)): Path<(String, String)>, 40 | ) -> impl IntoResponse { 41 | // For demo purposes, we're using a fixed user ID 42 | let user_id = "00000000-0000-0000-0000-000000000000"; 43 | 44 | // Validate item_type 45 | if item_type != "file" && item_type != "folder" { 46 | return ( 47 | StatusCode::BAD_REQUEST, 48 | Json(serde_json::json!({ 49 | 
"error": "Item type must be 'file' or 'folder'" 50 | })) 51 | ); 52 | } 53 | 54 | match favorites_service.add_to_favorites(user_id, &item_id, &item_type).await { 55 | Ok(_) => { 56 | info!("Added {} '{}' to favorites", item_type, item_id); 57 | ( 58 | StatusCode::CREATED, 59 | Json(serde_json::json!({ 60 | "message": "Item added to favorites" 61 | })) 62 | ) 63 | }, 64 | Err(err) => { 65 | error!("Error adding to favorites: {}", err); 66 | ( 67 | StatusCode::INTERNAL_SERVER_ERROR, 68 | Json(serde_json::json!({ 69 | "error": format!("Failed to add to favorites: {}", err) 70 | })) 71 | ) 72 | } 73 | } 74 | } 75 | 76 | /// Remove an item from user's favorites 77 | pub async fn remove_favorite( 78 | State(favorites_service): State>, 79 | Path((item_type, item_id)): Path<(String, String)>, 80 | ) -> impl IntoResponse { 81 | // For demo purposes, we're using a fixed user ID 82 | let user_id = "00000000-0000-0000-0000-000000000000"; 83 | 84 | match favorites_service.remove_from_favorites(user_id, &item_id, &item_type).await { 85 | Ok(removed) => { 86 | if removed { 87 | info!("Removed {} '{}' from favorites", item_type, item_id); 88 | ( 89 | StatusCode::OK, 90 | Json(serde_json::json!({ 91 | "message": "Item removed from favorites" 92 | })) 93 | ) 94 | } else { 95 | info!("Item {} '{}' was not in favorites", item_type, item_id); 96 | ( 97 | StatusCode::NOT_FOUND, 98 | Json(serde_json::json!({ 99 | "message": "Item was not in favorites" 100 | })) 101 | ) 102 | } 103 | }, 104 | Err(err) => { 105 | error!("Error removing from favorites: {}", err); 106 | ( 107 | StatusCode::INTERNAL_SERVER_ERROR, 108 | Json(serde_json::json!({ 109 | "error": format!("Failed to remove from favorites: {}", err) 110 | })) 111 | ) 112 | } 113 | } 114 | } -------------------------------------------------------------------------------- /src/interfaces/api/handlers/i18n_handler.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use 
axum::{ 3 | extract::{State, Query}, 4 | http::StatusCode, 5 | response::IntoResponse, 6 | Json, 7 | }; 8 | 9 | use crate::application::services::i18n_application_service::I18nApplicationService; 10 | use crate::application::dtos::i18n_dto::{LocaleDto, TranslationRequestDto, TranslationResponseDto, TranslationErrorDto}; 11 | use crate::domain::services::i18n_service::{Locale, I18nError}; 12 | 13 | type AppState = Arc; 14 | 15 | /// Handler for i18n-related API endpoints 16 | pub struct I18nHandler; 17 | 18 | impl I18nHandler { 19 | /// Gets a list of available locales 20 | pub async fn get_locales( 21 | State(service): State, 22 | ) -> impl IntoResponse { 23 | let locales = service.available_locales().await; 24 | let locale_dtos: Vec = locales.into_iter().map(LocaleDto::from).collect(); 25 | 26 | (StatusCode::OK, Json(locale_dtos)).into_response() 27 | } 28 | 29 | /// Translates a key to the requested locale 30 | pub async fn translate( 31 | State(service): State, 32 | Query(query): Query, 33 | ) -> impl IntoResponse { 34 | let locale = match &query.locale { 35 | Some(locale_str) => { 36 | match Locale::from_str(locale_str) { 37 | Some(locale) => Some(locale), 38 | None => { 39 | let error = TranslationErrorDto { 40 | key: query.key.clone(), 41 | locale: locale_str.clone(), 42 | error: format!("Unsupported locale: {}", locale_str), 43 | }; 44 | return (StatusCode::BAD_REQUEST, Json(error)).into_response(); 45 | } 46 | } 47 | }, 48 | None => None, 49 | }; 50 | 51 | match service.translate(&query.key, locale).await { 52 | Ok(text) => { 53 | let response = TranslationResponseDto { 54 | key: query.key, 55 | locale: locale.unwrap_or(Locale::default()).as_str().to_string(), 56 | text, 57 | }; 58 | (StatusCode::OK, Json(response)).into_response() 59 | }, 60 | Err(err) => { 61 | let status = match &err { 62 | I18nError::KeyNotFound(_) => StatusCode::NOT_FOUND, 63 | I18nError::InvalidLocale(_) => StatusCode::BAD_REQUEST, 64 | I18nError::LoadError(_) => 
StatusCode::INTERNAL_SERVER_ERROR, 65 | }; 66 | 67 | let error = TranslationErrorDto { 68 | key: query.key, 69 | locale: locale.unwrap_or(Locale::default()).as_str().to_string(), 70 | error: err.to_string(), 71 | }; 72 | 73 | (status, Json(error)).into_response() 74 | } 75 | } 76 | } 77 | 78 | /// Gets all translations for a locale 79 | pub async fn get_translations( 80 | State(_service): State, 81 | locale_code: String, 82 | ) -> impl IntoResponse { 83 | let locale = match Locale::from_str(&locale_code) { 84 | Some(locale) => locale, 85 | None => { 86 | return (StatusCode::BAD_REQUEST, Json(serde_json::json!({ 87 | "error": format!("Unsupported locale: {}", locale_code) 88 | }))).into_response(); 89 | } 90 | }; 91 | 92 | // This implementation is a bit weird, as we don't have a way to get all translations 93 | // We should improve the I18nService to support this 94 | (StatusCode::OK, Json(serde_json::json!({ 95 | "locale": locale.as_str() 96 | }))).into_response() 97 | } 98 | } -------------------------------------------------------------------------------- /src/interfaces/api/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod file_handler; 2 | pub mod folder_handler; 3 | pub mod i18n_handler; 4 | pub mod batch_handler; 5 | pub mod auth_handler; 6 | pub mod trash_handler; 7 | pub mod search_handler; 8 | pub mod share_handler; 9 | pub mod favorites_handler; 10 | pub mod recent_handler; 11 | pub mod webdav_handler; 12 | pub mod caldav_handler; 13 | 14 | /// Tipo de resultado para controladores de API 15 | pub type ApiResult = Result; -------------------------------------------------------------------------------- /src/interfaces/api/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod handlers; 2 | pub mod routes; 3 | 4 | pub use routes::create_api_routes; -------------------------------------------------------------------------------- 
/src/interfaces/middleware/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cache; 2 | pub mod auth; 3 | pub mod redirect; // Add redirect middleware for API to Axum transition -------------------------------------------------------------------------------- /src/interfaces/middleware/redirect.rs: -------------------------------------------------------------------------------- 1 | use std::task::{Context, Poll}; 2 | use std::future::Future; 3 | use std::pin::Pin; 4 | use axum::{ 5 | extract::Request, 6 | response::Response, 7 | middleware::Next, 8 | }; 9 | use axum::http::{uri::PathAndQuery, Uri}; 10 | use tower::{Layer, Service}; 11 | 12 | /// A middleware that redirects specific paths to the proper Axum routes. 13 | /// This is used during the transition from the custom HTTP server to Axum. 14 | pub struct RedirectMiddleware { 15 | inner: S, 16 | } 17 | 18 | impl Service for RedirectMiddleware 19 | where 20 | S: Service + Send + 'static, 21 | S::Future: Send + 'static, 22 | { 23 | type Response = S::Response; 24 | type Error = S::Error; 25 | // `BoxFuture` is a type alias for `Pin>` 26 | type Future = Pin> + Send>>; 27 | 28 | fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { 29 | self.inner.poll_ready(cx) 30 | } 31 | 32 | fn call(&mut self, mut request: Request) -> Self::Future { 33 | // Log the incoming request 34 | let uri = request.uri().clone(); 35 | let path = uri.path().to_string(); 36 | 37 | // Check and potentially redirect file-related API routes 38 | if path.starts_with("/api/files") { 39 | // Handle file-related redirects 40 | if path == "/api/files/upload" { 41 | // This is already properly mapped in Axum routes 42 | tracing::debug!("File upload request detected: {}", path); 43 | } else if path.starts_with("/api/files/file-") { 44 | // File download request - let's adjust the URI to match the Axum route 45 | // Extract the ID from the path 46 | let file_id = &path[11..]; 47 | 
tracing::info!("Redirecting file download request: {} to /api/files/{}", path, file_id); 48 | 49 | // Create a new URI for the Axum route 50 | let uri_clone = uri.clone(); 51 | let mut parts = uri_clone.into_parts(); 52 | let query = parts.path_and_query 53 | .as_ref() 54 | .and_then(|pq| pq.query()) 55 | .map(|q| format!("?{}", q)) 56 | .unwrap_or_default(); 57 | 58 | let new_path = format!("/api/files/{}{}", file_id, query); 59 | parts.path_and_query = Some( 60 | PathAndQuery::from_maybe_shared(new_path.into_bytes()) 61 | .expect("Failed to create path and query") 62 | ); 63 | 64 | let new_uri = Uri::from_parts(parts).expect("Failed to create URI"); 65 | *request.uri_mut() = new_uri; 66 | } 67 | } else if path.starts_with("/api/folders") { 68 | // Handle folder-related redirects 69 | tracing::debug!("Folder request detected: {}", path); 70 | // We might need to add specific redirects for folder operations here 71 | } 72 | 73 | // Pass the request to the inner service 74 | let future = self.inner.call(request); 75 | 76 | Box::pin(async move { 77 | let response = future.await?; 78 | Ok(response) 79 | }) 80 | } 81 | } 82 | 83 | /// The layer that applies the RedirectMiddleware. 
84 | #[derive(Clone)] 85 | pub struct RedirectLayer; 86 | 87 | impl Layer for RedirectLayer { 88 | type Service = RedirectMiddleware; 89 | 90 | fn layer(&self, inner: S) -> Self::Service { 91 | RedirectMiddleware { inner } 92 | } 93 | } 94 | 95 | /// Axum middleware function that can be applied directly to routes 96 | pub async fn redirect_middleware( 97 | request: Request, 98 | next: Next, 99 | ) -> Response { 100 | // Get the path 101 | let path = request.uri().path().to_string(); 102 | 103 | // Process the request based on the path 104 | if path.starts_with("/api/files") || path.starts_with("/api/folders") || path.starts_with("/api/auth") { 105 | tracing::debug!("API request detected in middleware: {}", path); 106 | // Log additional information about the request 107 | if let Some(content_type) = request.headers().get("content-type") { 108 | tracing::debug!("Content-Type: {:?}", content_type); 109 | } 110 | 111 | // For debugging auth-related requests 112 | if path.starts_with("/api/auth") { 113 | tracing::info!("Auth API request: {} method: {}", path, request.method()); 114 | } 115 | } 116 | 117 | // Continue the middleware chain 118 | next.run(request).await 119 | } -------------------------------------------------------------------------------- /src/interfaces/middleware/test_cache.rs: -------------------------------------------------------------------------------- 1 | use super::cache::{HttpCache, HttpCacheLayer, start_cache_cleanup_task}; 2 | use axum::{ 3 | routing::get, 4 | Router, 5 | response::IntoResponse, 6 | Json, 7 | extract::State, 8 | }; 9 | use serde::{Serialize, Deserialize}; 10 | use std::sync::Arc; 11 | use std::time::Duration; 12 | use std::net::SocketAddr; 13 | 14 | #[derive(Clone, Debug, Serialize, Deserialize)] 15 | struct TestResponse { 16 | message: &'static str, 17 | timestamp: u64, 18 | } 19 | 20 | // Test handler for a simple GET endpoint 21 | async fn test_handler() -> impl IntoResponse { 22 | // Create a simple response with a 
timestamp 23 | let timestamp = std::time::SystemTime::now() 24 | .duration_since(std::time::UNIX_EPOCH) 25 | .unwrap() 26 | .as_secs(); 27 | 28 | // Simulate some processing time 29 | tokio::time::sleep(Duration::from_millis(50)).await; 30 | 31 | let response = TestResponse { 32 | message: "Hello, this response is cacheable!", 33 | timestamp, 34 | }; 35 | 36 | // Log the response generation 37 | tracing::info!("Generated fresh response with timestamp: {}", timestamp); 38 | 39 | Json(response) 40 | } 41 | 42 | // Run a test server with HTTP caching enabled 43 | pub async fn run_test_server() { 44 | // Initialize HTTP cache with 10 seconds TTL 45 | let http_cache = HttpCache::with_max_age(10); 46 | 47 | // Start the cleanup task 48 | start_cache_cleanup_task(http_cache.clone()); 49 | 50 | // Create a test router with the cache middleware 51 | let app = Router::new() 52 | .route("/test", get(test_handler)) 53 | .layer(HttpCacheLayer::new(http_cache)); 54 | 55 | // Bind to a test port 56 | let addr = SocketAddr::from(([127, 0, 0, 1], 8086)); 57 | tracing::info!("HTTP Cache test server listening on {}", addr); 58 | 59 | // Start the server 60 | let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); 61 | axum::serve(listener, app).await.unwrap(); 62 | } -------------------------------------------------------------------------------- /src/interfaces/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod api; 2 | pub mod web; 3 | pub mod middleware; 4 | 5 | pub use api::create_api_routes; -------------------------------------------------------------------------------- /src/interfaces/web/mod.rs: -------------------------------------------------------------------------------- 1 | use axum::{ 2 | routing::get, 3 | Router, 4 | response::Html, 5 | }; 6 | use tower_http::services::ServeDir; 7 | use crate::common::di::AppState; 8 | use crate::common::config::AppConfig; 9 | 10 | /// Creates web routes for serving static 
files 11 | pub fn create_web_routes() -> Router { 12 | // Get config to access static path 13 | let config = AppConfig::from_env(); 14 | let static_path = config.static_path.clone(); 15 | 16 | Router::new() 17 | // Add specific route for login 18 | .route("/login", get(serve_login_page)) 19 | // Serve static files 20 | .fallback_service( 21 | ServeDir::new(static_path) 22 | ) 23 | } 24 | 25 | /// Serve the login page 26 | async fn serve_login_page() -> Html<&'static str> { 27 | Html(include_str!("../../../static/login.html")) 28 | } -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // Export the project's main modules 2 | pub mod common; 3 | pub mod domain; 4 | pub mod application; 5 | pub mod infrastructure; 6 | pub mod interfaces; 7 | 8 | // Common public re-exports 9 | pub use application::services::folder_service::FolderService; 10 | pub use application::services::file_service::FileService; 11 | pub use application::services::i18n_application_service::I18nApplicationService; 12 | pub use application::services::storage_mediator::{StorageMediator, FileSystemStorageMediator}; 13 | pub use domain::services::path_service::PathService; 14 | pub use infrastructure::repositories::folder_fs_repository::FolderFsRepository; 15 | pub use infrastructure::repositories::file_fs_repository::FileFsRepository; 16 | pub use infrastructure::repositories::parallel_file_processor::ParallelFileProcessor; 17 | pub use infrastructure::services::buffer_pool::BufferPool; 18 | pub use infrastructure::services::compression_service::GzipCompressionService; -------------------------------------------------------------------------------- /static/css/auth.css: -------------------------------------------------------------------------------- 1 | /* Auth styles for OxiCloud */ 2 | .auth-container { 3 | display: flex; 4 | flex-direction: column; 5 |
align-items: center; 6 | justify-content: center; 7 | height: 100vh; 8 | width: 100%; 9 | background-color: #f5f7fa; 10 | } 11 | 12 | .auth-panel { 13 | width: 400px; 14 | max-width: 90%; 15 | margin: 0 auto; 16 | background-color: white; 17 | border-radius: 10px; 18 | box-shadow: 0 5px 20px rgba(0,0,0,0.1); 19 | padding: 30px; 20 | text-align: center; 21 | } 22 | 23 | .auth-logo { 24 | display: flex; 25 | align-items: center; 26 | justify-content: center; 27 | margin-bottom: 20px; 28 | } 29 | 30 | .auth-logo-icon { 31 | width: 50px; 32 | height: 50px; 33 | background-color: #ff5e3a; 34 | border-radius: 50%; 35 | display: flex; 36 | align-items: center; 37 | justify-content: center; 38 | margin-right: 10px; 39 | } 40 | 41 | .auth-logo-icon svg { 42 | width: 30px; 43 | height: 30px; 44 | fill: white; 45 | } 46 | 47 | .auth-logo-text { 48 | font-size: 24px; 49 | font-weight: bold; 50 | color: #2a3042; 51 | } 52 | 53 | .auth-title { 54 | font-size: 20px; 55 | font-weight: bold; 56 | margin-bottom: 25px; 57 | color: #2a3042; 58 | } 59 | 60 | .auth-form { 61 | width: 100%; 62 | text-align: left; 63 | } 64 | 65 | .auth-input-group { 66 | margin-bottom: 20px; 67 | } 68 | 69 | .auth-label { 70 | display: block; 71 | margin-bottom: 8px; 72 | font-size: 14px; 73 | color: #4b5563; 74 | font-weight: 500; 75 | } 76 | 77 | .auth-input { 78 | width: 100%; 79 | padding: 12px 15px; 80 | border-radius: 8px; 81 | border: 1px solid #e2e8f0; 82 | font-size: 14px; 83 | background-color: #f9fafb; 84 | transition: border-color 0.2s; 85 | } 86 | 87 | .auth-input:focus { 88 | outline: none; 89 | border-color: #ff5e3a; 90 | box-shadow: 0 0 0 3px rgba(255, 94, 58, 0.1); 91 | } 92 | 93 | .auth-button { 94 | width: 100%; 95 | padding: 12px 15px; 96 | border-radius: 8px; 97 | background-color: #ff5e3a; 98 | color: white; 99 | font-weight: bold; 100 | border: none; 101 | cursor: pointer; 102 | font-size: 16px; 103 | transition: background-color 0.2s; 104 | margin-top: 10px; 105 | } 106 | 107 | 
.auth-button:hover { 108 | background-color: #e64a2e; 109 | } 110 | 111 | .auth-button:disabled { 112 | background-color: #f9a799; 113 | cursor: not-allowed; 114 | } 115 | 116 | .auth-toggle { 117 | margin-top: 20px; 118 | font-size: 14px; 119 | color: #718096; 120 | } 121 | 122 | .auth-toggle-link { 123 | color: #ff5e3a; 124 | cursor: pointer; 125 | text-decoration: none; 126 | font-weight: 500; 127 | } 128 | 129 | .auth-toggle-link:hover { 130 | text-decoration: underline; 131 | } 132 | 133 | .auth-error { 134 | background-color: #fee2e2; 135 | color: #b91c1c; 136 | padding: 10px 15px; 137 | border-radius: 8px; 138 | margin-bottom: 20px; 139 | font-size: 14px; 140 | display: none; 141 | } 142 | 143 | .auth-success { 144 | background-color: #dcfce7; 145 | color: #15803d; 146 | padding: 10px 15px; 147 | border-radius: 8px; 148 | margin-bottom: 20px; 149 | font-size: 14px; 150 | display: none; 151 | } 152 | 153 | /* Admin setup panel styles */ 154 | .admin-setup-panel { 155 | display: none; 156 | } 157 | 158 | .setup-steps { 159 | margin-bottom: 25px; 160 | display: flex; 161 | justify-content: space-between; 162 | } 163 | 164 | .setup-step { 165 | display: flex; 166 | flex-direction: column; 167 | align-items: center; 168 | width: 30%; 169 | } 170 | 171 | .step-number { 172 | width: 30px; 173 | height: 30px; 174 | background-color: #e2e8f0; 175 | border-radius: 50%; 176 | display: flex; 177 | align-items: center; 178 | justify-content: center; 179 | color: #64748b; 180 | font-weight: bold; 181 | margin-bottom: 5px; 182 | } 183 | 184 | .step-number.active { 185 | background-color: #ff5e3a; 186 | color: white; 187 | } 188 | 189 | .step-title { 190 | font-size: 12px; 191 | color: #64748b; 192 | } 193 | 194 | .step-title.active { 195 | color: #1e293b; 196 | font-weight: 500; 197 | } 198 | 199 | @media (max-width: 480px) { 200 | .auth-panel { 201 | width: 90%; 202 | padding: 20px; 203 | } 204 | } 205 | 
-------------------------------------------------------------------------------- /static/css/favorites.css: -------------------------------------------------------------------------------- 1 | /* Estilos para la funcionalidad de favoritos */ 2 | 3 | /* Indicador de favorito */ 4 | .favorite-indicator { 5 | position: absolute; 6 | top: 10px; 7 | right: 10px; 8 | width: 25px; 9 | height: 25px; 10 | display: flex; 11 | align-items: center; 12 | justify-content: center; 13 | border-radius: 50%; 14 | color: #ccc; 15 | cursor: pointer; 16 | z-index: 5; 17 | transition: all 0.2s ease; 18 | } 19 | 20 | .favorite-indicator:hover { 21 | transform: scale(1.1); 22 | } 23 | 24 | .favorite-indicator.active { 25 | color: #ffc107; 26 | text-shadow: 0 0 5px rgba(255, 193, 7, 0.5); 27 | } 28 | 29 | /* Estilos para los elementos de la vista de favoritos */ 30 | .favorite-item { 31 | position: relative; 32 | } 33 | 34 | /* Ajustes para la vista de cuadrícula */ 35 | .file-card.favorite-item { 36 | border-left: 3px solid #ffc107; 37 | } 38 | 39 | /* Ajustes para la vista de lista */ 40 | .file-item.favorite-item { 41 | position: relative; 42 | grid-template-columns: 30px minmax(200px, 2fr) 1fr 1fr 120px; 43 | } 44 | 45 | .file-item.favorite-item .favorite-indicator { 46 | position: relative; 47 | top: 0; 48 | right: 0; 49 | width: 30px; 50 | height: 30px; 51 | } 52 | 53 | /* Estilos para el estado vacío específico de favoritos */ 54 | .favorites-empty-state { 55 | display: flex; 56 | flex-direction: column; 57 | align-items: center; 58 | justify-content: center; 59 | padding: 50px 20px; 60 | text-align: center; 61 | color: #6c757d; 62 | } 63 | 64 | .favorites-empty-state i { 65 | font-size: 48px; 66 | color: #ffc107; 67 | margin-bottom: 20px; 68 | opacity: 0.6; 69 | } 70 | 71 | .favorites-empty-state p { 72 | margin-bottom: 10px; 73 | max-width: 400px; 74 | } 75 | 76 | /* Animación para la estrella de favorito */ 77 | @keyframes favorite-pulse { 78 | 0% { transform: scale(1); } 79 | 
50% { transform: scale(1.2); } 80 | 100% { transform: scale(1); } 81 | } 82 | 83 | .favorite-indicator.active { 84 | animation: favorite-pulse 0.3s ease; 85 | } -------------------------------------------------------------------------------- /static/css/fileViewer.css: -------------------------------------------------------------------------------- 1 | /* OxiCloud File Viewer Styles */ 2 | 3 | .file-viewer-container { 4 | position: fixed; 5 | top: 0; 6 | left: 0; 7 | width: 100%; 8 | height: 100%; 9 | background-color: rgba(0, 0, 0, 0.85); 10 | z-index: 3000; 11 | display: none !important; 12 | opacity: 0; 13 | transition: opacity 0.3s ease; 14 | justify-content: center; 15 | align-items: center; 16 | } 17 | 18 | .file-viewer-container.active { 19 | display: flex !important; 20 | opacity: 1; 21 | } 22 | 23 | .file-viewer-content { 24 | width: 90%; 25 | height: 90%; 26 | background-color: #fff; 27 | border-radius: 8px; 28 | overflow: hidden; 29 | display: flex; 30 | flex-direction: column; 31 | box-shadow: 0 10px 30px rgba(0,0,0,0.2); 32 | max-width: 1200px; 33 | } 34 | 35 | .file-viewer-header { 36 | display: flex; 37 | justify-content: space-between; 38 | align-items: center; 39 | padding: 15px 20px; 40 | background-color: #f8f9fa; 41 | border-bottom: 1px solid #e2e8f0; 42 | } 43 | 44 | .file-viewer-title { 45 | font-size: 18px; 46 | font-weight: 500; 47 | color: #2d3748; 48 | white-space: nowrap; 49 | overflow: hidden; 50 | text-overflow: ellipsis; 51 | } 52 | 53 | .file-viewer-close { 54 | background: none; 55 | border: none; 56 | font-size: 18px; 57 | cursor: pointer; 58 | color: #718096; 59 | width: 36px; 60 | height: 36px; 61 | border-radius: 50%; 62 | display: flex; 63 | align-items: center; 64 | justify-content: center; 65 | transition: background-color 0.2s; 66 | } 67 | 68 | .file-viewer-close:hover { 69 | background-color: #e2e8f0; 70 | color: #4a5568; 71 | } 72 | 73 | .file-viewer-area { 74 | flex-grow: 1; 75 | overflow: auto; 76 | background-color: 
#f8f9fa; 77 | display: flex; 78 | align-items: center; 79 | justify-content: center; 80 | position: relative; 81 | } 82 | 83 | .file-viewer-toolbar { 84 | display: flex; 85 | gap: 10px; 86 | padding: 10px 20px; 87 | background-color: #f8f9fa; 88 | border-top: 1px solid #e2e8f0; 89 | justify-content: flex-end; 90 | } 91 | 92 | .file-viewer-toolbar button { 93 | background: none; 94 | border: 1px solid #e2e8f0; 95 | border-radius: 4px; 96 | padding: 8px; 97 | cursor: pointer; 98 | display: flex; 99 | align-items: center; 100 | justify-content: center; 101 | color: #4a5568; 102 | transition: all 0.2s; 103 | } 104 | 105 | .file-viewer-toolbar button:hover { 106 | background-color: #e2e8f0; 107 | color: #2d3748; 108 | } 109 | 110 | /* Image viewer styles */ 111 | .file-viewer-image { 112 | max-width: 100%; 113 | max-height: 100%; 114 | object-fit: contain; 115 | transform-origin: center; 116 | transition: transform 0.2s ease; 117 | } 118 | 119 | /* PDF viewer styles */ 120 | .file-viewer-pdf { 121 | width: 100%; 122 | height: 100%; 123 | border: none; 124 | } 125 | 126 | /* Loader */ 127 | .file-viewer-loader { 128 | position: absolute; 129 | top: 50%; 130 | left: 50%; 131 | transform: translate(-50%, -50%); 132 | font-size: 24px; 133 | color: #4a5568; 134 | } 135 | 136 | /* Unsupported file message */ 137 | .file-viewer-unsupported { 138 | text-align: center; 139 | padding: 40px; 140 | max-width: 500px; 141 | } 142 | 143 | .file-viewer-unsupported i { 144 | font-size: 48px; 145 | color: #a0aec0; 146 | margin-bottom: 20px; 147 | } 148 | 149 | .file-viewer-unsupported p { 150 | margin-bottom: 20px; 151 | color: #4a5568; 152 | font-size: 16px; 153 | } 154 | 155 | .file-viewer-unsupported .download-btn { 156 | margin-top: 20px; 157 | } 158 | 159 | /* Responsive styles */ 160 | @media (max-width: 768px) { 161 | .file-viewer-content { 162 | width: 100%; 163 | height: 100%; 164 | border-radius: 0; 165 | } 166 | 167 | .file-viewer-toolbar { 168 | padding: 10px; 169 | } 170 | } 
-------------------------------------------------------------------------------- /static/css/inlineViewer.css: -------------------------------------------------------------------------------- 1 | /* OxiCloud Inline Viewer Styles */ 2 | 3 | .inline-viewer-modal { 4 | position: fixed; 5 | top: 0; 6 | left: 0; 7 | width: 100%; 8 | height: 100%; 9 | z-index: 9999; 10 | background-color: rgba(0, 0, 0, 0.85); 11 | display: none; 12 | opacity: 0; 13 | transition: opacity 0.3s ease; 14 | pointer-events: none; 15 | } 16 | 17 | .inline-viewer-modal.active { 18 | display: flex !important; 19 | opacity: 1; 20 | align-items: center; 21 | justify-content: center; 22 | pointer-events: all; 23 | } 24 | 25 | .inline-viewer-content { 26 | width: 90%; 27 | height: 90%; 28 | max-width: 1200px; 29 | background-color: #fff; 30 | border-radius: 8px; 31 | overflow: hidden; 32 | display: flex; 33 | flex-direction: column; 34 | box-shadow: 0 4px 20px rgba(0, 0, 0, 0.3); 35 | } 36 | 37 | .inline-viewer-header { 38 | display: flex; 39 | justify-content: space-between; 40 | align-items: center; 41 | padding: 12px 16px; 42 | background-color: #f8f9fa; 43 | border-bottom: 1px solid #e2e8f0; 44 | } 45 | 46 | .inline-viewer-title { 47 | font-size: 18px; 48 | font-weight: 500; 49 | color: #2d3748; 50 | white-space: nowrap; 51 | overflow: hidden; 52 | text-overflow: ellipsis; 53 | } 54 | 55 | .inline-viewer-close { 56 | background: none; 57 | border: none; 58 | font-size: 18px; 59 | cursor: pointer; 60 | color: #718096; 61 | width: 36px; 62 | height: 36px; 63 | border-radius: 50%; 64 | display: flex; 65 | align-items: center; 66 | justify-content: center; 67 | transition: background-color 0.2s; 68 | } 69 | 70 | .inline-viewer-close:hover { 71 | background-color: #e2e8f0; 72 | color: #4a5568; 73 | } 74 | 75 | .inline-viewer-container { 76 | flex-grow: 1; 77 | overflow: auto; 78 | background-color: #f0f3f7; 79 | display: flex; 80 | align-items: center; 81 | justify-content: center; 82 | position: 
relative; 83 | } 84 | 85 | .inline-viewer-toolbar { 86 | display: flex; 87 | justify-content: space-between; 88 | align-items: center; 89 | padding: 12px 16px; 90 | background-color: #f8f9fa; 91 | border-top: 1px solid #e2e8f0; 92 | } 93 | 94 | .inline-viewer-download { 95 | background-color: #ff5e3a; 96 | color: white; 97 | border: none; 98 | border-radius: 4px; 99 | padding: 8px 16px; 100 | font-size: 14px; 101 | font-weight: 500; 102 | cursor: pointer; 103 | display: flex; 104 | align-items: center; 105 | gap: 8px; 106 | transition: background-color 0.2s; 107 | } 108 | 109 | .inline-viewer-download:hover { 110 | background-color: #e74c3c; 111 | } 112 | 113 | .inline-viewer-controls { 114 | display: flex; 115 | gap: 8px; 116 | } 117 | 118 | .inline-viewer-controls button { 119 | background-color: #f1f5f9; 120 | border: 1px solid #cbd5e1; 121 | border-radius: 4px; 122 | width: 36px; 123 | height: 36px; 124 | display: flex; 125 | align-items: center; 126 | justify-content: center; 127 | cursor: pointer; 128 | color: #64748b; 129 | transition: all 0.2s; 130 | } 131 | 132 | .inline-viewer-controls button:hover { 133 | background-color: #e2e8f0; 134 | color: #334155; 135 | } 136 | 137 | /* Image viewer */ 138 | .inline-viewer-image { 139 | max-width: 100%; 140 | max-height: 100%; 141 | object-fit: contain; 142 | transform-origin: center; 143 | transition: transform 0.2s ease; 144 | } 145 | 146 | /* PDF viewer */ 147 | .inline-viewer-pdf, 148 | .inline-viewer-pdf-fallback { 149 | width: 100%; 150 | height: 100%; 151 | border: none; 152 | } 153 | 154 | /* Only show fallback if object fails */ 155 | .inline-viewer-pdf + .inline-viewer-pdf-fallback { 156 | display: none; 157 | } 158 | 159 | .inline-viewer-pdf:not([data]), 160 | .inline-viewer-pdf[data=""] + .inline-viewer-pdf-fallback { 161 | display: block; 162 | } 163 | 164 | /* Loading indicator */ 165 | .inline-viewer-loader { 166 | position: absolute; 167 | top: 50%; 168 | left: 50%; 169 | transform: translate(-50%, 
-50%); 170 | font-size: 36px; 171 | color: #64748b; 172 | } 173 | 174 | /* Error and unsupported message */ 175 | .inline-viewer-message { 176 | padding: 32px; 177 | text-align: center; 178 | max-width: 400px; 179 | } 180 | 181 | .inline-viewer-icon { 182 | font-size: 64px; 183 | color: #cbd5e1; 184 | margin-bottom: 24px; 185 | } 186 | 187 | .inline-viewer-text { 188 | color: #64748b; 189 | line-height: 1.6; 190 | } 191 | 192 | .inline-viewer-text p { 193 | margin: 0 0 16px; 194 | } 195 | 196 | /* Responsive adjustments */ 197 | @media (max-width: 768px) { 198 | .inline-viewer-content { 199 | width: 100%; 200 | height: 100%; 201 | border-radius: 0; 202 | } 203 | 204 | .inline-viewer-controls { 205 | display: none; 206 | } 207 | } -------------------------------------------------------------------------------- /static/css/recent.css: -------------------------------------------------------------------------------- 1 | /* Estilos para la funcionalidad de archivos recientes */ 2 | 3 | /* Indicador de reciente */ 4 | .recent-indicator { 5 | position: absolute; 6 | top: 10px; 7 | right: 10px; 8 | width: 25px; 9 | height: 25px; 10 | display: flex; 11 | align-items: center; 12 | justify-content: center; 13 | border-radius: 50%; 14 | color: #6c757d; 15 | z-index: 5; 16 | } 17 | 18 | /* Estilos para los elementos de la vista de recientes */ 19 | .recent-item { 20 | position: relative; 21 | } 22 | 23 | /* Ajustes para la vista de cuadrícula */ 24 | .file-card.recent-item { 25 | border-left: 3px solid #6c757d; 26 | } 27 | 28 | /* Ajustes para la vista de lista */ 29 | .file-item.recent-item { 30 | position: relative; 31 | grid-template-columns: 30px minmax(200px, 2fr) 1fr 1fr 120px; 32 | } 33 | 34 | .file-item.recent-item .recent-indicator { 35 | position: relative; 36 | top: 0; 37 | right: 0; 38 | width: 30px; 39 | height: 30px; 40 | } 41 | 42 | /* Estilos para el estado vacío específico de recientes */ 43 | .recents-empty-state { 44 | display: flex; 45 | flex-direction: 
column; 46 | align-items: center; 47 | justify-content: center; 48 | padding: 50px 20px; 49 | text-align: center; 50 | color: #6c757d; 51 | } 52 | 53 | .recents-empty-state i { 54 | font-size: 48px; 55 | color: #6c757d; 56 | margin-bottom: 20px; 57 | opacity: 0.6; 58 | } 59 | 60 | .recents-empty-state p { 61 | margin-bottom: 10px; 62 | max-width: 400px; 63 | } 64 | 65 | /* Tooltip para tiempo de acceso */ 66 | .recent-item .file-info { 67 | cursor: help; 68 | } 69 | 70 | /* Animación para archivos recientes */ 71 | .recent-item { 72 | animation: recent-fade-in 0.3s ease; 73 | } 74 | 75 | @keyframes recent-fade-in { 76 | from { opacity: 0; transform: translateY(10px); } 77 | to { opacity: 1; transform: translateY(0); } 78 | } -------------------------------------------------------------------------------- /static/favicon.ico: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /static/identifier.sh: -------------------------------------------------------------------------------- 1 | while IFS= read -r -d '' file; do 2 | if grep -Iq . "$file"; then 3 | echo "===== $file =====" 4 | cat "$file" 5 | echo -e "\n" 6 | fi 7 | done < <(find . 
-type f -print0) 8 | 9 | -------------------------------------------------------------------------------- /static/js/languageSelector.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Language Selector Component for OxiCloud 3 | */ 4 | 5 | // Language codes and names 6 | const languages = [ 7 | { code: 'en', name: 'English' }, 8 | { code: 'es', name: 'Español' }, 9 | { code: 'zh', name: '中文' } 10 | ]; 11 | 12 | /** 13 | * Creates and initializes a language selector component 14 | * @param {string} containerId - ID of the container element 15 | */ 16 | function createLanguageSelector(containerId = 'language-selector') { 17 | // Get or create container 18 | let container = document.getElementById(containerId); 19 | if (!container) { 20 | console.warn(`Container with ID "${containerId}" not found, creating one.`); 21 | container = document.createElement('div'); 22 | container.id = containerId; 23 | document.body.appendChild(container); 24 | } 25 | 26 | // Create dropdown 27 | const select = document.createElement('select'); 28 | select.className = 'language-select'; 29 | select.setAttribute('aria-label', 'Select language'); 30 | 31 | // Add options 32 | languages.forEach(lang => { 33 | const option = document.createElement('option'); 34 | option.value = lang.code; 35 | option.textContent = lang.name; 36 | select.appendChild(option); 37 | }); 38 | 39 | // Set current language 40 | const currentLocale = window.i18n ? 
window.i18n.getCurrentLocale() : 'en'; 41 | select.value = currentLocale; 42 | 43 | // Add change event 44 | select.addEventListener('change', async (e) => { 45 | const locale = e.target.value; 46 | if (window.i18n) { 47 | await window.i18n.setLocale(locale); 48 | } 49 | }); 50 | 51 | // Add to container 52 | container.innerHTML = ''; 53 | container.appendChild(select); 54 | 55 | // Add event listener for locale changes 56 | window.addEventListener('localeChanged', (e) => { 57 | select.value = e.detail.locale; 58 | }); 59 | 60 | return container; 61 | } 62 | 63 | // Create language selector when DOM is ready 64 | document.addEventListener('DOMContentLoaded', () => { 65 | // Create language selector 66 | createLanguageSelector(); 67 | }); 68 | -------------------------------------------------------------------------------- /static/logo/logo-plain.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /static/oxicloud-logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | OxiCloud 10 | 11 | -------------------------------------------------------------------------------- /static/sw.js: -------------------------------------------------------------------------------- 1 | // OxiCloud Service Worker 2 | const CACHE_NAME = 'oxicloud-cache-v1'; 3 | const ASSETS_TO_CACHE = [ 4 | '/', 5 | '/index.html', 6 | '/js/i18n.js', 7 | '/js/languageSelector.js', 8 | '/locales/en.json', 9 | '/locales/es.json', 10 | '/favicon.ico', 11 | 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css', 12 | 'https://cdn.jsdelivr.net/npm/alpinejs@3.12.3/dist/cdn.min.js' 13 | ]; 14 | 15 | // Install event - cache assets 16 | self.addEventListener('install', event => { 17 | event.waitUntil( 18 | caches.open(CACHE_NAME) 19 | .then(cache => { 20 | console.log('Cache 
opened'); 21 | return cache.addAll(ASSETS_TO_CACHE); 22 | }) 23 | .then(() => self.skipWaiting()) // Activate immediately 24 | ); 25 | }); 26 | 27 | // Activate event - clean old caches 28 | self.addEventListener('activate', event => { 29 | event.waitUntil( 30 | caches.keys().then(cacheNames => { 31 | return Promise.all( 32 | cacheNames.filter(cacheName => { 33 | return cacheName !== CACHE_NAME; 34 | }).map(cacheName => { 35 | return caches.delete(cacheName); 36 | }) 37 | ); 38 | }).then(() => self.clients.claim()) // Take control of clients 39 | ); 40 | }); 41 | 42 | // Fetch event - serve from cache, update cache from network 43 | self.addEventListener('fetch', event => { 44 | // Don't intercept API requests - let them go straight to the network 45 | if (event.request.url.includes('/api/')) { 46 | return; 47 | } 48 | 49 | event.respondWith( 50 | caches.match(event.request) 51 | .then(response => { 52 | // Cache hit - return the response from the cached version 53 | if (response) { 54 | // For non-core assets, still fetch from network for updates 55 | if (!ASSETS_TO_CACHE.includes(new URL(event.request.url).pathname)) { 56 | fetch(event.request).then(networkResponse => { 57 | if (networkResponse && networkResponse.status === 200) { 58 | const clonedResponse = networkResponse.clone(); 59 | caches.open(CACHE_NAME).then(cache => { 60 | cache.put(event.request, clonedResponse); 61 | }); 62 | } 63 | }).catch(() => { 64 | // Ignore network fetch errors - we already have a cached version 65 | }); 66 | } 67 | return response; 68 | } 69 | 70 | // Not in cache - get from network and add to cache 71 | return fetch(event.request).then(response => { 72 | if (!response || response.status !== 200 || response.type !== 'basic') { 73 | return response; 74 | } 75 | 76 | // Clone the response as it's a stream and can only be consumed once 77 | const responseToCache = response.clone(); 78 | 79 | caches.open(CACHE_NAME).then(cache => { 80 | cache.put(event.request, responseToCache); 81 
| }); 82 | 83 | return response; 84 | }); 85 | }) 86 | ); 87 | }); 88 | 89 | // Background sync for failed requests 90 | self.addEventListener('sync', event => { 91 | if (event.tag === 'oxicloud-sync') { 92 | event.waitUntil( 93 | // Implement background sync for pending file operations 94 | Promise.resolve() // Placeholder for actual implementation 95 | ); 96 | } 97 | }); -------------------------------------------------------------------------------- /static/test.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Test OxiCloud 7 | 8 | 9 |

OxiCloud Test Page

10 |
11 | 12 | 13 | 14 | 67 | 68 | -------------------------------------------------------------------------------- /storage/.trash/trash_index.json: -------------------------------------------------------------------------------- 1 | [] -------------------------------------------------------------------------------- /storage/file_ids.json: -------------------------------------------------------------------------------- 1 | { 2 | "path_to_id": {}, 3 | "id_to_path": {}, 4 | "version": 0 5 | } -------------------------------------------------------------------------------- /storage/folder_ids.json: -------------------------------------------------------------------------------- 1 | { 2 | "path_to_id": {}, 3 | "id_to_path": {}, 4 | "version": 0 5 | } --------------------------------------------------------------------------------