diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 00000000..35196651 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,25 @@ +[alias] +cov = "llvm-cov" +cov-html = "llvm-cov --html" +cov-lcov = "llvm-cov --lcov --output-path=./.coverage/lcov.info" +time = "build --timings --all-targets" + +[build] +rustflags = [ + "-D", + "warnings", + "-D", + "future-incompatible", + "-D", + "let-underscore", + "-D", + "nonstandard-style", + "-D", + "rust-2018-compatibility", + "-D", + "rust-2018-idioms", + "-D", + "rust-2021-compatibility", + "-D", + "unused", +] diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..b67eebd8 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,22 @@ +/.env +/.env.local +/.git +/.git-blame-ignore +/.github +/.gitignore +/.vscode +/bin/ +/config-idx-back.local.toml +/config-tracker.local.toml +/config.local.toml +/config.toml +/cspell.json +/data_v2.db* +/data.db +/data.db* +/docker/ +/project-words.txt +/README.md +/rustfmt.toml +/storage/ +/target/ diff --git a/.env.local b/.env.local new file mode 100644 index 00000000..8d5f8e89 --- /dev/null +++ b/.env.local @@ -0,0 +1,6 @@ +DATABASE_URL=sqlite://storage/database/data.db?mode=rwc +TORRUST_IDX_BACK_CONFIG= +TORRUST_IDX_BACK_USER_UID=1000 +TORRUST_TRACKER_CONFIG= +TORRUST_TRACKER_DATABASE_DRIVER=sqlite3 +TORRUST_TRACKER_API_ADMIN_TOKEN=MyAccessToken diff --git a/.git-blame-ignore b/.git-blame-ignore new file mode 100644 index 00000000..749a0f1e --- /dev/null +++ b/.git-blame-ignore @@ -0,0 +1,4 @@ +# https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revs-fileltfilegt + +# Format the world! +9ddc079b00fc5d6ecd80199edc078d6793fb0a9c \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..2ae8963e --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +/.github/**/* @torrust/maintainers diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml new file mode 100644 index 00000000..becfbc1d --- /dev/null +++ b/.github/dependabot.yaml @@ -0,0 +1,19 @@ +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: daily + target-branch: "develop" + labels: + - "Continuous Integration" + - "Dependencies" + + - package-ecosystem: cargo + directory: / + schedule: + interval: daily + target-branch: "develop" + labels: + - "Build | Project System" + - "Dependencies" diff --git a/.github/labels.json b/.github/labels.json new file mode 100644 index 00000000..6ccbeab1 --- /dev/null +++ b/.github/labels.json @@ -0,0 +1,254 @@ +[ + { + "name": "- Admin -", + "color": "FFFFFF", + "description": "Enjoyable to Install and Setup our Software", + "aliases": [] + }, + { + "name": "- Contributor -", + "color": "FFFFFF", + "description": "Nice to support Torrust", + "aliases": [] + }, + { + "name": "- Developer -", + "color": "FFFFFF", + "description": "Torrust Improvement Experience", + "aliases": [] + }, + { + "name": "- User -", + "color": "FFFFFF", + "description": "Enjoyable to Use our Software", + "aliases": [] + }, + { + "name": "Blocked", + "color": "000000", + "description": "Has Unsatisfied Dependency", + "aliases": [] + }, + { + "name": "Bug", + "color": "a80506", + "description": "Incorrect Behavior", + "aliases": ["bug"] + }, + { + "name": "Build | Project System", + "color": "99AAAA", + "description": "Compiling and Packaging", + "aliases": ["Rust"] + }, + { + "name": "Cannot Reproduce", + "color": "D3D3D3", + "description": "Inconsistent Observations", + 
"aliases": [] + }, + { + "name": "Code Cleanup / Refactoring", + "color": "055a8b", + "description": "Tidying and Making Neat", + "aliases": ["refactoring", "tidying"] + }, + { + "name": "Continuous Integration", + "color": "41c6b3", + "description": "Workflows and Automation", + "aliases": ["workflow"] + }, + { + "name": "Dependencies", + "color": "d4f8f6", + "description": "Related to Dependencies", + "aliases": ["dependencies"] + }, + { + "name": "Documentation", + "color": "3d2133", + "description": "Improves Instructions, Guides, and Notices", + "aliases": [] + }, + { + "name": "Duplicate", + "color": "cfd3d7", + "description": "Not Unique", + "aliases": [] + }, + { + "name": "Easy", + "color": "f0cff0", + "description": "Good for Newcomers", + "aliases": ["good first issue"] + }, + { + "name": "Enhancement / Feature Request", + "color": "c9ecbf", + "description": "Something New", + "aliases": ["enhancement"] + }, + { + "name": "External Tools", + "color": "a6006b", + "description": "3rd Party Systems", + "aliases": [] + }, + { + "name": "First Time Contribution", + "color": "f1e0e6", + "description": "Welcome to Torrust", + "aliases": [] + }, + { + "name": "Fixed", + "color": "8e4c42", + "description": "Not a Concern Anymore", + "aliases": [] + }, + { + "name": "Hard", + "color": "2c2c2c", + "description": "Non-Trivial", + "aliases": [] + }, + { + "name": "Help Wanted", + "color": "00896b", + "description": "More Contributions are Appreciated", + "aliases": [] + }, + { + "name": "High Priority", + "color": "ba3fbc", + "description": "Focus Required", + "aliases": [] + }, + { + "name": "Hold Merge", + "color": "9aafbe", + "description": "We are not Ready Yet", + "aliases": [] + }, + { + "name": "Installer | Package", + "color": "ed8b24", + "description": "Distribution to Users", + "aliases": [] + }, + { + "name": "Invalid", + "color": "c1c1c1", + "description": "This doesn't seem right", + "aliases": [] + }, + { + "name": "Legal", + "color": "463e60", + "description": "Licenses and other Official Documents", + "aliases": [] + }, + { + "name": "Low Priority", + "color": "43536b", + "description": "Not our Focus Now", + "aliases": [] + }, + { + "name": "Needs Feedback", + "color": "d6946c", + "description": "What dose the Community Think?", + "aliases": ["waiting for feedback"] + }, + { + "name": "Needs Rebase", + "color": "FBC002", + "description": "Base Branch has Incompatibilities", + "aliases": [] + }, + { + "name": "Needs Research", + "color": "4bc021", + "description": "We Need to Know More About This", + "aliases": [] + }, + { + "name": "Optimization", + "color": "faeba8", + "description": "Make it Faster", + "aliases": [] + }, + { + "name": "Portability", + "color": "95de82", + "description": "Distribution to More Places", + "aliases": [] + }, + { + "name": "Postponed", + "color": "dadada", + "description": "For Later", + "aliases": [] + }, + { + "name": "Quality & Assurance", + "color": "eea2e8", + "description": "Relates to QA, Testing, and CI", + "aliases": [] + }, + { + "name": "Question / Discussion", + "color": "f89d00", + "description": "Community Feedback", + "aliases": ["code question"] + }, + { + "name": "Regression", + "color": "d10588", + "description": "It dose not work anymore", + "aliases": [] + }, + { + "name": "Reviewed", + "color": "f4f4ea", + "description": "This Looks Good", + "aliases": [] + }, + { + "name": "Security", + "color": "650606", + "description": "Publicly Connected to Security", + "aliases": ["security"] + }, + { + "name": "Testing", + "color": 
"c5def5", + "description": "Checking Torrust", + "aliases": [] + }, + { + "name": "Translations", + "color": "0c86af", + "description": "Localization and Cultural Adaptions", + "aliases": [] + }, + { + "name": "Trivial", + "color": "5f9685", + "description": "Something Easy", + "aliases": [] + }, + { + "name": "Won't Fix", + "color": "070003", + "description": "Something Not Relevant", + "aliases": [] + }, + { + "name": "Workaround Possible", + "color": "eae3e7", + "description": "You can still do it another way", + "aliases": [] + } +] \ No newline at end of file diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml new file mode 100644 index 00000000..884a1584 --- /dev/null +++ b/.github/workflows/container.yaml @@ -0,0 +1,178 @@ +name: Container + +on: + push: + branches: + - "develop" + - "main" + - "releases/**/*" + pull_request: + branches: + - "develop" + - "main" + +env: + CARGO_TERM_COLOR: always + +jobs: + test: + name: Test (Docker) + runs-on: ubuntu-latest + + strategy: + matrix: + target: [debug, release] + + steps: + - id: setup + name: Setup Toolchain + uses: docker/setup-buildx-action@v3 + + - id: build + name: Build + uses: docker/build-push-action@v5 + with: + file: ./Containerfile + push: false + load: true + target: ${{ matrix.target }} + tags: torrust-tracker:local + cache-from: type=gha + cache-to: type=gha + + - id: inspect + name: Inspect + run: docker image inspect torrust-tracker:local + + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: compose + name: Compose + run: docker compose build + + context: + name: Context + needs: test + runs-on: ubuntu-latest + + outputs: + continue: ${{ steps.check.outputs.continue }} + type: ${{ steps.check.outputs.type }} + version: ${{ steps.check.outputs.version }} + + steps: + - id: check + name: Check Context + run: | + if [[ "${{ github.repository }}" == "torrust/torrust-tracker" ]]; then + if [[ "${{ github.event_name }}" == "push" ]]; then + if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then + + echo "type=development" >> $GITHUB_OUTPUT + echo "continue=true" >> $GITHUB_OUTPUT + echo "On \`main\` Branch, Type: \`development\`" + + elif [[ "${{ github.ref }}" == "refs/heads/develop" ]]; then + + echo "type=development" >> $GITHUB_OUTPUT + echo "continue=true" >> $GITHUB_OUTPUT + echo "On \`develop\` Branch, Type: \`development\`" + + elif [[ $(echo "${{ github.ref }}" | grep -P '^(refs\/heads\/releases\/)(v)(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$') ]]; then + + version=$(echo "${{ github.ref }}" | sed -n -E 's/^(refs\/heads\/releases\/)//p') + echo "version=$version" >> $GITHUB_OUTPUT + echo "type=release" >> $GITHUB_OUTPUT + echo "continue=true" >> $GITHUB_OUTPUT + echo "In \`releases/$version\` Branch, Type: \`release\`" + + else + echo "Not Correct Branch. Will Not Continue" + fi + else + echo "Not a Push Event. Will Not Continue" + fi + else + echo "On a Forked Repository. 
Will Not Continue" + fi + + publish_development: + name: Publish (Development) + environment: dockerhub-torrust + needs: context + if: needs.context.outputs.continue == 'true' && needs.context.outputs.type == 'development' + runs-on: ubuntu-latest + + steps: + - id: meta + name: Docker Meta + uses: docker/metadata-action@v5 + with: + images: | + "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" + tags: | + type=ref,event=branch + + - id: login + name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - id: setup + name: Setup Toolchain + uses: docker/setup-buildx-action@v3 + + - name: Build and push + uses: docker/build-push-action@v5 + with: + file: ./Containerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha + + publish_release: + name: Publish (Release) + environment: dockerhub-torrust + needs: context + if: needs.context.outputs.continue == 'true' && needs.context.outputs.type == 'release' + runs-on: ubuntu-latest + + steps: + - id: meta + name: Docker Meta + uses: docker/metadata-action@v5 + with: + images: | + "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" + tags: | + type=semver,value=${{ needs.context.outputs.version }},pattern={{raw}} + type=semver,value=${{ needs.context.outputs.version }},pattern={{version}} + type=semver,value=${{ needs.context.outputs.version }},pattern=v{{major}} + type=semver,value=${{ needs.context.outputs.version }},pattern={{major}}.{{minor}} + + - id: login + name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - id: setup + name: Setup Toolchain + uses: docker/setup-buildx-action@v3 + + - name: Build and push + uses: docker/build-push-action@v5 + with: + file: ./Containerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml new file mode 100644 index 00000000..e84abf1a --- /dev/null +++ b/.github/workflows/coverage.yaml @@ -0,0 +1,82 @@ +name: Coverage + +on: + push: + branches: + - develop + pull_request_target: + branches: + - develop + +env: + CARGO_TERM_COLOR: always + +jobs: + report: + name: Report + environment: coverage + runs-on: ubuntu-latest + env: + CARGO_INCREMENTAL: "0" + RUSTFLAGS: "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" + RUSTDOCFLAGS: "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" + + steps: + - id: checkout_push + if: github.event_name == 'push' + name: Checkout Repository (Push) + uses: actions/checkout@v4 + + - id: checkout_pull_request_target + if: github.event_name == 'pull_request_target' + name: Checkout Repository (Pull Request Target) + uses: actions/checkout@v4 + with: + ref: "refs/pull/${{ github.event.pull_request.number }}/head" + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: nightly + components: llvm-tools-preview + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + # Temporary Cleaning to avoid Rust Compiler Bug + - id: clean + name: 
Make Build Clean + run: cargo clean + + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: grcov + + - id: imdl + name: Install Intermodal + run: cargo install imdl + + - id: check + name: Run Build Checks + run: cargo check --tests --benches --examples --workspace --all-targets --all-features + + - id: test + name: Run Unit Tests + run: cargo test --tests --benches --examples --workspace --all-targets --all-features + + - id: coverage + name: Generate Coverage Report + uses: alekitto/grcov@v0.2 + + - id: upload + name: Upload Coverage Report + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: ${{ steps.coverage.outputs.report }} + verbose: true + fail_ci_if_error: true diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml new file mode 100644 index 00000000..30b6d5de --- /dev/null +++ b/.github/workflows/deployment.yaml @@ -0,0 +1,55 @@ +name: Deployment + +on: + push: + branches: + - "releases/**/*" + +jobs: + test: + name: Test + runs-on: ubuntu-latest + + strategy: + matrix: + toolchain: [stable, nightly] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + + - id: test + name: Run Unit Tests + run: cargo test --tests --benches --examples --workspace --all-targets --all-features + + publish: + name: Publish + environment: deployment + needs: test + runs-on: ubuntu-latest + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + + - id: publish + name: Publish Crates + env: + CARGO_REGISTRY_TOKEN: "${{ secrets.TORRUST_UPDATE_CARGO_REGISTRY_TOKEN }}" + run: | + cargo publish -p torrust-index-located-error + cargo publish -p torrust-index diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml new file mode 100644 index 00000000..97aaa030 --- /dev/null +++ b/.github/workflows/labels.yaml @@ -0,0 +1,36 @@ +name: Labels +on: + workflow_dispatch: + push: + branches: + - develop + paths: + - "/.github/labels.json" + +jobs: + export: + name: Export Existing Labels + runs-on: ubuntu-latest + + steps: + - id: backup + name: Export to Workflow Artifact + uses: EndBug/export-label-config@v1 + + sync: + name: Synchronize Labels from Repo + needs: export + runs-on: ubuntu-latest + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: sync + name: Apply Labels from File + uses: EndBug/label-sync@da00f2c11fdb78e4fae44adac2fdd713778ea3e8 + with: + config-file: .github/labels.json + delete-other-labels: true + token: ${{ secrets.UPDATE_LABELS }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 3d7b0b30..00000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,90 +0,0 @@ -on: - push: - branches: - - main - -jobs: - test: - runs-on: ubuntu-latest - env: - CARGO_TERM_COLOR: always - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - name: Run databases - working-directory: ./tests - run: docker-compose up -d - - name: Wait for databases to start - run: sleep 15s - shell: bash - - uses: Swatinem/rust-cache@v1 - - name: Run tests - run: cargo test - - tag: - needs: test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - 
name: Determine tag - id: tag - run: echo "::set-output name=release_tag::v$(grep -m 1 'version' Cargo.toml | awk '{print $3}' | tr -d '/"')" - outputs: - release_tag: ${{ steps.tag.outputs.release_tag }} - - build: - needs: tag - name: Build ${{ matrix.target }} - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - include: - - target: x86_64-pc-windows-gnu - archive: zip - name: ${{ github.event.repository.name }}_${{ needs.tag.outputs.release_tag }}_x86_64-pc-windows-gnu - - target: x86_64-unknown-linux-musl - archive: tar.gz tar.xz - name: ${{ github.event.repository.name }}_${{ needs.tag.outputs.release_tag }}_x86_64-unknown-linux-musl - - target: x86_64-apple-darwin - archive: zip - name: ${{ github.event.repository.name }}_${{ needs.tag.outputs.release_tag }}_x86_64-apple-darwin - steps: - - uses: actions/checkout@master - - name: Compile builds - id: compile - uses: rust-build/rust-build.action@v1.3.2 - with: - RUSTTARGET: ${{ matrix.target }} - ARCHIVE_TYPES: ${{ matrix.archive }} - ARCHIVE_NAME: ${{ matrix.name }} - UPLOAD_MODE: none - - name: Upload artifact - uses: actions/upload-artifact@v3 - with: - name: torrust-index-backend - path: | - ${{ steps.compile.outputs.BUILT_ARCHIVE }} - ${{ steps.compile.outputs.BUILT_CHECKSUM }} - - release: - needs: [tag, build] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Download builds - uses: actions/download-artifact@v2 - with: - name: torrust-index-backend - path: torrust-index-backend - - name: Release - uses: softprops/action-gh-release@v1 - with: - generate_release_notes: true - tag_name: ${{ needs.tag.outputs.release_tag }} - files: | - torrust-index-backend/* - LICENSE diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml new file mode 100644 index 00000000..5ae97af4 --- /dev/null +++ b/.github/workflows/testing.yaml @@ -0,0 +1,134 @@ +name: Testing + +on: + push: + pull_request: + +env: + CARGO_TERM_COLOR: always + +jobs: + format: + name: Formatting + runs-on: ubuntu-latest + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: nightly + components: rustfmt + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + # Temporary Cleaning to avoid Rust Compiler Bug + - id: clean + name: Make Build Clean + run: cargo clean + + - id: format + name: Run Formatting-Checks + run: cargo fmt --check + + check: + name: Static Analysis + runs-on: ubuntu-latest + needs: format + + strategy: + matrix: + toolchain: [stable, nightly] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: clippy + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + # Temporary Cleaning to avoid Rust Compiler Bug + - id: clean + name: Make Build Clean + run: cargo clean + + - id: check + name: Run Build Checks + run: cargo check --tests --benches --examples --workspace --all-targets --all-features + + - id: lint + name: Run Lint Checks + run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic + + - id: docs + name: Lint Documentation + env: + RUSTDOCFLAGS: "-D warnings" + run: cargo doc --no-deps --bins 
--examples --workspace --all-features + + unit: + name: Units + runs-on: ubuntu-latest + needs: check + + strategy: + matrix: + toolchain: [stable, nightly] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: llvm-tools-preview + + - id: cache + name: Enable Job Cache + uses: Swatinem/rust-cache@v2 + + # Temporary Cleaning to avoid Rust Compiler Bug + - id: clean + name: Make Build Clean + run: cargo clean + + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: cargo-llvm-cov, cargo-nextest + + - id: imdl + name: Install Intermodal + run: cargo install imdl + + - id: test-docs + name: Run Documentation Tests + run: cargo test --doc + + - id: test + name: Run Unit Tests + run: cargo test --tests --benches --examples --workspace --all-targets --all-features + + - id: coverage + name: Generate Coverage Report + run: cargo llvm-cov nextest --tests --benches --examples --workspace --all-targets --all-features diff --git a/.gitignore b/.gitignore index 1952496d..c7d2c3f0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,9 @@ +/.coverage/ /.env -/data.db* /config.toml +/data_v2.db* +/data.db* +/storage/ +/target /uploads/ +/.idea/ diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 00000000..bc463a8a --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,6 @@ +{ + "recommendations": [ + "streetsidesoftware.code-spell-checker", + "rust-lang.rust-analyzer" + ] +} diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000..e3ede37f --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,194 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug unit tests in library 'torrust-index'", + "cargo": { + "args": [ + "test", + "--no-run", + "--lib", + "--package=torrust-index" + ], + "filter": { + "name": "torrust-index", + "kind": "lib" + } + }, + "args": [], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug executable 'main'", + "cargo": { + "args": [ + "build", + "--bin=main", + "--package=torrust-index" + ], + "filter": { + "name": "main", + "kind": "bin" + } + }, + "args": [], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug unit tests in executable 'main'", + "cargo": { + "args": [ + "test", + "--no-run", + "--bin=main", + "--package=torrust-index" + ], + "filter": { + "name": "main", + "kind": "bin" + } + }, + "args": [], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug executable 'import_tracker_statistics'", + "cargo": { + "args": [ + "build", + "--bin=import_tracker_statistics", + "--package=torrust-index" + ], + "filter": { + "name": "import_tracker_statistics", + "kind": "bin" + } + }, + "args": [], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug unit tests in executable 'import_tracker_statistics'", + "cargo": { + "args": [ + "test", + "--no-run", + "--bin=import_tracker_statistics", + "--package=torrust-index" + ], + "filter": { + "name": "import_tracker_statistics", + "kind": "bin" + } + }, + "args": [], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug executable 'parse_torrent'", + "cargo": { + "args": [ + "build", + "--bin=parse_torrent", + "--package=torrust-index" + ], + "filter": { + "name": "parse_torrent", + "kind": "bin" + } + }, + "args": ["./tests/fixtures/torrents/not-working-with-two-nodes.torrent"], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug unit tests in executable 'parse_torrent'", + "cargo": { + "args": [ + "test", + "--no-run", + "--bin=parse_torrent", + "--package=torrust-index" + ], + "filter": { + "name": "parse_torrent", + "kind": "bin" + } + }, + "args": [], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug executable 'upgrade'", + "cargo": { + "args": [ + "build", + "--bin=upgrade", + "--package=torrust-index" + ], + "filter": { + "name": "upgrade", + "kind": "bin" + } + }, + "args": [], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug unit tests in executable 'upgrade'", + "cargo": { + "args": [ + "test", + "--no-run", + "--bin=upgrade", + "--package=torrust-index" + ], + "filter": { + "name": "upgrade", + "kind": "bin" + } + }, + "args": [], + "cwd": "${workspaceFolder}" + }, + { + "type": "lldb", + "request": "launch", + "name": "Debug integration test 'mod'", + "cargo": { + "args": [ + "test", + "--no-run", + "--test=mod", + "--package=torrust-index" + ], + "filter": { + "name": "mod", + "kind": "test" + } + }, + "args": [], + "cwd": "${workspaceFolder}" + } + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..661243fb --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,28 @@ +{ + "[rust]": { + "editor.formatOnSave": true + }, + 
"rust-analyzer.checkOnSave": true, + "rust-analyzer.check.command": "clippy", + "rust-analyzer.check.allTargets": true, + "rust-analyzer.check.extraArgs": [ + "--", + "-D", + "clippy::correctness", + "-D", + "clippy::suspicious", + "-W", + "clippy::complexity", + "-W", + "clippy::perf", + "-W", + "clippy::style", + "-W", + "clippy::pedantic", + ], + "evenBetterToml.formatter.allowedBlankLines": 1, + "evenBetterToml.formatter.columnWidth": 130, + "evenBetterToml.formatter.trailingNewline": true, + "evenBetterToml.formatter.reorderKeys": true, + "evenBetterToml.formatter.reorderArrays": true, +} \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index e7a6994e..144798bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3,326 +3,248 @@ version = 3 [[package]] -name = "actix-codec" -version = "0.4.0" +name = "addr2line" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d5dbeb2d9e51344cb83ca7cc170f1217f9fe25bfc50160e6e200b5c31c1019a" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ - "bitflags", - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite", - "tokio", - "tokio-util", + "gimli", ] [[package]] -name = "actix-cors" -version = "0.6.0-beta.2" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01552b8facccd5d7a4cc5d8e2b07d306160c97a4968181c2db965533389c8725" -dependencies = [ - "actix-service", - "actix-web", - "derive_more", - "futures-util", - "log", - "once_cell", - "smallvec", -] +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] -name = "actix-http" -version = "3.0.0-beta.9" +name = "ahash" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01260589f1aafad11224002741eb37bc603b4ce55b4e3556d2b2122f9aac7c51" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "actix-tls", - "actix-utils", - "ahash", - "base64", - "bitflags", - "brotli2", - "bytes", - "bytestring", - "derive_more", - "encoding_rs", - "flate2", - "futures-core", - "futures-util", - "h2", - "http", - "httparse", - "itoa 0.4.7", - "language-tags", - "local-channel", - "log", - "mime", + "getrandom", "once_cell", - "percent-encoding", - "pin-project", - "pin-project-lite", - "rand", - "regex", - "serde 1.0.144", - "sha-1 0.9.8", - "smallvec", - "time 0.2.27", - "tokio", - "zstd", + "version_check", ] [[package]] -name = "actix-macros" -version = "0.2.1" +name = "ahash" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f86cd6857c135e6e9fe57b1619a88d1f94a7df34c00e11fe13e64fd3438837" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ - "quote", - "syn", + "cfg-if", + "getrandom", + "once_cell", + "version_check", ] [[package]] -name = "actix-multipart" -version = "0.4.0-beta.5" +name = "aho-corasick" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a32d8964e147f1e411b38cd08a28eb37915be6797191a394fe0ad73f36441a99" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ - "actix-utils", - "actix-web", - "bytes", - "derive_more", - "futures-core", - "futures-util", - "httparse", - "local-waker", - "log", - "mime", - "twoway", + "memchr", ] [[package]] -name = "actix-router" -version = 
"0.2.7" +name = "alloc-no-stdlib" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad299af73649e1fc893e333ccf86f377751eb95ff875d095131574c6f43452c" -dependencies = [ - "bytestring", - "http", - "log", - "regex", - "serde 1.0.144", -] +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" [[package]] -name = "actix-rt" -version = "2.2.0" +name = "alloc-stdlib" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc7d7cd957c9ed92288a7c3c96af81fa5291f65247a76a34dac7b6af74e52ba0" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" dependencies = [ - "actix-macros", - "futures-core", - "tokio", + "alloc-no-stdlib", ] [[package]] -name = "actix-server" -version = "2.0.0-beta.5" +name = "allocator-api2" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26369215fcc3b0176018b3b68756a8bcc275bb000e6212e454944913a1f9bf87" -dependencies = [ - "actix-rt", - "actix-service", - "actix-utils", - "futures-core", - "log", - "mio", - "num_cpus", - "slab", - "tokio", -] +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] -name = "actix-service" -version = "2.0.0" +name = "android-tzdata" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77f5f9d66a8730d0fae62c26f3424f5751e5518086628a40b7ab6fca4a705034" -dependencies = [ - "futures-core", - "paste", - "pin-project-lite", -] +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" [[package]] -name = "actix-tls" -version = "3.0.0-beta.5" +name = "android_system_properties" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65b7bb60840962ef0332f7ea01a57d73a24d2cb663708511ff800250bbfef569" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "actix-utils", - "derive_more", - "futures-core", - "http", - "log", - "tokio-util", + "libc", ] [[package]] -name = "actix-utils" -version = "3.0.0" +name = "argon2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e491cbaac2e7fc788dfff99ff48ef317e23b3cf63dbaf7aaab6418f40f92aa94" +checksum = "17ba4cac0a46bc1d2912652a751c47f2a9f3a7fe89bcae2275d418f5270402f9" dependencies = [ - "local-waker", - "pin-project-lite", + "base64ct", + "blake2", + "cpufeatures", + "password-hash", ] [[package]] -name = "actix-web" -version = "4.0.0-beta.8" +name = "arrayref" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c503f726f895e55dac39adeafd14b5ee00cc956796314e9227fc7ae2e176f443" -dependencies = [ - "actix-codec", - "actix-http", - "actix-macros", - "actix-router", - "actix-rt", - "actix-server", - "actix-service", - "actix-utils", - "actix-web-codegen", - "ahash", - "bytes", - "cfg-if", - "cookie", - "derive_more", - "either", - "encoding_rs", - "futures-core", - "futures-util", - "itoa 0.4.7", - "language-tags", - "log", - "mime", - "once_cell", - "paste", - "pin-project", - "regex", - "serde 1.0.144", - "serde_json", - "serde_urlencoded", - "smallvec", - "socket2", - "time 0.2.27", - "url", -] +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] -name = "actix-web-codegen" -version = "0.5.0-beta.3" +name = "arrayvec" +version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d048c6986743105c1e8e9729fbc8d5d1667f2f62393a58be8d85a7d9a5a6c8d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] -name = "adler" -version = "1.0.2" +name = "arrayvec" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] -name = "ahash" -version = "0.7.6" +name = "async-compression" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +checksum = "f658e2baef915ba0f26f1f7c42bfb8e12f532a01f449a090ded75ae7a07e9ba2" dependencies = [ - "getrandom", - "once_cell", - "version_check", + "brotli", + "flate2", + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "zstd", + "zstd-safe", ] [[package]] -name = "aho-corasick" -version = "0.7.18" +name = "async-trait" +version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ - "memchr", + "proc-macro2", + "quote", + "syn 2.0.38", ] [[package]] -name = "argon2" -version = "0.4.1" +name = "atoi" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4ce4441f99dbd377ca8a8f57b698c44d0d6e712d8329b5040da5a64aa1ce73" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" dependencies = [ - "base64ct", - "blake2", - "password-hash", + "num-traits", ] [[package]] -name = "arrayvec" -version = "0.5.2" +name = "autocfg" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] -name = "async-trait" -version = "0.1.52" +name = "axum" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ - "proc-macro2", - "quote", - "syn", + "async-trait", + "axum-core", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "multer", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-layer", + "tower-service", ] [[package]] -name = "atoi" -version = "1.0.0" +name = "axum-core" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c57d12312ff59c811c0643f4d80830505833c9ffaebd193d819392b265be8e" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ - "num-traits 0.2.14", + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", ] [[package]] -name = "autocfg" -version = "1.0.1" +name = "backtrace" +version = "0.3.69" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] [[package]] -name = "base-x" -version = "0.2.8" +name = "base64" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.13.0" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "base64ct" -version = "1.0.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a32fd6af2b5827bce66c29053ba0e7c42b9dcab01835835058558c10851a46b" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "binascii" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bitflags" @@ -331,98 +253,95 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] -name = "bitvec" -version = "0.19.5" +name = "bitflags" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321" +checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" dependencies = [ - "funty", - "radium", - "tap", - "wyz", + "serde", ] [[package]] name = "blake2" -version = "0.10.4" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.3", + "digest", ] [[package]] name = "block-buffer" -version = "0.9.0" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] -name = "block-buffer" -version = "0.10.2" +name = "brotli" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "516074a47ef4bce09577a3b379392300159ce5b1ba2e501ff1c819950066100f" dependencies = [ - "generic-array", + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", ] [[package]] -name = "brotli-sys" -version = "0.3.2" +name = "brotli-decompressor" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4445dea95f4c2b41cde57cc9fee236ae4dbae88d8fcbdb4750fc1bb5d86aaecd" +checksum = "da74e2b81409b1b743f8f0c62cc6254afefb8b8e50bbfe3735550f7aeefa3448" dependencies = [ - "cc", - "libc", + "alloc-no-stdlib", + "alloc-stdlib", ] [[package]] -name = "brotli2" -version = "0.3.2" +name = "bstr" +version 
= "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb036c3eade309815c15ddbacec5b22c4d1f3983a774ab2eac2e3e9ea85568e" +checksum = "c79ad7fb2dd38f3dabd76b09c6a5a20c038fc0213ef1e9afd30eb777f120f019" dependencies = [ - "brotli-sys", - "libc", + "memchr", + "serde", ] [[package]] name = "bumpalo" -version = "3.7.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] -name = "byteorder" -version = "1.4.3" +name = "bytemuck" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "374d28ec25809ee0e23827c2ab573d729e293f281dfe393500e7ad618baa61c6" [[package]] -name = "bytes" -version = "1.2.1" +name = "byteorder" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] -name = "bytestring" -version = "1.0.0" +name = "bytes" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90706ba19e97b90786e19dc0d5e2abd80008d99d4c0c5d1ad0b5e72cec7c494d" -dependencies = [ - "bytes", -] +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "cc" -version = "1.0.68" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "jobserver", + "libc", ] [[package]] @@ -433,44 +352,51 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ - "libc", - "num-integer", - "num-traits 0.2.14", - "time 0.1.43", - "winapi", + "android-tzdata", + "iana-time-zone", + "num-traits", + "windows-targets", +] + +[[package]] +name = "colored" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" +dependencies = [ + "is-terminal", + "lazy_static", + "windows-sys", ] [[package]] name = "config" -version = "0.11.0" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1b9d958c2b1368a663f05538fc1b5975adce1e19f435acceae987aceeeb369" +checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" dependencies = [ + "async-trait", + "json5", "lazy_static", - "nom 5.1.2", + "nom", + "pathdiff", + "ron", "rust-ini", - "serde 1.0.144", - "serde-hjson", + "serde", "serde_json", - "toml", + "toml 0.5.11", "yaml-rust", ] [[package]] name = "const-oid" -version = "0.7.1" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" - -[[package]] -name = "const_fn" -version = "0.4.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" +checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" [[package]] name = "convert_case" @@ -478,22 +404,11 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" -[[package]] -name = "cookie" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f1c7727e460397e56abc4bddc1d49e07a1ad78fc98eb2e1c8f032a58a2f80d" -dependencies = [ - "percent-encoding", - "time 0.2.27", - "version_check", -] - [[package]] name = "core-foundation" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", @@ -501,48 +416,48 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] [[package]] name = "crc" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53757d12b596c16c78b83458d732a5d1a17ab3f53f2f7412f6fb57cc8a140ab3" +checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" dependencies = [ "crc-catalog", ] [[package]] name = "crc-catalog" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0165d2900ae6778e36e80bbc4da3b5eefccee9ba939761f9c2882a5d9af3ff" +checksum = "9cace84e55f07e7301bae1c519df89cdad8cc3cd868413d3fdbdeca9ff3db484" [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-queue" -version = "0.3.2" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if", "crossbeam-utils", @@ -550,125 +465,162 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.11" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", - "once_cell", ] [[package]] -name = "crypto-bigint" -version = "0.3.2" +name = "crypto-common" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", - "subtle", + "typenum", ] [[package]] -name = "crypto-common" -version = "0.1.6" +name = "data-url" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "3a30bfce702bcfa94e906ef82421f2c0e61c076ad76030c16ee5d2e9a32fe193" dependencies = [ - "generic-array", - "typenum", + "matches", ] [[package]] name = "der" -version = "0.5.1" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", - "crypto-bigint", "pem-rfc7468", + "zeroize", ] [[package]] -name = "derive_more" -version = "0.99.14" +name = "deranged" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc7b9cef1e351660e5443924e4f43ab25fbbed3e9a5f052df3677deb4d6b320" +checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derive-new" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535" dependencies = [ - "convert_case", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] -name = "digest" -version = "0.9.0" +name = "derive_more" +version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "generic-array", + "convert_case", + "proc-macro2", + "quote", + "rustc_version", + "syn 1.0.109", ] [[package]] name = "digest" -version = "0.10.3" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "block-buffer 0.10.2", + "block-buffer", + "const-oid", "crypto-common", "subtle", ] [[package]] -name = "dirs" -version = "4.0.0" +name = "dlv-list" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" + +[[package]] +name = "dotenvy" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" + +[[package]] +name = "either" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" dependencies = [ - "dirs-sys", + "serde", ] [[package]] -name = "dirs-sys" -version = "0.3.7" +name = "email-encoding" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" +checksum = "dbfb21b9878cf7a348dcb8559109aabc0ec40d69924bd706fa5149846c4fef75" dependencies = [ - "libc", - "redox_users", - "winapi", + "base64 
0.21.4", + "memchr", ] [[package]] -name = "discard" -version = "1.0.4" +name = "email_address" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" +checksum = "e2153bd83ebc09db15bcbdc3e2194d901804952e3dc96967e1cd3b0c5c32d112" +dependencies = [ + "serde", +] [[package]] -name = "dotenvy" -version = "0.15.3" +name = "encoding_rs" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da3db6fcad7c1fc4abdd99bf5276a4db30d6a819127903a709ed41e5ff016e84" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ - "dirs", + "cfg-if", ] [[package]] -name = "either" -version = "1.6.1" +name = "equivalent" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] -name = "encoding_rs" -version = "0.8.28" +name = "errno" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" dependencies = [ "cfg-if", + "home", + "windows-sys", ] [[package]] @@ -679,47 +631,68 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "fastrand" -version = "1.5.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b394ed3d285a429378d3b384b9eb1285267e7df4b166df24b7a6939a04dc392e" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] [[package]] -name = "filetime" -version = "0.2.15" +name = "fastrand" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "975ccf83d8d9d0d84682850a38c8169027be83368805971cc4f238c2b245bc98" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "fdeflate" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10" dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "winapi", + "simd-adler32", +] + +[[package]] +name = "fern" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee" +dependencies = [ + "log", ] +[[package]] +name = "finl_unicode" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" + [[package]] name = "flate2" -version = "1.0.20" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ - "cfg-if", "crc32fast", - "libc", "miniz_oxide", ] +[[package]] +name = "float-cmp" +version = "0.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" + [[package]] name = "flume" -version = "0.10.14" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" dependencies = [ "futures-core", "futures-sink", - "pin-project", - "spin 0.9.4", + "spin 0.9.8", ] [[package]] @@ -728,6 +701,17 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "fontdb" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01b07f5c05414a0d8caba4c17eef8dc8b5c8955fc7c68d324191c7a56d3f3449" +dependencies = [ + "log", + "memmap2", + "ttf-parser", +] + [[package]] name = "foreign-types" version = "0.3.2" @@ -745,25 +729,18 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ - "matches", "percent-encoding", ] -[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - [[package]] name = "futures" -version = "0.3.15" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -776,9 +753,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.24" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bdd20c28fadd505d0fd6712cdfcb0d4b5648baf45faef7f852afb2399bb050" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -786,15 +763,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.24" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e5aa3de05362c3fb88de6531e6296e85cde7739cccad4b9dfeeb7f6ebce56bf" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.24" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ff63c23854bee61b6e9cd331d523909f238fc7636290b96826e9cfa5faa00ab" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -803,9 +780,9 @@ dependencies = [ [[package]] name = "futures-intrusive" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62007592ac46aa7c2b6416f7deb9a8a8f63a01e0f1d6e1787d5630170db2b63e" +checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" dependencies = [ "futures-core", "lock_api", @@ -814,38 +791,38 @@ dependencies = [ [[package]] name = "futures-io" -version = 
"0.3.24" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbf4d2a7a308fd4578637c0b17c7e1c7ba127b8f6ba00b29f717e9655d85eb68" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" -version = "0.3.24" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd15d1c7456c04dbdf7e88bcd69760d74f3a798d6444e16974b505b0e62f17" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.38", ] [[package]] name = "futures-sink" -version = "0.3.24" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b20ba5a92e727ba30e72834706623d94ac93a725410b6a6b6fbc1b07f7ba56" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.24" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6508c467c73851293f390476d4491cf4d227dbabcd4170f3bb6044959b294f1" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.24" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44fb6cb1be61cc1d2e43b262516aafcf63b241cffdb1d3fa115f91d9c7b09c90" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -861,9 +838,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -871,20 +848,50 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", "wasi", ] +[[package]] +name = "gimli" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" + +[[package]] +name = "globset" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" +dependencies = [ + "aho-corasick", + "bstr", + "fnv", + "log", + "regex", +] + +[[package]] +name = "globwalk" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93e3af942408868f6934a7b85134a3230832b9977cf66125df2f9edcfce4ddcc" +dependencies = [ + "bitflags 1.3.2", + "ignore", + "walkdir", +] + [[package]] name = "h2" -version = "0.3.4" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" dependencies = [ "bytes", "fnv", @@ -892,7 +899,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio", "tokio-util", @@ -905,35 +912,42 @@ version = 
"0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", +] + +[[package]] +name = "hashbrown" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" +dependencies = [ + "ahash 0.8.3", + "allocator-api2", ] [[package]] name = "hashlink" -version = "0.8.0" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452c155cb93fecdfb02a73dd57b5d8e442c2063bd7aac72f1bc5e4263a43086" +checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown", + "hashbrown 0.14.1", ] [[package]] name = "heck" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.19" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hex" @@ -941,13 +955,31 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hkdf" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +dependencies = [ + "hmac", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + [[package]] name = "home" -version = "0.5.3" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2456aef2e6b6a9784192ae780c0f15bc57df0e918585282325e8c8ac27737654" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ - "winapi", + "windows-sys", ] [[package]] @@ -963,43 +995,49 @@ dependencies = [ [[package]] name = "http" -version = "0.2.4" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", - "itoa 0.4.7", + "itoa", ] [[package]] name = "http-body" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" + [[package]] name = "httparse" -version = "1.5.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.12" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", @@ -1010,9 +1048,9 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 0.4.7", + "itoa", "pin-project-lite", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -1032,106 +1070,184 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "iana-time-zone" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + [[package]] name = "idna" -version = "0.2.3" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "idna" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] +[[package]] +name = "ignore" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbe7873dab538a9a44ad79ede1faf5f30d49f9a5c883ddbab48bce81b64b7492" +dependencies = [ + "globset", + "lazy_static", + "log", + "memchr", + "regex", + "same-file", + "thread_local", + "walkdir", + "winapi-util", +] + [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +dependencies = [ + "equivalent", + "hashbrown 0.14.1", ] [[package]] name = "instant" -version = "0.1.9" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] name = "ipnet" -version 
= "2.3.1" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" + +[[package]] +name = "is-terminal" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +dependencies = [ + "hermit-abi", + "rustix", + "windows-sys", +] [[package]] name = "itertools" -version = "0.10.3" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" dependencies = [ "either", ] [[package]] name = "itoa" -version = "0.4.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] -name = "itoa" -version = "1.0.3" +name = "jobserver" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +dependencies = [ + "libc", +] [[package]] -name = "itoap" -version = "1.0.1" +name = "jpeg-decoder" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9028f49264629065d057f340a86acb84867925865f73bbf8d47b4d149a7e88b8" +checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] -name = "jobserver" -version = "0.1.24" +name = "js-sys" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ - "libc", + "wasm-bindgen", ] [[package]] -name = "js-sys" -version = "0.3.53" +name = "json5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4bf49d50e2961077d9c99f4b7997d770a1114f087c3c2e0069b36c13fc2979d" +checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" dependencies = [ - "wasm-bindgen", + "pest", + "pest_derive", + "serde", ] [[package]] name = "jsonwebtoken" -version = "8.1.1" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64", + "base64 0.21.4", "pem", "ring", - "serde 1.0.144", + "serde", "serde_json", "simple_asn1", ] [[package]] -name = "language-tags" -version = "0.3.2" +name = "kurbo" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" +checksum = "7a53776d271cfb873b17c618af0298445c88afc52837f3e948fa3fafd131f449" +dependencies = [ + "arrayvec 0.7.4", +] [[package]] name = "lazy_static" @@ -1144,62 +1260,51 @@ dependencies = [ [[package]] name = "lettre" -version = "0.10.0-rc.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d8697ded52353bdd6fec234b3135972433397e86d0493d9fc38fbf407b7c106a" +checksum = "76bd09637ae3ec7bd605b8e135e757980b3968430ff2b1a4a94fb7769e50166d" dependencies = [ "async-trait", - "base64", - "fastrand", + "base64 0.21.4", + "email-encoding", + "email_address", + "fastrand 1.9.0", "futures-io", "futures-util", "hostname", "httpdate", - "idna", + "idna 0.3.0", "mime", "native-tls", - "nom 6.1.2", + "nom", "once_cell", "quoted_printable", - "r2d2", - "regex", "rustls", + "rustls-pemfile", + "socket2 0.4.9", "tokio", + "tokio-native-tls", "tokio-rustls", - "webpki", "webpki-roots", ] -[[package]] -name = "lexical-core" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" -dependencies = [ - "arrayvec", - "bitflags", - "cfg-if", - "ryu", - "static_assertions", -] - [[package]] name = "libc" -version = "0.2.132" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libm" -version = "0.2.2" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33a33a362ce288760ec6a508b94caaec573ae7d3bbbd91b87aa0bad4456839db" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libsqlite3-sys" -version = "0.24.2" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "898745e570c7d0453cc1fbc4a701eb6c662ed54e8fec8b7d14be137ebeeb9d14" +checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" dependencies = [ "cc", "pkg-config", @@ -1208,45 +1313,31 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" - -[[package]] -name = "local-channel" -version = "0.1.2" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6246c68cf195087205a0512559c97e15eaf95198bf0e206d662092cdcb03fe9f" -dependencies = [ - "futures-core", - "futures-sink", - "futures-util", - "local-waker", -] +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] -name = "local-waker" -version = "0.1.1" +name = "linux-raw-sys" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f9a2d3e27ce99ce2c3aad0b09b1a7b916293ea9b2bf624c13fe646fadd8da4" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" -version = "0.4.4" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ + "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.14" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if", -] +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "match_cfg" @@ -1256,65 +1347,107 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" 
[[package]] name = "matches" -version = "0.1.8" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" + +[[package]] +name = "matchit" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest", +] [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" + +[[package]] +name = "memmap2" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" +dependencies = [ + "libc", +] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mime_guess" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] [[package]] name = "minimal-lexical" -version = "0.1.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6595bb28ed34f43c3fe088e48f6cfb2e033cab45f25a5384d5fdf564fbc8c4b2" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", - "autocfg", + "simd-adler32", ] [[package]] name = "mio" -version = "0.7.13" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", - "log", - "miow", - "ntapi", - "winapi", + "wasi", + "windows-sys", ] [[package]] -name = "miow" -version = "0.3.7" +name = "multer" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" +checksum = "01acbdc23469fd8fe07ab135923371d5f5a422fbf9c522158677c8eb15bc51c2" dependencies = [ - "winapi", + "bytes", + "encoding_rs", + "futures-util", + "http", + "httparse", + "log", + "memchr", + "mime", + "spin 0.9.8", + "version_check", ] [[package]] name = "native-tls" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" +checksum = 
"07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -1324,76 +1457,43 @@ dependencies = [ "openssl-sys", "schannel", "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "nom" -version = "5.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" -dependencies = [ - "lexical-core", - "memchr", - "version_check", -] - -[[package]] -name = "nom" -version = "6.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2" -dependencies = [ - "bitvec", - "funty", - "memchr", - "version_check", -] - -[[package]] -name = "nom" -version = "7.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffd9d26838a953b4af82cbeb9f1592c6798916983959be223a7124e992742c1" -dependencies = [ - "memchr", - "minimal-lexical", - "version_check", + "security-framework-sys", + "tempfile", ] [[package]] -name = "ntapi" -version = "0.3.6" +name = "nom" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ - "winapi", + "memchr", + "minimal-lexical", ] [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.14", + "num-traits", ] [[package]] name = "num-bigint-dig" -version = "0.8.1" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "566d173b2f9406afbc5510a90925d5a2cd80cae4605631f1212303df265de011" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" dependencies = [ "byteorder", "lazy_static", "libm", "num-integer", "num-iter", - "num-traits 0.2.14", + "num-traits", "rand", "smallvec", "zeroize", @@ -1401,12 +1501,12 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", - "num-traits 0.2.14", + "num-traits", ] [[package]] @@ -1417,23 +1517,14 @@ checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.14", -] - -[[package]] -name = "num-traits" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" -dependencies = [ - "num-traits 0.2.14", + "num-traits", ] [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", "libm", @@ -1441,98 +1532,111 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = 
"1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ "hermit-abi", "libc", ] [[package]] -name = "num_threads" -version = "0.1.6" +name = "object" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ - "libc", + "memchr", ] [[package]] name = "once_cell" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f7254b99e31cad77da24b08ebf628882739a608578bb1bcdfc1f9c21260d7c0" - -[[package]] -name = "opaque-debug" -version = "0.3.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "openssl" -version = "0.10.36" +version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a" +checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags", + "bitflags 2.4.0", "cfg-if", "foreign-types", "libc", "once_cell", + "openssl-macros", "openssl-sys", ] +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.38", +] + [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.66" +version = "0.9.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1996d2d305e561b70d1ee0c53f1542833f4e1ac6ce9a6708b6ff2738ca67dc82" +checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" dependencies = [ - "autocfg", "cc", "libc", "pkg-config", "vcpkg", ] +[[package]] +name = "ordered-multimap" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" +dependencies = [ + "dlv-list", + "hashbrown 0.12.3", +] + [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", - "instant", "libc", "redox_syscall", "smallvec", - "winapi", + "windows-targets", ] [[package]] name = "password-hash" 
-version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", "rand_core", @@ -1541,59 +1645,128 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.9" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" + +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + +[[package]] +name = "pbkdf2" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest", + "hmac", + "password-hash", + "sha2", +] [[package]] name = "pem" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" dependencies = [ - "base64", + "base64 0.13.1", ] [[package]] name = "pem-rfc7468" -version = "0.3.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" dependencies = [ "base64ct", ] [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" + +[[package]] +name = "pest" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.38", +] + +[[package]] +name = "pest_meta" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d" +dependencies = [ + "once_cell", + "pest", + "sha2", +] + +[[package]] +name = "pico-args" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db8bcd96cb740d03149cbad5518db9fd87126a10ab519c011893b1754134c468" [[package]] name = "pin-project" -version = "1.0.7" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4" +checksum = 
"fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.7" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.38", ] [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -1603,84 +1776,79 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs1" -version = "0.3.3" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ "der", "pkcs8", - "zeroize", + "spki", ] [[package]] name = "pkcs8" -version = "0.8.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der", "spki", - "zeroize", ] [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] -name = "ppv-lite86" -version = "0.2.10" +name = "png" +version = "0.17.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd75bf2d8dd3702b9707cdbc56a5b9ef42cec752eb8b3bafc01234558442aa64" +dependencies = [ + "bitflags 1.3.2", + "crc32fast", + "fdeflate", + "flate2", + "miniz_oxide", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] -name = "proc-macro-hack" -version = "0.5.19" +name = "ppv-lite86" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" -version = "1.0.36" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] name = "quote" -version = "1.0.19" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f53dc8cf16a769a6f677e09e7ff2cd4be1ea0f48754aac39520536962011de0d" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] [[package]] name = "quoted_printable" -version = "0.4.3" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1238256b09923649ec89b08104c4dfe9f6cb2fea734a5db5384e44916d59e9c5" - -[[package]] -name = "r2d2" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" -dependencies = [ - "log", - "parking_lot", - "scheduled-thread-pool", -] - -[[package]] -name = "radium" -version = "0.5.3" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8" +checksum = "5a3866219251662ec3b26fc217e3e05bf9c4f84325234dfb96bf0bf840889e49" [[package]] name = "rand" @@ -1705,87 +1873,89 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] -name = "redox_syscall" -version = "0.2.16" +name = "rctree" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags", -] +checksum = "9ae028b272a6e99d9f8260ceefa3caa09300a8d6c8d2b2001316474bc52122e9" [[package]] -name = "redox_users" -version = "0.4.3" +name = "redox_syscall" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "getrandom", - "redox_syscall", - "thiserror", + "bitflags 1.3.2", ] [[package]] name = "regex" -version = "1.6.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" dependencies = [ "aho-corasick", "memchr", + "regex-automata", "regex-syntax", ] [[package]] -name = "regex-syntax" -version = "0.6.27" +name = "regex-automata" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "regex-syntax" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] +checksum = "56d84fdd47036b038fc80dd333d10b6aab10d5d31f4a366e20014def75328d33" [[package]] name = "reqwest" -version = "0.11.4" +version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" +checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ - "base64", + "base64 0.21.4", "bytes", "encoding_rs", "futures-core", "futures-util", + "h2", "http", "http-body", "hyper", "hyper-tls", "ipnet", "js-sys", - "lazy_static", "log", "mime", + "mime_guess", "native-tls", + "once_cell", "percent-encoding", "pin-project-lite", - 
"serde 1.0.144", + "serde", "serde_json", "serde_urlencoded", + "system-configuration", "tokio", "tokio-native-tls", + "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", @@ -1793,6 +1963,30 @@ dependencies = [ "winreg", ] +[[package]] +name = "resvg" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "256cc9203115db152290219f35f3362e729301b59e2a391fb2721fe3fa155352" +dependencies = [ + "jpeg-decoder", + "log", + "pico-args", + "png", + "rgb", + "tiny-skia", + "usvg", +] + +[[package]] +name = "rgb" +version = "0.8.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20ec2d3e3fc7a92ced357df9cebd5a10b6fb2aa1ee797bf7e9ce2f17dffc8f59" +dependencies = [ + "bytemuck", +] + [[package]] name = "ring" version = "0.16.20" @@ -1808,128 +2002,193 @@ dependencies = [ "winapi", ] +[[package]] +name = "ron" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +dependencies = [ + "base64 0.13.1", + "bitflags 1.3.2", + "serde", +] + +[[package]] +name = "roxmltree" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "921904a62e410e37e215c40381b7117f830d9d89ba60ab5236170541dd25646b" +dependencies = [ + "xmlparser", +] + [[package]] name = "rsa" -version = "0.6.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" +checksum = "6ab43bb47d23c1a631b4b680199a45255dce26fa9ab2fa902581f624ff13e6a8" dependencies = [ "byteorder", - "digest 0.10.3", + "const-oid", + "digest", "num-bigint-dig", "num-integer", "num-iter", - "num-traits 0.2.14", + "num-traits", "pkcs1", "pkcs8", "rand_core", - "smallvec", + "signature", + "spki", "subtle", "zeroize", ] [[package]] name = "rust-ini" -version = "0.13.0" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" +dependencies = [ + "cfg-if", + "ordered-multimap", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc_version" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.38.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" +dependencies = [ + "bitflags 2.4.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + [[package]] name = "rustls" -version = "0.19.1" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ - "base64", "log", "ring", + "rustls-webpki 0.101.6", "sct", - "webpki", ] [[package]] -name = "ryu" -version = "1.0.5" +name = "rustls-pemfile" +version = "1.0.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +dependencies = [ + "base64 0.21.4", +] [[package]] -name = "sailfish" -version = "0.4.0" +name = "rustls-webpki" +version = "0.100.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "948a7edfc2f03d7c58a097dda25ed29440a72e8528894a6e182fe9171195fed1" +checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3" dependencies = [ - "itoap", - "ryu", - "sailfish-macros", - "version_check", + "ring", + "untrusted", ] [[package]] -name = "sailfish-compiler" -version = "0.4.0" +name = "rustls-webpki" +version = "0.101.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f0a01133d6ce146020e6416ac6a823f813f1cbb30ff77548b4fa20749524947" +checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" dependencies = [ - "filetime", - "home", - "memchr", - "proc-macro2", - "quote", - "serde 1.0.144", - "syn", - "toml", + "ring", + "untrusted", ] [[package]] -name = "sailfish-macros" +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + +[[package]] +name = "rustybuzz" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86326c1f1dce0b316e0a47071f683b185417dc64e1a704380b5c706b09e871b1" +checksum = "44561062e583c4873162861261f16fd1d85fe927c4904d71329a4fe43dc355ef" dependencies = [ - "proc-macro2", - "sailfish-compiler", + "bitflags 1.3.2", + "bytemuck", + "smallvec", + "ttf-parser", + "unicode-bidi-mirroring", + "unicode-ccc", + "unicode-general-category", + "unicode-script", ] [[package]] -name = "schannel" -version = "0.1.19" +name = "ryu" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" + +[[package]] +name = "safe_arch" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "c1ff3d6d9696af502cc3110dacce942840fb06ff4514cad92236ecc455f2ce05" dependencies = [ - "lazy_static", - "winapi", + "bytemuck", ] [[package]] -name = "scheduled-thread-pool" -version = "0.2.5" +name = "same-file" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ - "parking_lot", + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +dependencies = [ + "windows-sys", ] [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", @@ -1937,11 +2196,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.3.1" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -1950,9 +2209,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.4.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -1960,149 +2219,149 @@ dependencies = [ [[package]] name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - -[[package]] -name = "serde" -version = "0.8.23" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" [[package]] name = "serde" -version = "1.0.144" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f747710de3dcd43b88c9168773254e809d8ddbdf9653b84e2554ab219f17860" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde-hjson" -version = "0.9.1" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a3a4e0ea8a88553209f6cc6cfe8724ecad22e1acf372793c27d995290fe74f8" -dependencies = [ - "lazy_static", - "num-traits 0.1.43", - "regex", - "serde 0.8.23", +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +dependencies = [ + "serde_derive", ] [[package]] name = "serde_bencode" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" +checksum = "a70dfc7b7438b99896e7f8992363ab8e2c4ba26aa5ec675d32d1c3c2c33d413e" dependencies = [ - "serde 1.0.144", + "serde", "serde_bytes", ] [[package]] name = "serde_bytes" -version = "0.11.5" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" +checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" dependencies = [ - "serde 1.0.144", + "serde", ] [[package]] name = "serde_derive" -version = "1.0.144" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94ed3a816fb1d101812f83e789f888322c34e291f894f19590dc310963e87a00" +checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.38", ] [[package]] name = "serde_json" 
-version = "1.0.64" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ - "itoa 0.4.7", + "itoa", "ryu", - "serde 1.0.144", + "serde", +] + +[[package]] +name = "serde_path_to_error" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +dependencies = [ + "itoa", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +dependencies = [ + "serde", ] [[package]] name = "serde_urlencoded" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 0.4.7", + "itoa", "ryu", - "serde 1.0.144", + "serde", ] [[package]] name = "sha-1" -version = "0.9.8" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ - "block-buffer 0.9.0", "cfg-if", "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "digest", ] [[package]] -name = "sha-1" -version = "0.10.0" +name = "sha1" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest", ] -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - [[package]] name = "sha2" -version = "0.10.5" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9db03534dff993187064c4e0c05a5708d2a9728ace9a8959b77bedf415dac5" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest", ] [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +dependencies = [ + "digest", + "rand_core", +] + +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "simple_asn1" version = "0.6.2" @@ -2110,33 +2369,61 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", - "num-traits 0.2.14", + "num-traits", "thiserror", - "time 0.3.14", + "time", +] + +[[package]] +name = "simplecss" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a11be7c62927d9427e9f40f3444d5499d868648e2edbc4e2116de69e7ec0e89d" +dependencies = [ + "log", ] +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "slab" -version = "0.4.3" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] [[package]] name = "smallvec" -version = "1.9.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" +checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "socket2" -version = "0.4.1" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", ] +[[package]] +name = "socket2" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +dependencies = [ + "libc", + "windows-sys", +] + [[package]] name = "spin" version = "0.5.2" @@ -2145,18 +2432,18 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spin" -version = "0.9.4" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" dependencies = [ "lock_api", ] [[package]] name = "spki" -version = "0.5.4" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" dependencies = [ "base64ct", "der", @@ -2164,354 +2451,468 @@ dependencies = [ [[package]] name = "sqlformat" -version = "0.1.8" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b7922be017ee70900be125523f38bdd644f4f06a1b16e8fa5a8ee8c34bffd4" +checksum = "6b7b278788e7be4d0d29c0f39497a0eef3fba6bbc8e70d8bf7fde46edeaa9e85" dependencies = [ "itertools", - "nom 7.0.0", + "nom", "unicode_categories", ] [[package]] name = "sqlx" -version = "0.6.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "788841def501aabde58d3666fcea11351ec3962e6ea75dbcd05c84a71d68bcd1" +checksum = "0e50c216e3624ec8e7ecd14c6a6a6370aad6ee5d8cfc3ab30b5162eeeef2ed33" dependencies = [ "sqlx-core", "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", ] [[package]] name = "sqlx-core" -version = "0.6.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8c21d3b5e7cadfe9ba7cdc1295f72cc556c750b4419c27c219c0693198901f8e" +checksum = "8d6753e460c998bbd4cd8c6f0ed9a64346fcca0723d6e75e52fdc351c5d2169d" dependencies = [ - "ahash", + "ahash 0.8.3", "atoi", - "bitflags", "byteorder", "bytes", "crc", "crossbeam-queue", - "digest 0.10.3", "dotenvy", "either", "event-listener", - "flume", "futures-channel", "futures-core", - "futures-executor", "futures-intrusive", + "futures-io", "futures-util", - "generic-array", "hashlink", "hex", - "indexmap", - "itoa 1.0.3", - "libc", - "libsqlite3-sys", + "indexmap 2.0.2", "log", "memchr", - "num-bigint", + "native-tls", "once_cell", "paste", "percent-encoding", - "rand", - "rsa", - "sha-1 0.10.0", + "serde", + "serde_json", "sha2", "smallvec", "sqlformat", - "sqlx-rt", - "stringprep", "thiserror", - "time 0.3.14", + "time", + "tokio", "tokio-stream", + "tracing", "url", ] [[package]] name = "sqlx-macros" -version = "0.6.1" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a793bb3ba331ec8359c1853bd39eed32cdd7baaf22c35ccf5c92a7e8d1189ec" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 1.0.109", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4adfd2df3557bddd3b91377fc7893e8fa899e9b4061737cbade4e1bb85f1b45c" +checksum = "0a4ee1e104e00dedb6aa5ffdd1343107b0a4702e862a84320ee7cc74782d96fc" dependencies = [ "dotenvy", "either", "heck", + "hex", "once_cell", "proc-macro2", "quote", + "serde", + "serde_json", "sha2", "sqlx-core", - "sqlx-rt", - "syn", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "syn 1.0.109", + "tempfile", + "tokio", "url", ] [[package]] -name = "sqlx-rt" -version = "0.6.1" +name = "sqlx-mysql" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be52fc7c96c136cedea840ed54f7d446ff31ad670c9dea95ebcb998530971a3" +checksum = "864b869fdf56263f4c95c45483191ea0af340f9f3e3e7b4d57a61c7c87a970db" dependencies = [ - "native-tls", + "atoi", + "base64 0.21.4", + "bitflags 2.4.0", + "byteorder", + "bytes", + "crc", + "digest", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + "itoa", + "log", + "md-5", + "memchr", "once_cell", - "tokio", - "tokio-native-tls", + "percent-encoding", + "rand", + "rsa", + "serde", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "time", + "tracing", + "whoami", ] [[package]] -name = "standback" -version = "0.2.17" +name = "sqlx-postgres" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" +checksum = "eb7ae0e6a97fb3ba33b23ac2671a5ce6e3cabe003f451abd5a56e7951d975624" dependencies = [ - "version_check", + "atoi", + "base64 0.21.4", + "bitflags 2.4.0", + "byteorder", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "rand", + "serde", + "serde_json", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror", + "time", + "tracing", + "whoami", ] [[package]] -name = "static_assertions" -version = "1.1.0" +name = "sqlx-sqlite" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +checksum = "d59dc83cf45d89c555a577694534fcd1b55c545a816c816ce51f20bbe56a4f3f" +dependencies = [ + "atoi", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + "sqlx-core", + "time", + "tracing", + "url", +] [[package]] -name = "stdweb" -version = "0.4.20" +name = "stringprep" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", + "finl_unicode", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "svgtypes" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22975e8a2bac6a76bb54f898a6b18764633b00e780330f0b689f65afb3975564" +dependencies = [ + "siphasher", ] [[package]] -name = "stdweb-derive" -version = "0.5.3" +name = "syn" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", - "serde 1.0.144", - "serde_derive", - "syn", + "unicode-ident", ] [[package]] -name = "stdweb-internal-macros" -version = "0.2.9" +name = "syn" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ - "base-x", "proc-macro2", "quote", - "serde 1.0.144", - "serde_derive", - "serde_json", - "sha1", - "syn", + "unicode-ident", ] [[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" +name = "sync_wrapper" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] -name = "stringprep" -version = "0.1.2" +name = "system-configuration" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", ] [[package]] -name = "subtle" -version = "2.4.1" +name = "system-configuration-sys" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] [[package]] -name = "syn" -version = "1.0.94" +name = "tempfile" +version = "3.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a07e33e919ebcd69113d5be0e4d70c5707004ff45188910106854f38b960df4a" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", + "cfg-if", + "fastrand 2.0.1", + "redox_syscall", + "rustix", + "windows-sys", ] [[package]] -name = "tap" -version = "1.0.1" +name = "tera" +version = "1.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "970dff17c11e884a4a09bc76e3a17ef71e01bb13447a11e85226e254fe6d10b8" +dependencies = [ + "globwalk", + "lazy_static", + "pest", + "pest_derive", + "regex", + "serde", + "serde_json", + "unic-segment", +] + +[[package]] +name = "text-colorizer" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +checksum = "b30f9b94bd367aacc3f62cd28668b10c7ae1784c7d27e223a1c21646221a9166" +dependencies = [ + "colored", +] [[package]] -name = "tempfile" -version = "3.2.0" +name = "text-to-png" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "b77c9daf0c55b10ef445266dbf0d58705c80496526de2c00643459958d956663" dependencies = [ - "cfg-if", - "libc", - "rand", - "redox_syscall", - "remove_dir_all", - "winapi", + "derive-new", + "fontdb", + "lazy_static", + "png", + "resvg", + "siphasher", + "thiserror", + "tiny-skia", + "usvg", + "xml-rs", ] [[package]] name = "thiserror" -version = "1.0.34" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1b05ca9d106ba7d2e31a9dab4a64e7be2cce415321966ea3132c49a656e252" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.34" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8f2591983642de85c921015f3f070c665a197ed69e417af436115e3a1407487" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.38", ] [[package]] -name = "time" -version = "0.1.43" +name = "thread_local" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ - "libc", - "winapi", + "cfg-if", + "once_cell", ] [[package]] name = "time" -version = "0.2.27" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" +checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros 0.1.1", - "version_check", - "winapi", + "deranged", + "itoa", + "powerfmt", + "serde", + "time-core", + "time-macros", ] [[package]] -name = "time" -version = "0.3.14" +name = "time-core" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3f9a28b618c3a6b9251b6908e9c99e04b9e5c02e6581ccbb67d59c34ef7f9b" -dependencies = [ - "itoa 1.0.3", - "libc", - "num_threads", - "time-macros 0.2.4", -] +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" 
[[package]] name = "time-macros" -version = "0.1.1" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" +checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ - "proc-macro-hack", - "time-macros-impl", + "time-core", ] [[package]] -name = "time-macros" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" - -[[package]] -name = "time-macros-impl" -version = "0.1.2" +name = "tiny-skia" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" +checksum = "d049bfef0eaa2521e75d9ffb5ce86ad54480932ae19b85f78bec6f52c4d30d78" dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", + "arrayref", + "arrayvec 0.5.2", + "bytemuck", + "cfg-if", + "png", + "safe_arch", ] [[package]] name = "tinyvec" -version = "1.2.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.13.1" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52963f91310c08d91cb7bff5786dfc8b79642ab839e188187e92105dbfb9d2c8" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ - "autocfg", + "backtrace", "bytes", "libc", - "memchr", "mio", "num_cpus", - "once_cell", - "parking_lot", "pin-project-lite", "signal-hook-registry", + "socket2 0.5.4", "tokio-macros", - "winapi", + "windows-sys", ] [[package]] name = "tokio-macros" -version = "1.3.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.38", ] [[package]] name = "tokio-native-tls" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", @@ -2519,20 +2920,19 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ "rustls", "tokio", - "webpki", ] [[package]] name = "tokio-stream" -version = "0.1.9" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +checksum = 
"397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", @@ -2541,141 +2941,337 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.7" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" dependencies = [ "bytes", "futures-core", "futures-sink", - "log", "pin-project-lite", "tokio", + "tracing", +] + +[[package]] +name = "toml" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +dependencies = [ + "serde", ] [[package]] name = "toml" -version = "0.5.8" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "serde 1.0.144", + "indexmap 2.0.2", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", ] [[package]] -name = "torrust-index-backend" -version = "2.0.0-dev.1" +name = "torrust-index" +version = "3.0.0-alpha.1" dependencies = [ - "actix-cors", - "actix-multipart", - "actix-web", "argon2", "async-trait", + "axum", + "binascii", + "bytes", "chrono", "config", "derive_more", + "email_address", + "fern", "futures", + "hex", + "hyper", + "indexmap 2.0.2", "jsonwebtoken", + "lazy_static", "lettre", + "log", + "pbkdf2", + "rand", "rand_core", "regex", "reqwest", - "sailfish", - "serde 1.0.144", + "serde", "serde_bencode", "serde_bytes", "serde_derive", "serde_json", - "sha-1 0.10.0", + "sha-1", "sqlx", + "tempfile", + "tera", + "text-colorizer", + "text-to-png", + "thiserror", "tokio", - "toml", + "toml 0.8.2", + "torrust-index-located-error", + "tower-http", "urlencoding", + "uuid", + "which", +] + +[[package]] +name = "torrust-index-located-error" +version = "3.0.0-alpha.1" +dependencies = [ + "log", + "thiserror", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +dependencies = [ + "async-compression", + "bitflags 2.4.0", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", ] +[[package]] +name = "tower-layer" +version = "0.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.26" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" +checksum = "ee2ef2af84856a50c1d430afce2fdded0a4ec7eda868db86409b4543df0797f9" dependencies = [ - "cfg-if", + "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.38", +] + [[package]] name = "tracing-core" -version = "0.1.18" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] -name = "twoway" -version = "0.2.2" +name = "ttf-parser" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ae2f58a822f08abdaf668897e96a5656fe72f5a9ce66422423e8849384872e6" + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + +[[package]] +name = "unic-char-property" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c57ffb460d7c24cd6eda43694110189030a3d1dfe418416d9468fd1c1d290b47" +checksum = "a8c57a407d9b6fa02b4795eb81c5b6652060a15a7903ea981f3d723e6c0be221" dependencies = [ - "memchr", - "unchecked-index", + "unic-char-range", ] [[package]] -name = "typenum" -version = "1.15.0" +name = "unic-char-range" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "0398022d5f700414f6b899e10b8348231abf9173fa93144cbc1a43b9793c1fbc" [[package]] -name = "unchecked-index" -version = "0.2.2" +name = "unic-common" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeba86d422ce181a719445e51872fa30f1f7413b62becb52e95ec91aa262d85c" +checksum = "80d7ff825a6a654ee85a63e80f92f054f904f21e7d12da4e22f9834a4aaa35bc" [[package]] -name = "unicode-bidi" -version = "0.3.5" +name = "unic-segment" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" +checksum = "e4ed5d26be57f84f176157270c112ef57b86debac9cd21daaabbe56db0f88f23" dependencies = [ - "matches", + "unic-ucd-segment", +] + +[[package]] +name = "unic-ucd-segment" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2079c122a62205b421f499da10f3ee0f7697f012f55b675e002483c73ea34700" +dependencies = [ + "unic-char-property", + "unic-char-range", + "unic-ucd-version", ] +[[package]] +name = "unic-ucd-version" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96bd2f2237fe450fcd0a1d2f5f4e91711124f7857ba2e964247776ebeeb7b0c4" +dependencies = [ + "unic-common", +] + +[[package]] +name = "unicase" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" + +[[package]] +name = "unicode-bidi-mirroring" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56d12260fb92d52f9008be7e4bca09f584780eb2266dc8fecc6a192bec561694" + +[[package]] +name = "unicode-ccc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2520efa644f8268dce4dcd3050eaa7fc044fca03961e9998ac7e2e92b77cf1" + +[[package]] +name = "unicode-general-category" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07547e3ee45e28326cc23faac56d44f58f16ab23e413db526debce3b0bfd2742" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-script" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d817255e1bed6dfd4ca47258685d14d2bdcfbc64fdc9e3819bd5848057b8ecc" + [[package]] name = "unicode-segmentation" -version = "1.8.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] -name = "unicode-xid" -version = "0.2.2" +name = "unicode-vo" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "b1d386ff53b415b7fe27b50bb44679e2cc4660272694b7b6f3326d8480823a94" [[package]] name = "unicode_categories" @@ -2691,21 +3287,56 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = 
"143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.4.0", "percent-encoding", ] [[package]] name = "urlencoding" -version = "2.1.0" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + +[[package]] +name = "usvg" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f472f6f5d41d3eaef059bc893dcd2382eefcdda3e04ebe0b2860c56b538e491e" +dependencies = [ + "base64 0.13.1", + "data-url", + "flate2", + "float-cmp", + "fontdb", + "kurbo", + "log", + "pico-args", + "rctree", + "roxmltree", + "rustybuzz", + "simplecss", + "siphasher", + "svgtypes", + "ttf-parser", + "unicode-bidi", + "unicode-script", + "unicode-vo", + "xmlwriter", +] + +[[package]] +name = "uuid" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b90931029ab9b034b300b797048cf23723400aa757e8a2bfb9d748102f9821" +checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" +dependencies = [ + "getrandom", +] [[package]] name = "vcpkg" @@ -2715,58 +3346,65 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "walkdir" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +dependencies = [ + "same-file", + "winapi-util", +] [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.76" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce9b1b516211d33767048e5d47fa2a381ed8b76fc48d2ce4aa39877f9f183e0" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", - "serde 1.0.144", - "serde_json", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.76" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe8dc78e2326ba5f845f4b5bf548401604fa20b1dd1d365fb73b6c1d6364041" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.38", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.26" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"95fded345a6559c2cfee778d562300c581f7d4ff3edb9b0d230d69800d213972" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -2776,9 +3414,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.76" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44468aa53335841d9d6b6c023eaab07c0cd4bddbcfdee3e2bb1e8d2cb8069fef" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2786,52 +3424,60 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.76" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0195807922713af1e67dc66132c7328206ed9766af3858164fb583eedc25fbad" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.76" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acdb075a845574a1fa5f09fd77e43f7747599301ea3417a9fbffdeedfc1f4a29" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.53" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224b2f6b67919060055ef1a67807367c2066ed520c3862cc013d26cf893a783c" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] -name = "webpki" -version = "0.21.4" +name = "webpki-roots" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" dependencies = [ - "ring", - "untrusted", + "rustls-webpki 0.100.3", ] [[package]] -name = "webpki-roots" -version = "0.21.1" +name = "which" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ - "webpki", + "either", + "home", + "once_cell", + "rustix", ] +[[package]] +name = "whoami" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" + [[package]] name = "winapi" version = "0.3.9" @@ -2848,26 +3494,132 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ 
+ "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "winnow" +version = "0.5.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" -version = "0.7.0" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "winapi", + "cfg-if", + "windows-sys", ] [[package]] -name = "wyz" -version = "0.2.0" +name = "xml-rs" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" + +[[package]] +name = "xmlparser" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" + +[[package]] +name = "xmlwriter" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" +checksum = "ec7a2a501ed189703dba8b08142f057e887dfc4b2cc4db2d343ac6376ba3e0b9" [[package]] name = "yaml-rust" @@ -2880,35 +3632,34 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.7" 
+version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" [[package]] name = "zstd" -version = "0.7.0+zstd.1.4.9" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9428752481d8372e15b1bf779ea518a179ad6c771cca2d2c60e4fbff3cc2cd52" +checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "3.1.0+zstd.1.4.9" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa1926623ad7fe406e090555387daf73db555b948134b4d73eac5eb08fb666d" +checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" dependencies = [ - "libc", "zstd-sys", ] [[package]] name = "zstd-sys" -version = "1.5.0+zstd.1.4.9" +version = "2.0.9+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e6c094340240369025fc6b731b054ee2a834328fa584310ac96aa4baebdc465" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" dependencies = [ "cc", - "libc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 4d43f3e7..bdbbfe48 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,35 +1,80 @@ [package] -name = "torrust-index-backend" -version = "2.0.0-dev.1" -authors = ["Mick van Dijke ", "Wesley Bijleveld "] +default-run = "torrust-index" +name = "torrust-index" +readme = "README.md" + +authors.workspace = true +description.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[workspace.package] +authors = ["Nautilus Cyberneering , Mick van Dijke "] +categories = ["network-programming", "web-programming"] +description = "A BitTorrent Index" +documentation = "https://docs.rs/crate/torrust-tracker/" edition = "2021" +homepage = "https://torrust.com/" +keywords = ["bittorrent", "file-sharing", "index", "peer-to-peer", "torrent"] +license = "AGPL-3.0-only" +publish = true +repository = "https://github.com/torrust/torrust-tracker" +rust-version = "1.72" +version = "3.0.0-alpha.1" [profile.dev.package.sqlx-macros] opt-level = 3 [dependencies] -actix-web = "4.0.0-beta.8" -actix-multipart = "0.4.0-beta.5" -actix-cors = "0.6.0-beta.2" -async-trait = "0.1.52" -futures = "0.3.5" -sqlx = { version = "0.6.1", features = [ "runtime-tokio-native-tls", "sqlite", "mysql", "migrate", "time" ] } -config = "0.11" -toml = "0.5" -derive_more = "0.99" -serde = { version = "1.0", features = ["rc"] } +argon2 = "0" +async-trait = "0" +axum = { version = "0", features = ["multipart"] } +binascii = "0" +bytes = "1" +chrono = { version = "0", default-features = false, features = ["clock"] } +config = "0" +derive_more = "0" +email_address = "0" +fern = "0" +futures = "0" +hex = "0" +hyper = "0" +indexmap = "2" +jsonwebtoken = "8" +lazy_static = "1.4.0" +lettre = { version = "0", features = ["builder", "smtp-transport", "tokio1", "tokio1-native-tls", "tokio1-rustls-tls"] } +log = "0" +pbkdf2 = { version = "0", features = ["simple"] } +rand_core = { version = "0", features = ["std"] } +regex = "1" +reqwest = { version = "0", features = ["json", "multipart"] } +serde = { version = "1", features = ["rc"] } +serde_bencode = 
"0" +serde_bytes = "0" serde_derive = "1" serde_json = "1" -serde_bencode = "0.2.3" -serde_bytes = "0.11" -urlencoding = "2.1.0" -argon2 = "0.4.1" -rand_core = { version = "0.6", features = ["std"] } -chrono = "0.4.19" -jsonwebtoken = "8.1.1" -sha-1 = "0.10.0" -reqwest = { version = "0.11.4", features = [ "json" ] } -tokio = {version = "1.13", features = ["macros", "io-util", "net", "time", "rt-multi-thread", "fs", "sync", "signal"]} -lettre = { version = "0.10.0-rc.3", features = ["builder", "tokio1", "tokio1-rustls-tls", "smtp-transport"]} -sailfish = "0.4.0" -regex = "1.6.0" +sha-1 = "0" +sqlx = { version = "0", features = ["migrate", "mysql", "runtime-tokio-native-tls", "sqlite", "time"] } +tera = { version = "1", default-features = false } +text-colorizer = "1" +text-to-png = "0" +thiserror = "1" +tokio = { version = "1", features = ["fs", "io-util", "macros", "net", "rt-multi-thread", "signal", "sync", "time"] } +toml = "0" +torrust-index-located-error = { version = "3.0.0-alpha.1", path = "packages/located-error" } +tower-http = { version = "0", features = ["compression-full", "cors"] } +urlencoding = "2" +uuid = { version = "1", features = ["v4"] } + +[dev-dependencies] +rand = "0" +tempfile = "3" +uuid = { version = "1", features = ["v4"] } +which = "4" diff --git a/Containerfile b/Containerfile new file mode 100644 index 00000000..378a9b10 --- /dev/null +++ b/Containerfile @@ -0,0 +1,140 @@ +# syntax=docker/dockerfile:latest + +# Torrust Index + +## Builder Image +FROM rust:bookworm as chef +WORKDIR /tmp +RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash +RUN cargo binstall --no-confirm cargo-chef cargo-nextest + +## Tester Image +FROM rust:slim-bookworm as tester +WORKDIR /tmp + +RUN apt-get update; apt-get install -y curl sqlite3; apt-get autoclean +RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash +RUN cargo binstall --no-confirm cargo-nextest imdl + +COPY ./share/ /app/share/torrust +RUN mkdir -p /app/share/torrust/default/database/; \ + sqlite3 /app/share/torrust/default/database/index.sqlite3.db "VACUUM;" + +## Su Exe Compile +FROM docker.io/library/gcc:bookworm as gcc +COPY ./contrib/dev-tools/su-exec/ /usr/local/src/su-exec/ +RUN cc -Wall -Werror -g /usr/local/src/su-exec/su-exec.c -o /usr/local/bin/su-exec; chmod +x /usr/local/bin/su-exec + + +## Chef Prepare (look at project and see wat we need) +FROM chef AS recipe +WORKDIR /build/src +COPY . 
/build/src +RUN cargo chef prepare --recipe-path /build/recipe.json + + +## Cook (debug) +FROM chef AS dependencies_debug +WORKDIR /build/src +COPY --from=recipe /build/recipe.json /build/recipe.json +RUN cargo chef cook --tests --benches --examples --workspace --all-targets --all-features --recipe-path /build/recipe.json +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/temp.tar.zst ; rm -f /build/temp.tar.zst + +## Cook (release) +FROM chef AS dependencies +WORKDIR /build/src +COPY --from=recipe /build/recipe.json /build/recipe.json +RUN cargo chef cook --tests --benches --examples --workspace --all-targets --all-features --recipe-path /build/recipe.json --release +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/temp.tar.zst --release ; rm -f /build/temp.tar.zst + + +## Build Archive (debug) +FROM dependencies_debug AS build_debug +WORKDIR /build/src +COPY . /build/src +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/torrust-index-debug.tar.zst + +## Build Archive (release) +FROM dependencies AS build +WORKDIR /build/src +COPY . /build/src +RUN cargo nextest archive --tests --benches --examples --workspace --all-targets --all-features --archive-file /build/torrust-index.tar.zst --release + + +## Extract and Test (debug) +FROM tester as test_debug +WORKDIR /test +COPY . /test/src/ +COPY --from=build_debug \ + /build/torrust-index-debug.tar.zst \ + /test/torrust-index-debug.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --extract-to /test/src/ --no-run --archive-file /test/torrust-index-debug.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --target-dir-remap /test/src/target/ --cargo-metadata /test/src/target/nextest/cargo-metadata.json --binaries-metadata /test/src/target/nextest/binaries-metadata.json + +RUN mkdir -p /app/bin/; cp -l /test/src/target/debug/torrust-index /app/bin/torrust-index +# RUN mkdir /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-index | grep "libz\.so\.1" | awk '{print $3}')) /app/lib/libz.so.1 +RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin + +## Extract and Test (release) +FROM tester as test +WORKDIR /test +COPY . 
/test/src +COPY --from=build \ + /build/torrust-index.tar.zst \ + /test/torrust-index.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --extract-to /test/src/ --no-run --archive-file /test/torrust-index.tar.zst +RUN cargo nextest run --workspace-remap /test/src/ --target-dir-remap /test/src/target/ --cargo-metadata /test/src/target/nextest/cargo-metadata.json --binaries-metadata /test/src/target/nextest/binaries-metadata.json + +RUN mkdir -p /app/bin/; cp -l /test/src/target/release/torrust-index /app/bin/torrust-index +# RUN mkdir -p /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-index | grep "libz\.so\.1" | awk '{print $3}')) /app/lib/libz.so.1 +RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin + + +## Runtime +FROM gcr.io/distroless/cc-debian12:debug as runtime +RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/env", "/bin/"] +COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec + +ARG TORRUST_INDEX_PATH_CONFIG="/etc/torrust/index/index.toml" +ARG TORRUST_INDEX_DATABASE_DRIVER="sqlite3" +ARG USER_ID=1000 +ARG UDP_PORT=6969 +ARG HTTP_PORT=7070 +ARG API_PORT=1212 + +ENV TORRUST_INDEX_PATH_CONFIG=${TORRUST_INDEX_PATH_CONFIG} +ENV TORRUST_INDEX_DATABASE_DRIVER=${TORRUST_INDEX_DATABASE_DRIVER} +ENV USER_ID=${USER_ID} +ENV UDP_PORT=${UDP_PORT} +ENV HTTP_PORT=${HTTP_PORT} +ENV API_PORT=${API_PORT} +ENV TZ=Etc/UTC + +EXPOSE ${UDP_PORT}/udp +EXPOSE ${HTTP_PORT}/tcp +EXPOSE ${API_PORT}/tcp + +RUN mkdir -p /var/lib/torrust/index /var/log/torrust/index /etc/torrust/index + +ENV ENV=/etc/profile +COPY --chmod=0555 ./share/container/entry_script_sh /usr/local/bin/entry.sh + +VOLUME ["/var/lib/torrust/index","/var/log/torrust/index","/etc/torrust/index"] + +ENV RUNTIME="runtime" +ENTRYPOINT ["/usr/local/bin/entry.sh"] + + +## Torrust-Index (debug) +FROM runtime as debug +ENV RUNTIME="debug" +COPY --from=test_debug /app/ /usr/ +RUN env +CMD ["sh"] + +## Torrust-Index (release) (default) +FROM runtime as release +ENV RUNTIME="release" +COPY --from=test /app/ /usr/ +# HEALTHCHECK CMD ["/usr/bin/wget", "--no-verbose", "--tries=1", "--spider", "localhost:${API_PORT}/version"] +CMD ["/usr/bin/torrust-index"] diff --git a/LICENSE b/LICENSE index 6618a8f4..0ad25db4 100644 --- a/LICENSE +++ b/LICENSE @@ -1,13 +1,661 @@ -# Multiple Licenses + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 -This repository has multiple licenses depending on the content type, the date of contributions or stemming from external component licenses that were not developed by any of Torrust team members or Torrust repository contributors. + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. -The two main applicable license to most of its content are: + Preamble -- For Code -- [agpl-3.0](https://github.com/torrust/torrust-index/blob/main/licensing/agpl-3.0.md) + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. -- For Media (Images, etc.) -- [cc-by-sa](https://github.com/torrust/torrust-index/blob/main/licensing/cc-by-sa.md) + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. -We make the exception that projects that distribute this work only need to include the name and version of the license, instead of needing to include them verbatim in the package. + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. -If you want to read more about all the licenses and how they apply please refer to the [contributor agreement](https://github.com/torrust/torrust-index/blob/main/licensing/contributor_agreement_v01.md). + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
+ + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. 
diff --git a/README.md b/README.md index 130113c3..50030d01 100644 --- a/README.md +++ b/README.md @@ -1,109 +1,207 @@ -# Torrust Index Backend +# Torrust Index -![README HEADER](./img/Torrust_Repo_BackEnd_Readme_Header-20220615.jpg) +[![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![deployment_wf_b]][deployment_wf] [![testing_wf_b]][testing_wf] -![Open Source](https://badgen.net/badge/Open%20Source/100%25/DA2CE7) -![Cool](https://badgen.net/badge/Cool/100%25/FF7F50) +__Torrust Index__ is a library for [BitTorrent][bittorrent] files, written in [Rust][rust] with the [axum] web framework. ___This index aims to be respectful of established standards (both [formal][BEP 00] and [otherwise][torrent_source_felid]).___ -![Nautilus Sponsored](https://badgen.net/badge/Sponsor/Nautilus%20Cyberneering/red) +> This is a [Torrust][torrust] project and is in active development. It is community-supported as well as sponsored by [Nautilus Cyberneering][nautilus]. ---- +## About +The core purpose of a [BitTorrent][bittorrent] index is to maintain a database that connects torrent files with useful metadata, allowing a community of users to keep track of their torrents in a well-organized and informative manner. -## 📢Important Updates 📢 +The __Torrust Index__ serves a [high-level API][api] for our [Torrust Index GUI][gui] client. It also connects to the [management API][api_tracker] of our [Torrust Tracker][tracker] to provide statistics and whitelisting functionality. -- None at the moment [ACCESS ALL UPDATES](https://github.com/torrust/torrust-index-backend/wiki/Project-Updates) +## Key Features ---- +- [x] High-Quality and Modern Rust Codebase. +- [x] [Documentation][docs] Generated from Code Comments. +- [x] [Comprehensive Suite][coverage] of Unit and Functional Tests. +- [x] Good Performance in Busy Conditions. +- [x] Native `IPv4` and `IPv6` Support. +- [x] Persistent `SQLite3` or `MySQL` Databases. -## Index +## Getting Started -- [PROJECT DESCRIPTION](#project-description) -- [PROJECT ROADMAP](#project_roadmap) -- [DOCUMENTATION](#documentation) -- [INSTALLATION](#installation) -- [CONTACT & CONTRIBUTING](#contact_and_contributing) -- [CREDITS](#credits) +### Upgrading +If you are using `Version 1` of `torrust-index-backend`, please view our [upgrading guide][upgrade.md]. -## Project Description +### Container Version -This repository serves as the backend for the [Torrust Index](https://github.com/torrust/torrust-index) project. +The Torrust Index is [deployed to DockerHub][dockerhub]; you can run a demo immediately with the following commands: -### Roadmap +#### Docker: -*Coming soon.* +```sh +docker run -it torrust/index:develop +``` +> Please read our [container guide][containers.md] for more information. + +#### Podman: + +```sh +podman run -it torrust/index:develop +``` +> Please read our [container guide][containers.md] for more information. + +### Development Version + +- Please ensure you have the ___[latest stable (or nightly) version of Rust][rust]___. +- Please ensure that your computer has enough RAM. 
___Recommended: 16 GB.___ + +#### Checkout, Test and Run: + +```sh +# Check out the repository into a new folder: +git clone https://github.com/torrust/torrust-index.git + +# Change into the directory and create an empty database file: +cd torrust-index +mkdir -p ./storage/index/lib/database/ +touch ./storage/index/lib/database/sqlite3.db + +# Run all tests in the application: +cargo test --tests --benches --examples --workspace --all-targets --all-features + +# Run the index: +cargo run +``` +#### Customization: + +```sh +# Copy the default configuration into the standard location: +mkdir -p ./storage/index/etc/ +cp ./share/default/config/index.development.sqlite3.toml ./storage/index/etc/index.toml + +# Customize the index configuration (for example): +vim ./storage/index/etc/index.toml + +# Run the index with the updated configuration: +TORRUST_INDEX_PATH_CONFIG="./storage/index/etc/index.toml" cargo run +``` + +_Optionally, you may choose to supply the entire configuration as an environment variable:_ + +```sh +# Use a configuration supplied via an environment variable: +TORRUST_INDEX_CONFIG=$(cat "./storage/index/etc/index.toml") cargo run +``` + +_For deployment, you __should__ override the `tracker_api_token` by using an environment variable:_ + +```sh +# Please use the secret that you generated for the torrust-tracker configuration. +# Override the secret in the configuration using an environment variable: +TORRUST_INDEX_CONFIG=$(cat "./storage/index/etc/index.toml") \ + TORRUST_INDEX_TRACKER_API_TOKEN=$(cat "./storage/tracker/lib/tracker_api_admin_token.secret") \ + cargo run +``` + +> Please view our [crate documentation][docs] for more detailed instructions. + +### Services +The following services are provided by the default configuration: + +- API + - `http://127.0.0.1:3001/`. + +## Documentation + +- [API (Version 1)][api] + +## Contributing +We are happy to support and welcome new people to our project. Please consider our [contributor guide][guide.md].
+This is an open-source, community-supported project. We welcome contributions from the community! + +__How can you contribute?__ + +- Bug reports and feature requests. +- Code contributions. You can start by looking at the issues labeled "[good first issues]". +- Documentation improvements. Check the [documentation][docs] and [API documentation][api] for typos, errors, or missing information. +- Participation in the community. You can help by answering questions in the [discussions]. -## Installation +## License -1. Install prerequisites: +**Copyright (c) 2023 The Torrust Developers.** - - [Rust/Cargo](https://www.rust-lang.org/) - Compiler toolchain & Package Manager (cargo). +This program is free software: you can redistribute it and/or modify it under the terms of the [GNU Affero General Public License][AGPL_3_0] as published by the [Free Software Foundation][FSF], version 3. -2. Clone the repository: +This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the [GNU Affero General Public License][AGPL_3_0] for more details. - ```bash - git clone https://github.com/torrust/torrust-index-backend.git - ``` +You should have received a copy of the *GNU Affero General Public License* along with this program. If not, see <https://www.gnu.org/licenses/>. -3. Open the project directory and create a file called: `.env`: +Some files include explicit copyright notices and/or license notices. - ```bash - cd torrust-index-backend - echo "DATABASE_URL=sqlite://data.db?mode=rwc" > .env - ``` +### Legacy Exception -4. Install sqlx-cli and build the sqlite database: +For posterity, versions of the Torrust Index that are older than five years are automatically granted the [MIT-0][MIT_0] license in addition to the existing [AGPL-3.0-only][AGPL_3_0] license. - ```bash - cargo install sqlx-cli - sqlx db setup - ``` +## Contributor Agreement +The copyright of the Torrust Index is retained by the respective authors. -5. Build the binaries: +**Contributors agree:** +- That all their contributions be granted license(s) **compatible** with the [Torrust Index License](#License). +- That all contributors signal **clearly** and **explicitly** any other compatible licenses if they are not: *[AGPL-3.0-only with the legacy MIT-0 exception](#License)*. - ```bash - cargo build --release - ``` +**The Torrust Index project has no copyright assignment agreement.** -6. Run the backend once to generate the `config.toml` file: +_We kindly ask you to take time and consider The Torrust Project [Contributor Agreement][agreement.md] in full._ - ```bash - ./target/release/torrust-index-backend - ``` +## Acknowledgments -7. Edit the newly generated `config.toml` file ([config.toml documentation](https://torrust.github.io/torrust-tracker/CONFIG.html)): +This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [Dutch Bits]. - ```bash - nano config.toml - ``` -8. 
Run the backend again: - ```bash - ./target/torrust-index-backend - ``` +[container_wf]: ../../actions/workflows/container.yaml +[container_wf_b]: ../../actions/workflows/container.yaml/badge.svg +[coverage_wf]: ../../actions/workflows/coverage.yaml +[coverage_wf_b]: ../../actions/workflows/coverage.yaml/badge.svg +[deployment_wf]: ../../actions/workflows/deployment.yaml +[deployment_wf_b]: ../../actions/workflows/deployment.yaml/badge.svg +[testing_wf]: ../../actions/workflows/testing.yaml +[testing_wf_b]: ../../actions/workflows/testing.yaml/badge.svg -## Contact and Contributing +[bittorrent]: http://bittorrent.org/ +[rust]: https://www.rust-lang.org/ +[axum]: https://github.com/tokio-rs/axum +[newtrackon]: https://newtrackon.com/ +[coverage]: https://app.codecov.io/gh/torrust/torrust-index +[torrust]: https://torrust.com/ -Feel free to contact us via: +[tracker]: https://github.com/torrust/torrust-tracker +[gui]: https://github.com/torrust/torrust-index-gui -Message `Warm Beer#3352` on Discord or email `mick@dutchbits.nl`. +[dockerhub]: https://hub.docker.com/r/torrust/index/tags -or +[torrent_source_felid]: https://github.com/qbittorrent/qBittorrent/discussions/19406 -Please make suggestions and report any **Torrust Index Back End** specific bugs you find to the issue tracker of this repository [here](https://github.com/torrust/torrust-index-backend/issues) +[BEP 00]: https://www.bittorrent.org/beps/bep_0000.html +[BEP 03]: https://www.bittorrent.org/beps/bep_0003.html +[BEP 07]: https://www.bittorrent.org/beps/bep_0007.html +[BEP 15]: https://www.bittorrent.org/beps/bep_0015.html +[BEP 23]: https://www.bittorrent.org/beps/bep_0023.html +[BEP 27]: https://www.bittorrent.org/beps/bep_0027.html +[BEP 48]: https://www.bittorrent.org/beps/bep_0048.html -**Torrust Index Front End** specific issues can be submitted [here](https://github.com/torrust/torrust-index-frontend/issues). +[containers.md]: ./docs/containers.md +[upgrade.md]: ./upgrades/from_v1_0_0_to_v2_0_0/README.md -Universal issues with the **Torrust Index** can be submitted [here](https://github.com/torrust/torrust-index/issues). Ideas and feature requests are welcome as well! +[docs]: https://docs.rs/torrust-index/latest/torrust_index/ +[api]: https://docs.rs/torrust-index/latest/torrust_index/web/api/v1/ +[api_tracker]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/apis/v1 ---- +[good first issues]: https://github.com/torrust/torrust-index/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 +[discussions]: https://github.com/torrust/torrust-index/discussions -## Credits & Sponsors +[guide.md]: https://github.com/torrust/.github/blob/main/info/contributing.md +[agreement.md]: https://github.com/torrust/.github/blob/main/info/licensing/contributor_agreement_v01.md -This project was developed by [Dutch Bits](https://dutchbits.nl) for [Nautilus Cyberneering GmbH](https://nautilus-cyberneering.de/). +[AGPL_3_0]: ./docs/licenses/LICENSE-AGPL_3_0 +[MIT_0]: ./docs/licenses/LICENSE-MIT_0 +[FSF]: https://www.fsf.org/ -The project has been possible through the support and contribution of both Nautilus Cyberneering, its team and collaborators, as well as that of our great open source contributors. Thank you to you all! 
+ +[nautilus]: https://github.com/orgs/Nautilus-Cyberneering/ +[Dutch Bits]: https://dutchbits.nl +[Naim A.]: https://github.com/naim94a/udpt +[greatest-ape]: https://github.com/greatest-ape/aquatic +[Power2All]: https://github.com/power2all diff --git a/build.rs b/build.rs new file mode 100644 index 00000000..d5068697 --- /dev/null +++ b/build.rs @@ -0,0 +1,5 @@ +// generated by `sqlx migrate build-script` +fn main() { + // trigger recompilation when a new migration is added + println!("cargo:rerun-if-changed=migrations"); +} diff --git a/codecov.yaml b/codecov.yaml new file mode 100644 index 00000000..f0878195 --- /dev/null +++ b/codecov.yaml @@ -0,0 +1,6 @@ +coverage: + status: + project: + default: + target: auto + threshold: 0.5% diff --git a/compose.yaml b/compose.yaml new file mode 100644 index 00000000..613ed0b0 --- /dev/null +++ b/compose.yaml @@ -0,0 +1,81 @@ +name: torrust +services: + + index: + image: torrust-index:release + tty: true + environment: + - TORRUST_INDEX_CONFIG=${TORRUST_INDEX_CONFIG} + - TORRUST_INDEX_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER:-sqlite3} + - TORRUST_INDEX_TRACKER_API_TOKEN=${TORRUST_INDEX_TRACKER_API_TOKEN:-MyAccessToken} + networks: + - server_side + ports: + - 3001:3001 + volumes: + - ./storage/index/lib:/var/lib/torrust/index:Z + - ./storage/index/log:/var/log/torrust/index:Z + - ./storage/index/etc:/etc/torrust/index:Z + depends_on: + - tracker + - mailcatcher + - mysql + + tracker: + image: torrust/tracker:develop + tty: true + environment: + - TORRUST_TRACKER_CONFIG=${TORRUST_TRACKER_CONFIG} + - TORRUST_TRACKER_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER:-sqlite3} + - TORRUST_TRACKER_API_ADMIN_TOKEN=${TORRUST_TRACKER_API_ADMIN_TOKEN:-MyAccessToken} + networks: + - server_side + ports: + - 6969:6969/udp + - 7070:7070 + - 1212:1212 + volumes: + - ./storage/tracker/lib:/var/lib/torrust/tracker:Z + - ./storage/tracker/log:/var/log/torrust/tracker:Z + - ./storage/tracker/etc:/etc/torrust/tracker:Z + depends_on: + - mysql + + mailcatcher: + image: dockage/mailcatcher:0.8.2 + networks: + - server_side + ports: + - 1080:1080 + - 1025:1025 + + mysql: + image: mysql:8.0 + command: '--default-authentication-plugin=mysql_native_password' + healthcheck: + test: + [ + 'CMD-SHELL', + 'mysqladmin ping -h 127.0.0.1 --password="$$(cat /run/secrets/db-password)" --silent' + ] + interval: 3s + retries: 5 + start_period: 30s + environment: + - MYSQL_ROOT_HOST=% + - MYSQL_ROOT_PASSWORD=root_secret_password + - MYSQL_DATABASE=${TORRUST_IDX_BACK_MYSQL_DATABASE:-torrust_index_e2e_testing} + - MYSQL_USER=db_user + - MYSQL_PASSWORD=db_user_secret_password + networks: + - server_side + ports: + - 3306:3306 + volumes: + - mysql_data:/var/lib/mysql + +networks: + server_side: {} + +volumes: + mysql_data: {} diff --git a/contrib/dev-tools/container/build.sh b/contrib/dev-tools/container/build.sh new file mode 100755 index 00000000..21be00a3 --- /dev/null +++ b/contrib/dev-tools/container/build.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_IDX_BACK_USER_UID=${TORRUST_IDX_BACK_USER_UID:-1000} +TORRUST_IDX_BACK_RUN_AS_USER=${TORRUST_IDX_BACK_RUN_AS_USER:-appuser} + +echo "Building docker image ..." +echo "TORRUST_IDX_BACK_USER_UID: $TORRUST_IDX_BACK_USER_UID" +echo "TORRUST_IDX_BACK_RUN_AS_USER: $TORRUST_IDX_BACK_RUN_AS_USER" + +docker build \ + --build-arg UID="$TORRUST_IDX_BACK_USER_UID" \ + --build-arg RUN_AS_USER="$TORRUST_IDX_BACK_RUN_AS_USER" \ + -t torrust-index . 
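As a usage sketch (the `TORRUST_IDX_BACK_USER_UID` variable and its `1000` default come from the build script above; the `$(id -u)` pattern mirrors the e2e scripts below), you might invoke the build from the repository root so that the container user matches your host user:

```sh
# Build the image with the container user mapped to the current host user
# (TORRUST_IDX_BACK_USER_UID falls back to 1000 when unset).
TORRUST_IDX_BACK_USER_UID="$(id -u)" ./contrib/dev-tools/container/build.sh
```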
diff --git a/contrib/dev-tools/container/e2e/mysql/e2e-env-reset.sh b/contrib/dev-tools/container/e2e/mysql/e2e-env-reset.sh new file mode 100755 index 00000000..afe138ac --- /dev/null +++ b/contrib/dev-tools/container/e2e/mysql/e2e-env-reset.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Delete the databases and recreate them. + +docker compose down + +# Index + +# Database credentials +MYSQL_USER="root" +MYSQL_PASSWORD="root_secret_password" +MYSQL_HOST="localhost" +MYSQL_DATABASE="torrust_index_e2e_testing" + +# Create the MySQL database for the index. Assumes MySQL client is installed. +echo "Creating MySQL database $MYSQL_DATABASE for E2E testing ..." +mysql -h $MYSQL_HOST -u $MYSQL_USER -p$MYSQL_PASSWORD -e "DROP DATABASE IF EXISTS $MYSQL_DATABASE; CREATE DATABASE $MYSQL_DATABASE;" + +# Tracker + +# Delete tracker database +rm -f ./storage/tracker/lib/database/torrust_tracker_e2e_testing.db + +# Generate storage directory if it does not exist +mkdir -p "./storage/tracker/lib/database" + +# Generate the sqlite database for the tracker if it does not exist +if ! [ -f "./storage/tracker/lib/database/torrust_tracker_e2e_testing.db" ]; then + sqlite3 ./storage/tracker/lib/database/torrust_tracker_e2e_testing.db "VACUUM;" +fi + +./docker/bin/e2e/mysql/e2e-env-up.sh diff --git a/contrib/dev-tools/container/e2e/mysql/e2e-env-restart.sh b/contrib/dev-tools/container/e2e/mysql/e2e-env-restart.sh new file mode 100755 index 00000000..92088547 --- /dev/null +++ b/contrib/dev-tools/container/e2e/mysql/e2e-env-restart.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +docker compose down +./docker/bin/e2e/mysql/e2e-env-up.sh diff --git a/contrib/dev-tools/container/e2e/mysql/e2e-env-up.sh b/contrib/dev-tools/container/e2e/mysql/e2e-env-up.sh new file mode 100755 index 00000000..9b83c782 --- /dev/null +++ b/contrib/dev-tools/container/e2e/mysql/e2e-env-up.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_IDX_BACK_USER_UID=${TORRUST_IDX_BACK_USER_UID:-1000} \ + docker compose build + +TORRUST_IDX_BACK_USER_UID=${TORRUST_IDX_BACK_USER_UID:-1000} \ + TORRUST_IDX_BACK_CONFIG=$(cat config-idx-back.mysql.local.toml) \ + TORRUST_IDX_BACK_MYSQL_DATABASE="torrust_index_e2e_testing" \ + TORRUST_TRACKER_CONFIG=$(cat config-tracker.local.toml) \ + TORRUST_TRACKER_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER:-mysql} \ + TORRUST_TRACKER_API_ADMIN_TOKEN=${TORRUST_TRACKER_API_ADMIN_TOKEN:-MyAccessToken} \ + docker compose up -d + diff --git a/contrib/dev-tools/container/e2e/run-e2e-tests.sh b/contrib/dev-tools/container/e2e/run-e2e-tests.sh new file mode 100755 index 00000000..cca2640a --- /dev/null +++ b/contrib/dev-tools/container/e2e/run-e2e-tests.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +CURRENT_USER_NAME=$(whoami) +CURRENT_USER_ID=$(id -u) +echo "User name: $CURRENT_USER_NAME" +echo "User id: $CURRENT_USER_ID" + +TORRUST_IDX_BACK_USER_UID=$CURRENT_USER_ID +TORRUST_TRACKER_USER_UID=$CURRENT_USER_ID +export TORRUST_IDX_BACK_USER_UID +export TORRUST_TRACKER_USER_UID + +wait_for_container_to_be_healthy() { + local container_name="$1" + local max_retries="$2" + local retry_interval="$3" + local retry_count=0 + + while [ $retry_count -lt "$max_retries" ]; do + container_health="$(docker inspect --format='{{json .State.Health}}' "$container_name")" + if [ "$container_health" != "{}" ]; then + container_status="$(echo "$container_health" | jq -r '.Status')" + if [ "$container_status" == "healthy" ]; then + echo "Container $container_name is healthy" + return 0 + fi + fi + + retry_count=$((retry_count + 1)) + echo "Waiting for container 
$container_name to become healthy (attempt $retry_count of $max_retries)..." + sleep "$retry_interval" + done + + echo "Timeout reached, container $container_name is not healthy" + return 1 +} + +# Install tool to create torrent files. +# It's needed by some tests to generate and parse test torrent files. +cargo install imdl || exit 1 + +# Install app (no docker) that will run the test suite against the E2E testing +# environment (in docker). +cp .env.local .env || exit 1 +./bin/install.sh || exit 1 + +# TEST USING SQLITE +echo "Running E2E tests using SQLite ..." + +# Start E2E testing environment +./docker/bin/e2e/sqlite/e2e-env-up.sh || exit 1 + +wait_for_container_to_be_healthy torrust-mysql-1 10 3 +# todo: implement healthchecks for tracker and index and wait until they are healthy +#wait_for_container torrust-tracker-1 10 3 +#wait_for_container torrust-idx-back-1 10 3 +sleep 20s + +# Just to make sure that everything is up and running +docker ps + +# Run E2E tests with shared app instance +TORRUST_IDX_BACK_E2E_SHARED=true TORRUST_IDX_BACK_E2E_CONFIG_PATH="./config-idx-back.sqlite.local.toml" cargo test || exit 1 + +# Stop E2E testing environment +docker compose down + +# TEST USING MYSQL +echo "Running E2E tests using MySQL ..." + +# Start E2E testing environment +./docker/bin/e2e/mysql/e2e-env-up.sh || exit 1 + +wait_for_container_to_be_healthy torrust-mysql-1 10 3 +# todo: implement healthchecks for tracker and index and wait until they are healthy +#wait_for_container torrust-tracker-1 10 3 +#wait_for_container torrust-idx-back-1 10 3 +sleep 20s + +# Just to make sure that everything is up and running +docker ps + +# Database credentials +MYSQL_USER="root" +MYSQL_PASSWORD="root_secret_password" +MYSQL_HOST="localhost" +MYSQL_DATABASE="torrust_index_e2e_testing" + +# Create the MySQL database for the index. Assumes the MySQL client is installed. +echo "Creating MySQL database $MYSQL_DATABASE for E2E testing ..." +mysql -h $MYSQL_HOST -u $MYSQL_USER -p$MYSQL_PASSWORD -e "CREATE DATABASE IF NOT EXISTS $MYSQL_DATABASE;" + +# Run E2E tests with shared app instance +TORRUST_IDX_BACK_E2E_SHARED=true TORRUST_IDX_BACK_E2E_CONFIG_PATH="./config-idx-back.mysql.local.toml" cargo test || exit 1 + +# Stop E2E testing environment +docker compose down diff --git a/contrib/dev-tools/container/e2e/sqlite/e2e-env-reset.sh b/contrib/dev-tools/container/e2e/sqlite/e2e-env-reset.sh new file mode 100755 index 00000000..f0ff3a2d --- /dev/null +++ b/contrib/dev-tools/container/e2e/sqlite/e2e-env-reset.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Delete the databases and recreate them. + +docker compose down + +rm -f ./storage/database/torrust_index_e2e_testing.db +rm -f ./storage/tracker/lib/database/torrust_tracker_e2e_testing.db + +# Generate storage directory if it does not exist +mkdir -p "./storage/database" + +# Generate the sqlite database for the index if it does not exist +if ! [ -f "./storage/database/torrust_index_e2e_testing.db" ]; then + sqlite3 ./storage/database/torrust_index_e2e_testing.db "VACUUM;" +fi + +# Generate the sqlite database for the tracker if it does not exist +if ! 
[ -f "./storage/tracker/lib/database/torrust_tracker_e2e_testing.db" ]; then + sqlite3 ./storage/tracker/lib/database/torrust_tracker_e2e_testing.db "VACUUM;" +fi + +./docker/bin/e2e/sqlite/e2e-env-up.sh diff --git a/contrib/dev-tools/container/e2e/sqlite/e2e-env-restart.sh b/contrib/dev-tools/container/e2e/sqlite/e2e-env-restart.sh new file mode 100755 index 00000000..768f50cb --- /dev/null +++ b/contrib/dev-tools/container/e2e/sqlite/e2e-env-restart.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +docker compose down +./docker/bin/e2e/sqlite/e2e-env-up.sh diff --git a/contrib/dev-tools/container/e2e/sqlite/e2e-env-up.sh b/contrib/dev-tools/container/e2e/sqlite/e2e-env-up.sh new file mode 100755 index 00000000..b55cd564 --- /dev/null +++ b/contrib/dev-tools/container/e2e/sqlite/e2e-env-up.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_IDX_BACK_USER_UID=${TORRUST_IDX_BACK_USER_UID:-1000} \ + docker compose build + +TORRUST_IDX_BACK_USER_UID=${TORRUST_IDX_BACK_USER_UID:-1000} \ + TORRUST_IDX_BACK_CONFIG=$(cat config-idx-back.sqlite.local.toml) \ + TORRUST_IDX_BACK_MYSQL_DATABASE="torrust_index_e2e_testing" \ + TORRUST_TRACKER_CONFIG=$(cat config-tracker.local.toml) \ + TORRUST_TRACKER_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER:-sqlite3} \ + TORRUST_TRACKER_API_ADMIN_TOKEN=${TORRUST_TRACKER_API_ADMIN_TOKEN:-MyAccessToken} \ + docker compose up -d + diff --git a/contrib/dev-tools/container/install.sh b/contrib/dev-tools/container/install.sh new file mode 100755 index 00000000..a5896937 --- /dev/null +++ b/contrib/dev-tools/container/install.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +./docker/bin/build.sh +./bin/install.sh diff --git a/contrib/dev-tools/container/run.sh b/contrib/dev-tools/container/run.sh new file mode 100755 index 00000000..19df5d3a --- /dev/null +++ b/contrib/dev-tools/container/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +TORRUST_IDX_BACK_USER_UID=${TORRUST_IDX_BACK_USER_UID:-1000} +TORRUST_IDX_BACK_CONFIG=$(cat config.toml) + +docker run -it \ + --user="$TORRUST_IDX_BACK_USER_UID" \ + --publish 3001:3001/tcp \ + --env TORRUST_IDX_BACK_CONFIG="$TORRUST_IDX_BACK_CONFIG" \ + --volume "$(pwd)/storage":"/app/storage" \ + torrust-index diff --git a/contrib/dev-tools/init/install-local.sh b/contrib/dev-tools/init/install-local.sh new file mode 100755 index 00000000..3396c047 --- /dev/null +++ b/contrib/dev-tools/init/install-local.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# This script is only intended to be used for local development or testing environments. + +# Generate storage directory if it does not exist +mkdir -p ./storage/index/lib/database + +# Generate the sqlite database if it does not exist +if ! 
[ -f "./storage/index/lib/database/sqlite3.db" ]; then + # todo: it should get the path from tracker.toml and only do it when we use sqlite + sqlite3 ./storage/index/lib/database/sqlite3.db "VACUUM;" +fi diff --git a/contrib/dev-tools/su-exec/LICENSE b/contrib/dev-tools/su-exec/LICENSE new file mode 100644 index 00000000..f623b904 --- /dev/null +++ b/contrib/dev-tools/su-exec/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 ncopa + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/contrib/dev-tools/su-exec/Makefile b/contrib/dev-tools/su-exec/Makefile new file mode 100644 index 00000000..bda76895 --- /dev/null +++ b/contrib/dev-tools/su-exec/Makefile @@ -0,0 +1,17 @@ + +CFLAGS ?= -Wall -Werror -g +LDFLAGS ?= + +PROG := su-exec +SRCS := $(PROG).c + +all: $(PROG) + +$(PROG): $(SRCS) + $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) + +$(PROG)-static: $(SRCS) + $(CC) $(CFLAGS) -o $@ $^ -static $(LDFLAGS) + +clean: + rm -f $(PROG) $(PROG)-static diff --git a/contrib/dev-tools/su-exec/README.md b/contrib/dev-tools/su-exec/README.md new file mode 100644 index 00000000..2b051737 --- /dev/null +++ b/contrib/dev-tools/su-exec/README.md @@ -0,0 +1,46 @@ +# su-exec +switch user and group id, setgroups and exec + +## Purpose + +This is a simple tool that will simply execute a program with different +privileges. The program will be executed directly and not run as a child, +like su and sudo does, which avoids TTY and signal issues (see below). + +Notice that su-exec depends on being run by the root user, non-root +users do not have permission to change uid/gid. + +## Usage + +```shell +su-exec user-spec command [ arguments... ] +``` + +`user-spec` is either a user name (e.g. `nobody`) or user name and group +name separated with colon (e.g. `nobody:ftp`). Numeric uid/gid values +can be used instead of names. Example: + +```shell +$ su-exec apache:1000 /usr/sbin/httpd -f /opt/www/httpd.conf +``` + +## TTY & parent/child handling + +Notice how `su` will make `ps` be a child of a shell while `su-exec` +just executes `ps` directly. + +```shell +$ docker run -it --rm alpine:edge su postgres -c 'ps aux' +PID USER TIME COMMAND + 1 postgres 0:00 ash -c ps aux + 12 postgres 0:00 ps aux +$ docker run -it --rm -v $PWD/su-exec:/sbin/su-exec:ro alpine:edge su-exec postgres ps aux +PID USER TIME COMMAND + 1 postgres 0:00 ps aux +``` + +## Why reinvent gosu? + +This does more or less exactly the same thing as [gosu](https://github.com/tianon/gosu) +but it is only 10kb instead of 1.8MB. 
+ diff --git a/contrib/dev-tools/su-exec/su-exec.c b/contrib/dev-tools/su-exec/su-exec.c new file mode 100644 index 00000000..499071c6 --- /dev/null +++ b/contrib/dev-tools/su-exec/su-exec.c @@ -0,0 +1,109 @@ +/* set user and group id and exec */ + +#include <sys/types.h> + +#include <err.h> +#include <errno.h> +#include <grp.h> +#include <pwd.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +static char *argv0; + +static void usage(int exitcode) +{ + printf("Usage: %s user-spec command [args]\n", argv0); + exit(exitcode); +} + +int main(int argc, char *argv[]) +{ + char *user, *group, **cmdargv; + char *end; + + uid_t uid = getuid(); + gid_t gid = getgid(); + + argv0 = argv[0]; + if (argc < 3) + usage(0); + + user = argv[1]; + group = strchr(user, ':'); + if (group) + *group++ = '\0'; + + cmdargv = &argv[2]; + + struct passwd *pw = NULL; + if (user[0] != '\0') { + uid_t nuid = strtol(user, &end, 10); + if (*end == '\0') + uid = nuid; + else { + pw = getpwnam(user); + if (pw == NULL) + err(1, "getpwnam(%s)", user); + } + } + if (pw == NULL) { + pw = getpwuid(uid); + } + if (pw != NULL) { + uid = pw->pw_uid; + gid = pw->pw_gid; + } + + setenv("HOME", pw != NULL ? pw->pw_dir : "/", 1); + + if (group && group[0] != '\0') { + /* group was specified, ignore grouplist for setgroups later */ + pw = NULL; + + gid_t ngid = strtol(group, &end, 10); + if (*end == '\0') + gid = ngid; + else { + struct group *gr = getgrnam(group); + if (gr == NULL) + err(1, "getgrnam(%s)", group); + gid = gr->gr_gid; + } + } + + if (pw == NULL) { + if (setgroups(1, &gid) < 0) + err(1, "setgroups(%i)", gid); + } else { + int ngroups = 0; + gid_t *glist = NULL; + + while (1) { + int r = getgrouplist(pw->pw_name, gid, glist, &ngroups); + + if (r >= 0) { + if (setgroups(ngroups, glist) < 0) + err(1, "setgroups"); + break; + } + + glist = realloc(glist, ngroups * sizeof(gid_t)); + if (glist == NULL) + err(1, "malloc"); + } + } + + if (setgid(gid) < 0) + err(1, "setgid(%i)", gid); + + if (setuid(uid) < 0) + err(1, "setuid(%i)", uid); + + execvp(cmdargv[0], cmdargv); + err(1, "%s", cmdargv[0]); + + return 1; +} diff --git a/cspell.json b/cspell.json new file mode 100644 index 00000000..8b597b71 --- /dev/null +++ b/cspell.json @@ -0,0 +1,13 @@ +{ + "$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json", + "version": "0.2", + "dictionaryDefinitions": [ + { + "name": "project-words", + "path": "./project-words.txt", + "addWords": true + } + ], + "dictionaries": ["project-words"], + "ignorePaths": ["target", "/project-words.txt"] +} diff --git a/docs/adrs/20230510152112_lowercas_infohashes.md b/docs/adrs/20230510152112_lowercas_infohashes.md new file mode 100644 index 00000000..f03bb12b --- /dev/null +++ b/docs/adrs/20230510152112_lowercas_infohashes.md @@ -0,0 +1,20 @@ +# Lowercase infohashes + +## Description + +We use both uppercase and lowercase infohashes. This is a problem because +we have to check both cases. For example, we have to convert to uppercase before +inserting into or querying the database. + +The database and API URLs use uppercase infohashes, and they are case-sensitive. + +## Agreement + +We agree to use lowercase infohashes everywhere and to convert them as soon +as possible after input. + +There is no specific reason to use lowercase infohashes, but we had to choose +one of the two forms. We decided to use lowercase because the infohash is a hash, and +hashes are usually written in lowercase. + +We will change them progressively. 
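A minimal sketch of what this agreement implies at input boundaries (hypothetical values and commands, not code from this repository): normalize an infohash to its canonical lowercase form as soon as it is received, before storing it or using it in a query.

```sh
# Hypothetical example: a v1 infohash is 40 hexadecimal characters (SHA-1),
# so lowercasing it as soon as it enters the system is a pure formatting step.
INFOHASH="5452869BE36F9F3350CCEE6B4544E7E76CAAADAB"
NORMALIZED=$(echo "$INFOHASH" | tr '[:upper:]' '[:lower:]')
echo "$NORMALIZED" # -> 5452869be36f9f3350ccee6b4544e7e76caaadab
```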
diff --git a/docs/adrs/20230824135449_ignore_non_standard_fields_in_info_dictionary.md b/docs/adrs/20230824135449_ignore_non_standard_fields_in_info_dictionary.md new file mode 100644 index 00000000..870656db --- /dev/null +++ b/docs/adrs/20230824135449_ignore_non_standard_fields_in_info_dictionary.md @@ -0,0 +1,138 @@ +# Ignore non-standard fields in info dictionary + +This is a temporary solution to avoid problems with non-standard fields in the +info dictionary. In the future, we could add support for them. + +## Context + +In torrents, custom fields in the info dictionary can lead to mismatches in our system. + +## Problem + +Some torrents might include custom fields in the info dictionary. **Parsing non-standard fields generates a different info-hash for the indexed torrent**, leading to potential issues and misrepresentations. + +A sample JSON version of a torrent with a `collections` custom field in the `info` dictionary: + +```json +{ + "announce": "https://academictorrents.com/announce.php", + "announce-list": [ + [ + "https://academictorrents.com/announce.php" + ], + [ + "https://ipv6.academictorrents.com/announce.php" + ], + [ + "udp://tracker.opentrackr.org:1337/announce" + ], + [ + "udp://tracker.openbittorrent.com:80/announce" + ], + [ + "http://bt1.archive.org:6969/announce" + ], + [ + "http://bt2.archive.org:6969/announce" + ] + ], + "comment": "This content hosted at the Internet Archive at https://archive.org/details/rapppid-weights.tar\nFiles may have changed, which prevents torrents from downloading correctly or completely; please check for an updated torrent at https://archive.org/download/rapppid-weights.tar/rapppid-weights.tar_archive.torrent\nNote: retrieval usually requires a client that supports webseeding (GetRight style).\nNote: many Internet Archive torrents contain a 'pad file' directory. 
This directory and the files within it may be erased once retrieval completes.\nNote: the file rapppid-weights.tar_meta.xml contains metadata about this torrent's contents.", + "created by": "ia_make_torrent", + "creation date": 1689273787, + "info": { + "collections": [ + "org.archive.rapppid-weights.tar" + ], + "files": [ + { + "crc32": "57d33fcc", + "length": 11528324, + "md5": "e91bb4ba82695161be68f8b33ae76142", + "mtime": "1689273730", + "path": [ + "RAPPPID Weights.tar.gz" + ], + "sha1": "45970ef33cb3049a7a8629e40c8f5e5268d1dc53" + }, + { + "crc32": "c658fd4f", + "length": 20480, + "md5": "a782b2a53ba49f0d45f3dd6e35e0d593", + "mtime": "1689273783", + "path": [ + "rapppid-weights.tar_meta.sqlite" + ], + "sha1": "bcb06b3164f1d2aba22ef6046eb80f65264e9fba" + }, + { + "crc32": "8140a5c7", + "length": 1044, + "md5": "1bab21e50e06ab42d3a77d872bf252e5", + "mtime": "1689273763", + "path": [ + "rapppid-weights.tar_meta.xml" + ], + "sha1": "b2f0f2bbec34aa9140fb9ac3fcb190588a496aa3" + } + ], + "name": "rapppid-weights.tar", + "piece length": 524288, + "pieces": "AB EC 55 6E 0F 7B E7 D3 30 0C F6 68 8C 90 6D 99 0C 3E 32 B5 2C F2 B6 7C 0C 32 52 BC 72 6F 07 1E 73 AB 76 F1 BC 32 2B FC 21 D4 7F 1A E9 72 35 40 7E C3 B4 89 09 2B ED 4B D8 B0 6C 65 8C 27 58 AE FB 72 75 73 44 37 88 28 20 D2 94 BD A4 6A F8 D2 A6 FD 02 65 1C 1C DF B8 56 6D 3A D2 7E A7 3D CA E2 49 F7 36 8D 17 77 6E 32 AD EF A5 44 C2 8F B6 9C 24 56 AD E8 FB 7B A6 71 C0 81 E5 43 03 91 D4 4F B0 A6 64 CA 29 1B 0D 1D 40 7D 39 4E 76 96 EB 01 18 F3 F5 50 8E 2F FA 54 FC 49 66 85 D8 38 87 78 9B 0A 8F 7A A3 2C 8F 72 36 AD 6D 74 0B FC C5 57 71 86 FB F3 CF CA C9 DA EC 61 62 A2 2A 1B A7 85 89 91 8F AA C0 C0 CB 0D 57 D8 B7 E7 64 4D F2 84 73 76 98 FB 3A 17 48 D7 9C 01 FE CA 6D 1F C5 97 34 05 54 39 DA C2 6E 17 41 11 69 F3 46 D1 7D 16 D3 C0 87 3B C3 B2 0C 1D E0 E2 49 C3 05 D2 4C 00 5A 5B 78 01 12 3E BF 52 43 49 6D 1A EE 23 79 D2 0E 28 B6 84 7E C5 ED 79 DE 64 02 ED 47 71 3D 93 16 C4 A2 76 18 77 54 C5 31 48 96 3A 51 C1 4A 92 90 91 F3 CF 48 5B 24 86 55 A8 EB 0C C6 2D 86 E2 29 56 09 2C 38 0B CD C1 CA 45 E6 64 6A 47 FE BB 2E 47 9A 77 45 29 E9 72 19 20 6F 42 79 2B 37 B9 53 25 ED 0F 29 04 D5 E2 96 F1 DE CF 99 BE 32 AA B8 0A 1D 0B 9F B9 D6 AB 5C 50 43 78 85 41 09 01 24 CF E0 89 76 5B 4D A9 CA 72 C0 DF 92 47 0F 0D CE CA 96 C6 7E A5 41 5F 2B A7 BB 04 CC F7 44 7F 94 1E 24 D2 1B 17 CA 18 79 90 A3 D6 20 75 A2 96 68 06 58 5A DE F5 2C 1A 90 22 72 33 8E D5 B2 A8 FA E5 E9 E7 69 62 02 7C 09 B3 4C" + }, + "locale": "en", + "title": "rapppid-weights.tar", + "url-list": [ + "https://archive.org/download/", + "http://ia902702.us.archive.org/22/items/", + "http://ia802702.us.archive.org/22/items/" + ] +} +``` + +> NOTICE: The `collections` field. + +At the moment, we are only parsing these fields from the `info` dictionary: + +```rust +pub struct TorrentInfo { + pub name: String, + #[serde(default)] + pub pieces: Option<ByteBuf>, + #[serde(rename = "piece length")] + pub piece_length: i64, + #[serde(default)] + pub md5sum: Option<String>, + #[serde(default)] + pub length: Option<i64>, + #[serde(default)] + pub files: Option<Vec<TorrentFile>>, + #[serde(default)] + pub private: Option<u8>, + #[serde(default)] + pub path: Option<Vec<String>>, + #[serde(default)] + #[serde(rename = "root hash")] + pub root_hash: Option<String>, + #[serde(default)] + pub source: Option<String>, +} +``` + +> WARNING!: If the uploaded torrent has a non-standard field in the info dictionary, +> it will not only be ignored, but it will also produce a different info-hash for the indexed torrent. + +## Agreement + +1. 
+## Agreement
+
+1. Temporary Solution: Ignore all non-standard fields in the info dictionary.
+2. Communication: Users will be alerted about this decision through UI warnings and documentation.
+3. Future Consideration: There is a potential to support these fields in future iterations.
+
+## Rationale
+
+- Prioritizing standard fields ensures uniformity in the representation of torrents.
+- Warnings and documentation provide transparency to users.
+- A future-proof approach leaves room for possible expansion or reconsideration.
+
+## Other considerations
+
+The `source` field might be considered a non-standard field, because it's not included in any BEP, but this field is being parsed and stored in the database since it seems to be widely used by private trackers.
diff --git a/docs/adrs/README.md b/docs/adrs/README.md
new file mode 100644
index 00000000..85986fc3
--- /dev/null
+++ b/docs/adrs/README.md
@@ -0,0 +1,23 @@
+# Architectural Decision Records (ADRs)
+
+This directory contains the architectural decision records (ADRs) for the
+project. ADRs are a way to document the architectural decisions made in the
+project.
+
+More info: .
+
+## How to add a new record
+
+For the prefix, generate a UTC timestamp:
+
+```s
+date -u +"%Y%m%d%H%M%S"
+```
+
+Then you can create a new markdown file with the following format:
+
+```s
+20230510152112_title.md
+```
+
+For the time being, we are not following any specific template.
diff --git a/docs/containers.md b/docs/containers.md
new file mode 100644
index 00000000..576f6149
--- /dev/null
+++ b/docs/containers.md
@@ -0,0 +1,221 @@
+# Containers (Docker or Podman)
+
+## Demo environment
+
+It is simple to set up the index with the default
+configuration and run it using the pre-built public Docker image:
+
+With Docker:
+
+```sh
+docker run -it torrust/index:latest
+```
+
+or with Podman:
+
+```sh
+podman run -it torrust/index:latest
+```
+
+## Requirements
+
+- Tested with recent versions of Docker or Podman.
+
+## Volumes
+
+The [Containerfile](../Containerfile) (i.e. the Dockerfile) defines three volumes:
+
+```Dockerfile
+VOLUME ["/var/lib/torrust/index","/var/log/torrust/index","/etc/torrust/index"]
+```
+
+When instantiating the container image with the `docker run` or `podman run` command, we map these volumes to the local storage:
+
+```s
+./storage/index/lib -> /var/lib/torrust/index
+./storage/index/log -> /var/log/torrust/index
+./storage/index/etc -> /etc/torrust/index
+```
+
+> NOTE: You can adjust this mapping to your preference; however, this mapping is the default in our guides and scripts.
+
+### Pre-Create Host-Mapped Folders:
+
+Please run this command where you wish to run the container:
+
+```sh
+mkdir -p ./storage/index/lib/ ./storage/index/log/ ./storage/index/etc/
+```
+
+### Matching Ownership IDs of Host Storage and Container Volumes
+
+It is important that the `torrust` user inside the container has the same uid `$(id -u)` as the owner of the host-mapped folders. Our [entry script](../share/container/entry_script_sh), installed to `/usr/local/bin/entry.sh` inside the container, switches to the `torrust` user created based upon the `USER_UID` environment variable.
+
+When running the container, you may use the `--env USER_ID="$(id -u)"` argument, which gets the current user id and passes it to the container.
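+
+As a quick sanity check, you can compare your user id with the owner of the host-mapped folders. This is a hypothetical example; `stat -c` assumes GNU coreutils:
+
+```sh
+# Your current user id:
+id -u
+
+# Owner uid of the host-mapped storage; it should print the same number:
+stat -c '%u' ./storage/index/lib
+```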
+
+### Mapped Tree Structure
+
+Using the standard mapping defined above produces the following mapped tree:
+
+```s
+storage/index/
+├── lib
+│   ├── database
+│   │   └── sqlite3.db => /var/lib/torrust/index/database/sqlite3.db [auto populated]
+│   └── tls
+│       ├── localhost.crt => /var/lib/torrust/index/tls/localhost.crt [user supplied]
+│       └── localhost.key => /var/lib/torrust/index/tls/localhost.key [user supplied]
+├── log => /var/log/torrust/index (future use)
+└── etc
+    └── index.toml => /etc/torrust/index/index.toml [auto populated]
+```
+
+> NOTE: You only need the `tls` directory and certificates if you have enabled SSL.
+
+## Building the Container
+
+### Clone and Change into Repository
+
+```sh
+# Inside your dev folder
+git clone https://github.com/torrust/torrust-index.git; cd torrust-index
+```
+
+### (Docker) Setup Context
+
+Before starting, if you are using Docker, it is helpful to reset the context to the default:
+
+```sh
+docker context use default
+```
+
+### (Docker) Build
+
+```sh
+# Release Mode
+docker build --target release --tag torrust-index:release --file Containerfile .
+
+# Debug Mode
+docker build --target debug --tag torrust-index:debug --file Containerfile .
+```
+
+### (Podman) Build
+
+```sh
+# Release Mode
+podman build --target release --tag torrust-index:release --file Containerfile .
+
+# Debug Mode
+podman build --target debug --tag torrust-index:debug --file Containerfile .
+```
+
+## Running the Container
+
+### Basic Run
+
+No arguments are needed to simply check that the container image works:
+
+#### (Docker) Run Basic
+
+```sh
+# Release Mode
+docker run -it torrust-index:release
+
+# Debug Mode
+docker run -it torrust-index:debug
+```
+
+#### (Podman) Run Basic
+
+```sh
+# Release Mode
+podman run -it torrust-index:release
+
+# Debug Mode
+podman run -it torrust-index:debug
+```
+
+### Arguments
+
+The arguments need to be placed before the image tag, i.e.:
+
+`run [arguments] torrust-index:release`
+
+#### Environment Variables:
+
+Environment variables are loaded through the `--env` argument, in the format `--env VAR="value"`.
+
+The following environment variables can be set:
+
+- `TORRUST_INDEX_PATH_CONFIG` - The in-container path to the index configuration file (default: `"/etc/torrust/index/index.toml"`).
+- `TORRUST_INDEX_TRACKER_API_TOKEN` - Override of the admin token. If set, this value overrides any value set in the config.
+- `TORRUST_INDEX_DATABASE_DRIVER` - The database type used for the container (options: `sqlite3`, `mysql`, default: `sqlite3`). Please note: this does not override the database configuration within the `.toml` config file.
+- `TORRUST_INDEX_CONFIG` - Load the configuration from this environment variable instead of from a file (e.g.: `TORRUST_INDEX_CONFIG=$(cat index.toml)`); see the example after this list.
+- `USER_ID` - The user id for the runtime-created `torrust` user. Please note: this user id should match the ownership of the host-mapped volumes (default: `1000`).
+- `API_PORT` - The port for the index API. This should match the port used in the configuration (default: `3001`).
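+
+For example, a hypothetical run that injects the whole configuration through the environment instead of mounting a config file (the file path here is only an illustration):
+
+```sh
+docker run -it \
+    --env TORRUST_INDEX_CONFIG="$(cat ./storage/index/etc/index.toml)" \
+    torrust-index:release
+```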
+
+### Sockets
+
+Socket ports used internally within the container can be mapped to the host with the `--publish` argument.
+
+The format is: `--publish [optional_host_ip]:[host_port]:[container_port]/[optional_protocol]`, for example: `--publish 127.0.0.1:8080:80/tcp`.
+
+The default ports can be mapped with the following:
+
+```s
+--publish 0.0.0.0:3001:3001/tcp
+```
+
+> NOTE: Inside the container it is necessary to expose a socket with the wildcard address `0.0.0.0` so that it may be accessible from the host. Verify in the configuration that the sockets are bound to the wildcard address.
+
+### Volumes
+
+By default the container will use anonymous volumes for `/var/lib/torrust/index`, `/var/log/torrust/index`, and `/etc/torrust/index`; however, for better administration it is good to make these volumes host-mapped.
+
+The argument to host-map volumes is `--volume`, with the format: `--volume=[host-src:]container-dest[:<options>]`.
+
+The default mapping can be supplied with the following arguments:
+
+```s
+--volume ./storage/index/lib:/var/lib/torrust/index:Z \
+--volume ./storage/index/log:/var/log/torrust/index:Z \
+--volume ./storage/index/etc:/etc/torrust/index:Z \
+```
+
+Please note the `:Z` at the end of the Podman `--volume` mapping arguments; this gives read-write permission on SELinux-enabled systems. If this doesn't work on your system, you can use `:rw` instead.
+
+## Complete Example:
+
+### With Docker
+
+```sh
+## Setup Docker Default Context
+docker context use default
+
+## Build Container Image
+docker build --target release --tag torrust-index:release --file Containerfile .
+
+## Setup Mapped Volumes
+mkdir -p ./storage/index/lib/ ./storage/index/log/ ./storage/index/etc/
+
+## Run Torrust Index Container Image
+docker run -it \
+    --env TORRUST_INDEX_TRACKER_API_TOKEN="MySecretToken" \
+    --env USER_ID="$(id -u)" \
+    --publish 0.0.0.0:3001:3001/tcp \
+    --volume ./storage/index/lib:/var/lib/torrust/index:Z \
+    --volume ./storage/index/log:/var/log/torrust/index:Z \
+    --volume ./storage/index/etc:/etc/torrust/index:Z \
+    torrust-index:release
+```
+
+### With Podman
+
+```sh
+## Build Container Image
+podman build --target release --tag torrust-index:release --file Containerfile .
+
+## Setup Mapped Volumes
+mkdir -p ./storage/index/lib/ ./storage/index/log/ ./storage/index/etc/
+
+## Run Torrust Index Container Image
+podman run -it \
+    --env TORRUST_INDEX_TRACKER_API_TOKEN="MySecretToken" \
+    --env USER_ID="$(id -u)" \
+    --publish 0.0.0.0:3001:3001/tcp \
+    --volume ./storage/index/lib:/var/lib/torrust/index:Z \
+    --volume ./storage/index/log:/var/log/torrust/index:Z \
+    --volume ./storage/index/etc:/etc/torrust/index:Z \
+    torrust-index:release
+```
diff --git a/img/Torrust_Repo_BackEnd_Readme_Header-20220615.jpg b/docs/images/Torrust_Repo_BackEnd_Readme_Header-20220615.jpg
similarity index 100%
rename from img/Torrust_Repo_BackEnd_Readme_Header-20220615.jpg
rename to docs/images/Torrust_Repo_BackEnd_Readme_Header-20220615.jpg
diff --git a/docs/licenses/LICENSE-AGPL_3_0 b/docs/licenses/LICENSE-AGPL_3_0
new file mode 100644
index 00000000..2beb9e16
--- /dev/null
+++ b/docs/licenses/LICENSE-AGPL_3_0
@@ -0,0 +1,662 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.
By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. 
+
diff --git a/licensing/old_commits/mit-0.md b/docs/licenses/LICENSE-MIT_0
similarity index 95%
rename from licensing/old_commits/mit-0.md
rename to docs/licenses/LICENSE-MIT_0
index e08ee6e9..fc06cc4f 100644
--- a/licensing/old_commits/mit-0.md
+++ b/docs/licenses/LICENSE-MIT_0
@@ -1,4 +1,4 @@
-# Copyright 2021 Nautilus Cyberneering GmbH
+MIT No Attribution
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software
 and associated documentation files (the "Software"), to deal in the Software
diff --git a/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent
new file mode 100644
index 00000000..1a08a811
Binary files /dev/null and b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent differ
diff --git a/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent.json b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent.json
new file mode 100644
index 00000000..caaa1a41
--- /dev/null
+++ b/docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent.json
@@ -0,0 +1,10 @@
+{
+    "created by": "qBittorrent v4.4.1",
+    "creation date": 1679674628,
+    "info": {
+        "length": 172204,
+        "name": "mandelbrot_2048x2048.png",
+        "piece length": 16384,
+        "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93"
+    }
+}
\ No newline at end of file
diff --git a/docs/media/torrust_logo.png b/docs/media/torrust_logo.png
new file mode 100644
index 00000000..28abf137
Binary files /dev/null and b/docs/media/torrust_logo.png differ
diff --git a/docs/release_process.md b/docs/release_process.md
new file mode 100644
index 00000000..da94ea6b
--- /dev/null
+++ b/docs/release_process.md
@@ -0,0 +1,95 @@
+# Torrust Index Release Process (v2.2.2)
+
+## Version:
+
+> **The `[semantic version]` is bumped according to releases, new features, and breaking changes.**
+>
+> *The `develop` branch uses the (semantic version) suffix `-develop`.*
+
+## Process:
+
+**Note**: this guide assumes that your git `torrust` remote is configured like this:
+
+```sh
+git remote show torrust
+```
+
+```s
+* remote torrust
+  Fetch URL: git@github.com:torrust/torrust-index.git
+  Push URL: git@github.com:torrust/torrust-index.git
+...
+```
+
+### 1. The `develop` branch is ready for a release.
+
+The `develop` branch should have the version `[semantic version]-develop` that is ready to be released.
+
+### 2. Stage `develop` HEAD for merging into the `main` branch:
+
+```sh
+git fetch --all
+git push --force torrust develop:staging/main
+```
+
+### 3. Create Release Commit:
+
+```sh
+git stash
+git switch staging/main
+git reset --hard torrust/staging/main
+# change `[semantic version]-develop` to `[semantic version]`.
+git add -A
+git commit -m "release: version [semantic version]"
+git push torrust
+```
+
+### 4. Create and Merge Pull Request from `staging/main` into `main` branch.
+
+Pull request title format: "Release Version `[semantic version]`".
+
+This pull request merges the new version into the `main` branch.
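+
+As a concrete illustration of the `[semantic version]` placeholder, a hypothetical `3.0.0` release would use these values in steps 5 and 6 below:
+
+```sh
+# Step 5: push main to the release branch.
+git push torrust main:releases/v3.0.0
+
+# Step 6: create and push the signed tag.
+git switch releases/v3.0.0
+git tag --sign v3.0.0
+git push --tags torrust
+```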
+
+### 5. Push new version from `main` HEAD to `releases/v[semantic version]` branch:
+
+```sh
+git fetch --all
+git push torrust main:releases/v[semantic version]
+```
+
+> **Check that the deployment is successful!**
+
+### 6. Create Release Tag:
+
+```sh
+git switch releases/v[semantic version]
+git tag --sign v[semantic version]
+git push --tags torrust
+```
+
+### 7. Create Release on GitHub from Tag.
+
+This is for those who wish to download the source code.
+
+### 8. Stage `main` HEAD for merging into the `develop` branch:
+
+Merge the release back into the `develop` branch.
+
+```sh
+git fetch --all
+git push --force torrust main:staging/develop
+```
+
+### 9. Create Commit that bumps the next development version:
+
+```sh
+git stash
+git switch staging/develop
+git reset --hard torrust/staging/develop
+# change `[semantic version]` to `(next)[semantic version]-develop`.
+git add -A
+git commit -m "develop: bump to version (next)[semantic version]-develop"
+git push torrust
+```
+
+### 10. Create and Merge Pull Request from `staging/develop` into `develop` branch.
+
+Pull request title format: "Version `[semantic version]` was Released".
+
+This pull request merges the new release into the `develop` branch and bumps the version number.
diff --git a/licensing/agpl-3.0.md b/licensing/agpl-3.0.md
deleted file mode 100644
index f2a1b1b6..00000000
--- a/licensing/agpl-3.0.md
+++ /dev/null
@@ -1,660 +0,0 @@
-# GNU AFFERO GENERAL PUBLIC LICENSE
-
-Version 3, 19 November 2007
-
-Copyright (C) 2007 Free Software Foundation, Inc.
-<https://fsf.org/>
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-## Preamble
-
-The GNU Affero General Public License is a free, copyleft license for
-software and other kinds of works, specifically designed to ensure
-cooperation with the community in the case of network server software.
-
-The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-our General Public Licenses are intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains
-free software for all its users.
-
-When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-Developers that use our General Public Licenses protect your rights
-with two steps: (1) assert copyright on the software, and (2) offer
-you this License which gives you legal permission to copy, distribute
-and/or modify the software.
-
-A secondary benefit of defending all users' freedom is that
-improvements made in alternate versions of the program, if they
-receive widespread use, become available for other developers to
-incorporate. Many developers of free software are heartened and
-encouraged by the resulting cooperation. However, in the case of
-software used on network servers, this result may fail to come about.
-The GNU General Public License permits making a modified version and
-letting the public access it on a server without ever releasing its
-source code to the public.
- -The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. - -An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing -under this license. - -The precise terms and conditions for copying, distribution and -modification follow. - -## TERMS AND CONDITIONS - -### 0. Definitions - -"This License" refers to version 3 of the GNU Affero General Public -License. - -"Copyright" also means copyright-like laws that apply to other kinds -of works, such as semiconductor masks. - -"The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - -To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of -an exact copy. The resulting work is called a "modified version" of -the earlier work or a work "based on" the earlier work. - -A "covered work" means either the unmodified Program or a work based -on the Program. - -To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - -To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user -through a computer network, with no transfer of a copy, is not -conveying. - -An interactive user interface displays "Appropriate Legal Notices" to -the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - -### 1. Source Code - -The "source code" for a work means the preferred form of the work for -making modifications to it. "Object code" means any non-source form of -a work. - -A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. 
- -The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - -The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can -regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same -work. - -### 2. Basic Permissions - -All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, -without conditions so long as your license otherwise remains in force. -You may convey covered works to others for the sole purpose of having -them make modifications exclusively for you, or provide you with -facilities for running those works, provided that you comply with the -terms of this License in conveying all material for which you do not -control copyright. Those thus making or running the covered works for -you must do so exclusively on your behalf, under your direction and -control, on terms that prohibit them from making any copies of your -copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the -conditions stated below. Sublicensing is not allowed; section 10 makes -it unnecessary. - -### 3. Protecting Users' Legal Rights From Anti-Circumvention Law - -No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. 
- -When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such -circumvention is effected by exercising rights under this License with -respect to the covered work, and you disclaim any intention to limit -operation or modification of the work as a means of enforcing, against -the work's users, your or third parties' legal rights to forbid -circumvention of technological measures. - -### 4. Conveying Verbatim Copies - -You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - -### 5. Conveying Modified Source Versions - -You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these -conditions: - -- a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. -- b) The work must carry prominent notices stating that it is - released under this License and any conditions added under - section 7. This requirement modifies the requirement in section 4 - to "keep intact all notices". -- c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. -- d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - -A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - -### 6. Conveying Non-Source Forms - -You may convey a covered work in object code form under the terms of -sections 4 and 5, provided that you also convey the machine-readable -Corresponding Source under the terms of this License, in one of these -ways: - -- a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
-- b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the Corresponding - Source from a network server at no charge. -- c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. -- d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. -- e) Convey the object code using peer-to-peer transmission, - provided you inform other peers where the object code and - Corresponding Source of the work are being offered to the general - public at no charge under subsection 6d. - -A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - -A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, -family, or household purposes, or (2) anything designed or sold for -incorporation into a dwelling. In determining whether a product is a -consumer product, doubtful cases shall be resolved in favor of -coverage. For a particular product received by a particular user, -"normally used" refers to a typical or common use of that class of -product, regardless of the status of the particular user or of the way -in which the particular user actually uses, or expects or is expected -to use, the product. A product is a consumer product regardless of -whether the product has substantial commercial, industrial or -non-consumer uses, unless such uses represent the only significant -mode of use of the product. - -"Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to -install and execute modified versions of a covered work in that User -Product from a modified version of its Corresponding Source. The -information must suffice to ensure that the continued functioning of -the modified object code is in no case prevented or interfered with -solely because modification has been made. 
- -If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - -The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or -updates for a work that has been modified or installed by the -recipient, or for the User Product in which it has been modified or -installed. Access to a network may be denied when the modification -itself materially and adversely affects the operation of the network -or violates the rules and protocols for communication across the -network. - -Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - -### 7. Additional Terms - -"Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- -Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders -of that material) supplement the terms of this License with terms: - -- a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or -- b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or -- c) Prohibiting misrepresentation of the origin of that material, - or requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or -- d) Limiting the use for publicity purposes of names of licensors - or authors of the material; or -- e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or -- f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions - of it) with contractual assumptions of liability to the recipient, - for any liability that these contractual assumptions directly - impose on those licensors and authors. - -All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - -If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; the -above requirements apply either way. - -### 8. Termination - -You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - -However, if you cease all violation of this License, then your license -from a particular copyright holder is reinstated (a) provisionally, -unless and until the copyright holder explicitly and finally -terminates your license, and (b) permanently, if the copyright holder -fails to notify you of the violation by some reasonable means prior to -60 days after the cessation. - -Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - -Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - -### 9. Acceptance Not Required for Having Copies - -You are not required to accept this License in order to receive or run -a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - -### 10. Automatic Licensing of Downstream Recipients - -Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - -An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - -### 11. Patents - -A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - -A contributor's "essential patent claims" are all patent claims owned -or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - -In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - -If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - -A patent license is "discriminatory" if it does not include within the -scope of its coverage, prohibits the exercise of, or is conditioned on -the non-exercise of one or more of the rights that are specifically -granted under this License. You may not convey a covered work if you -are a party to an arrangement with a third party that is in the -business of distributing software, under which you make payment to the -third party based on the extent of your activity of conveying the -work, and under which the third party grants, to any of the parties -who would receive the covered work from you, a discriminatory patent -license (a) in connection with copies of the covered work conveyed by -you (or copies made from those copies), or (b) primarily for and in -connection with specific products or compilations that contain the -covered work, unless you entered into that arrangement, or that patent -license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - -### 12. No Surrender of Others' Freedom - -If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under -this License and any other pertinent obligations, then as a -consequence you may not convey it at all. For example, if you agree to -terms that obligate you to collect a royalty for further conveying -from those to whom you convey the Program, the only way you could -satisfy both those terms and this License would be to refrain entirely -from conveying the Program. - -### 13. 
Remote Network Interaction; Use with the GNU General Public License - -Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your -version supports such interaction) an opportunity to receive the -Corresponding Source of your version by providing access to the -Corresponding Source from a network server at no charge, through some -standard or customary means of facilitating copying of software. This -Corresponding Source shall include the Corresponding Source for any -work covered by version 3 of the GNU General Public License that is -incorporated pursuant to the following paragraph. - -Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - -### 14. Revised Versions of this License - -The Free Software Foundation may publish revised and/or new versions -of the GNU Affero General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever -published by the Free Software Foundation. - -If the Program specifies that a proxy can decide which future versions -of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - -Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - -### 15. Disclaimer of Warranty - -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT -WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND -PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE -DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR -CORRECTION. - -### 16. 
Limitation of Liability
-
-IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR
-CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES
-ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT
-NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR
-LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM
-TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER
-PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-### 17. Interpretation of Sections 15 and 16
-
-If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-END OF TERMS AND CONDITIONS
-
-## How to Apply These Terms to Your New Programs
-
-If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these
-terms.
-
-To do so, attach the following notices to the program. It is safest to
-attach them to the start of each source file to most effectively state
-the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-
- Copyright (C)
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as
- published by the Free Software Foundation, either version 3 of the
- License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see .
-
-Also add information on how to contact you by electronic and paper
-mail.
-
-If your software can interact with users remotely through a computer
-network, you should also make sure that it provides a way for users to
-get its source. For example, if your program is a web application, its
-interface could display a "Source" link that leads users to an archive
-of the code. There are many ways you could offer source, and different
-solutions will be better for different programs; see section 13 for
-the specific requirements.
-
-You should also get your employer (if you work as a programmer) or
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. For more information on this, and how to apply and follow
-the GNU AGPL, see .
diff --git a/licensing/cc-by-sa.md b/licensing/cc-by-sa.md
deleted file mode 100644
index d9eb1cc2..00000000
--- a/licensing/cc-by-sa.md
+++ /dev/null
@@ -1,175 +0,0 @@
-# Creative Commons Attribution-ShareAlike 4.0 International
-
-
-
-Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship.
Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.
-
-## Using Creative Commons Public Licenses
-
-Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.
-
-* __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors).
-
-* __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees).
-
-## Creative Commons Attribution-ShareAlike 4.0 International Public License
-
-By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
-
-### Section 1 – Definitions
-
-a. 
__Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
-
-b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
-
-c. __BY-SA Compatible License__ means a license listed at [creativecommons.org/compatiblelicenses](http://creativecommons.org/compatiblelicenses), approved by Creative Commons as essentially the equivalent of this Public License.
-
-d. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
-
-e. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
-
-f. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
-
-g. __License Elements__ means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike.
-
-h. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
-
-i. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
-
-j. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License.
-
-k. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
-
-l. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
-
-m. __You__ means the individual or entity exercising the Licensed Rights under this Public License. __Your__ has a corresponding meaning.
-
-### Section 2 – Scope
-
-a. ___License grant.___
-
- 1. 
Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
-
- A. reproduce and Share the Licensed Material, in whole or in part; and
-
- B. produce, reproduce, and Share Adapted Material.
-
- 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
-
- 3. __Term.__ The term of this Public License is specified in Section 6(a).
-
- 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
-
- 5. __Downstream recipients.__
-
- A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
-
- B. __Additional offer from the Licensor – Adapted Material.__ Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply.
-
- C. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
-
- 6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
-
-b. ___Other rights.___
-
- 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
-
- 2. Patent and trademark rights are not licensed under this Public License.
-
- 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties.
-
-### Section 3 – License Conditions
-
-Your exercise of the Licensed Rights is expressly made subject to the following conditions.
-
-a. ___Attribution.___
-
- 1. If You Share the Licensed Material (including in modified form), You must:
-
- A. 
retain the following if it is supplied by the Licensor with the Licensed Material:
-
- i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
-
- ii. a copyright notice;
-
- iii. a notice that refers to this Public License;
-
- iv. a notice that refers to the disclaimer of warranties;
-
- v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
-
- B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
-
- C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
-
- 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
-
- 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
-
-b. ___ShareAlike.___
-
-In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply.
-
-1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License.
-
-2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material.
-
-3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply.
-
-### Section 4 – Sui Generis Database Rights
-
-Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
-
-a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database;
-
-b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and
-
-c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
-
-For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
-
-### Section 5 – Disclaimer of Warranties and Limitation of Liability
-
-a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. 
This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__
-
-b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__
-
-c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
-
-### Section 6 – Term and Termination
-
-a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
-
-b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
-
- 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
-
- 2. upon express reinstatement by the Licensor.
-
- For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
-
-c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
-
-d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
-
-### Section 7 – Other Terms and Conditions
-
-a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
-
-b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
-
-### Section 8 – Interpretation
-
-a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
-
-b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
-
-c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
-
-d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
-
-> Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” The text of the Creative Commons public licenses is dedicated to the public domain under the [CC0 Public Domain Dedication](https://creativecommons.org/publicdomain/zero/1.0/legalcode). Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
->
-> Creative Commons may be contacted at creativecommons.org.
-
-
diff --git a/licensing/contributor_agreement_v01.md b/licensing/contributor_agreement_v01.md
deleted file mode 100644
index 13397c61..00000000
--- a/licensing/contributor_agreement_v01.md
+++ /dev/null
@@ -1,103 +0,0 @@
-# The Torrust Contributor Agreement
-
-
-
-[Version 2021-11-10]
-
-## Goal
-
-We require that contributors to Torrust (as defined below) agree to this Torrust Contributor Agreement (NCCA) to ensure that contributions to Torrust have acceptable licensing terms.
-
-## Non-Goals
-
-The NCCA is _not_ a copyright assignment agreement.
-
-The NCCA does _not_ somehow supersede the existing licensing terms that apply to Torrust contributions. There are two important subpoints here. First, the NCCA does not apply to upstream code (or other material) that you didn't write; indeed, it would be preposterous for it to attempt to do so. Note the narrow way in which we have defined capital-c "Contribution".
-
-Second, the main provision of the NCCA specifies that a default license will apply to code that you wrote, but only to the extent that you have not bothered to put an explicit license on it. Therefore, the NCCA is _not_ some sort of special permissive license granted to any party, despite the explicit choice of a more restrictive license by you or by upstream developers.
-
-## Terms
-
-### Section 1 – Definitions
-
-__"Acceptable License For Torrust"__ means a license selected from the appropriate categorical sublist of the full list of acceptable licenses for Torrust, currently located at , as that list may be revised from time to time by Torrust. "Acceptable Licenses For Torrust" means that full list.
-
-__"AGPLv3"__ means the license identified as "Affero General Public License 3.0", as published at .
-
-__"CC-0"__ means the Creative Commons 1.0 Universal license, as published at .
-
-__"CC-BY-SA"__ means the Creative Commons Attribution-ShareAlike 4.0 International license, as published at .
-
-__"Code"__ means (i) software code, (ii) any other functional material whose principal purpose is to control or facilitate the building of packages, such as an RPM spec file, (iii) font files, and (iv) other kinds of copyrightable material that the Torrust has classified as "code" rather than "content". 
- -__"Content"__ means any copyrightable material that is not Code, such as, without limitation, (i) non-functional data sets, (ii) documentation, (iii) wiki edits, (iv) music files, (v) graphic image files, (vi) help files, and (vii) other kinds of copyrightable material that the Torrust Council has classified as "content" rather than "code". - -__"Contribution"__ means a Work that You created, excluding any portion that was created by someone else. (For example, if You Submit a package to Torrust, the spec file You write may be a Contribution, but all the upstream code in the associated Package that You did not write is not a Contribution for purposes of this NCCA.) A Contribution consists either of Code or of Content. - -__"Current Default License"__, with respect to a Contribution, means (i) if the Contribution is Code, the AGPLv3 License, and (ii) if the Contribution is Content, CC-BY-SA. - -__"Future Public Domain License"__, with respect to a Contribution, means (i) if the Contribution is Code, the MIT-0 License, and (ii) if the Contribution is Content, CC-0. - -__"Licensed"__ means covered by explicit licensing terms that are conspicuous and readily discernible to recipients. -"Submit" means to use some mode of digital communication (for example, without limitation, mailing lists, bug tracking systems, and source code version control systems administered by Torrust) to voluntarily provide a Contribution to Torrust. -"Unlicensed" means not Licensed. - -__"MIT-0"__ means the license identified as "MIT No Attribution", as published at . - -__"Torrust"__ means the community project led by the Torrust . -"Torrust Community" means (i) all Torrust participants, and (ii) all persons receiving Contributions directly or indirectly from or through Torrust. - -__"Work"__ means a copyrightable work of authorship. A Work may be a portion of a larger Work, and a Work may be a modification of or addition to another Work. "You" means the individual accepting this instance of the NCCA. - -### Section 2 – List of Acceptable Licenses for Torrust - -- CC-BY-SA -- CC-0 -- AGPLv3 -- MIT-0 - -### Section 3 – Copyright Permission Required for All Contributions - -If You are not the copyright holder of a given Contribution that You wish to Submit to Torrust (for example, if Your employer or university holds copyright in it), it is Your responsibility to first obtain authorization from the copyright holder to Submit the Contribution under the terms of this NCCA on behalf of, or otherwise with the permission of, that copyright holder. One form of such authorization is for the copyright holder to place, or permit You to place, an Acceptable License For Torrust on the Contribution. - -### Section 4 – Licensed Contributions - -If Your Contribution is Licensed, Your Contribution will be governed by the terms under which it has been licensed. - -### Section 5 – Default Licensing of Unlicensed Contributions - -If You Submit an Unlicensed Contribution to Torrust, the license to the Torrust Community for that Contribution shall be the Current Default License. - -The Torrust may, by public announcement, subsequently designate an additional or alternative default license for a given category of Contribution (a "Later Default License"). A Later Default License shall be chosen from the appropriate categorical sublist of Acceptable Licenses For Torrust. -Once a Later Default License has been designated, Your Unlicensed Contribution shall also be licensed to the Torrust Community under that Later Default License. 
Such designation shall not affect the continuing applicability of the Current Default License to Your Contribution. - -You consent to having Torrust provide reasonable notice of Your licensing of Your Contribution under the Current Default License (and, if applicable, a Later Default License) in a manner determined by Torrust. - -### Section 6 – Automatic Future Public Domain License - -You consent that your contribution under the Current Default License is granted the Future Public Domain License __automatically__ after 5 years of submission. - -### Section 7 – Public Domain United States Government Works - -Sections 3 through 6 of this NCCA do not apply to any Contribution to the extent that it is a work of the United States Government for which copyright is unavailable under 17 U.S.C. 105. - -### Section 8 – Acceptance - -You must signify Your assent to the terms of this NCCA through specific electronic means established by Torrust. - -You may also, at Your option, and without eliminating the requirement set forth in the preceding paragraph, send a copy of this NCCA, bearing Your written signature indicating Your acceptance of its terms, by email to legal@torrust.com, or by postal mail to: - - Torrust Legal - c/o Nautilus Cyberneering GmbH - Oberhachingerstr. 46B - 2031 Grünwald - Germany - -### Section 9 – Notes - -This document is based upon: - -[The Fedora Project Contributor Agreement](https://fedoraproject.org/w/index.php?title=Legal:Fedora_Project_Contributor_Agreement&oldid=629385). -[Version 2021-05-04] - - diff --git a/licensing/file_header_agplv3.txt b/licensing/file_header_agplv3.txt deleted file mode 100644 index 0fe415a5..00000000 --- a/licensing/file_header_agplv3.txt +++ /dev/null @@ -1,21 +0,0 @@ - Torrust Index - - Project owner: Nautilus Cyberneering GmbH. - Github repository: https://github.com/torrust/torrust - Project description: - Torrust is a suite of client-server software for hosting online torrent indexes. - - Copyright (C) 2021 Nautilus Cyberneering GmbH - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as - published by the Free Software Foundation, either version 3 of the - License, or (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . diff --git a/licensing/old_commits/cc0.md b/licensing/old_commits/cc0.md deleted file mode 100644 index 2b04180a..00000000 --- a/licensing/old_commits/cc0.md +++ /dev/null @@ -1,45 +0,0 @@ -# Creative Commons CC0 1.0 Universal - - - -CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER. 
- -## Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights. - -1. __Copyright and Related Rights.__ A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work; - - ii. moral rights retained by the original author(s) and/or performer(s); - - iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work; - - iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below; - - v. rights protecting the extraction, dissemination, use and reuse of data in a Work; - - vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and - - vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof. - -2. __Waiver.__ To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). 
Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose. - -3. __Public License Fallback.__ Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose. - -4. __Limitations and Disclaimers.__ - - a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document. - - b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law. - - c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work. - - d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. 
- - diff --git a/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql b/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql new file mode 100644 index 00000000..92949e96 --- /dev/null +++ b/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_users CHANGE date_registered date_registered DATETIME DEFAULT NULL \ No newline at end of file diff --git a/migrations/mysql/20221109095718_torrust_user_add_date_imported.sql b/migrations/mysql/20221109095718_torrust_user_add_date_imported.sql new file mode 100644 index 00000000..352a5e8f --- /dev/null +++ b/migrations/mysql/20221109095718_torrust_user_add_date_imported.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_users ADD COLUMN date_imported DATETIME DEFAULT NULL \ No newline at end of file diff --git a/migrations/mysql/20230321122049_torrust_torrent_tags.sql b/migrations/mysql/20230321122049_torrust_torrent_tags.sql new file mode 100644 index 00000000..6205d59a --- /dev/null +++ b/migrations/mysql/20230321122049_torrust_torrent_tags.sql @@ -0,0 +1,5 @@ +CREATE TABLE IF NOT EXISTS torrust_torrent_tags ( + tag_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + name VARCHAR(255) NOT NULL, + date_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); diff --git a/migrations/mysql/20230321122825_torrust_torrent_tag_links.sql b/migrations/mysql/20230321122825_torrust_torrent_tag_links.sql new file mode 100644 index 00000000..f23cf89c --- /dev/null +++ b/migrations/mysql/20230321122825_torrust_torrent_tag_links.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS torrust_torrent_tag_links ( + torrent_id INTEGER NOT NULL, + tag_id INTEGER NOT NULL, + FOREIGN KEY (torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES torrust_torrent_tags(tag_id) ON DELETE CASCADE, + PRIMARY KEY (torrent_id, tag_id) +); diff --git a/migrations/mysql/20230627103405_torrust_allow_null_categories.sql b/migrations/mysql/20230627103405_torrust_allow_null_categories.sql new file mode 100644 index 00000000..9a540278 --- /dev/null +++ b/migrations/mysql/20230627103405_torrust_allow_null_categories.sql @@ -0,0 +1,6 @@ +-- Step 1: Allow null categories for torrents +ALTER TABLE torrust_torrents MODIFY category_id INTEGER NULL; + +-- Step 2: Set torrent category to NULL when category is deleted +ALTER TABLE `torrust_torrents` DROP FOREIGN KEY `torrust_torrents_ibfk_2`; +ALTER TABLE `torrust_torrents` ADD CONSTRAINT `torrust_torrents_ibfk_2` FOREIGN KEY (`category_id`) REFERENCES `torrust_categories` (`category_id`) ON DELETE SET NULL; diff --git a/migrations/mysql/20230627144318_torrust_covert_infohashes_to_lowercase.sql b/migrations/mysql/20230627144318_torrust_covert_infohashes_to_lowercase.sql new file mode 100644 index 00000000..7014ed01 --- /dev/null +++ b/migrations/mysql/20230627144318_torrust_covert_infohashes_to_lowercase.sql @@ -0,0 +1 @@ +UPDATE torrust_torrents SET info_hash = LOWER(info_hash); \ No newline at end of file diff --git a/migrations/mysql/20230803160604_torrust_torrents_add_source.sql b/migrations/mysql/20230803160604_torrust_torrents_add_source.sql new file mode 100644 index 00000000..5bee0b38 --- /dev/null +++ b/migrations/mysql/20230803160604_torrust_torrents_add_source.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_torrents ADD COLUMN source TEXT DEFAULT NULL diff --git a/migrations/mysql/20230824164316_torrust_torrents_add_original_info_hash.sql b/migrations/mysql/20230824164316_torrust_torrents_add_original_info_hash.sql new 
file mode 100644 index 00000000..e81cb96c --- /dev/null +++ b/migrations/mysql/20230824164316_torrust_torrents_add_original_info_hash.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_torrents ADD COLUMN original_info_hash TEXT DEFAULT NULL \ No newline at end of file diff --git a/migrations/mysql/20230905091837_torrust_multiple_original_infohashes.sql b/migrations/mysql/20230905091837_torrust_multiple_original_infohashes.sql new file mode 100644 index 00000000..e11a1052 --- /dev/null +++ b/migrations/mysql/20230905091837_torrust_multiple_original_infohashes.sql @@ -0,0 +1,46 @@ +-- Step 1: Create a new table with all infohashes +CREATE TABLE torrust_torrent_info_hashes ( + info_hash CHAR(40) NOT NULL, + canonical_info_hash CHAR(40) NOT NULL, + original_is_known BOOLEAN NOT NULL, + PRIMARY KEY(info_hash), + FOREIGN KEY(canonical_info_hash) REFERENCES torrust_torrents(info_hash) ON DELETE CASCADE +); + +-- Step 2: Create one record for each torrent with only the canonical infohash. +-- The original infohash is NULL, so we do not know whether it was the same as +-- the canonical one. This happens when the torrent was uploaded before the +-- feature to store the original infohash was introduced. +INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) +SELECT info_hash, info_hash, FALSE + FROM torrust_torrents + WHERE original_info_hash IS NULL; + +-- Step 3: Create one record for each torrent with the same original and +-- canonical infohashes. +INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) +SELECT info_hash, info_hash, TRUE + FROM torrust_torrents + WHERE original_info_hash IS NOT NULL + AND info_hash = original_info_hash; + +-- Step 4: Create two records for each torrent whose original and canonical +-- infohashes differ: one mapping the canonical infohash to itself, and one +-- mapping the original infohash to the canonical one (see the illustrative +-- rows below).
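+-- Illustrative example (not part of the original migration; `aaa`, `bbb` and +-- `ccc` stand in for hypothetical 40-character infohashes): given a torrent +-- `aaa` uploaded before the original infohash was stored, and a torrent with +-- canonical infohash `ccc` whose original infohash `bbb` differs, the table +-- would end up containing: +--   (info_hash, canonical_info_hash, original_is_known) +--   (`aaa`, `aaa`, FALSE)  -- Step 2: original unknown +--   (`ccc`, `ccc`, TRUE)   -- Step 4: canonical row +--   (`bbb`, `ccc`, TRUE)   -- Step 4: original row pointing to the canonical one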
+-- Insert the canonical infohash +INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) +SELECT info_hash, info_hash, TRUE + FROM torrust_torrents + WHERE original_info_hash IS NOT NULL + AND info_hash != original_info_hash; +-- Insert the original infohash pointing to the canonical +INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) +SELECT original_info_hash, info_hash, TRUE + FROM torrust_torrents + WHERE original_info_hash IS NOT NULL + AND info_hash != original_info_hash; + +-- Step 5: Delete the `torrust_torrents::original_info_hash` column +ALTER TABLE torrust_torrents DROP COLUMN original_info_hash; + diff --git a/migrations/mysql/20230914155441_torrust_no_duplicate_tags.sql b/migrations/mysql/20230914155441_torrust_no_duplicate_tags.sql new file mode 100644 index 00000000..6b513fbe --- /dev/null +++ b/migrations/mysql/20230914155441_torrust_no_duplicate_tags.sql @@ -0,0 +1,12 @@ +-- Step 1 & 2: Identify and update the duplicate names +UPDATE torrust_torrent_tags +JOIN ( + SELECT name + FROM torrust_torrent_tags + GROUP BY name + HAVING COUNT(*) > 1 +) AS DuplicateNames ON torrust_torrent_tags.name = DuplicateNames.name +SET torrust_torrent_tags.name = CONCAT(torrust_torrent_tags.name, '_', torrust_torrent_tags.tag_id); + +-- Step 3: Add the UNIQUE constraint to the name column +ALTER TABLE torrust_torrent_tags ADD UNIQUE (name); diff --git a/migrations/mysql/20230918103654_torrust_add_comment_field_to_torrent.sql b/migrations/mysql/20230918103654_torrust_add_comment_field_to_torrent.sql new file mode 100644 index 00000000..2ecee2a9 --- /dev/null +++ b/migrations/mysql/20230918103654_torrust_add_comment_field_to_torrent.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_torrents ADD COLUMN comment TEXT NULL; diff --git a/migrations/sqlite3/20221109092556_torrust_user_date_registered_allow_null.sql b/migrations/sqlite3/20221109092556_torrust_user_date_registered_allow_null.sql new file mode 100644 index 00000000..5757849c --- /dev/null +++ b/migrations/sqlite3/20221109092556_torrust_user_date_registered_allow_null.sql @@ -0,0 +1,11 @@ +CREATE TABLE IF NOT EXISTS torrust_users_new ( + user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + date_registered TEXT DEFAULT NULL, + administrator BOOL NOT NULL DEFAULT FALSE +); + +INSERT INTO torrust_users_new SELECT * FROM torrust_users; + +DROP TABLE torrust_users; + +ALTER TABLE torrust_users_new RENAME TO torrust_users \ No newline at end of file diff --git a/migrations/sqlite3/20221109095718_torrust_user_add_date_imported.sql b/migrations/sqlite3/20221109095718_torrust_user_add_date_imported.sql new file mode 100644 index 00000000..96dddd2f --- /dev/null +++ b/migrations/sqlite3/20221109095718_torrust_user_add_date_imported.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_users ADD COLUMN date_imported TEXT DEFAULT NULL \ No newline at end of file diff --git a/migrations/sqlite3/20230321122049_torrust_torrent_tags.sql b/migrations/sqlite3/20230321122049_torrust_torrent_tags.sql new file mode 100644 index 00000000..0f71de15 --- /dev/null +++ b/migrations/sqlite3/20230321122049_torrust_torrent_tags.sql @@ -0,0 +1,5 @@ +CREATE TABLE IF NOT EXISTS torrust_torrent_tags ( + tag_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + name VARCHAR(255) NOT NULL, + date_created TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); diff --git a/migrations/sqlite3/20230321122825_torrust_torrent_tag_links.sql b/migrations/sqlite3/20230321122825_torrust_torrent_tag_links.sql new file mode 100644 index 
00000000..f23cf89c --- /dev/null +++ b/migrations/sqlite3/20230321122825_torrust_torrent_tag_links.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS torrust_torrent_tag_links ( + torrent_id INTEGER NOT NULL, + tag_id INTEGER NOT NULL, + FOREIGN KEY (torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE, + FOREIGN KEY (tag_id) REFERENCES torrust_torrent_tags(tag_id) ON DELETE CASCADE, + PRIMARY KEY (torrent_id, tag_id) +); diff --git a/migrations/sqlite3/20230627103405_torrust_allow_null_categories.sql b/migrations/sqlite3/20230627103405_torrust_allow_null_categories.sql new file mode 100644 index 00000000..f2c2b13e --- /dev/null +++ b/migrations/sqlite3/20230627103405_torrust_allow_null_categories.sql @@ -0,0 +1,28 @@ +-- Step 1: Create a new table with the new structure +CREATE TABLE IF NOT EXISTS "torrust_torrents_new" ( + "torrent_id" INTEGER NOT NULL, + "uploader_id" INTEGER NOT NULL, + "category_id" INTEGER NULL, + "info_hash" TEXT NOT NULL UNIQUE, + "size" INTEGER NOT NULL, + "name" TEXT NOT NULL, + "pieces" TEXT NOT NULL, + "piece_length" INTEGER NOT NULL, + "private" BOOLEAN DEFAULT NULL, + "root_hash" INT NOT NULL DEFAULT 0, + "date_uploaded" TEXT NOT NULL, + FOREIGN KEY("uploader_id") REFERENCES "torrust_users"("user_id") ON DELETE CASCADE, + FOREIGN KEY("category_id") REFERENCES "torrust_categories"("category_id") ON DELETE SET NULL, + PRIMARY KEY("torrent_id" AUTOINCREMENT) +); + +-- Step 2: Copy rows from the current table to the new table +INSERT INTO torrust_torrents_new (torrent_id, uploader_id, category_id, info_hash, size, name, pieces, piece_length, private, root_hash, date_uploaded) +SELECT torrent_id, uploader_id, category_id, info_hash, size, name, pieces, piece_length, private, root_hash, date_uploaded +FROM torrust_torrents; + +-- Step 3: Delete the current table +DROP TABLE torrust_torrents; + +-- Step 4: Rename the new table +ALTER TABLE torrust_torrents_new RENAME TO torrust_torrents; diff --git a/migrations/sqlite3/20230627144318_torrust_covert_infohashes_to_lowercase.sql b/migrations/sqlite3/20230627144318_torrust_covert_infohashes_to_lowercase.sql new file mode 100644 index 00000000..7014ed01 --- /dev/null +++ b/migrations/sqlite3/20230627144318_torrust_covert_infohashes_to_lowercase.sql @@ -0,0 +1 @@ +UPDATE torrust_torrents SET info_hash = LOWER(info_hash); \ No newline at end of file diff --git a/migrations/sqlite3/20230803160604_torrust_torrents_add_source.sql b/migrations/sqlite3/20230803160604_torrust_torrents_add_source.sql new file mode 100644 index 00000000..5bee0b38 --- /dev/null +++ b/migrations/sqlite3/20230803160604_torrust_torrents_add_source.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_torrents ADD COLUMN source TEXT DEFAULT NULL diff --git a/migrations/sqlite3/20230824164316_torrust_torrents_add_original_info_hash.sql b/migrations/sqlite3/20230824164316_torrust_torrents_add_original_info_hash.sql new file mode 100644 index 00000000..e81cb96c --- /dev/null +++ b/migrations/sqlite3/20230824164316_torrust_torrents_add_original_info_hash.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_torrents ADD COLUMN original_info_hash TEXT DEFAULT NULL \ No newline at end of file diff --git a/migrations/sqlite3/20230905091837_torrust_multiple_original_infohashes.sql b/migrations/sqlite3/20230905091837_torrust_multiple_original_infohashes.sql new file mode 100644 index 00000000..31585d83 --- /dev/null +++ b/migrations/sqlite3/20230905091837_torrust_multiple_original_infohashes.sql @@ -0,0 +1,48 @@ +-- Step 1: Create a new table with all infohashes +CREATE TABLE
IF NOT EXISTS torrust_torrent_info_hashes ( + info_hash TEXT NOT NULL, + canonical_info_hash TEXT NOT NULL, + original_is_known BOOLEAN NOT NULL, + PRIMARY KEY(info_hash), + FOREIGN KEY(canonical_info_hash) REFERENCES torrust_torrents (info_hash) ON DELETE CASCADE +); + +-- Step 2: Create one record for each torrent with only the canonical infohash. +-- The original infohash is NULL, so we do not know whether it was the same as +-- the canonical one. This happens when the torrent was uploaded before the +-- feature to store the original infohash was introduced. +INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) +SELECT info_hash, info_hash, FALSE + FROM torrust_torrents + WHERE original_info_hash IS NULL; + +-- Step 3: Create one record for each torrent with the same original and +-- canonical infohashes. +INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) +SELECT info_hash, info_hash, TRUE + FROM torrust_torrents + WHERE original_info_hash IS NOT NULL + AND info_hash = original_info_hash; + +-- Step 4: Create two records for each torrent whose original and canonical +-- infohashes differ: one mapping the canonical infohash to itself, and one +-- mapping the original infohash to the canonical one. +-- Insert the canonical infohash +INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) +SELECT info_hash, info_hash, TRUE + FROM torrust_torrents + WHERE original_info_hash IS NOT NULL + AND info_hash != original_info_hash; +-- Insert the original infohash pointing to the canonical +INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) +SELECT original_info_hash, info_hash, TRUE + FROM torrust_torrents + WHERE original_info_hash IS NOT NULL + AND info_hash != original_info_hash; + +-- Step 5: Delete the `torrust_torrents::original_info_hash` column +-- SQLite 3.35.0 (2021-03-12) supports DROP COLUMN +-- https://www.sqlite.org/lang_altertable.html#alter_table_drop_column +ALTER TABLE torrust_torrents DROP COLUMN original_info_hash; + diff --git a/migrations/sqlite3/20230914155441_torrust_no_duplicate_tags.sql b/migrations/sqlite3/20230914155441_torrust_no_duplicate_tags.sql new file mode 100644 index 00000000..e16f2ef0 --- /dev/null +++ b/migrations/sqlite3/20230914155441_torrust_no_duplicate_tags.sql @@ -0,0 +1,13 @@ +-- Step 1: Identify and update the duplicate names +WITH DuplicateNames AS ( + SELECT name + FROM torrust_torrent_tags + GROUP BY name + HAVING COUNT(*) > 1 +) +UPDATE torrust_torrent_tags +SET name = name || '_' || tag_id +WHERE name IN (SELECT name FROM DuplicateNames); + +-- Step 2: Create a UNIQUE index on the name column +CREATE UNIQUE INDEX idx_unique_name ON torrust_torrent_tags(name); diff --git a/migrations/sqlite3/20230918103654_torrust_add_comment_field_to_torrent.sql b/migrations/sqlite3/20230918103654_torrust_add_comment_field_to_torrent.sql new file mode 100644 index 00000000..ff8774e2 --- /dev/null +++ b/migrations/sqlite3/20230918103654_torrust_add_comment_field_to_torrent.sql @@ -0,0 +1 @@ +ALTER TABLE "torrust_torrents" ADD COLUMN "comment" TEXT NULL; \ No newline at end of file diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml new file mode 100644 index 00000000..d8065bca --- /dev/null +++ b/packages/located-error/Cargo.toml @@ -0,0 +1,21 @@ +[package] +description = "A library to provide an error decorator with the location and the source of the original error."
+keywords = ["errors", "helper", "library"] +name = "torrust-index-located-error" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +log = { version = "0", features = ["release_max_level_info"] } + +[dev-dependencies] +thiserror = "1.0" diff --git a/packages/located-error/LICENSE b/packages/located-error/LICENSE new file mode 100644 index 00000000..0ad25db4 --- /dev/null +++ b/packages/located-error/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. 
Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. 
If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year>  <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>.
+ +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/packages/located-error/README.md b/packages/located-error/README.md new file mode 100644 index 00000000..c3c18fa4 --- /dev/null +++ b/packages/located-error/README.md @@ -0,0 +1,11 @@ +# Torrust Index Located Error + +A library to provide an error decorator with the location and the source of the original error. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-index-located-error). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs new file mode 100644 index 00000000..bf861868 --- /dev/null +++ b/packages/located-error/src/lib.rs @@ -0,0 +1,136 @@ +//! This crate provides a wrapper around an error that includes the location of +//! the error. +//! +//! ```rust +//! use std::error::Error; +//! use std::panic::Location; +//! use std::sync::Arc; +//! use torrust_index_located_error::{Located, LocatedError}; +//! +//! #[derive(thiserror::Error, Debug)] +//! enum TestError { +//!     #[error("Test")] +//!     Test, +//! } +//! +//! #[track_caller] +//! fn get_caller_location() -> Location<'static> { +//!     *Location::caller() +//! } +//! +//! let e = TestError::Test; +//! +//! let b: LocatedError<'_, TestError> = Located(e).into(); +//! let l = get_caller_location(); +//! +//! assert!(b.to_string().contains("Test, src/lib.rs")); +//! ``` +//! +//! # Credits +//! +//! +use std::error::Error; +use std::panic::Location; +use std::sync::Arc; + +/// A generic wrapper around an error. +/// +/// Where `E` is the inner error (source error). +pub struct Located<E>(pub E); + +/// A wrapper around an error that includes the location of the error.
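+/// +/// Displaying it yields the source error followed by the capture location; an +/// illustrative, hypothetical rendering would be `Test, src/lib.rs:23:52`.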
+#[derive(Debug)] +pub struct LocatedError<'a, E> +where +    E: Error + ?Sized + Send + Sync, +{ +    source: Arc<E>, +    location: Box<Location<'a>>, +} + +impl<'a, E> std::fmt::Display for LocatedError<'a, E> +where +    E: Error + ?Sized + Send + Sync, +{ +    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +        write!(f, "{}, {}", self.source, self.location) +    } +} + +impl<'a, E> Error for LocatedError<'a, E> +where +    E: Error + ?Sized + Send + Sync + 'static, +{ +    fn source(&self) -> Option<&(dyn Error + 'static)> { +        Some(&self.source) +    } +} + +impl<'a, E> Clone for LocatedError<'a, E> +where +    E: Error + ?Sized + Send + Sync, +{ +    fn clone(&self) -> Self { +        LocatedError { +            source: self.source.clone(), +            location: self.location.clone(), +        } +    } +} + +#[allow(clippy::from_over_into)] +impl<'a, E> Into<LocatedError<'a, E>> for Located<E> +where +    E: Error + Send + Sync, +    Arc<E>: Clone, +{ +    #[track_caller] +    fn into(self) -> LocatedError<'a, E> { +        let e = LocatedError { +            source: Arc::new(self.0), +            location: Box::new(*std::panic::Location::caller()), +        }; +        log::debug!("{e}"); +        e +    } +} + +#[allow(clippy::from_over_into)] +impl<'a> Into<LocatedError<'a, dyn std::error::Error + Send + Sync>> for Arc<dyn std::error::Error + Send + Sync> { +    #[track_caller] +    fn into(self) -> LocatedError<'a, dyn std::error::Error + Send + Sync> { +        LocatedError { +            source: self, +            location: Box::new(*std::panic::Location::caller()), +        } +    } +} + +#[cfg(test)] +mod tests { +    use std::panic::Location; + +    use super::LocatedError; +    use crate::Located; + +    #[derive(thiserror::Error, Debug)] +    enum TestError { +        #[error("Test")] +        Test, +    } + +    #[track_caller] +    fn get_caller_location() -> Location<'static> { +        *Location::caller() +    } + +    #[test] +    fn error_should_include_location() { +        let e = TestError::Test; + +        let b: LocatedError<'_, TestError> = Located(e).into(); +        let l = get_caller_location(); + +        assert_eq!(b.location.file(), l.file()); +    } +} diff --git a/project-words.txt b/project-words.txt new file mode 100644 index 00000000..74669479 --- /dev/null +++ b/project-words.txt @@ -0,0 +1,97 @@ +actix +addrs +alekitto +AUTOINCREMENT +bencode +bencoded +Benoit +binascii +btih +buildx +chrono +clippy +codecov +codegen +compatiblelicenses +Containerfile +creativecommons +creds +Culqt +Cyberneering +datetime +DATETIME +dockerhub +Dont +dotless +dtolnay +elif +grcov +Grünwald +hasher +Hasher +hexlify +httpseeds +ICANN +imagoodboy +imdl +indexadmin +indexmap +infohash +Intermodal +jsonwebtoken +leechers +Leechers +LEECHERS +lettre +libsqlite +luckythelab +mailcatcher +mandelbrotset +metainfo +migth +nanos +NCCA +nextest +nilm +nocapture +Oberhachingerstr +oneshot +openbittorrent +opentrackr +ppassword +proxied +rapppid +reqwest +Roadmap +ROADMAP +rowid +RUSTDOCFLAGS +RUSTFLAGS +rustfmt +serde +sgxj +singlepart +sqlx +strftime +struct +sublicensable +sublist +subpoints +Swatinem +taiki +tempdir +tempfile +tera +thiserror +torrust +Torrust +unban +Ununauthorized +upgrader +Uragqm +urlencoding +uroot +Verstappen +waivable +webseeding +Xoauth diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 00000000..abbed5ed --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,3 @@ +max_width = 130 +imports_granularity = "Module" +group_imports = "StdExternalCrate" diff --git a/share/container/entry_script_sh b/share/container/entry_script_sh new file mode 100644 index 00000000..5f8d9d21 --- /dev/null +++ b/share/container/entry_script_sh @@ -0,0 +1,81 @@ +#!/bin/sh +set -x + +to_lc() { echo "$1" | tr '[:upper:]' '[:lower:]'; } +clean() { echo "$1" | tr -d -c 'a-zA-Z0-9-' ; } +cmp_lc() { [ "$(to_lc "$(clean "$1")")" = "$(to_lc
"$(clean "$2")")" ]; } + + +inst() { + if [ -n "$1" ] && [ -n "$2" ] && [ -e "$1" ] && [ ! -e "$2" ]; then + install -D -m 0640 -o torrust -g torrust "$1" "$2"; fi; } + + +# Add torrust user, based upon supplied user-id. +if [ -z "$USER_ID" ] && [ "$USER_ID" -lt 1000 ]; then + echo "ERROR: USER_ID is not set, or less than 1000" + exit 1 +fi + +adduser --disabled-password --shell "/bin/sh" --uid "$USER_ID" "torrust" + +# Configure Permissions for Torrust Folders +mkdir -p /var/lib/torrust/index/database/ /etc/torrust/index/ +chown -R "${USER_ID}" /var/lib/torrust /var/log/torrust /etc/torrust +chmod -R 2770 /var/lib/torrust /var/log/torrust /etc/torrust + + +# Install the database and config: +if [ -n "$TORRUST_INDEX_DATABASE_DRIVER" ]; then + if cmp_lc "$TORRUST_INDEX_DATABASE_DRIVER" "sqlite3"; then + + # Select sqlite3 empty database + default_database="/usr/share/torrust/default/database/index.sqlite3.db" + + # Select sqlite3 default configuration + default_config="/usr/share/torrust/default/config/index.container.sqlite3.toml" + + elif cmp_lc "$TORRUST_INDEX_DATABASE_DRIVER" "mysql"; then + + # (no database file needed for mysql) + + # Select default mysql configuration + default_config="/usr/share/torrust/default/config/index.container.mysql.toml" + + else + echo "Error: Unsupported Database Type: \"$TORRUST_INDEX_DATABASE_DRIVER\"." + echo "Please Note: Supported Database Types: \"sqlite3\", \"mysql\"." + exit 1 + fi +else + echo "Error: \"\$TORRUST_INDEX_DATABASE_DRIVER\" was not set!"; exit 1; +fi + +install_config="/etc/torrust/index/index.toml" +install_database="/var/lib/torrust/index/database/sqlite3.db" + +inst "$default_config" "$install_config" +inst "$default_database" "$install_database" + +# Make Minimal Message of the Day +if cmp_lc "$RUNTIME" "runtime"; then + printf '\n in runtime \n' >> /etc/motd; +elif cmp_lc "$RUNTIME" "debug"; then + printf '\n in debug mode \n' >> /etc/motd; +elif cmp_lc "$RUNTIME" "release"; then + printf '\n in release mode \n' >> /etc/motd; +else + echo "ERROR: running in unknown mode: \"$RUNTIME\""; exit 1 +fi + +if [ -e "/usr/share/torrust/container/message" ]; then + cat "/usr/share/torrust/container/message" >> /etc/motd; chmod 0644 /etc/motd +fi + +# Load message of the day from Profile +echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/motd' >> /etc/profile + +cd /home/torrust || exit 1 + +# Switch to torrust user +exec /bin/su-exec torrust "$@" diff --git a/share/container/message b/share/container/message new file mode 100644 index 00000000..5a69f106 --- /dev/null +++ b/share/container/message @@ -0,0 +1,4 @@ + +Lovely welcome to our Torrust Index Container! + +run 'torrust-index' to start the index diff --git a/share/default/config/index.container.mysql.toml b/share/default/config/index.container.mysql.toml new file mode 100644 index 00000000..1999c4a1 --- /dev/null +++ b/share/default/config/index.container.mysql.toml @@ -0,0 +1,50 @@ +log_level = "info" + +[website] +name = "Torrust" + +# Please override the tracker token setting the +# `TORRUST_INDEX_TRACKER_API_TOKEN` +# environmental variable! 
+ +[tracker] +url = "udp://tracker:6969" +mode = "Public" +api_url = "http://tracker:1212" +token = "MyAccessToken" +token_valid_seconds = 7257600 + +[net] +port = 3001 + +[auth] +email_on_signup = "Optional" +min_password_length = 6 +max_password_length = 64 +secret_key = "MaxVerstappenWC2021" + +[database] +connect_url = "mysql://root:root_secret_password@mysql:3306/torrust_index_e2e_testing" + +[mail] +email_verification_enabled = false +from = "example@email.com" +reply_to = "noreply@email.com" +username = "" +password = "" +server = "mailcatcher" +port = 1025 + +[image_cache] +max_request_timeout_ms = 1000 +capacity = 128000000 +entry_size_limit = 4000000 +user_quota_period_seconds = 3600 +user_quota_bytes = 64000000 + +[api] +default_torrent_page_size = 10 +max_torrent_page_size = 30 + +[tracker_statistics_importer] +torrent_info_update_interval = 3600 diff --git a/share/default/config/index.container.sqlite3.toml b/share/default/config/index.container.sqlite3.toml new file mode 100644 index 00000000..c0cb6002 --- /dev/null +++ b/share/default/config/index.container.sqlite3.toml @@ -0,0 +1,50 @@ +log_level = "info" + +[website] +name = "Torrust" + +# Please override the tracker token setting the +# `TORRUST_INDEX_TRACKER_API_TOKEN` +# environmental variable! + +[tracker] +url = "udp://tracker:6969" +mode = "Public" +api_url = "http://tracker:1212" +token = "MyAccessToken" +token_valid_seconds = 7257600 + +[net] +port = 3001 + +[auth] +email_on_signup = "Optional" +min_password_length = 6 +max_password_length = 64 +secret_key = "MaxVerstappenWC2021" + +[database] +connect_url = "sqlite:///var/lib/torrust/index/database/sqlite3.db?mode=rwc" + +[mail] +email_verification_enabled = false +from = "example@email.com" +reply_to = "noreply@email.com" +username = "" +password = "" +server = "mailcatcher" +port = 1025 + +[image_cache] +max_request_timeout_ms = 1000 +capacity = 128000000 +entry_size_limit = 4000000 +user_quota_period_seconds = 3600 +user_quota_bytes = 64000000 + +[api] +default_torrent_page_size = 10 +max_torrent_page_size = 30 + +[tracker_statistics_importer] +torrent_info_update_interval = 3600 diff --git a/share/default/config/index.development.sqlite3.toml b/share/default/config/index.development.sqlite3.toml new file mode 100644 index 00000000..06f89a3c --- /dev/null +++ b/share/default/config/index.development.sqlite3.toml @@ -0,0 +1,46 @@ +log_level = "info" + +[website] +name = "Torrust" + +[tracker] +url = "udp://localhost:6969" +mode = "Public" +api_url = "http://localhost:1212" +token = "MyAccessToken" +token_valid_seconds = 7257600 + +[net] +port = 3001 + +[auth] +email_on_signup = "Optional" +min_password_length = 6 +max_password_length = 64 +secret_key = "MaxVerstappenWC2021" + +[database] +connect_url = "sqlite://data.db?mode=rwc" + +[mail] +email_verification_enabled = false +from = "example@email.com" +reply_to = "noreply@email.com" +username = "" +password = "" +server = "" +port = 25 + +[image_cache] +max_request_timeout_ms = 1000 +capacity = 128000000 +entry_size_limit = 4000000 +user_quota_period_seconds = 3600 +user_quota_bytes = 64000000 + +[api] +default_torrent_page_size = 10 +max_torrent_page_size = 30 + +[tracker_statistics_importer] +torrent_info_update_interval = 3600 diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml new file mode 100644 index 00000000..fb9cbf78 --- /dev/null +++ b/share/default/config/tracker.container.mysql.toml @@ -0,0 +1,38 @@ +announce_interval = 120 +db_driver = 
"MySQL" +db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" +external_ip = "0.0.0.0" +inactive_peer_cleanup_interval = 600 +log_level = "info" +max_peer_timeout = 900 +min_announce_interval = 120 +mode = "public" +on_reverse_proxy = false +persistent_torrent_completed_stat = false +remove_peerless_torrents = true +tracker_usage_statistics = true + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" +enabled = false + +[[http_trackers]] +bind_address = "0.0.0.0:7070" +enabled = false +ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_enabled = false +ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + +[http_api] +bind_address = "0.0.0.0:1212" +enabled = true +ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_enabled = false +ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + +# Please override the admin token setting the +# `TORRUST_TRACKER_API_ADMIN_TOKEN` +# environmental variable! + +[http_api.access_tokens] +admin = "MyAccessToken" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml new file mode 100644 index 00000000..54cfd402 --- /dev/null +++ b/share/default/config/tracker.container.sqlite3.toml @@ -0,0 +1,38 @@ +announce_interval = 120 +db_driver = "Sqlite3" +db_path = "/var/lib/torrust/tracker/database/sqlite3.db" +external_ip = "0.0.0.0" +inactive_peer_cleanup_interval = 600 +log_level = "info" +max_peer_timeout = 900 +min_announce_interval = 120 +mode = "public" +on_reverse_proxy = false +persistent_torrent_completed_stat = false +remove_peerless_torrents = true +tracker_usage_statistics = true + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" +enabled = false + +[[http_trackers]] +bind_address = "0.0.0.0:7070" +enabled = false +ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_enabled = false +ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + +[http_api] +bind_address = "0.0.0.0:1212" +enabled = true +ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" +ssl_enabled = false +ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" + +# Please override the admin token setting the +# `TORRUST_TRACKER_API_ADMIN_TOKEN` +# environmental variable! 
+
+[http_api.access_tokens]
+admin = "MyAccessToken"
diff --git a/src/app.rs b/src/app.rs
new file mode 100644
index 00000000..353ce274
--- /dev/null
+++ b/src/app.rs
@@ -0,0 +1,184 @@
+use std::net::SocketAddr;
+use std::sync::Arc;
+
+use tokio::task::JoinHandle;
+
+use crate::bootstrap::logging;
+use crate::cache::image::manager::ImageCacheService;
+use crate::common::AppData;
+use crate::config::Configuration;
+use crate::databases::database;
+use crate::services::authentication::{DbUserAuthenticationRepository, JsonWebToken, Service};
+use crate::services::category::{self, DbCategoryRepository};
+use crate::services::tag::{self, DbTagRepository};
+use crate::services::torrent::{
+    DbCanonicalInfoHashGroupRepository, DbTorrentAnnounceUrlRepository, DbTorrentFileRepository, DbTorrentInfoRepository,
+    DbTorrentListingGenerator, DbTorrentRepository, DbTorrentTagRepository,
+};
+use crate::services::user::{self, DbBannedUserList, DbUserProfileRepository, DbUserRepository};
+use crate::services::{proxy, settings, torrent};
+use crate::tracker::statistics_importer::StatisticsImporter;
+use crate::web::api::v1::auth::Authentication;
+use crate::web::api::{start, Version};
+use crate::{mailer, tracker};
+
+pub struct Running {
+    pub api_socket_addr: SocketAddr,
+    pub api_server: Option<JoinHandle<Result<(), std::io::Error>>>,
+    pub tracker_data_importer_handle: tokio::task::JoinHandle<()>,
+}
+
+/// Runs the application.
+///
+/// # Panics
+///
+/// It panics if there is an error connecting to the database.
+#[allow(clippy::too_many_lines)]
+pub async fn run(configuration: Configuration, api_version: &Version) -> Running {
+    let log_level = configuration.settings.read().await.log_level.clone();
+
+    logging::setup(&log_level);
+
+    let configuration = Arc::new(configuration);
+
+    // Get configuration settings needed to build the app dependencies and
+    // services: main API server and tracker torrents importer.
+
+    let settings = configuration.settings.read().await;
+
+    let database_connect_url = settings.database.connect_url.clone();
+    let torrent_info_update_interval = settings.tracker_statistics_importer.torrent_info_update_interval;
+    let net_ip = "0.0.0.0".to_string();
+    let net_port = settings.net.port;
+
+    // IMPORTANT: drop settings before starting the server to avoid read locks
+    // that lead to requests hanging.
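+    // Holding the read guard across the `.await` points below would keep any
+    // writer waiting; `tokio::sync::RwLock` queues lock requests fairly, so
+    // readers arriving after that writer would block behind it and hang.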
+ drop(settings); + + // Build app dependencies + + let database = Arc::new(database::connect(&database_connect_url).await.expect("Database error.")); + let json_web_token = Arc::new(JsonWebToken::new(configuration.clone())); + let auth = Arc::new(Authentication::new(json_web_token.clone())); + + // Repositories + let category_repository = Arc::new(DbCategoryRepository::new(database.clone())); + let tag_repository = Arc::new(DbTagRepository::new(database.clone())); + let user_repository = Arc::new(DbUserRepository::new(database.clone())); + let user_authentication_repository = Arc::new(DbUserAuthenticationRepository::new(database.clone())); + let user_profile_repository = Arc::new(DbUserProfileRepository::new(database.clone())); + let torrent_repository = Arc::new(DbTorrentRepository::new(database.clone())); + let canonical_info_hash_group_repository = Arc::new(DbCanonicalInfoHashGroupRepository::new(database.clone())); + let torrent_info_repository = Arc::new(DbTorrentInfoRepository::new(database.clone())); + let torrent_file_repository = Arc::new(DbTorrentFileRepository::new(database.clone())); + let torrent_announce_url_repository = Arc::new(DbTorrentAnnounceUrlRepository::new(database.clone())); + let torrent_tag_repository = Arc::new(DbTorrentTagRepository::new(database.clone())); + let torrent_listing_generator = Arc::new(DbTorrentListingGenerator::new(database.clone())); + let banned_user_list = Arc::new(DbBannedUserList::new(database.clone())); + + // Services + let tracker_service = Arc::new(tracker::service::Service::new(configuration.clone(), database.clone()).await); + let tracker_statistics_importer = + Arc::new(StatisticsImporter::new(configuration.clone(), tracker_service.clone(), database.clone()).await); + let mailer_service = Arc::new(mailer::Service::new(configuration.clone()).await); + let image_cache_service: Arc = Arc::new(ImageCacheService::new(configuration.clone()).await); + let category_service = Arc::new(category::Service::new(category_repository.clone(), user_repository.clone())); + let tag_service = Arc::new(tag::Service::new(tag_repository.clone(), user_repository.clone())); + let proxy_service = Arc::new(proxy::Service::new(image_cache_service.clone(), user_repository.clone())); + let settings_service = Arc::new(settings::Service::new(configuration.clone(), user_repository.clone())); + let torrent_index = Arc::new(torrent::Index::new( + configuration.clone(), + tracker_statistics_importer.clone(), + tracker_service.clone(), + user_repository.clone(), + category_repository.clone(), + torrent_repository.clone(), + canonical_info_hash_group_repository.clone(), + torrent_info_repository.clone(), + torrent_file_repository.clone(), + torrent_announce_url_repository.clone(), + torrent_tag_repository.clone(), + torrent_listing_generator.clone(), + )); + let registration_service = Arc::new(user::RegistrationService::new( + configuration.clone(), + mailer_service.clone(), + user_repository.clone(), + user_profile_repository.clone(), + )); + let ban_service = Arc::new(user::BanService::new( + user_repository.clone(), + user_profile_repository.clone(), + banned_user_list.clone(), + )); + let authentication_service = Arc::new(Service::new( + configuration.clone(), + json_web_token.clone(), + user_repository.clone(), + user_profile_repository.clone(), + user_authentication_repository.clone(), + )); + + // Build app container + + let app_data = Arc::new(AppData::new( + configuration.clone(), + database.clone(), + json_web_token.clone(), + auth.clone(), + 
authentication_service, + tracker_service.clone(), + tracker_statistics_importer.clone(), + mailer_service, + image_cache_service, + category_repository, + tag_repository, + user_repository, + user_authentication_repository, + user_profile_repository, + torrent_repository, + canonical_info_hash_group_repository, + torrent_info_repository, + torrent_file_repository, + torrent_announce_url_repository, + torrent_tag_repository, + torrent_listing_generator, + banned_user_list, + category_service, + tag_service, + proxy_service, + settings_service, + torrent_index, + registration_service, + ban_service, + )); + + // Start repeating task to import tracker torrent data and updating + // seeders and leechers info. + + let weak_tracker_statistics_importer = Arc::downgrade(&tracker_statistics_importer); + + let tracker_statistics_importer_handle = tokio::spawn(async move { + let interval = std::time::Duration::from_secs(torrent_info_update_interval); + let mut interval = tokio::time::interval(interval); + interval.tick().await; // first tick is immediate... + loop { + interval.tick().await; + if let Some(tracker) = weak_tracker_statistics_importer.upgrade() { + drop(tracker.import_all_torrents_statistics().await); + } else { + break; + } + } + }); + + // Start API server + + let running_api = start(app_data, &net_ip, net_port, api_version).await; + + Running { + api_socket_addr: running_api.socket_addr, + api_server: running_api.api_server, + tracker_data_importer_handle: tracker_statistics_importer_handle, + } +} diff --git a/src/auth.rs b/src/auth.rs deleted file mode 100644 index c13e5da0..00000000 --- a/src/auth.rs +++ /dev/null @@ -1,87 +0,0 @@ -use actix_web::HttpRequest; -use crate::models::user::{UserClaims, UserCompact}; -use jsonwebtoken::{decode, DecodingKey, Validation, Algorithm, encode, Header, EncodingKey}; -use crate::utils::time::current_time; -use crate::errors::ServiceError; -use std::sync::Arc; -use crate::config::Configuration; -use crate::databases::database::Database; - -pub struct AuthorizationService { - cfg: Arc, - database: Arc> -} - -impl AuthorizationService { - pub fn new(cfg: Arc, database: Arc>) -> AuthorizationService { - AuthorizationService { - cfg, - database - } - } - - pub async fn sign_jwt(&self, user: UserCompact) -> String { - let settings = self.cfg.settings.read().await; - - // create JWT that expires in two weeks - let key = settings.auth.secret_key.as_bytes(); - // TODO: create config option for setting the token validity in seconds - let exp_date = current_time() + 1_209_600; // two weeks from now - - let claims = UserClaims { - user, - exp: exp_date, - }; - - let token = encode( - &Header::default(), - &claims, - &EncodingKey::from_secret(key), - ) - .unwrap(); - - token - } - - pub async fn verify_jwt(&self, token: &str) -> Result { - let settings = self.cfg.settings.read().await; - - match decode::( - token, - &DecodingKey::from_secret(settings.auth.secret_key.as_bytes()), - &Validation::new(Algorithm::HS256), - ) { - Ok(token_data) => { - if token_data.claims.exp < current_time() { - return Err(ServiceError::TokenExpired) - } - Ok(token_data.claims) - }, - Err(_) => Err(ServiceError::TokenInvalid) - } - } - - pub async fn get_claims_from_request(&self, req: &HttpRequest) -> Result { - let _auth = req.headers().get("Authorization"); - match _auth { - Some(_) => { - let _split: Vec<&str> = _auth.unwrap().to_str().unwrap().split("Bearer").collect(); - let token = _split[1].trim(); - - match self.verify_jwt(token).await { - Ok(claims) => Ok(claims), - 
Err(e) => Err(e), - } - } - None => Err(ServiceError::TokenNotFound) - } - } - - pub async fn get_user_compact_from_request(&self, req: &HttpRequest) -> Result { - let claims = self.get_claims_from_request(req).await?; - - self.database.get_user_compact_from_id(claims.user.user_id) - .await - .map_err(|_| ServiceError::UserNotFound) - } -} diff --git a/src/bin/import_tracker_statistics.rs b/src/bin/import_tracker_statistics.rs new file mode 100644 index 00000000..a405248b --- /dev/null +++ b/src/bin/import_tracker_statistics.rs @@ -0,0 +1,11 @@ +//! Import Tracker Statistics command. +//! +//! It imports the number of seeders and leechers for all torrent from the linked tracker. +//! +//! You can execute it with: `cargo run --bin import_tracker_statistics` +use torrust_index::console::commands::import_tracker_statistics::run_importer; + +#[tokio::main] +async fn main() { + run_importer().await; +} diff --git a/src/bin/parse_torrent.rs b/src/bin/parse_torrent.rs new file mode 100644 index 00000000..693d9249 --- /dev/null +++ b/src/bin/parse_torrent.rs @@ -0,0 +1,41 @@ +//! Command line tool to parse a torrent file and print the decoded torrent. +//! +//! It's only used for debugging purposes. +use std::env; +use std::fs::File; +use std::io::{self, Read}; + +use serde_bencode::de::from_bytes; +use serde_bencode::value::Value as BValue; +use torrust_index::utils::parse_torrent; + +fn main() -> io::Result<()> { + let args: Vec = env::args().collect(); + if args.len() != 2 { + eprintln!("Usage: cargo run --bin parse_torrent "); + eprintln!("Example: cargo run --bin parse_torrent ./tests/fixtures/torrents/MC_GRID.zip-3cd18ff2d3eec881207dcc5ca5a2c3a2a3afe462.torrent"); + std::process::exit(1); + } + + println!("Reading the torrent file ..."); + + let mut file = File::open(&args[1])?; + let mut bytes = Vec::new(); + file.read_to_end(&mut bytes)?; + + println!("Decoding torrent with standard serde implementation ..."); + + match from_bytes::(&bytes) { + Ok(_value) => match parse_torrent::decode_torrent(&bytes) { + Ok(torrent) => { + println!("Parsed torrent: \n{torrent:#?}"); + Ok(()) + } + Err(e) => Err(io::Error::new(io::ErrorKind::Other, format!("Error: invalid torrent!. {e}"))), + }, + Err(e) => Err(io::Error::new( + io::ErrorKind::Other, + format!("Error: invalid bencode data!. {e}"), + )), + } +} diff --git a/src/bin/upgrade.rs b/src/bin/upgrade.rs new file mode 100644 index 00000000..fd072f4f --- /dev/null +++ b/src/bin/upgrade.rs @@ -0,0 +1,10 @@ +//! Upgrade command. +//! It updates the application from version v1.0.0 to v2.0.0. +//! You can execute it with: `cargo run --bin upgrade ./data.db ./data_v2.db ./uploads` + +use torrust_index::upgrades::from_v1_0_0_to_v2_0_0::upgrader::run; + +#[tokio::main] +async fn main() { + run().await; +} diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs new file mode 100644 index 00000000..49f661ac --- /dev/null +++ b/src/bootstrap/config.rs @@ -0,0 +1,62 @@ +//! Initialize configuration from file or env var. +//! +//! All environment variables are prefixed with `TORRUST_INDEX_`. + +// Environment variables + +use crate::config::{Configuration, Info}; + +/// The whole `index.toml` file content. It has priority over the config file. +/// Even if the file is not on the default path. +const ENV_VAR_CONFIG: &str = "TORRUST_INDEX_CONFIG"; + +/// Token needed to communicate with the Torrust Tracker +const ENV_VAR_API_ADMIN_TOKEN: &str = "TORRUST_INDEX_TRACKER_API_TOKEN"; + +/// The `index.toml` file location. 
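+///
+/// For example (hypothetical location): `TORRUST_INDEX_PATH_CONFIG=./config/index.toml`.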
+pub const ENV_VAR_PATH_CONFIG: &str = "TORRUST_INDEX_PATH_CONFIG"; + +// Default values +pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/index.development.sqlite3.toml"; + +/// If present, CORS will be permissive. +pub const ENV_VAR_CORS_PERMISSIVE: &str = "TORRUST_INDEX_BACK_CORS_PERMISSIVE"; + +/// It loads the application configuration from the environment. +/// +/// There are two methods to inject the configuration: +/// +/// 1. By using a config file: `index.toml`. +/// 2. Environment variable: `TORRUST_INDEX_CONFIG`. The variable contains the same contents as the `index.toml` file. +/// +/// Environment variable has priority over the config file. +/// +/// Refer to the [configuration documentation](https://docs.rs/torrust-index-configuration) for the configuration options. +/// +/// # Panics +/// +/// Will panic if it can't load the configuration from either +/// `./index.toml` file or the env var `TORRUST_INDEX_CONFIG`. +#[must_use] +pub fn initialize_configuration() -> Configuration { + let info = Info::new( + ENV_VAR_CONFIG.to_string(), + ENV_VAR_PATH_CONFIG.to_string(), + DEFAULT_PATH_CONFIG.to_string(), + ENV_VAR_API_ADMIN_TOKEN.to_string(), + ) + .unwrap(); + + Configuration::load(&info).unwrap() +} + +#[cfg(test)] +mod tests { + + #[test] + fn it_should_load_with_default_config() { + use crate::bootstrap::config::initialize_configuration; + + drop(initialize_configuration()); + } +} diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs new file mode 100644 index 00000000..8546720f --- /dev/null +++ b/src/bootstrap/logging.rs @@ -0,0 +1,54 @@ +//! Setup for the application logging. +//! +//! - `Off` +//! - `Error` +//! - `Warn` +//! - `Info` +//! - `Debug` +//! - `Trace` +use std::str::FromStr; +use std::sync::Once; + +use log::{info, LevelFilter}; + +static INIT: Once = Once::new(); + +pub fn setup(log_level: &Option) { + let level = config_level_or_default(log_level); + + if level == log::LevelFilter::Off { + return; + } + + INIT.call_once(|| { + stdout_config(level); + }); +} + +fn config_level_or_default(log_level: &Option) -> LevelFilter { + match log_level { + None => log::LevelFilter::Info, + Some(level) => LevelFilter::from_str(level).unwrap(), + } +} + +fn stdout_config(level: LevelFilter) { + if let Err(_err) = fern::Dispatch::new() + .format(|out, message, record| { + out.finish(format_args!( + "{} [{}][{}] {}", + chrono::Local::now().format("%+"), + record.target(), + record.level(), + message + )); + }) + .level(level) + .chain(std::io::stdout()) + .apply() + { + panic!("Failed to initialize logging.") + } + + info!("logging initialized."); +} diff --git a/src/bootstrap/mod.rs b/src/bootstrap/mod.rs new file mode 100644 index 00000000..8b0e66a6 --- /dev/null +++ b/src/bootstrap/mod.rs @@ -0,0 +1,2 @@ +pub mod config; +pub mod logging; diff --git a/src/cache/cache.rs b/src/cache/cache.rs new file mode 100644 index 00000000..ce842448 --- /dev/null +++ b/src/cache/cache.rs @@ -0,0 +1,201 @@ +use bytes::Bytes; +use indexmap::IndexMap; + +#[derive(Debug)] +pub enum Error { + EntrySizeLimitExceedsTotalCapacity, + BytesExceedEntrySizeLimit, + CacheCapacityIsTooSmall, +} + +#[derive(Debug, Clone)] +pub struct BytesCacheEntry { + pub bytes: Bytes, +} + +// Individual entry destined for the byte cache. 
+impl BytesCacheEntry { + pub fn new(bytes: Bytes) -> Self { + Self { bytes } + } +} + +pub struct BytesCache { + bytes_table: IndexMap, + total_capacity: usize, + entry_size_limit: usize, +} + +impl BytesCache { + #[must_use] + pub fn new() -> Self { + Self { + bytes_table: IndexMap::new(), + total_capacity: 0, + entry_size_limit: 0, + } + } + + // With a total capacity in bytes. + #[must_use] + pub fn with_capacity(capacity: usize) -> Self { + let mut new = Self::new(); + + new.total_capacity = capacity; + + new + } + + // With a limit for individual entry sizes. + #[must_use] + pub fn with_entry_size_limit(entry_size_limit: usize) -> Self { + let mut new = Self::new(); + + new.entry_size_limit = entry_size_limit; + + new + } + + // With both a total capacity limit and an individual entry size limit. + pub fn with_capacity_and_entry_size_limit(capacity: usize, entry_size_limit: usize) -> Result { + if entry_size_limit > capacity { + return Err(Error::EntrySizeLimitExceedsTotalCapacity); + } + + let mut new = Self::new(); + + new.total_capacity = capacity; + new.entry_size_limit = entry_size_limit; + + Ok(new) + } + + pub async fn get(&self, key: &str) -> Option { + self.bytes_table.get(key).cloned() + } + + // Return the amount of entries in the map. + pub async fn len(&self) -> usize { + self.bytes_table.len() + } + + // Size of all the entry bytes combined. + #[must_use] + pub fn total_size(&self) -> usize { + let mut size: usize = 0; + + for (_, entry) in self.bytes_table.iter() { + size += entry.bytes.len(); + } + + size + } + + // Insert bytes using key. + // TODO: Freed space might need to be reserved. Hold and pass write lock between functions? + // For TO DO above: semaphore: Arc, might be a solution. + pub async fn set(&mut self, key: String, bytes: Bytes) -> Result, Error> { + if bytes.len() > self.entry_size_limit { + return Err(Error::BytesExceedEntrySizeLimit); + } + + // Remove the old entry so that a new entry will be added as last in the queue. + let _ = self.bytes_table.shift_remove(&key); + + let bytes_cache_entry = BytesCacheEntry::new(bytes); + + self.free_size(bytes_cache_entry.bytes.len())?; + + Ok(self.bytes_table.insert(key, bytes_cache_entry)) + } + + // Free space. Size amount in bytes. + fn free_size(&mut self, size: usize) -> Result<(), Error> { + // Size may not exceed the total capacity of the bytes cache. + if size > self.total_capacity { + return Err(Error::CacheCapacityIsTooSmall); + } + + let cache_size = self.total_size(); + let size_to_be_freed = size.saturating_sub(self.total_capacity - cache_size); + let mut size_freed: usize = 0; + + while size_freed < size_to_be_freed { + let oldest_entry = self + .pop() + .expect("bytes cache has no more entries, yet there isn't enough space."); + + size_freed += oldest_entry.bytes.len(); + } + + Ok(()) + } + + // Remove and return the oldest entry. 
+ pub fn pop(&mut self) -> Option { + self.bytes_table.shift_remove_index(0).map(|(_, entry)| entry) + } +} + +impl Default for BytesCache { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use bytes::Bytes; + + use crate::cache::cache::BytesCache; + + #[tokio::test] + async fn set_bytes_cache_with_capacity_and_entry_size_limit_should_succeed() { + let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(6, 6).unwrap(); + let bytes: Bytes = Bytes::from("abcdef"); + + assert!(bytes_cache.set("1".to_string(), bytes).await.is_ok()) + } + + #[tokio::test] + async fn given_a_bytes_cache_with_a_capacity_and_entry_size_limit_it_should_allow_adding_new_entries_if_the_limit_is_not_exceeded( + ) { + let bytes: Bytes = Bytes::from("abcdef"); + + let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(bytes.len() * 2, bytes.len()).unwrap(); + + // Add first entry (6 bytes) + assert!(bytes_cache.set("key1".to_string(), bytes.clone()).await.is_ok()); + + // Add second entry (6 bytes) + assert!(bytes_cache.set("key2".to_string(), bytes).await.is_ok()); + + // Both entries were added because we did not reach the limit + assert_eq!(bytes_cache.len().await, 2) + } + + #[tokio::test] + async fn given_a_bytes_cache_with_a_capacity_and_entry_size_limit_it_should_not_allow_adding_new_entries_if_the_capacity_is_exceeded( + ) { + let bytes: Bytes = Bytes::from("abcdef"); + + let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(bytes.len() * 2 - 1, bytes.len()).unwrap(); + + // Add first entry (6 bytes) + assert!(bytes_cache.set("key1".to_string(), bytes.clone()).await.is_ok()); + + // Add second entry (6 bytes) + assert!(bytes_cache.set("key2".to_string(), bytes).await.is_ok()); + + // Only one entry is in the cache, because otherwise the total capacity would have been exceeded + assert_eq!(bytes_cache.len().await, 1) + } + + #[tokio::test] + async fn set_bytes_cache_with_capacity_and_entry_size_limit_should_fail() { + let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(6, 5).unwrap(); + let bytes: Bytes = Bytes::from("abcdef"); + + assert!(bytes_cache.set("1".to_string(), bytes).await.is_err()) + } +} diff --git a/src/cache/image/manager.rs b/src/cache/image/manager.rs new file mode 100644 index 00000000..24a7e771 --- /dev/null +++ b/src/cache/image/manager.rs @@ -0,0 +1,235 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::time::{Duration, SystemTime}; + +use bytes::Bytes; +use tokio::sync::RwLock; + +use crate::cache::BytesCache; +use crate::config::Configuration; +use crate::models::user::UserCompact; + +pub enum Error { + UrlIsUnreachable, + UrlIsNotAnImage, + ImageTooBig, + UserQuotaMet, + Unauthenticated, +} + +type UserQuotas = HashMap; + +/// Returns the current time in seconds. +/// +/// # Panics +/// +/// This function will panic if the current time is before the UNIX EPOCH. 
+#[must_use]
+pub fn now_in_secs() -> u64 {
+    SystemTime::now()
+        .duration_since(SystemTime::UNIX_EPOCH)
+        .expect("SystemTime before UNIX EPOCH!")
+        .as_secs()
+}
+
+#[derive(Clone)]
+pub struct ImageCacheQuota {
+    pub user_id: i64,
+    pub usage: usize,
+    pub max_usage: usize,
+    pub date_start_secs: u64,
+    pub period_secs: u64,
+}
+
+impl ImageCacheQuota {
+    #[must_use]
+    pub fn new(user_id: i64, max_usage: usize, period_secs: u64) -> Self {
+        Self {
+            user_id,
+            usage: 0,
+            max_usage,
+            date_start_secs: now_in_secs(),
+            period_secs,
+        }
+    }
+
+    /// Add Usage Quota
+    ///
+    /// # Errors
+    ///
+    /// This function will return an `Error::UserQuotaMet` if the user quota has been met.
+    pub fn add_usage(&mut self, amount: usize) -> Result<(), Error> {
+        // Check if quota needs to be reset.
+        if now_in_secs() - self.date_start_secs > self.period_secs {
+            self.reset();
+        }
+
+        if self.is_reached() {
+            return Err(Error::UserQuotaMet);
+        }
+
+        self.usage = self.usage.saturating_add(amount);
+
+        Ok(())
+    }
+
+    pub fn reset(&mut self) {
+        self.usage = 0;
+        self.date_start_secs = now_in_secs();
+    }
+
+    #[must_use]
+    pub fn is_reached(&self) -> bool {
+        self.usage >= self.max_usage
+    }
+}
+
+pub struct ImageCacheService {
+    image_cache: RwLock<BytesCache>,
+    user_quotas: RwLock<UserQuotas>,
+    reqwest_client: reqwest::Client,
+    cfg: Arc<Configuration>,
+}
+
+impl ImageCacheService {
+    /// Create a new image cache service.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the image cache could not be created.
+    pub async fn new(cfg: Arc<Configuration>) -> Self {
+        let settings = cfg.settings.read().await;
+
+        let image_cache =
+            BytesCache::with_capacity_and_entry_size_limit(settings.image_cache.capacity, settings.image_cache.entry_size_limit)
+                .expect("Could not create image cache.");
+
+        let reqwest_client = reqwest::Client::builder()
+            .timeout(Duration::from_millis(settings.image_cache.max_request_timeout_ms))
+            .build()
+            .expect("unable to build client request");
+
+        drop(settings);
+
+        Self {
+            image_cache: RwLock::new(image_cache),
+            user_quotas: RwLock::new(HashMap::new()),
+            reqwest_client,
+            cfg,
+        }
+    }
+
+    /// Get an image from the URL and insert it into the cache if it isn't cached already.
+    /// Unauthenticated users can only get already cached images.
+    ///
+    /// # Errors
+    ///
+    /// Returns an `Error::Unauthenticated` if the user has not been authenticated.
+    pub async fn get_image_by_url(&self, url: &str, opt_user: Option<UserCompact>) -> Result<Bytes, Error> {
+        if let Some(entry) = self.image_cache.read().await.get(url).await {
+            return Ok(entry.bytes);
+        }
+
+        match opt_user {
+            None => Err(Error::Unauthenticated),
+
+            Some(user) => {
+                self.check_user_quota(&user).await?;
+
+                let image_bytes = self.get_image_from_url_as_bytes(url).await?;
+
+                self.check_image_size(&image_bytes).await?;
+
+                // These two functions could be executed after returning the image to the client,
+                // but then we would need a dedicated task or thread that executes these functions.
+                // This can be problematic if a task is spawned after every user request.
+                // Since these functions execute very fast, I don't see a reason to further optimize this.
+                // For now.
+ self.update_image_cache(url, &image_bytes).await?; + + self.update_user_quota(&user, image_bytes.len()).await?; + + Ok(image_bytes) + } + } + } + + async fn get_image_from_url_as_bytes(&self, url: &str) -> Result { + let res = self + .reqwest_client + .clone() + .get(url) + .send() + .await + .map_err(|_| Error::UrlIsUnreachable)?; + + // code-review: we could get a HTTP 304 response, which doesn't contain a body (the image bytes). + + if let Some(content_type) = res.headers().get("Content-Type") { + if content_type != "image/jpeg" && content_type != "image/png" { + return Err(Error::UrlIsNotAnImage); + } + } else { + return Err(Error::UrlIsNotAnImage); + } + + res.bytes().await.map_err(|_| Error::UrlIsNotAnImage) + } + + async fn check_user_quota(&self, user: &UserCompact) -> Result<(), Error> { + if let Some(quota) = self.user_quotas.read().await.get(&user.user_id) { + if quota.is_reached() { + return Err(Error::UserQuotaMet); + } + } + + Ok(()) + } + + async fn check_image_size(&self, image_bytes: &Bytes) -> Result<(), Error> { + let settings = self.cfg.settings.read().await; + + if image_bytes.len() > settings.image_cache.entry_size_limit { + return Err(Error::ImageTooBig); + } + + Ok(()) + } + + async fn update_image_cache(&self, url: &str, image_bytes: &Bytes) -> Result<(), Error> { + if self + .image_cache + .write() + .await + .set(url.to_string(), image_bytes.clone()) + .await + .is_err() + { + return Err(Error::ImageTooBig); + } + + Ok(()) + } + + async fn update_user_quota(&self, user: &UserCompact, amount: usize) -> Result<(), Error> { + let settings = self.cfg.settings.read().await; + + let mut quota = self + .user_quotas + .read() + .await + .get(&user.user_id) + .cloned() + .unwrap_or(ImageCacheQuota::new( + user.user_id, + settings.image_cache.user_quota_bytes, + settings.image_cache.user_quota_period_seconds, + )); + + let _ = quota.add_usage(amount); + + let _ = self.user_quotas.write().await.insert(user.user_id, quota); + + Ok(()) + } +} diff --git a/src/cache/image/mod.rs b/src/cache/image/mod.rs new file mode 100644 index 00000000..ff8de9eb --- /dev/null +++ b/src/cache/image/mod.rs @@ -0,0 +1 @@ +pub mod manager; diff --git a/src/cache/mod.rs b/src/cache/mod.rs new file mode 100644 index 00000000..4dfc5af3 --- /dev/null +++ b/src/cache/mod.rs @@ -0,0 +1,222 @@ +pub mod image; + +use bytes::Bytes; +use indexmap::IndexMap; + +#[derive(Debug)] +pub enum Error { + EntrySizeLimitExceedsTotalCapacity, + BytesExceedEntrySizeLimit, + CacheCapacityIsTooSmall, +} + +#[derive(Debug, Clone)] +pub struct BytesCacheEntry { + pub bytes: Bytes, +} + +// Individual entry destined for the byte cache. +impl BytesCacheEntry { + pub fn new(bytes: Bytes) -> Self { + Self { bytes } + } +} +#[allow(clippy::module_name_repetitions)] +pub struct BytesCache { + bytes_table: IndexMap, + total_capacity: usize, + entry_size_limit: usize, +} + +impl BytesCache { + #[must_use] + pub fn new() -> Self { + Self { + bytes_table: IndexMap::new(), + total_capacity: 0, + entry_size_limit: 0, + } + } + + // With a total capacity in bytes. + #[must_use] + pub fn with_capacity(capacity: usize) -> Self { + let mut new = Self::new(); + + new.total_capacity = capacity; + + new + } + + // With a limit for individual entry sizes. + #[must_use] + pub fn with_entry_size_limit(entry_size_limit: usize) -> Self { + let mut new = Self::new(); + + new.entry_size_limit = entry_size_limit; + + new + } + + /// Helper to create a new bytes cache with both an individual entry and size limit. 
+ /// + /// # Errors + /// + /// This function will return `Error::EntrySizeLimitExceedsTotalCapacity` if the specified size is too large. + /// + pub fn with_capacity_and_entry_size_limit(capacity: usize, entry_size_limit: usize) -> Result { + if entry_size_limit > capacity { + return Err(Error::EntrySizeLimitExceedsTotalCapacity); + } + + let mut new = Self::new(); + + new.total_capacity = capacity; + new.entry_size_limit = entry_size_limit; + + Ok(new) + } + + #[allow(clippy::unused_async)] + pub async fn get(&self, key: &str) -> Option { + self.bytes_table.get(key).cloned() + } + + // Return the amount of entries in the map. + #[allow(clippy::unused_async)] + pub async fn len(&self) -> usize { + self.bytes_table.len() + } + + #[allow(clippy::unused_async)] + pub async fn is_empty(&self) -> bool { + self.bytes_table.is_empty() + } + + // Size of all the entry bytes combined. + #[must_use] + pub fn total_size(&self) -> usize { + let mut size: usize = 0; + + for (_, entry) in &self.bytes_table { + size += entry.bytes.len(); + } + + size + } + + /// Adds a image to the cache. + /// + /// # Errors + /// + /// This function will return an error if there is not enough free size. + /// + // Insert bytes using key. + // TODO: Freed space might need to be reserved. Hold and pass write lock between functions? + // For TO DO above: semaphore: Arc, might be a solution. + #[allow(clippy::unused_async)] + pub async fn set(&mut self, key: String, bytes: Bytes) -> Result, Error> { + if bytes.len() > self.entry_size_limit { + return Err(Error::BytesExceedEntrySizeLimit); + } + + // Remove the old entry so that a new entry will be added as last in the queue. + drop(self.bytes_table.shift_remove(&key)); + + let bytes_cache_entry = BytesCacheEntry::new(bytes); + + self.free_size(bytes_cache_entry.bytes.len())?; + + Ok(self.bytes_table.insert(key, bytes_cache_entry)) + } + + // Free space. Size amount in bytes. + fn free_size(&mut self, size: usize) -> Result<(), Error> { + // Size may not exceed the total capacity of the bytes cache. + if size > self.total_capacity { + return Err(Error::CacheCapacityIsTooSmall); + } + + let cache_size = self.total_size(); + let size_to_be_freed = size.saturating_sub(self.total_capacity - cache_size); + let mut size_freed: usize = 0; + + while size_freed < size_to_be_freed { + let oldest_entry = self + .pop() + .expect("bytes cache has no more entries, yet there isn't enough space."); + + size_freed += oldest_entry.bytes.len(); + } + + Ok(()) + } + + // Remove and return the oldest entry. 
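+    // Note: the `IndexMap` keeps insertion order and `set` re-inserts updated
+    // keys at the end, so index `0` is always the least recently added entry.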
+ pub fn pop(&mut self) -> Option { + self.bytes_table.shift_remove_index(0).map(|(_, entry)| entry) + } +} + +impl Default for BytesCache { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use bytes::Bytes; + + use crate::cache::BytesCache; + + #[tokio::test] + async fn set_bytes_cache_with_capacity_and_entry_size_limit_should_succeed() { + let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(6, 6).unwrap(); + let bytes: Bytes = Bytes::from("abcdef"); + + assert!(bytes_cache.set("1".to_string(), bytes).await.is_ok()); + } + + #[tokio::test] + async fn given_a_bytes_cache_with_a_capacity_and_entry_size_limit_it_should_allow_adding_new_entries_if_the_limit_is_not_exceeded( + ) { + let bytes: Bytes = Bytes::from("abcdef"); + + let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(bytes.len() * 2, bytes.len()).unwrap(); + + // Add first entry (6 bytes) + assert!(bytes_cache.set("key1".to_string(), bytes.clone()).await.is_ok()); + + // Add second entry (6 bytes) + assert!(bytes_cache.set("key2".to_string(), bytes).await.is_ok()); + + // Both entries were added because we did not reach the limit + assert_eq!(bytes_cache.len().await, 2); + } + + #[tokio::test] + async fn given_a_bytes_cache_with_a_capacity_and_entry_size_limit_it_should_not_allow_adding_new_entries_if_the_capacity_is_exceeded( + ) { + let bytes: Bytes = Bytes::from("abcdef"); + + let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(bytes.len() * 2 - 1, bytes.len()).unwrap(); + + // Add first entry (6 bytes) + assert!(bytes_cache.set("key1".to_string(), bytes.clone()).await.is_ok()); + + // Add second entry (6 bytes) + assert!(bytes_cache.set("key2".to_string(), bytes).await.is_ok()); + + // Only one entry is in the cache, because otherwise the total capacity would have been exceeded + assert_eq!(bytes_cache.len().await, 1); + } + + #[tokio::test] + async fn set_bytes_cache_with_capacity_and_entry_size_limit_should_fail() { + let mut bytes_cache = BytesCache::with_capacity_and_entry_size_limit(6, 5).unwrap(); + let bytes: Bytes = Bytes::from("abcdef"); + + assert!(bytes_cache.set("1".to_string(), bytes).await.is_err()); + } +} diff --git a/src/common.rs b/src/common.rs index 2f11f6ec..bf16889a 100644 --- a/src/common.rs +++ b/src/common.rs @@ -1,30 +1,123 @@ use std::sync::Arc; + +use crate::cache::image::manager::ImageCacheService; use crate::config::Configuration; -use crate::auth::AuthorizationService; use crate::databases::database::Database; -use crate::tracker::TrackerService; -use crate::mailer::MailerService; - +use crate::services::authentication::{DbUserAuthenticationRepository, JsonWebToken, Service}; +use crate::services::category::{self, DbCategoryRepository}; +use crate::services::tag::{self, DbTagRepository}; +use crate::services::torrent::{ + DbCanonicalInfoHashGroupRepository, DbTorrentAnnounceUrlRepository, DbTorrentFileRepository, DbTorrentInfoRepository, + DbTorrentListingGenerator, DbTorrentRepository, DbTorrentTagRepository, +}; +use crate::services::user::{self, DbBannedUserList, DbUserProfileRepository, DbUserRepository}; +use crate::services::{proxy, settings, torrent}; +use crate::tracker::statistics_importer::StatisticsImporter; +use crate::web::api::v1::auth::Authentication; +use crate::{mailer, tracker}; pub type Username = String; -pub type WebAppData = actix_web::web::Data>; - pub struct AppData { pub cfg: Arc, pub database: Arc>, - pub auth: Arc, - pub tracker: Arc, - pub mailer: Arc + pub json_web_token: Arc, + pub auth: 
Arc, + pub authentication_service: Arc, + pub tracker_service: Arc, + pub tracker_statistics_importer: Arc, + pub mailer: Arc, + pub image_cache_manager: Arc, + // Repositories + pub category_repository: Arc, + pub tag_repository: Arc, + pub user_repository: Arc, + pub user_authentication_repository: Arc, + pub user_profile_repository: Arc, + pub torrent_repository: Arc, + pub torrent_info_hash_repository: Arc, + pub torrent_info_repository: Arc, + pub torrent_file_repository: Arc, + pub torrent_announce_url_repository: Arc, + pub torrent_tag_repository: Arc, + pub torrent_listing_generator: Arc, + pub banned_user_list: Arc, + // Services + pub category_service: Arc, + pub tag_service: Arc, + pub proxy_service: Arc, + pub settings_service: Arc, + pub torrent_service: Arc, + pub registration_service: Arc, + pub ban_service: Arc, } impl AppData { - pub fn new(cfg: Arc, database: Arc>, auth: Arc, tracker: Arc, mailer: Arc) -> AppData { + #[allow(clippy::too_many_arguments)] + pub fn new( + cfg: Arc, + database: Arc>, + json_web_token: Arc, + auth: Arc, + authentication_service: Arc, + tracker_service: Arc, + tracker_statistics_importer: Arc, + mailer: Arc, + image_cache_manager: Arc, + // Repositories + category_repository: Arc, + tag_repository: Arc, + user_repository: Arc, + user_authentication_repository: Arc, + user_profile_repository: Arc, + torrent_repository: Arc, + torrent_info_hash_repository: Arc, + torrent_info_repository: Arc, + torrent_file_repository: Arc, + torrent_announce_url_repository: Arc, + torrent_tag_repository: Arc, + torrent_listing_generator: Arc, + banned_user_list: Arc, + // Services + category_service: Arc, + tag_service: Arc, + proxy_service: Arc, + settings_service: Arc, + torrent_service: Arc, + registration_service: Arc, + ban_service: Arc, + ) -> AppData { AppData { cfg, database, + json_web_token, auth, - tracker, + authentication_service, + tracker_service, + tracker_statistics_importer, mailer, + image_cache_manager, + // Repositories + category_repository, + tag_repository, + user_repository, + user_authentication_repository, + user_profile_repository, + torrent_repository, + torrent_info_hash_repository, + torrent_info_repository, + torrent_file_repository, + torrent_announce_url_repository, + torrent_tag_repository, + torrent_listing_generator, + banned_user_list, + // Services + category_service, + tag_service, + proxy_service, + settings_service, + torrent_service, + registration_service, + ban_service, } } } diff --git a/src/config.rs b/src/config.rs index 89078f2d..941c3921 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,179 +1,494 @@ -use std::fs; -use config::{ConfigError, Config, File}; +//! Configuration for the application. 
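+//!
+//! The settings are injected either as a whole TOML document through the
+//! `TORRUST_INDEX_CONFIG` environment variable or from a TOML file on disk
+//! (see [`Configuration::load`] and [`Configuration::load_from_file`] below).
+//! A minimal illustrative fragment, using the default values shipped under
+//! `share/default/config/`:
+//!
+//! ```toml
+//! log_level = "info"
+//!
+//! [website]
+//! name = "Torrust"
+//!
+//! [database]
+//! connect_url = "sqlite://data.db?mode=rwc"
+//! ```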
use std::path::Path; -use serde::{Serialize, Deserialize}; +use std::sync::Arc; +use std::{env, fs}; + +use config::{Config, ConfigError, File, FileFormat}; +use log::warn; +use serde::{Deserialize, Serialize}; +use thiserror::Error; use tokio::sync::RwLock; -use crate::databases::database::DatabaseDriver; +use torrust_index_located_error::{Located, LocatedError}; + +/// Information required for loading config +#[derive(Debug, Default, Clone)] +pub struct Info { + index_toml: String, + tracker_api_token: Option, +} + +impl Info { + /// Build Configuration Info + /// + /// # Examples + /// + /// ```no_run + /// # use torrust_index::config::Info; + /// # let (env_var_config, env_var_path_config, default_path_config, env_var_tracker_api_token) = ("".to_string(), "".to_string(), "".to_string(), "".to_string()); + /// let result = Info::new(env_var_config, env_var_path_config, default_path_config, env_var_tracker_api_token); + /// ``` + /// + /// # Errors + /// + /// Will return `Err` if unable to obtain a configuration. + /// + #[allow(clippy::needless_pass_by_value)] + pub fn new( + env_var_config: String, + env_var_path_config: String, + default_path_config: String, + env_var_tracker_api_token: String, + ) -> Result { + let index_toml = if let Ok(index_toml) = env::var(&env_var_config) { + println!("Loading configuration from env var {env_var_config} ..."); + + index_toml + } else { + let config_path = if let Ok(config_path) = env::var(env_var_path_config) { + println!("Loading configuration file: `{config_path}` ..."); + + config_path + } else { + println!("Loading default configuration file: `{default_path_config}` ..."); + + default_path_config + }; + + fs::read_to_string(config_path) + .map_err(|e| Error::UnableToLoadFromConfigFile { + source: (Arc::new(e) as Arc).into(), + })? + .parse() + .map_err(|_e: std::convert::Infallible| Error::Infallible)? + }; + let tracker_api_token = env::var(env_var_tracker_api_token).ok(); + + Ok(Self { + index_toml, + tracker_api_token, + }) + } +} +/// Errors that can occur when loading the configuration. +#[derive(Error, Debug)] +pub enum Error { + /// Unable to load the configuration from the environment variable. + /// This error only occurs if there is no configuration file and the + /// `TORRUST_TRACKER_CONFIG` environment variable is not set. + #[error("Unable to load from Environmental Variable: {source}")] + UnableToLoadFromEnvironmentVariable { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + #[error("Unable to load from Config File: {source}")] + UnableToLoadFromConfigFile { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + + /// Unable to load the configuration from the configuration file. + #[error("Failed processing the configuration: {source}")] + ConfigError { source: LocatedError<'static, ConfigError> }, + + #[error("The error for errors that can never happen.")] + Infallible, +} + +impl From for Error { + #[track_caller] + fn from(err: ConfigError) -> Self { + Self::ConfigError { + source: Located(err).into(), + } + } +} + +/// Information displayed to the user in the website. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Website { + /// The name of the website. pub name: String, } +impl Default for Website { + fn default() -> Self { + Self { + name: "Torrust".to_string(), + } + } +} + +/// See `TrackerMode` in [`torrust-tracker-primitives`](https://docs.rs/torrust-tracker-primitives) +/// crate for more information. 
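+///
+/// In the TOML configuration this is set as, for example, `mode = "Public"`
+/// (see the default configuration files under `share/default/config/`).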
#[derive(Debug, Clone, Serialize, Deserialize)] pub enum TrackerMode { + // todo: use https://crates.io/crates/torrust-tracker-primitives + /// Will track every new info hash and serve every peer. Public, + /// Will only serve authenticated peers. Private, + /// Will only track whitelisted info hashes. Whitelisted, - PrivateWhitelisted + /// Will only track whitelisted info hashes and serve authenticated peers. + PrivateWhitelisted, } +impl Default for TrackerMode { + fn default() -> Self { + Self::Public + } +} + +/// Configuration for the associated tracker. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Tracker { + /// Connection string for the tracker. For example: `udp://TRACKER_IP:6969`. pub url: String, + /// The mode of the tracker. For example: `Public`. + /// See `TrackerMode` in [`torrust-tracker-primitives`](https://docs.rs/torrust-tracker-primitives) + /// crate for more information. pub mode: TrackerMode, + /// The url of the tracker API. For example: `http://localhost:1212`. pub api_url: String, + /// The token used to authenticate with the tracker API. pub token: String, + /// The amount of seconds the token is valid. pub token_valid_seconds: u64, } +impl Tracker { + fn override_tracker_api_token(&mut self, tracker_api_token: &str) { + self.token = tracker_api_token.to_string(); + } +} + +impl Default for Tracker { + fn default() -> Self { + Self { + url: "udp://localhost:6969".to_string(), + mode: TrackerMode::default(), + api_url: "http://localhost:1212".to_string(), + token: "MyAccessToken".to_string(), + token_valid_seconds: 7_257_600, + } + } +} + +/// Port number representing that the OS will choose one randomly from the available ports. +/// +/// It's the port number `0` +pub const FREE_PORT: u16 = 0; + +/// The the base URL for the API. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Network { + /// The port to listen on. Default to `3001`. pub port: u16, + /// The base URL for the API. For example: `http://localhost`. + /// If not set, the base URL will be inferred from the request. pub base_url: Option, } +impl Default for Network { + fn default() -> Self { + Self { + port: 3001, + base_url: None, + } + } +} + +/// Whether the email is required on signup or not. #[derive(Debug, Clone, Serialize, Deserialize)] pub enum EmailOnSignup { + /// The email is required on signup. Required, + /// The email is optional on signup. Optional, - None + /// The email is not allowed on signup. It will only be ignored if provided. + None, // code-review: rename to `Ignored`? } +impl Default for EmailOnSignup { + fn default() -> Self { + Self::Optional + } +} + +/// Authentication options. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Auth { + /// Whether or not to require an email on signup. pub email_on_signup: EmailOnSignup, + /// The minimum password length. pub min_password_length: usize, + /// The maximum password length. pub max_password_length: usize, + /// The secret key used to sign JWT tokens. pub secret_key: String, } +impl Default for Auth { + fn default() -> Self { + Self { + email_on_signup: EmailOnSignup::default(), + min_password_length: 6, + max_password_length: 64, + secret_key: "MaxVerstappenWC2021".to_string(), + } + } +} + +/// Database configuration. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Database { - pub db_driver: DatabaseDriver, + /// The connection string for the database. For example: `sqlite://data.db?mode=rwc`. 
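+    /// For MySQL it takes a connection string such as the one used by the
+    /// default container configuration (illustrative credentials only):
+    /// `mysql://root:root_secret_password@mysql:3306/torrust_index_e2e_testing`.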
pub connect_url: String, - pub torrent_info_update_interval: u64, } +impl Default for Database { + fn default() -> Self { + Self { + connect_url: "sqlite://data.db?mode=rwc".to_string(), + } + } +} + +/// SMTP configuration. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Mail { + /// Whether or not to enable email verification on signup. pub email_verification_enabled: bool, + /// The email address to send emails from. pub from: String, + /// The email address to reply to. pub reply_to: String, + /// The username to use for SMTP authentication. pub username: String, + /// The password to use for SMTP authentication. pub password: String, + /// The SMTP server to use. pub server: String, + /// The SMTP port to use. pub port: u16, } +impl Default for Mail { + fn default() -> Self { + Self { + email_verification_enabled: false, + from: "example@email.com".to_string(), + reply_to: "noreply@email.com".to_string(), + username: String::default(), + password: String::default(), + server: String::default(), + port: 25, + } + } +} + +/// Configuration for the image proxy cache. +/// +/// Users have a cache quota per period. For example: 100MB per day. +/// When users are navigating the site, they will be downloading images that are +/// embedded in the torrent description. These images will be cached in the +/// proxy. The proxy will not download new images if the user has reached the +/// quota. +#[allow(clippy::module_name_repetitions)] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ImageCache { + /// Maximum time in seconds to wait for downloading the image form the original source. + pub max_request_timeout_ms: u64, + /// Cache size in bytes. + pub capacity: usize, + /// Maximum size in bytes for a single image. + pub entry_size_limit: usize, + /// Users have a cache quota per period. For example: 100MB per day. + /// This is the period in seconds (1 day in seconds). + pub user_quota_period_seconds: u64, + /// Users have a cache quota per period. For example: 100MB per day. + /// This is the maximum size in bytes (100MB in bytes). + pub user_quota_bytes: usize, +} + +/// Core configuration for the API +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Api { + /// The default page size for torrent lists. + pub default_torrent_page_size: u8, + /// The maximum page size for torrent lists. + pub max_torrent_page_size: u8, +} + +impl Default for Api { + fn default() -> Self { + Self { + default_torrent_page_size: 10, + max_torrent_page_size: 30, + } + } +} + +/// Configuration for the tracker statistics importer. #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct TorrustConfig { +pub struct TrackerStatisticsImporter { + /// The interval in seconds to get statistics from the tracker. + pub torrent_info_update_interval: u64, +} + +impl Default for TrackerStatisticsImporter { + fn default() -> Self { + Self { + torrent_info_update_interval: 3600, + } + } +} + +impl Default for ImageCache { + fn default() -> Self { + Self { + max_request_timeout_ms: 1000, + capacity: 128_000_000, + entry_size_limit: 4_000_000, + user_quota_period_seconds: 3600, + user_quota_bytes: 64_000_000, + } + } +} + +/// The whole configuration for the index. +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct TorrustIndex { + /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, + /// `Debug` and `Trace`. Default is `Info`. + pub log_level: Option, + /// The website customizable values. pub website: Website, + /// The tracker configuration. 
+
+/// The whole configuration for the index.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct TorrustIndex {
+ /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`,
+ /// `Debug` and `Trace`. Default is `Info`.
+ pub log_level: Option<String>,
+ /// The website customizable values. pub website: Website, + /// The tracker configuration. pub tracker: Tracker, + /// The network configuration. pub net: Network, + /// The authentication configuration. pub auth: Auth, + /// The database configuration. pub database: Database, + /// The SMTP configuration. pub mail: Mail, + /// The image proxy cache configuration. + pub image_cache: ImageCache, + /// The API configuration. + pub api: Api, + /// The tracker statistics importer job configuration. + pub tracker_statistics_importer: TrackerStatisticsImporter, }
+impl TorrustIndex { + fn override_tracker_api_token(&mut self, tracker_api_token: &str) { + self.tracker.override_tracker_api_token(tracker_api_token); + } +}
+
+/// The configuration service. #[derive(Debug)] pub struct Configuration { - pub settings: RwLock<TorrustConfig> + /// The state of the configuration. + pub settings: RwLock<TorrustIndex>, + /// The path to the configuration file. This is `None` if the configuration + /// was loaded from the environment. + pub config_path: Option<String>, }
-impl Configuration { - pub fn default() -> Configuration { - let torrust_config = TorrustConfig { - website: Website { - name: "Torrust".to_string() - }, - tracker: Tracker { - url: "udp://localhost:6969".to_string(), - mode: TrackerMode::Public, - api_url: "http://localhost:1212".to_string(), - token: "MyAccessToken".to_string(), - token_valid_seconds: 7257600 - }, - net: Network { - port: 3000, - base_url: None - }, - auth: Auth { - email_on_signup: EmailOnSignup::Optional, - min_password_length: 6, - max_password_length: 64, - secret_key: "MaxVerstappenWC2021".to_string() - }, - database: Database { - db_driver: DatabaseDriver::Sqlite3, - connect_url: "sqlite://data.db?mode=rwc".to_string(), - torrent_info_update_interval: 3600 - }, - mail: Mail { - email_verification_enabled: false, - from: "example@email.com".to_string(), - reply_to: "noreply@email.com".to_string(), - username: "".to_string(), - password: "".to_string(), - server: "".to_string(), - port: 25 - } - }; - +impl Default for Configuration { + fn default() -> Configuration { Configuration { - settings: RwLock::new(torrust_config) + settings: RwLock::new(TorrustIndex::default()), + config_path: None, } } +}
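Since the live settings now sit behind an async `RwLock` (presumably tokio's, matching the `.read().await` calls throughout this patch), reading a value looks like this editor's sketch, not part of the patch:

```rust
// Editor's sketch: read one setting through the async lock.
async fn print_tracker_url(configuration: &Configuration) {
    let settings = configuration.settings.read().await;
    println!("Tracker URL: {}", settings.tracker.url);
    // The read guard is dropped at the end of scope, releasing the lock for writers.
}
```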
- pub async fn load_from_file() -> Result<Configuration, ConfigError> { - let mut config = Config::new(); +impl Configuration { + /// Loads the configuration from the configuration file.
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if no configuration file exists at `config_path`; in that case a new default file is created.
+ /// This function will return an error if the `config` is not a valid `TorrustIndex` document.
+ pub async fn load_from_file(config_path: &str) -> Result<Configuration, ConfigError> { + let config_builder = Config::builder(); - const CONFIG_PATH: &str = "config.toml"; + #[allow(unused_assignments)] + let mut config = Config::default(); - if Path::new(CONFIG_PATH).exists() { - config.merge(File::with_name(CONFIG_PATH))?; + if Path::new(config_path).exists() { + config = config_builder.add_source(File::with_name(config_path)).build()?; } else { - eprintln!("No config file found."); - eprintln!("Creating config file.."); + warn!("No config file found. Creating default config file ..."); + let config = Configuration::default(); - let _ = config.save_to_file().await; - return Err(ConfigError::Message(format!("Please edit the config.TOML in the root folder and restart the tracker."))) + let () = config.save_to_file(config_path).await; + + return Err(ConfigError::Message(format!( + "No config file found. Created default config file in {config_path}. Edit the file and start the application." + ))); }
- let torrust_config: TorrustConfig = match config.try_into() { + let torrust_config: TorrustIndex = match config.try_deserialize() { Ok(data) => Ok(data), - Err(e) => Err(ConfigError::Message(format!("Errors while processing config: {}.", e))), + Err(e) => Err(ConfigError::Message(format!("Errors while processing config: {e}."))), }?;
Ok(Configuration { - settings: RwLock::new(torrust_config) + settings: RwLock::new(torrust_config), + config_path: Some(config_path.to_string()), + }) + }
+
+ /// Loads the configuration from the `Info` struct. The whole
+ /// configuration in TOML format is included in the `info.index_toml` string.
+ ///
+ /// Optionally overrides the tracker API token.
+ ///
+ /// # Errors
+ ///
+ /// Will return `Err` if the environment variable does not exist or has a bad configuration.
+ pub fn load(info: &Info) -> Result<Configuration, ConfigError> { + let config_builder = Config::builder() + .add_source(File::from_str(&info.index_toml, FileFormat::Toml)) + .build()?; + let mut index_config: TorrustIndex = config_builder.try_deserialize()?;
+
+ if let Some(ref token) = info.tracker_api_token { + index_config.override_tracker_api_token(token); + };
+
+ Ok(Configuration { + settings: RwLock::new(index_config), + config_path: None, }) }
- pub async fn save_to_file(&self) -> Result<(), ()>{ + /// Saves the configuration to the file at `config_path`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if it can't write to the file.
+ pub async fn save_to_file(&self, config_path: &str) { let settings = self.settings.read().await;
let toml_string = toml::to_string(&*settings).expect("Could not encode TOML value");
drop(settings);
- fs::write("config.toml", toml_string).expect("Could not write to file!"); - Ok(()) + fs::write(config_path, toml_string).expect("Could not write to file!"); }
- pub async fn update_settings(&self, new_settings: TorrustConfig) -> Result<(), ()> { - let mut settings = self.settings.write().await; - *settings = new_settings; - - drop(settings); - - let _ = self.save_to_file().await; + pub async fn get_all(&self) -> TorrustIndex { + let settings_lock = self.settings.read().await; - Ok(()) + settings_lock.clone() }
-}
-impl Configuration { pub async fn get_public(&self) -> ConfigurationPublic { let settings_lock = self.settings.read().await;
@@ -181,15 +496,29 @@ impl Configuration { website_name: settings_lock.website.name.clone(), tracker_url: settings_lock.tracker.url.clone(), tracker_mode: settings_lock.tracker.mode.clone(), - email_on_signup: settings_lock.auth.email_on_signup.clone() + email_on_signup: settings_lock.auth.email_on_signup.clone(), } }
+
+ pub async fn get_site_name(&self) -> String { + let settings_lock = self.settings.read().await;
+
+ settings_lock.website.name.clone() + }
+
+ pub async fn get_api_base_url(&self) -> Option<String> { + let settings_lock = self.settings.read().await;
+
+ settings_lock.net.base_url.clone() + } }
+/// The public index configuration.
+/// There is an endpoint to get this configuration. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ConfigurationPublic { website_name: String, tracker_url: String, tracker_mode: TrackerMode, - email_on_signup: EmailOnSignup + email_on_signup: EmailOnSignup, }
diff --git a/src/console/commands/import_tracker_statistics.rs b/src/console/commands/import_tracker_statistics.rs new file mode 100644 index 00000000..08acbb31 --- /dev/null +++ b/src/console/commands/import_tracker_statistics.rs @@ -0,0 +1,119 @@ +//! It imports statistics for all torrents from the linked tracker. +//!
+//! It imports the number of seeders and leechers for all torrents from the +//! associated tracker.
+//!
+//! You can execute it with: `cargo run --bin import_tracker_statistics`.
+//!
+//! After running it you will see the following output:
+//!
+//! ```text
+//! Importing statistics from linked tracker ...
+//! Loading configuration from config file `./config.toml`
+//! Tracker url: udp://localhost:6969
+//! ```
+//!
+//! Statistics are also imported:
+//!
+//! - Periodically by the importer job. The importer job is executed every hour
+//! by default. See [`TrackerStatisticsImporter`](crate::config::TrackerStatisticsImporter)
+//! for more details.
+//! - When a new torrent is added.
+//! - When the API returns data about a torrent, statistics are collected from
+//! the tracker in real time.
+use std::env;
+use std::sync::Arc;
+
+use derive_more::{Display, Error};
+use text_colorizer::Colorize;
+
+use crate::bootstrap::config::initialize_configuration;
+use crate::bootstrap::logging;
+use crate::databases::database;
+use crate::tracker::service::Service;
+use crate::tracker::statistics_importer::StatisticsImporter;
+
+const NUMBER_OF_ARGUMENTS: usize = 0;
+
+#[derive(Debug, Display, PartialEq, Error)]
+#[allow(dead_code)]
+pub enum ImportError {
+ #[display(fmt = "internal server error")]
+ WrongNumberOfArgumentsError,
+}
+
+fn parse_args() -> Result<(), ImportError> { + let args: Vec<String> = env::args().skip(1).collect();
+
+ if args.len() != NUMBER_OF_ARGUMENTS { + eprintln!( + "{} wrong number of arguments: expected {}, got {}", + "Error".red().bold(), + NUMBER_OF_ARGUMENTS, + args.len() + ); + print_usage(); + return Err(ImportError::WrongNumberOfArgumentsError); + }
+
+ Ok(())
+}
+
+fn print_usage() { + eprintln!( + "{} - imports torrents statistics from linked tracker.
+
+ cargo run --bin import_tracker_statistics
+
+ ", + "Tracker Statistics Importer".green() + );
+}
+
+/// Import Tracker Statistics Command
+///
+/// # Panics
+///
+/// Panics if arguments cannot be parsed.
+pub async fn run_importer() { + parse_args().expect("unable to parse command arguments"); + import().await; +}
+
+/// Runs the tracker statistics import.
+///
+/// # Panics
+///
+/// Panics if `Configuration::load_from_file` has any error.
+pub async fn import() { + println!("Importing statistics from linked tracker ..."); + + let configuration = initialize_configuration(); + + let log_level = configuration.settings.read().await.log_level.clone(); + + logging::setup(&log_level); + + let cfg = Arc::new(configuration); + + let settings = cfg.settings.read().await; + + let tracker_url = settings.tracker.url.clone(); + + eprintln!("Tracker url: {}", tracker_url.green()); + + let database = Arc::new( + database::connect(&settings.database.connect_url) + .await + .expect("unable to connect to db"), + ); + + let tracker_service = Arc::new(Service::new(cfg.clone(), database.clone()).await); + let tracker_statistics_importer = + Arc::new(StatisticsImporter::new(cfg.clone(), tracker_service.clone(), database.clone()).await); + + tracker_statistics_importer + .import_all_torrents_statistics() + .await + .expect("should import all torrents statistics"); +} diff --git a/src/console/commands/mod.rs b/src/console/commands/mod.rs new file mode 100644 index 00000000..6dad4966 --- /dev/null +++ b/src/console/commands/mod.rs @@ -0,0 +1 @@ +pub mod import_tracker_statistics; diff --git a/src/console/mod.rs b/src/console/mod.rs new file mode 100644 index 00000000..82b6da3c --- /dev/null +++ b/src/console/mod.rs @@ -0,0 +1 @@ +pub mod commands; diff --git a/src/databases/database.rs b/src/databases/database.rs index c22f8202..0d6e8c3e 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -1,33 +1,63 @@ use async_trait::async_trait; -use chrono::{NaiveDateTime}; -use serde::{Serialize, Deserialize}; -use crate::databases::mysql::MysqlDatabase; -use crate::databases::sqlite::SqliteDatabase; -use crate::models::response::{TorrentsResponse}; -use crate::models::torrent::TorrentListing; -use crate::models::torrent_file::{DbTorrentInfo, Torrent, TorrentFile}; +use chrono::NaiveDateTime; +use serde::{Deserialize, Serialize}; + +use crate::databases::mysql::Mysql; +use crate::databases::sqlite::Sqlite; +use crate::models::category::CategoryId; +use crate::models::info_hash::InfoHash; +use crate::models::response::TorrentsResponse; +use crate::models::torrent::{Metadata, TorrentListing}; +use crate::models::torrent_file::{DbTorrent, Torrent, TorrentFile}; +use crate::models::torrent_tag::{TagId, TorrentTag}; use crate::models::tracker_key::TrackerKey; -use crate::models::user::{User, UserAuthentication, UserCompact, UserProfile}; - -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] -pub enum DatabaseDriver { +use crate::models::user::{User, UserAuthentication, UserCompact, UserId, UserProfile}; +use crate::services::torrent::CanonicalInfoHashGroup; + +/// Database tables to be truncated when upgrading from v1.0.0 to v2.0.0. +/// They must be in the correct order to avoid foreign key errors. +pub const TABLES_TO_TRUNCATE: &[&str] = &[ + "torrust_torrent_announce_urls", + "torrust_torrent_files", + "torrust_torrent_info", + "torrust_torrent_tag_links", + "torrust_torrent_tracker_stats", + "torrust_torrents", + "torrust_tracker_keys", + "torrust_user_authentication", + "torrust_user_bans", + "torrust_user_invitation_uses", + "torrust_user_invitations", + "torrust_user_profiles", + "torrust_user_public_keys", + "torrust_users", + "torrust_categories", + "torrust_torrent_tags", +]; + +/// Database drivers. +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +pub enum Driver { Sqlite3, - Mysql + Mysql, } +/// Compact representation of torrent. 
#[derive(Debug, Serialize, sqlx::FromRow)] pub struct TorrentCompact { pub torrent_id: i64, pub info_hash: String, }
+/// Torrent category. #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] pub struct Category { pub category_id: i64, pub name: String, - pub num_torrents: i64 + pub num_torrents: i64, }
+/// Sorting options for torrents. #[derive(Clone, Copy, Debug, Deserialize)] pub enum Sorting { UploadedAsc, @@ -42,140 +72,243 @@ pub enum Sorting { SizeDesc, }
+/// Database errors. #[derive(Debug)] -pub enum DatabaseError { +pub enum Error { Error, + ErrorWithText(String), + UnrecognizedDatabaseDriver, // when the db path does not start with sqlite or mysql UsernameTaken, EmailTaken, UserNotFound, CategoryAlreadyExists, CategoryNotFound, + TagAlreadyExists, + TagNotFound, TorrentNotFound, TorrentAlreadyExists, // when uploading an already uploaded info_hash TorrentTitleAlreadyExists, + TorrentInfoHashNotFound, }
-pub async fn connect_database(db_driver: &DatabaseDriver, db_path: &str) -> Box<dyn Database> { - // match &db_path.chars().collect::<Vec<char>>() as &[char] { - // ['s', 'q', 'l', 'i', 't', 'e', ..] => { - // let db = SqliteDatabase::new(db_path).await; - // Ok(Box::new(db)) - // } - // ['m', 'y', 's', 'q', 'l', ..] => { - // let db = MysqlDatabase::new(db_path).await; - // Ok(Box::new(db)) - // } - // _ => { - // Err(()) - // } - // } - - match db_driver { - DatabaseDriver::Sqlite3 => { - let db = SqliteDatabase::new(db_path).await; - Box::new(db) - } - DatabaseDriver::Mysql => { - let db = MysqlDatabase::new(db_path).await; - Box::new(db) - }
+/// Get the database `Driver` from the connection string.
+///
+/// # Errors
+///
+/// This function will return an `Error::UnrecognizedDatabaseDriver` if it is unable to match the database type.
+pub fn get_driver(db_path: &str) -> Result<Driver, Error> { + match &db_path.chars().collect::<Vec<char>>() as &[char] { + ['s', 'q', 'l', 'i', 't', 'e', ..] => Ok(Driver::Sqlite3), + ['m', 'y', 's', 'q', 'l', ..] => Ok(Driver::Mysql), + _ => Err(Error::UnrecognizedDatabaseDriver), } }
+/// Connect to a database.
+///
+/// # Errors
+///
+/// This function will return an `Error::UnrecognizedDatabaseDriver` if it is unable to match the database type.
+pub async fn connect(db_path: &str) -> Result<Box<dyn Database>, Error> { + let db_driver = self::get_driver(db_path)?;
+
+ Ok(match db_driver { + self::Driver::Sqlite3 => Box::new(Sqlite::new(db_path).await), + self::Driver::Mysql => Box::new(Mysql::new(db_path).await), + })
+}
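An editor's sketch, not part of the patch, of how the two new free functions compose; the MySQL connection string is an arbitrary example:

```rust
// Editor's sketch: the prefix of the connection string selects the driver.
async fn open_database() -> Result<Box<dyn Database>, Error> {
    assert_eq!(get_driver("sqlite://data.db?mode=rwc")?, Driver::Sqlite3);
    assert_eq!(get_driver("mysql://user:password@localhost/torrust")?, Driver::Mysql);
    // Anything else yields `Error::UnrecognizedDatabaseDriver`.
    connect("sqlite://data.db?mode=rwc").await
}
```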
+
+/// Trait for database implementations. #[async_trait] pub trait Database: Sync + Send { - // return current database driver - fn get_database_driver(&self) -> DatabaseDriver; + /// Return current database driver. + fn get_database_driver(&self) -> Driver;
+
+ async fn new(db_path: &str) -> Self + where + Self: Sized;
- // add new user and get the newly inserted user_id - async fn insert_user_and_get_id(&self, username: &str, email: &str, password: &str) -> Result<i64, DatabaseError>; + /// Add new user and return the newly inserted `user_id`. + async fn insert_user_and_get_id(&self, username: &str, email: &str, password: &str) -> Result<i64, Error>;
- // get user profile by user_id - async fn get_user_from_id(&self, user_id: i64) -> Result<User, DatabaseError>; + /// Get `User` from `user_id`. + async fn get_user_from_id(&self, user_id: i64) -> Result<User, Error>;
- // get user authentication by user_id - async fn get_user_authentication_from_id(&self, user_id: i64) -> Result<UserAuthentication, DatabaseError>; + /// Get `UserAuthentication` from `user_id`. + async fn get_user_authentication_from_id(&self, user_id: UserId) -> Result<UserAuthentication, Error>;
- // get user profile by username - async fn get_user_profile_from_username(&self, username: &str) -> Result<UserProfile, DatabaseError>; + /// Get `UserProfile` from `username`. + async fn get_user_profile_from_username(&self, username: &str) -> Result<UserProfile, Error>;
- // get user compact by user_id - async fn get_user_compact_from_id(&self, user_id: i64) -> Result<UserCompact, DatabaseError>; + /// Get `UserCompact` from `user_id`. + async fn get_user_compact_from_id(&self, user_id: i64) -> Result<UserCompact, Error>;
- // todo: change to get all tracker keys of user, no matter if they are still valid - // get a user's tracker key + /// Get a user's `TrackerKey`. async fn get_user_tracker_key(&self, user_id: i64) -> Option<TrackerKey>;
- // count users - async fn count_users(&self) -> Result<i64, DatabaseError>; + /// Get total user count. + async fn count_users(&self) -> Result<i64, Error>;
+
+ /// Ban user with `user_id`, `reason` and `date_expiry`.
+ async fn ban_user(&self, user_id: i64, reason: &str, date_expiry: NaiveDateTime) -> Result<(), Error>;
+
+ /// Grant a user the administrator role.
+ async fn grant_admin_role(&self, user_id: i64) -> Result<(), Error>;
+
+ /// Verify a user's email with `user_id`.
+ async fn verify_email(&self, user_id: i64) -> Result<(), Error>;
+
+ /// Link a `TrackerKey` to a certain user with `user_id`.
+ async fn add_tracker_key(&self, user_id: i64, tracker_key: &TrackerKey) -> Result<(), Error>;
+
+ /// Delete user and all related user data with `user_id`.
+ async fn delete_user(&self, user_id: i64) -> Result<(), Error>;
+
+ /// Add a new category and return `category_id`.
+ async fn insert_category_and_get_id(&self, category_name: &str) -> Result<i64, Error>;
+
+ /// Get `Category` from `category_id`.
+ async fn get_category_from_id(&self, category_id: i64) -> Result<Category, Error>;
+
+ /// Get `Category` from `category_name`.
+ async fn get_category_from_name(&self, category_name: &str) -> Result<Category, Error>;
+
+ /// Get all categories as `Vec<Category>`.
+ async fn get_categories(&self) -> Result<Vec<Category>, Error>;
+
+ /// Delete category with `category_name`.
+ async fn delete_category(&self, category_name: &str) -> Result<(), Error>;
+
+ /// Get results of a torrent search in a paginated and sorted form as `TorrentsResponse` from `search`, `categories`, `tags`, `sort`, `offset` and `page_size`.
+ async fn get_torrents_search_sorted_paginated( + &self, + search: &Option<String>, + categories: &Option<Vec<String>>, + tags: &Option<Vec<String>>, + sort: &Sorting, + offset: u64, + page_size: u8, + ) -> Result<TorrentsResponse, Error>;
+
+ /// Add new torrent and return the newly inserted `torrent_id` with `original_info_hash`, `torrent`, `uploader_id` and `metadata`.
+ async fn insert_torrent_and_get_id( + &self, + original_info_hash: &InfoHash, + torrent: &Torrent, + uploader_id: UserId, + metadata: &Metadata, + ) -> Result<i64, Error>;
+
+ /// Get `Torrent` from `InfoHash`.
+ async fn get_torrent_from_info_hash(&self, info_hash: &InfoHash) -> Result<Torrent, Error> { + let db_torrent = self.get_torrent_info_from_info_hash(info_hash).await?;
+
+ let torrent_files = self.get_torrent_files_from_id(db_torrent.torrent_id).await?;
+
+ let torrent_announce_urls = self.get_torrent_announce_urls_from_id(db_torrent.torrent_id).await?;
+
+ Ok(Torrent::from_database(&db_torrent, &torrent_files, torrent_announce_urls))
+ }
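The default method above is the composition point: one call fans out into three row fetches and reassembles a complete `Torrent`. An editor's sketch of a caller, not part of the patch; the info-hash is an arbitrary placeholder:

```rust
// Editor's sketch: callers only need an info-hash.
use std::str::FromStr;

async fn load_torrent(db: &dyn Database) -> Result<Torrent, Error> {
    let info_hash = InfoHash::from_str("443c7602b4fde83d1154d6d9da48808418b181b6")
        .expect("40-char hex string should parse");
    db.get_torrent_from_info_hash(&info_hash).await
}
```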
+
+ /// Get `Torrent` from `torrent_id`.
+ async fn get_torrent_from_id(&self, torrent_id: i64) -> Result<Torrent, Error> { + let db_torrent = self.get_torrent_info_from_id(torrent_id).await?;
+
+ let torrent_files = self.get_torrent_files_from_id(torrent_id).await?;
+
+ let torrent_announce_urls = self.get_torrent_announce_urls_from_id(torrent_id).await?;
+
+ Ok(Torrent::from_database(&db_torrent, &torrent_files, torrent_announce_urls))
+ }
+
+ /// It returns the list of all infohashes producing the same canonical
+ /// infohash.
+ ///
+ /// If the original infohash was unknown, it returns the canonical infohash.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if there was a problem with the database.
+ async fn get_torrent_canonical_info_hash_group(&self, canonical: &InfoHash) -> Result<CanonicalInfoHashGroup, Error>;
+
+ /// It returns the [`CanonicalInfoHashGroup`] the info-hash belongs to, if
+ /// the info-hash belongs to a group. Otherwise, returns `None`.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if there was a problem with the database.
+ async fn find_canonical_info_hash_for(&self, info_hash: &InfoHash) -> Result<Option<InfoHash>, Error>;
- // todo: make DateTime struct for the date_expiry - // ban user - async fn ban_user(&self, user_id: i64, reason: &str, date_expiry: NaiveDateTime) -> Result<(), DatabaseError>; + /// It adds a new info-hash to the canonical info-hash group.
+ ///
+ /// # Errors
+ ///
+ /// Returns an error if there was a problem with the database.
+ async fn add_info_hash_to_canonical_info_hash_group(&self, original: &InfoHash, canonical: &InfoHash) -> Result<(), Error>;
- // give a user administrator rights - async fn grant_admin_role(&self, user_id: i64) -> Result<(), DatabaseError>; + /// Get torrent's info as `DbTorrent` from `torrent_id`. + async fn get_torrent_info_from_id(&self, torrent_id: i64) -> Result<DbTorrent, Error>;
- // verify email - async fn verify_email(&self, user_id: i64) -> Result<(), DatabaseError>; + /// Get torrent's info as `DbTorrent` from torrent `InfoHash`. + async fn get_torrent_info_from_info_hash(&self, info_hash: &InfoHash) -> Result<DbTorrent, Error>;
- // create a new tracker key for a certain user - async fn add_tracker_key(&self, user_id: i64, tracker_key: &TrackerKey) -> Result<(), DatabaseError>; + /// Get all the torrent's files as `Vec<TorrentFile>` from `torrent_id`. + async fn get_torrent_files_from_id(&self, torrent_id: i64) -> Result<Vec<TorrentFile>, Error>;
- // delete user - async fn delete_user(&self, user_id: i64) -> Result<(), DatabaseError>; + /// Get all the torrent's announce urls as `Vec<Vec<String>>` from `torrent_id`. + async fn get_torrent_announce_urls_from_id(&self, torrent_id: i64) -> Result<Vec<Vec<String>>, Error>;
- // add new category - async fn insert_category_and_get_id(&self, category_name: &str) -> Result<i64, DatabaseError>; + /// Get `TorrentListing` from `torrent_id`. + async fn get_torrent_listing_from_id(&self, torrent_id: i64) -> Result<TorrentListing, Error>;
- // get category by id - async fn get_category_from_id(&self, id: i64) -> Result<Category, DatabaseError>; + /// Get `TorrentListing` from `InfoHash`. + async fn get_torrent_listing_from_info_hash(&self, info_hash: &InfoHash) -> Result<TorrentListing, Error>;
- // get category by name - async fn get_category_from_name(&self, category: &str) -> Result<Category, DatabaseError>; + /// Get all torrents as `Vec<TorrentCompact>`. + async fn get_all_torrents_compact(&self) -> Result<Vec<TorrentCompact>, Error>;
- // get all categories - async fn get_categories(&self) -> Result<Vec<Category>, DatabaseError>; + /// Update a torrent's title with `torrent_id` and `title`.
+ async fn update_torrent_title(&self, torrent_id: i64, title: &str) -> Result<(), Error>; - // delete category - async fn delete_category(&self, category_name: &str) -> Result<(), DatabaseError>; + /// Update a torrent's description with `torrent_id` and `description`. + async fn update_torrent_description(&self, torrent_id: i64, description: &str) -> Result<(), Error>; - // get results of a torrent search in a paginated and sorted form - async fn get_torrents_search_sorted_paginated(&self, search: &Option, categories: &Option>, sort: &Sorting, offset: u64, page_size: u8) -> Result; + /// Update a torrent's category with `torrent_id` and `category_id`. + async fn update_torrent_category(&self, torrent_id: i64, category_id: CategoryId) -> Result<(), Error>; - // add new torrent and get the newly inserted torrent_id - async fn insert_torrent_and_get_id(&self, torrent: &Torrent, uploader_id: i64, category_id: i64, title: &str, description: &str) -> Result; + /// Add a new tag. + async fn insert_tag_and_get_id(&self, name: &str) -> Result; - // get torrent by id - async fn get_torrent_from_id(&self, torrent_id: i64) -> Result; + /// Delete a tag. + async fn delete_tag(&self, tag_id: TagId) -> Result<(), Error>; - // get torrent info by id - async fn get_torrent_info_from_id(&self, torrent_id: i64) -> Result; + /// Add a tag to torrent. + async fn add_torrent_tag_link(&self, torrent_id: i64, tag_id: TagId) -> Result<(), Error>; - // get torrent files by id - async fn get_torrent_files_from_id(&self, torrent_id: i64) -> Result, DatabaseError>; + /// Add multiple tags to a torrent at once. + async fn add_torrent_tag_links(&self, torrent_id: i64, tag_ids: &[TagId]) -> Result<(), Error>; - // get torrent announce urls by id - async fn get_torrent_announce_urls_from_id(&self, torrent_id: i64) -> Result>, DatabaseError>; + /// Remove a tag from torrent. + async fn delete_torrent_tag_link(&self, torrent_id: i64, tag_id: TagId) -> Result<(), Error>; - // get torrent listing by id - async fn get_torrent_listing_from_id(&self, torrent_id: i64) -> Result; + /// Remove all tags from torrent. + async fn delete_all_torrent_tag_links(&self, torrent_id: i64) -> Result<(), Error>; - // get all torrents (torrent_id + info_hash) - async fn get_all_torrents_compact(&self) -> Result, DatabaseError>; + /// Get tag from name. + async fn get_tag_from_name(&self, name: &str) -> Result; - // update a torrent's title - async fn update_torrent_title(&self, torrent_id: i64, title: &str) -> Result<(), DatabaseError>; + /// Get all tags as `Vec`. + async fn get_tags(&self) -> Result, Error>; - // update a torrent's description - async fn update_torrent_description(&self, torrent_id: i64, description: &str) -> Result<(), DatabaseError>; + /// Get tags for `torrent_id`. + async fn get_tags_for_torrent_id(&self, torrent_id: i64) -> Result, Error>; - // update the seeders and leechers info for a particular torrent - async fn update_tracker_info(&self, torrent_id: i64, tracker_url: &str, seeders: i64, leechers: i64) -> Result<(), DatabaseError>; + /// Update the seeders and leechers info for a torrent with `torrent_id`, `tracker_url`, `seeders` and `leechers`. + async fn update_tracker_info(&self, torrent_id: i64, tracker_url: &str, seeders: i64, leechers: i64) -> Result<(), Error>; - // delete a torrent - async fn delete_torrent(&self, torrent_id: i64) -> Result<(), DatabaseError>; + /// Delete a torrent with `torrent_id`. 
+ async fn delete_torrent(&self, torrent_id: i64) -> Result<(), Error>; - // DELETES ALL DATABASE ROWS, ONLY CALL THIS IF YOU KNOW WHAT YOU'RE DOING! - async fn delete_all_database_rows(&self) -> Result<(), DatabaseError>; + /// DELETES ALL DATABASE ROWS, ONLY CALL THIS IF YOU KNOW WHAT YOU'RE DOING! + async fn delete_all_database_rows(&self) -> Result<(), Error>; } diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 9340e821..169d99f4 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -1,3 +1,3 @@ pub mod database; -pub mod sqlite; pub mod mysql; +pub mod sqlite; diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index c1fd6b6b..0c5175a6 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -1,25 +1,44 @@ -use sqlx::{Acquire, MySqlPool, query, query_as}; +use std::str::FromStr; +use std::time::Duration; + use async_trait::async_trait; -use chrono::{NaiveDateTime}; -use sqlx::mysql::MySqlPoolOptions; - -use crate::models::user::{User, UserAuthentication, UserCompact, UserProfile}; -use crate::models::torrent::TorrentListing; -use crate::utils::time::current_time; -use crate::models::tracker_key::{TrackerKey}; -use crate::databases::database::{Category, Database, DatabaseDriver, DatabaseError, Sorting, TorrentCompact}; -use crate::models::response::{TorrentsResponse}; -use crate::models::torrent_file::{DbTorrentInfo, Torrent, DbTorrentFile, DbTorrentAnnounceUrl, TorrentFile}; -use crate::utils::hex::bytes_to_hex; - -pub struct MysqlDatabase { - pub pool: MySqlPool +use chrono::NaiveDateTime; +use sqlx::mysql::{MySqlConnectOptions, MySqlPoolOptions}; +use sqlx::{query, query_as, Acquire, ConnectOptions, MySqlPool}; + +use super::database::TABLES_TO_TRUNCATE; +use crate::databases::database; +use crate::databases::database::{Category, Database, Driver, Sorting, TorrentCompact}; +use crate::models::category::CategoryId; +use crate::models::info_hash::InfoHash; +use crate::models::response::TorrentsResponse; +use crate::models::torrent::{Metadata, TorrentListing}; +use crate::models::torrent_file::{DbTorrent, DbTorrentAnnounceUrl, DbTorrentFile, Torrent, TorrentFile}; +use crate::models::torrent_tag::{TagId, TorrentTag}; +use crate::models::tracker_key::TrackerKey; +use crate::models::user::{User, UserAuthentication, UserCompact, UserId, UserProfile}; +use crate::services::torrent::{CanonicalInfoHashGroup, DbTorrentInfoHash}; +use crate::utils::clock; +use crate::utils::hex::from_bytes; + +pub struct Mysql { + pub pool: MySqlPool, } -impl MysqlDatabase { - pub async fn new(database_url: &str) -> Self { +#[async_trait] +impl Database for Mysql { + fn get_database_driver(&self) -> Driver { + Driver::Mysql + } + + async fn new(database_url: &str) -> Self { + let connection_options = MySqlConnectOptions::from_str(database_url) + .expect("Unable to create connection options.") + .log_statements(log::LevelFilter::Error) + .log_slow_statements(log::LevelFilter::Warn, Duration::from_secs(1)); + let db = MySqlPoolOptions::new() - .connect(database_url) + .connect_with(connection_options) .await .expect("Unable to create database pool."); @@ -28,49 +47,35 @@ impl MysqlDatabase { .await .expect("Could not run database migrations."); - Self { - pool: db - } - } -} - -#[async_trait] -impl Database for MysqlDatabase { - fn get_database_driver(&self) -> DatabaseDriver { - DatabaseDriver::Mysql + Self { pool: db } } - async fn insert_user_and_get_id(&self, username: &str, email: &str, password_hash: &str) -> Result { - + async fn insert_user_and_get_id(&self, 
username: &str, email: &str, password_hash: &str) -> Result { // open pool connection - let mut conn = self.pool.acquire() - .await - .map_err(|_| DatabaseError::Error)?; + let mut conn = self.pool.acquire().await.map_err(|_| database::Error::Error)?; // start db transaction - let mut tx = conn.begin() - .await - .map_err(|_| DatabaseError::Error)?; + let mut tx = conn.begin().await.map_err(|_| database::Error::Error)?; // create the user account and get the user id let user_id = query("INSERT INTO torrust_users (date_registered) VALUES (UTC_TIMESTAMP())") - .execute(&mut tx) + .execute(&mut *tx) .await .map(|v| v.last_insert_id()) - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error)?; // add password hash for account let insert_user_auth_result = query("INSERT INTO torrust_user_authentication (user_id, password_hash) VALUES (?, ?)") .bind(user_id) .bind(password_hash) - .execute(&mut tx) + .execute(&mut *tx) .await - .map_err(|_| DatabaseError::Error); + .map_err(|_| database::Error::Error); // rollback transaction on error if let Err(e) = insert_user_auth_result { - let _ = tx.rollback().await; - return Err(e) + drop(tx.rollback().await); + return Err(e); } // add account profile details @@ -78,71 +83,76 @@ impl Database for MysqlDatabase { .bind(user_id) .bind(username) .bind(email) - .execute(&mut tx) + .execute(&mut *tx) .await .map_err(|e| match e { sqlx::Error::Database(err) => { if err.message().contains("username") { - DatabaseError::UsernameTaken + database::Error::UsernameTaken } else if err.message().contains("email") { - DatabaseError::EmailTaken + database::Error::EmailTaken } else { - DatabaseError::Error + database::Error::Error } } - _ => DatabaseError::Error + _ => database::Error::Error }); // commit or rollback transaction and return user_id on success match insert_user_profile_result { Ok(_) => { - let _ = tx.commit().await; - Ok(user_id as i64) + drop(tx.commit().await); + Ok(i64::overflowing_add_unsigned(0, user_id).0) } Err(e) => { - let _ = tx.rollback().await; + drop(tx.rollback().await); Err(e) } } } - async fn get_user_from_id(&self, user_id: i64) -> Result { + async fn get_user_from_id(&self, user_id: i64) -> Result { query_as::<_, User>("SELECT * FROM torrust_users WHERE user_id = ?") .bind(user_id) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::UserNotFound) + .map_err(|_| database::Error::UserNotFound) } - async fn get_user_authentication_from_id(&self, user_id: i64) -> Result { + async fn get_user_authentication_from_id(&self, user_id: UserId) -> Result { query_as::<_, UserAuthentication>("SELECT * FROM torrust_user_authentication WHERE user_id = ?") .bind(user_id) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::UserNotFound) + .map_err(|_| database::Error::UserNotFound) } - async fn get_user_profile_from_username(&self, username: &str) -> Result { + async fn get_user_profile_from_username(&self, username: &str) -> Result { query_as::<_, UserProfile>(r#"SELECT user_id, username, COALESCE(email, "") as email, email_verified, COALESCE(bio, "") as bio, COALESCE(avatar, "") as avatar FROM torrust_user_profiles WHERE username = ?"#) .bind(username) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::UserNotFound) + .map_err(|_| database::Error::UserNotFound) } - async fn get_user_compact_from_id(&self, user_id: i64) -> Result { + async fn get_user_compact_from_id(&self, user_id: i64) -> Result { query_as::<_, UserCompact>("SELECT tu.user_id, tp.username, tu.administrator FROM torrust_users tu INNER JOIN 
torrust_user_profiles tp ON tu.user_id = tp.user_id WHERE tu.user_id = ?") .bind(user_id) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::UserNotFound) + .map_err(|_| database::Error::UserNotFound) }
+ /// Gets User Tracker Key
+ ///
+ /// # Panics
+ ///
+ /// Will panic if the input time in `u64` seconds overflows the `i64` type
+ /// (this will naturally happen in 292.5 billion years). async fn get_user_tracker_key(&self, user_id: i64) -> Option<TrackerKey> { const HOUR_IN_SECONDS: i64 = 3600;
- // casting current_time() to i64 will overflow in the year 2262 - let current_time_plus_hour = (current_time() as i64) + HOUR_IN_SECONDS; + let current_time_plus_hour = i64::try_from(clock::now()).unwrap().saturating_add(HOUR_IN_SECONDS);
// get tracker key that is valid for at least one hour from now query_as::<_, TrackerKey>("SELECT tracker_key AS 'key', date_expiry AS valid_until FROM torrust_tracker_keys WHERE user_id = ? AND date_expiry > ? ORDER BY date_expiry DESC") @@ -153,15 +163,15 @@ impl Database for MysqlDatabase { .ok() }
- async fn count_users(&self) -> Result<i64, DatabaseError> { + async fn count_users(&self) -> Result<i64, database::Error> { query_as("SELECT COUNT(*) FROM torrust_users") .fetch_one(&self.pool) .await .map(|(v,)| v) - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) }
- async fn ban_user(&self, user_id: i64, reason: &str, date_expiry: NaiveDateTime) -> Result<(), DatabaseError> { + async fn ban_user(&self, user_id: i64, reason: &str, date_expiry: NaiveDateTime) -> Result<(), database::Error> { // date needs to be in ISO 8601 format let date_expiry_string = date_expiry.format("%Y-%m-%d %H:%M:%S").to_string(); @@ -172,36 +182,40 @@ impl Database for MysqlDatabase { .execute(&self.pool) .await .map(|_| ()) - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) }
- async fn grant_admin_role(&self, user_id: i64) -> Result<(), DatabaseError> { + async fn grant_admin_role(&self, user_id: i64) -> Result<(), database::Error> { query("UPDATE torrust_users SET administrator = TRUE WHERE user_id = ?") .bind(user_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::UserNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::UserNotFound) + } }) }
- async fn verify_email(&self, user_id: i64) -> Result<(), DatabaseError> { + async fn verify_email(&self, user_id: i64) -> Result<(), database::Error> { query("UPDATE torrust_user_profiles SET email_verified = TRUE WHERE user_id = ?") .bind(user_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::UserNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::UserNotFound) + } }) }
- async fn add_tracker_key(&self, user_id: i64, tracker_key: &TrackerKey) -> Result<(), DatabaseError> { + async fn add_tracker_key(&self, user_id: i64, tracker_key: &TrackerKey) -> Result<(), database::Error> { let key = tracker_key.key.clone();
query("INSERT INTO torrust_tracker_keys (user_id, tracker_key, date_expiry) VALUES (?, ?, ?)") @@ -211,81 +225,95 @@ impl Database for MysqlDatabase { .execute(&self.pool) .await .map(|_| ()) - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) }
- async fn delete_user(&self, user_id: i64) -> Result<(), DatabaseError> {
+ async fn delete_user(&self, user_id: i64) -> Result<(), database::Error> { query("DELETE FROM torrust_users WHERE user_id = ?") .bind(user_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::UserNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::UserNotFound) + } }) } - async fn insert_category_and_get_id(&self, category_name: &str) -> Result { + async fn insert_category_and_get_id(&self, category_name: &str) -> Result { query("INSERT INTO torrust_categories (name) VALUES (?)") .bind(category_name) .execute(&self.pool) .await - .map(|v| v.last_insert_id() as i64) + .map(|v| i64::try_from(v.last_insert_id()).expect("last ID is larger than i64")) .map_err(|e| match e { sqlx::Error::Database(err) => { - if err.message().contains("UNIQUE") { - DatabaseError::CategoryAlreadyExists + log::error!("DB error: {:?}", err); + if err.message().contains("Duplicate entry") && err.message().contains("name") { + database::Error::CategoryAlreadyExists } else { - DatabaseError::Error + database::Error::Error } - }, - _ => DatabaseError::Error + } + _ => database::Error::Error, }) } - async fn get_category_from_id(&self, category_id: i64) -> Result { + async fn get_category_from_id(&self, category_id: i64) -> Result { query_as::<_, Category>("SELECT category_id, name, (SELECT COUNT(*) FROM torrust_torrents WHERE torrust_torrents.category_id = torrust_categories.category_id) AS num_torrents FROM torrust_categories WHERE category_id = ?") .bind(category_id) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::CategoryNotFound) + .map_err(|_| database::Error::CategoryNotFound) } - async fn get_category_from_name(&self, category_name: &str) -> Result { + async fn get_category_from_name(&self, category_name: &str) -> Result { query_as::<_, Category>("SELECT category_id, name, (SELECT COUNT(*) FROM torrust_torrents WHERE torrust_torrents.category_id = torrust_categories.category_id) AS num_torrents FROM torrust_categories WHERE name = ?") .bind(category_name) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::CategoryNotFound) + .map_err(|_| database::Error::CategoryNotFound) } - async fn get_categories(&self) -> Result, DatabaseError> { + async fn get_categories(&self) -> Result, database::Error> { query_as::<_, Category>("SELECT tc.category_id, tc.name, COUNT(tt.category_id) as num_torrents FROM torrust_categories tc LEFT JOIN torrust_torrents tt on tc.category_id = tt.category_id GROUP BY tc.name") .fetch_all(&self.pool) .await - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) } - async fn delete_category(&self, category_name: &str) -> Result<(), DatabaseError> { + async fn delete_category(&self, category_name: &str) -> Result<(), database::Error> { query("DELETE FROM torrust_categories WHERE name = ?") .bind(category_name) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::CategoryNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::CategoryNotFound) + } }) } - // TODO: refactor this - async fn get_torrents_search_sorted_paginated(&self, search: &Option, categories: &Option>, sort: &Sorting, offset: u64, page_size: u8) -> Result { + // todo: refactor this + #[allow(clippy::too_many_lines)] + async fn 
get_torrents_search_sorted_paginated( + &self, + search: &Option, + categories: &Option>, + tags: &Option>, + sort: &Sorting, + offset: u64, + limit: u8, + ) -> Result { let title = match search { None => "%".to_string(), - Some(v) => format!("%{}%", v) + Some(v) => format!("%{v}%"), }; let sort_query: String = match sort { @@ -304,127 +332,202 @@ impl Database for MysqlDatabase { let category_filter_query = if let Some(c) = categories { let mut i = 0; let mut category_filters = String::new(); - for category in c.iter() { + for category in c { // don't take user input in the db query if let Ok(sanitized_category) = self.get_category_from_name(category).await { let mut str = format!("tc.name = '{}'", sanitized_category.name); - if i > 0 { str = format!(" OR {}", str); } + if i > 0 { + str = format!(" OR {str}"); + } category_filters.push_str(&str); i += 1; } } - if category_filters.len() > 0 { - format!("INNER JOIN torrust_categories tc ON tt.category_id = tc.category_id AND ({}) ", category_filters) + if category_filters.is_empty() { + String::new() } else { + format!("INNER JOIN torrust_categories tc ON tt.category_id = tc.category_id AND ({category_filters}) ") + } + } else { + String::new() + }; + + let tag_filter_query = if let Some(t) = tags { + let mut i = 0; + let mut tag_filters = String::new(); + for tag in t { + // don't take user input in the db query + if let Ok(sanitized_tag) = self.get_tag_from_name(tag).await { + let mut str = format!("tl.tag_id = '{}'", sanitized_tag.tag_id); + if i > 0 { + str = format!(" OR {str}"); + } + tag_filters.push_str(&str); + i += 1; + } + } + if tag_filters.is_empty() { String::new() + } else { + format!("INNER JOIN torrust_torrent_tag_links tl ON tt.torrent_id = tl.torrent_id AND ({tag_filters}) ") } } else { String::new() }; let mut query_string = format!( - "SELECT tt.torrent_id, tp.username AS uploader, tt.info_hash, ti.title, ti.description, tt.category_id, DATE_FORMAT(tt.date_uploaded, '%Y-%m-%d %H:%i:%s') AS date_uploaded, tt.size AS file_size, + "SELECT + tt.torrent_id, + tp.username AS uploader, + tt.info_hash, + ti.title, + ti.description, + tt.category_id, + DATE_FORMAT(tt.date_uploaded, '%Y-%m-%d %H:%i:%s') AS date_uploaded, + tt.size AS file_size, + tt.name, + tt.comment, CAST(COALESCE(sum(ts.seeders),0) as signed) as seeders, CAST(COALESCE(sum(ts.leechers),0) as signed) as leechers - FROM torrust_torrents tt {} + FROM torrust_torrents tt + {category_filter_query} + {tag_filter_query} INNER JOIN torrust_user_profiles tp ON tt.uploader_id = tp.user_id INNER JOIN torrust_torrent_info ti ON tt.torrent_id = ti.torrent_id LEFT JOIN torrust_torrent_tracker_stats ts ON tt.torrent_id = ts.torrent_id WHERE title LIKE ? 
- GROUP BY torrent_id", - category_filter_query + GROUP BY tt.torrent_id" ); - let count_query = format!("SELECT COUNT(*) as count FROM ({}) AS count_table", query_string); + let count_query = format!("SELECT COUNT(*) as count FROM ({query_string}) AS count_table"); - let count_result: Result = query_as(&count_query) + let count_result: Result = query_as(&count_query) .bind(title.clone()) .fetch_one(&self.pool) .await .map(|(v,)| v) - .map_err(|_| DatabaseError::Error); + .map_err(|_| database::Error::Error); let count = count_result?; - query_string = format!("{} ORDER BY {} LIMIT ?, ?", query_string, sort_query); + query_string = format!("{query_string} ORDER BY {sort_query} LIMIT ?, ?"); let res: Vec = sqlx::query_as::<_, TorrentListing>(&query_string) .bind(title) - .bind(offset as i64) - .bind(page_size) + .bind(i64::saturating_add_unsigned(0, offset)) + .bind(limit) .fetch_all(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error)?; Ok(TorrentsResponse { - total: count as u32, - results: res + total: u32::try_from(count).expect("variable `count` is larger than u32"), + results: res, }) } - async fn insert_torrent_and_get_id(&self, torrent: &Torrent, uploader_id: i64, category_id: i64, title: &str, description: &str) -> Result { - let info_hash = torrent.info_hash(); + #[allow(clippy::too_many_lines)] + async fn insert_torrent_and_get_id( + &self, + original_info_hash: &InfoHash, + torrent: &Torrent, + uploader_id: UserId, + metadata: &Metadata, + ) -> Result { + let info_hash = torrent.canonical_info_hash_hex(); + let canonical_info_hash = torrent.canonical_info_hash(); // open pool connection - let mut conn = self.pool.acquire() - .await - .map_err(|_| DatabaseError::Error)?; + let mut conn = self.pool.acquire().await.map_err(|_| database::Error::Error)?; // start db transaction - let mut tx = conn.begin() - .await - .map_err(|_| DatabaseError::Error)?; + let mut tx = conn.begin().await.map_err(|_| database::Error::Error)?; // torrent file can only hold a pieces key or a root hash key: http://www.bittorrent.org/beps/bep_0030.html let (pieces, root_hash): (String, bool) = if let Some(pieces) = &torrent.info.pieces { - (bytes_to_hex(pieces.as_ref()), false) + (from_bytes(pieces.as_ref()), false) } else { - let root_hash = torrent.info.root_hash.as_ref().ok_or(DatabaseError::Error)?; + let root_hash = torrent.info.root_hash.as_ref().ok_or(database::Error::Error)?; (root_hash.to_string(), true) }; - let private = torrent.info.private.unwrap_or(0); - // add torrent - let torrent_id = query("INSERT INTO torrust_torrents (uploader_id, category_id, info_hash, size, name, pieces, piece_length, private, root_hash, date_uploaded) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, UTC_TIMESTAMP())") - .bind(uploader_id) - .bind(category_id) - .bind(info_hash) - .bind(torrent.file_size()) - .bind(torrent.info.name.to_string()) - .bind(pieces) - .bind(torrent.info.piece_length) - .bind(private) - .bind(root_hash) - .execute(&self.pool) - .await - .map(|v| v.last_insert_id() as i64) - .map_err(|e| match e { - sqlx::Error::Database(err) => { - if err.message().contains("info_hash") { - DatabaseError::TorrentAlreadyExists - } else if err.message().contains("title") { - DatabaseError::TorrentTitleAlreadyExists - } else { - DatabaseError::Error - } + let torrent_id = query( + "INSERT INTO torrust_torrents ( + uploader_id, + category_id, + info_hash, + size, + name, + pieces, + piece_length, + private, + root_hash, + `source`, + comment, + date_uploaded + ) VALUES (?, ?, ?, ?, 
?, ?, ?, ?, ?, ?, ?, UTC_TIMESTAMP())", + ) + .bind(uploader_id) + .bind(metadata.category_id) + .bind(info_hash.to_lowercase()) + .bind(torrent.file_size()) + .bind(torrent.info.name.to_string()) + .bind(pieces) + .bind(torrent.info.piece_length) + .bind(torrent.info.private) + .bind(root_hash) + .bind(torrent.info.source.clone()) + .bind(torrent.comment.clone()) + .execute(&mut *tx) + .await + .map(|v| i64::try_from(v.last_insert_id()).expect("last ID is larger than i64")) + .map_err(|e| match e { + sqlx::Error::Database(err) => { + log::error!("DB error: {:?}", err); + if err.message().contains("Duplicate entry") && err.message().contains("info_hash") { + database::Error::TorrentAlreadyExists + } else { + database::Error::Error } - _ => DatabaseError::Error - })?; + } + _ => database::Error::Error, + })?; + + // add torrent canonical infohash + + let insert_info_hash_result = + query("INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) VALUES (?, ?, ?)") + .bind(original_info_hash.to_hex_string()) + .bind(canonical_info_hash.to_hex_string()) + .bind(true) + .execute(&mut *tx) + .await + .map(|_| ()) + .map_err(|err| { + log::error!("DB error: {:?}", err); + database::Error::Error + }); + + // rollback transaction on error + if let Err(e) = insert_info_hash_result { + drop(tx.rollback().await); + return Err(e); + } let insert_torrent_files_result = if let Some(length) = torrent.info.length { query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, length) VALUES (?, ?, ?)") .bind(torrent.info.md5sum.clone()) .bind(torrent_id) .bind(length) - .execute(&mut tx) + .execute(&mut *tx) .await .map(|_| ()) - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) } else { let files = torrent.info.files.as_ref().unwrap(); - for file in files.iter() { + for file in files { let path = file.path.join("/"); let _ = query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, length, path) VALUES (?, ?, ?, ?)") @@ -432,9 +535,9 @@ impl Database for MysqlDatabase { .bind(torrent_id) .bind(file.length) .bind(path) - .execute(&mut tx) + .execute(&mut *tx) .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error)?; } Ok(()) @@ -442,127 +545,215 @@ impl Database for MysqlDatabase { // rollback transaction on error if let Err(e) = insert_torrent_files_result { - let _ = tx.rollback().await; - return Err(e) + drop(tx.rollback().await); + return Err(e); } - let insert_torrent_announce_urls_result: Result<(), DatabaseError> = if let Some(tracker_url) = &torrent.announce { - query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") - .bind(torrent_id) - .bind(tracker_url) - .execute(&mut tx) - .await - .map(|_| ()) - .map_err(|_| DatabaseError::Error) - } else { + let insert_torrent_announce_urls_result: Result<(), database::Error> = if let Some(announce_urls) = &torrent.announce_list + { // flatten the nested vec (this will however remove the) - let announce_urls = torrent.announce_list.clone().unwrap().into_iter().flatten().collect::>(); + let announce_urls = announce_urls.iter().flatten().collect::>(); - for tracker_url in announce_urls.iter() { - let _ = query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") + for tracker_url in &announce_urls { + let () = query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") .bind(torrent_id) .bind(tracker_url) - .execute(&mut tx) + .execute(&mut *tx) .await .map(|_| ()) - 
.map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error)?; } Ok(()) + } else { + let tracker_url = torrent.announce.as_ref().unwrap(); + + query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") + .bind(torrent_id) + .bind(tracker_url) + .execute(&mut *tx) + .await + .map(|_| ()) + .map_err(|_| database::Error::Error) }; // rollback transaction on error if let Err(e) = insert_torrent_announce_urls_result { - let _ = tx.rollback().await; - return Err(e) + drop(tx.rollback().await); + return Err(e); } - let insert_torrent_info_result = query(r#"INSERT INTO torrust_torrent_info (torrent_id, title, description) VALUES (?, ?, NULLIF(?, ""))"#) - .bind(torrent_id) - .bind(title) - .bind(description) - .execute(&mut tx) - .await - .map_err(|e| match e { - sqlx::Error::Database(err) => { - if err.message().contains("info_hash") { - DatabaseError::TorrentAlreadyExists - } else if err.message().contains("title") { - DatabaseError::TorrentTitleAlreadyExists - } else { - DatabaseError::Error + // Insert tags + + for tag_id in &metadata.tags { + let insert_torrent_tag_result = query("INSERT INTO torrust_torrent_tag_links (torrent_id, tag_id) VALUES (?, ?)") + .bind(torrent_id) + .bind(tag_id) + .execute(&mut *tx) + .await + .map_err(|err| database::Error::ErrorWithText(err.to_string())); + + // rollback transaction on error + if let Err(e) = insert_torrent_tag_result { + drop(tx.rollback().await); + return Err(e); + } + } + + let insert_torrent_info_result = + query(r#"INSERT INTO torrust_torrent_info (torrent_id, title, description) VALUES (?, ?, NULLIF(?, ""))"#) + .bind(torrent_id) + .bind(metadata.title.clone()) + .bind(metadata.description.clone()) + .execute(&mut *tx) + .await + .map_err(|e| match e { + sqlx::Error::Database(err) => { + log::error!("DB error: {:?}", err); + if err.message().contains("Duplicate entry") && err.message().contains("title") { + database::Error::TorrentTitleAlreadyExists + } else { + database::Error::Error + } } - } - _ => DatabaseError::Error - }); + _ => database::Error::Error, + }); // commit or rollback transaction and return user_id on success match insert_torrent_info_result { Ok(_) => { - let _ = tx.commit().await; - Ok(torrent_id as i64) + drop(tx.commit().await); + Ok(torrent_id) } Err(e) => { - let _ = tx.rollback().await; + drop(tx.rollback().await); Err(e) } } } - async fn get_torrent_from_id(&self, torrent_id: i64) -> Result { - let torrent_info = self.get_torrent_info_from_id(torrent_id).await?; + async fn get_torrent_canonical_info_hash_group( + &self, + canonical: &InfoHash, + ) -> Result { + let db_info_hashes = query_as::<_, DbTorrentInfoHash>( + "SELECT info_hash, canonical_info_hash, original_is_known FROM torrust_torrent_info_hashes WHERE canonical_info_hash = ?", + ) + .bind(canonical.to_hex_string()) + .fetch_all(&self.pool) + .await + .map_err(|err| database::Error::ErrorWithText(err.to_string()))?; + + let info_hashes: Vec = db_info_hashes + .into_iter() + .map(|db_info_hash| { + InfoHash::from_str(&db_info_hash.info_hash) + .unwrap_or_else(|_| panic!("Invalid info-hash in database: {}", db_info_hash.info_hash)) + }) + .collect(); - let torrent_files = self.get_torrent_files_from_id(torrent_id).await?; + Ok(CanonicalInfoHashGroup { + canonical_info_hash: *canonical, + original_info_hashes: info_hashes, + }) + } - let torrent_announce_urls = self.get_torrent_announce_urls_from_id(torrent_id).await?; + async fn find_canonical_info_hash_for(&self, info_hash: &InfoHash) -> Result, 
database::Error> { + let maybe_db_torrent_info_hash = query_as::<_, DbTorrentInfoHash>( + "SELECT info_hash, canonical_info_hash, original_is_known FROM torrust_torrent_info_hashes WHERE info_hash = ?", + ) + .bind(info_hash.to_hex_string()) + .fetch_optional(&self.pool) + .await + .map_err(|err| database::Error::ErrorWithText(err.to_string()))?; + + match maybe_db_torrent_info_hash { + Some(db_torrent_info_hash) => Ok(Some( + InfoHash::from_str(&db_torrent_info_hash.canonical_info_hash) + .unwrap_or_else(|_| panic!("Invalid info-hash in database: {}", db_torrent_info_hash.canonical_info_hash)), + )), + None => Ok(None), + } + } - Ok(Torrent::from_db_info_files_and_announce_urls(torrent_info, torrent_files, torrent_announce_urls)) + async fn add_info_hash_to_canonical_info_hash_group( + &self, + info_hash: &InfoHash, + canonical: &InfoHash, + ) -> Result<(), database::Error> { + query("INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) VALUES (?, ?, ?)") + .bind(info_hash.to_hex_string()) + .bind(canonical.to_hex_string()) + .bind(true) + .execute(&self.pool) + .await + .map(|_| ()) + .map_err(|err| database::Error::ErrorWithText(err.to_string())) } - async fn get_torrent_info_from_id(&self, torrent_id: i64) -> Result { - query_as::<_, DbTorrentInfo>( - "SELECT name, pieces, piece_length, private, root_hash FROM torrust_torrents WHERE torrent_id = ?" - ) + async fn get_torrent_info_from_id(&self, torrent_id: i64) -> Result { + query_as::<_, DbTorrent>("SELECT * FROM torrust_torrents WHERE torrent_id = ?") .bind(torrent_id) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::TorrentNotFound) + .map_err(|_| database::Error::TorrentNotFound) } - async fn get_torrent_files_from_id(&self, torrent_id: i64) -> Result, DatabaseError> { - let db_torrent_files = query_as::<_, DbTorrentFile>( - "SELECT md5sum, length, path FROM torrust_torrent_files WHERE torrent_id = ?" - ) - .bind(torrent_id) - .fetch_all(&self.pool) + async fn get_torrent_info_from_info_hash(&self, info_hash: &InfoHash) -> Result { + query_as::<_, DbTorrent>("SELECT * FROM torrust_torrents WHERE info_hash = ?") + .bind(info_hash.to_hex_string().to_lowercase()) + .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::TorrentNotFound)?; + .map_err(|_| database::Error::TorrentNotFound) + } - let torrent_files: Vec = db_torrent_files.into_iter().map(|tf| { - TorrentFile { - path: tf.path.unwrap_or("".to_string()).split('/').map(|v| v.to_string()).collect(), + async fn get_torrent_files_from_id(&self, torrent_id: i64) -> Result, database::Error> { + let db_torrent_files = + query_as::<_, DbTorrentFile>("SELECT md5sum, length, path FROM torrust_torrent_files WHERE torrent_id = ?") + .bind(torrent_id) + .fetch_all(&self.pool) + .await + .map_err(|_| database::Error::TorrentNotFound)?; + + let torrent_files: Vec = db_torrent_files + .into_iter() + .map(|tf| TorrentFile { + path: tf + .path + .unwrap_or_default() + .split('/') + .map(std::string::ToString::to_string) + .collect(), length: tf.length, - md5sum: tf.md5sum - } - }).collect(); + md5sum: tf.md5sum, + }) + .collect(); Ok(torrent_files) } - async fn get_torrent_announce_urls_from_id(&self, torrent_id: i64) -> Result>, DatabaseError> { - query_as::<_, DbTorrentAnnounceUrl>( - "SELECT tracker_url FROM torrust_torrent_announce_urls WHERE torrent_id = ?" 
- async fn get_torrent_announce_urls_from_id(&self, torrent_id: i64) -> Result<Vec<Vec<String>>, DatabaseError> { - query_as::<_, DbTorrentAnnounceUrl>( - "SELECT tracker_url FROM torrust_torrent_announce_urls WHERE torrent_id = ?" - ) + async fn get_torrent_announce_urls_from_id(&self, torrent_id: i64) -> Result<Vec<Vec<String>>, database::Error> { + query_as::<_, DbTorrentAnnounceUrl>("SELECT tracker_url FROM torrust_torrent_announce_urls WHERE torrent_id = ?") .bind(torrent_id) .fetch_all(&self.pool) .await .map(|v| v.iter().map(|a| vec![a.tracker_url.to_string()]).collect()) - .map_err(|_| DatabaseError::TorrentNotFound) + .map_err(|_| database::Error::TorrentNotFound) } - async fn get_torrent_listing_from_id(&self, torrent_id: i64) -> Result<TorrentListing, DatabaseError> { + async fn get_torrent_listing_from_id(&self, torrent_id: i64) -> Result<TorrentListing, database::Error> { query_as::<_, TorrentListing>( - "SELECT tt.torrent_id, tp.username AS uploader, tt.info_hash, ti.title, ti.description, tt.category_id, DATE_FORMAT(tt.date_uploaded, '%Y-%m-%d %H:%i:%s') AS date_uploaded, tt.size AS file_size, + "SELECT + tt.torrent_id, + tp.username AS uploader, + tt.info_hash, + ti.title, + ti.description, + tt.category_id, + DATE_FORMAT(tt.date_uploaded, '%Y-%m-%d %H:%i:%s') AS date_uploaded, + tt.size AS file_size, + tt.name, + tt.comment, CAST(COALESCE(sum(ts.seeders),0) as signed) as seeders, CAST(COALESCE(sum(ts.leechers),0) as signed) as leechers FROM torrust_torrents tt @@ -570,22 +761,50 @@ impl Database for MysqlDatabase { INNER JOIN torrust_torrent_info ti ON tt.torrent_id = ti.torrent_id LEFT JOIN torrust_torrent_tracker_stats ts ON tt.torrent_id = ts.torrent_id WHERE tt.torrent_id = ? - GROUP BY torrent_id" + GROUP BY torrent_id", ) - .bind(torrent_id) - .fetch_one(&self.pool) - .await - .map_err(|_| DatabaseError::TorrentNotFound) + .bind(torrent_id) + .fetch_one(&self.pool) + .await + .map_err(|_| database::Error::TorrentNotFound) + } + + async fn get_torrent_listing_from_info_hash(&self, info_hash: &InfoHash) -> Result<TorrentListing, database::Error> { + query_as::<_, TorrentListing>( + "SELECT + tt.torrent_id, + tp.username AS uploader, + tt.info_hash, + ti.title, + ti.description, + tt.category_id, + DATE_FORMAT(tt.date_uploaded, '%Y-%m-%d %H:%i:%s') AS date_uploaded, + tt.size AS file_size, + tt.name, + tt.comment, + CAST(COALESCE(sum(ts.seeders),0) as signed) as seeders, + CAST(COALESCE(sum(ts.leechers),0) as signed) as leechers + FROM torrust_torrents tt + INNER JOIN torrust_user_profiles tp ON tt.uploader_id = tp.user_id + INNER JOIN torrust_torrent_info ti ON tt.torrent_id = ti.torrent_id + LEFT JOIN torrust_torrent_tracker_stats ts ON tt.torrent_id = ts.torrent_id + WHERE tt.info_hash = ? + GROUP BY torrent_id", + ) + .bind(info_hash.to_hex_string().to_lowercase()) + .fetch_one(&self.pool) + .await + .map_err(|_| database::Error::TorrentNotFound) } - async fn get_all_torrents_compact(&self) -> Result<Vec<TorrentCompact>, DatabaseError> { + async fn get_all_torrents_compact(&self) -> Result<Vec<TorrentCompact>, database::Error> { query_as::<_, TorrentCompact>("SELECT torrent_id, info_hash FROM torrust_torrents") .fetch_all(&self.pool) .await - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) } - async fn update_torrent_title(&self, torrent_id: i64, title: &str) -> Result<(), DatabaseError> { + async fn update_torrent_title(&self, torrent_id: i64, title: &str) -> Result<(), database::Error> { query("UPDATE torrust_torrent_info SET title = ? 
WHERE torrent_id = ?") .bind(title) .bind(torrent_id) @@ -593,110 +812,202 @@ impl Database for MysqlDatabase { .await .map_err(|e| match e { sqlx::Error::Database(err) => { - if err.message().contains("UNIQUE") { - DatabaseError::TorrentTitleAlreadyExists + log::error!("DB error: {:?}", err); + if err.message().contains("Duplicate entry") && err.message().contains("title") { + database::Error::TorrentTitleAlreadyExists } else { - DatabaseError::Error + database::Error::Error } } - _ => DatabaseError::Error + _ => database::Error::Error, }) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::TorrentNotFound) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::TorrentNotFound) + } }) } - async fn update_torrent_description(&self, torrent_id: i64, description: &str) -> Result<(), DatabaseError> { + async fn update_torrent_description(&self, torrent_id: i64, description: &str) -> Result<(), database::Error> { query("UPDATE torrust_torrent_info SET description = ? WHERE torrent_id = ?") .bind(description) .bind(torrent_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::TorrentNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::TorrentNotFound) + } }) } - async fn update_tracker_info(&self, torrent_id: i64, tracker_url: &str, seeders: i64, leechers: i64) -> Result<(), DatabaseError> { - query("REPLACE INTO torrust_torrent_tracker_stats (torrent_id, tracker_url, seeders, leechers) VALUES (?, ?, ?, ?)") + async fn update_torrent_category(&self, torrent_id: i64, category_id: CategoryId) -> Result<(), database::Error> { + query("UPDATE torrust_torrents SET category_id = ? 
WHERE torrent_id = ?") + .bind(category_id) .bind(torrent_id) - .bind(tracker_url) - .bind(seeders) - .bind(leechers) .execute(&self.pool) .await - .map(|_| ()) - .map_err(|_| DatabaseError::TorrentNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::TorrentNotFound) + } + }) } - async fn delete_torrent(&self, torrent_id: i64) -> Result<(), DatabaseError> { - query("DELETE FROM torrust_torrents WHERE torrent_id = ?") - .bind(torrent_id) + async fn insert_tag_and_get_id(&self, name: &str) -> Result<TagId, database::Error> { + query("INSERT INTO torrust_torrent_tags (name) VALUES (?)") + .bind(name) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::TorrentNotFound) + .map(|v| i64::try_from(v.last_insert_id()).expect("last ID is larger than i64")) + .map_err(|e| match e { + sqlx::Error::Database(err) => { + log::error!("DB error: {:?}", err); + if err.message().contains("Duplicate entry") && err.message().contains("name") { + database::Error::TagAlreadyExists + } else { + database::Error::Error + } + } + _ => database::Error::Error, }) } - async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { - query("DELETE FROM torrust_categories;") + async fn delete_tag(&self, tag_id: TagId) -> Result<(), database::Error> { + query("DELETE FROM torrust_torrent_tags WHERE tag_id = ?") + .bind(tag_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map(|_| ()) + .map_err(|err| database::Error::ErrorWithText(err.to_string())) + } - query("DELETE FROM torrust_torrents;") + async fn add_torrent_tag_link(&self, torrent_id: i64, tag_id: TagId) -> Result<(), database::Error> { + query("INSERT INTO torrust_torrent_tag_links (torrent_id, tag_id) VALUES (?, ?)") + .bind(torrent_id) + .bind(tag_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map(|_| ()) + .map_err(|_| database::Error::Error) + } - query("DELETE FROM torrust_tracker_keys;") - .execute(&self.pool) + async fn add_torrent_tag_links(&self, torrent_id: i64, tag_ids: &[TagId]) -> Result<(), database::Error> { + let mut tx = self + .pool + .begin() .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|err| database::Error::ErrorWithText(err.to_string()))?; - query("DELETE FROM torrust_users;") - .execute(&self.pool) + for tag_id in tag_ids { + query("INSERT INTO torrust_torrent_tag_links (torrent_id, tag_id) VALUES (?, ?)") + .bind(torrent_id) + .bind(tag_id) + .execute(&mut *tx) + .await + .map_err(|err| database::Error::ErrorWithText(err.to_string()))?; + } + + tx.commit() .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|err| database::Error::ErrorWithText(err.to_string())) + }
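`add_torrent_tag_links` wraps the per-tag inserts in one transaction, so the whole batch either commits or rolls back. A hypothetical call site that replaces a torrent's tags (both trait methods exist in this patch; the wrapper itself is illustrative):

```rust
// Hypothetical wrapper, not part of this patch.
async fn retag_torrent(db: &dyn Database, torrent_id: i64, tag_ids: &[TagId]) -> Result<(), database::Error> {
    // Clear the old links first; the batch insert below is then
    // all-or-nothing thanks to its internal transaction.
    db.delete_all_torrent_tag_links(torrent_id).await?;
    db.add_torrent_tag_links(torrent_id, tag_ids).await
}
```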
- query("DELETE FROM torrust_user_authentication;") + async fn delete_torrent_tag_link(&self, torrent_id: i64, tag_id: TagId) -> Result<(), database::Error> { + query("DELETE FROM torrust_torrent_tag_links WHERE torrent_id = ? AND tag_id = ?") + .bind(torrent_id) + .bind(tag_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map(|_| ()) + .map_err(|_| database::Error::Error) + } - query("DELETE FROM torrust_user_bans;") + async fn delete_all_torrent_tag_links(&self, torrent_id: i64) -> Result<(), database::Error> { + query("DELETE FROM torrust_torrent_tag_links WHERE torrent_id = ?") + .bind(torrent_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map(|_| ()) + .map_err(|err| database::Error::ErrorWithText(err.to_string())) + } - query("DELETE FROM torrust_user_invitations;") - .execute(&self.pool) + async fn get_tag_from_name(&self, name: &str) -> Result<TorrentTag, database::Error> { + query_as::<_, TorrentTag>("SELECT tag_id, name FROM torrust_torrent_tags WHERE name = ?") + .bind(name) + .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::TagNotFound) + } - query("DELETE FROM torrust_user_profiles;") - .execute(&self.pool) + async fn get_tags(&self) -> Result<Vec<TorrentTag>, database::Error> { + query_as::<_, TorrentTag>("SELECT tag_id, name FROM torrust_torrent_tags") + .fetch_all(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error) + } - query("DELETE FROM torrust_torrents;") + async fn get_tags_for_torrent_id(&self, torrent_id: i64) -> Result<Vec<TorrentTag>, database::Error> { + query_as::<_, TorrentTag>( + "SELECT torrust_torrent_tags.tag_id, torrust_torrent_tags.name + FROM torrust_torrent_tags + JOIN torrust_torrent_tag_links ON torrust_torrent_tags.tag_id = torrust_torrent_tag_links.tag_id + WHERE torrust_torrent_tag_links.torrent_id = ?", + ) + .bind(torrent_id) + .fetch_all(&self.pool) + .await + .map_err(|_| database::Error::Error) + } + + async fn update_tracker_info( + &self, + torrent_id: i64, + tracker_url: &str, + seeders: i64, + leechers: i64, + ) -> Result<(), database::Error> { + query("REPLACE INTO torrust_torrent_tracker_stats (torrent_id, tracker_url, seeders, leechers) VALUES (?, ?, ?, ?)") + .bind(torrent_id) + .bind(tracker_url) + .bind(seeders) + .bind(leechers) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map(|_| ()) + .map_err(|_| database::Error::TorrentNotFound) + } - query("DELETE FROM torrust_user_public_keys;") + async fn delete_torrent(&self, torrent_id: i64) -> Result<(), database::Error> { + query("DELETE FROM torrust_torrents WHERE torrent_id = ?") + .bind(torrent_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::TorrentNotFound) + } + }) + } + + async fn delete_all_database_rows(&self) -> Result<(), database::Error> { + for table in TABLES_TO_TRUNCATE { + query(&format!("DELETE FROM {table};")) + .execute(&self.pool) + .await + .map_err(|_| database::Error::Error)?; + } Ok(()) }
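Both drivers now delegate `delete_all_database_rows` to a shared `TABLES_TO_TRUNCATE` constant imported from `databases::database`. The constant itself is outside this hunk; a plausible shape, with an illustrative (not verbatim) table list:

```rust
// Sketch of the shared constant in src/databases/database.rs; the
// exact table names and their order come from that module, not here.
pub const TABLES_TO_TRUNCATE: &[&str] = &[
    "torrust_torrent_announce_urls",
    "torrust_torrent_files",
    "torrust_torrent_info",
    "torrust_torrent_tag_links",
    "torrust_torrents",
    "torrust_user_profiles",
    "torrust_users",
];
```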
diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 8ac8deab..02abfc24 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -1,25 +1,44 @@ -use sqlx::{Acquire, query, query_as, SqlitePool}; -use sqlx::sqlite::SqlitePoolOptions; +use std::str::FromStr; +use std::time::Duration; + use async_trait::async_trait; -use chrono::{NaiveDateTime}; - -use crate::models::torrent::TorrentListing; -use crate::utils::time::current_time; -use crate::models::tracker_key::{TrackerKey}; -use crate::databases::database::{Category, Database, DatabaseDriver, DatabaseError, Sorting, TorrentCompact}; -use crate::models::response::{TorrentsResponse}; -use crate::models::torrent_file::{DbTorrentAnnounceUrl, DbTorrentFile, DbTorrentInfo, Torrent, TorrentFile}; -use crate::models::user::{User, UserAuthentication, UserCompact, UserProfile}; -use crate::utils::hex::bytes_to_hex; - -pub struct SqliteDatabase { - pub pool: SqlitePool +use chrono::NaiveDateTime; +use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions}; +use sqlx::{query, query_as, Acquire, ConnectOptions, SqlitePool}; + +use super::database::TABLES_TO_TRUNCATE; +use crate::databases::database; +use crate::databases::database::{Category, Database, Driver, Sorting, TorrentCompact}; +use crate::models::category::CategoryId; +use crate::models::info_hash::InfoHash; +use crate::models::response::TorrentsResponse; +use crate::models::torrent::{Metadata, TorrentListing}; +use crate::models::torrent_file::{DbTorrent, DbTorrentAnnounceUrl, DbTorrentFile, Torrent, TorrentFile}; +use crate::models::torrent_tag::{TagId, TorrentTag}; +use crate::models::tracker_key::TrackerKey; +use crate::models::user::{User, UserAuthentication, UserCompact, UserId, UserProfile}; +use crate::services::torrent::{CanonicalInfoHashGroup, DbTorrentInfoHash}; +use crate::utils::clock; +use crate::utils::hex::from_bytes; + +pub struct Sqlite { + pub pool: SqlitePool, } -impl SqliteDatabase { - pub async fn new(database_url: &str) -> Self { +#[async_trait] +impl Database for Sqlite { + fn get_database_driver(&self) -> Driver { + Driver::Sqlite3 + } + + async fn new(database_url: &str) -> Self { + let connection_options = SqliteConnectOptions::from_str(database_url) + .expect("Unable to create connection options.") + .log_statements(log::LevelFilter::Error) + .log_slow_statements(log::LevelFilter::Warn, Duration::from_secs(1)); + let db = SqlitePoolOptions::new() - .connect(database_url) + .connect_with(connection_options) .await .expect("Unable to create database pool."); @@ -28,49 +47,36 @@ impl SqliteDatabase { .await .expect("Could not run database migrations."); - Self { - pool: db - } - } -} - -#[async_trait] -impl Database for SqliteDatabase { - fn get_database_driver(&self) -> DatabaseDriver { - DatabaseDriver::Sqlite3 + Self { pool: db } } - async fn insert_user_and_get_id(&self, username: &str, email: &str, password_hash: &str) -> Result<i64, DatabaseError> { - + async fn insert_user_and_get_id(&self, username: &str, email: &str, password_hash: &str) -> Result<UserId, database::Error> { // open pool connection - let mut conn = self.pool.acquire() - .await - .map_err(|_| DatabaseError::Error)?; + let mut conn = self.pool.acquire().await.map_err(|_| database::Error::Error)?; // start db transaction - let mut tx = conn.begin() - .await - .map_err(|_| DatabaseError::Error)?; + let mut tx = conn.begin().await.map_err(|_| database::Error::Error)?; // create the user account and get the user id - let user_id = query("INSERT INTO torrust_users (date_registered) VALUES (strftime('%Y-%m-%d %H:%M:%S',DATETIME('now', 'utc')))") - .execute(&mut tx) - .await - .map(|v| v.last_insert_rowid()) - .map_err(|_| DatabaseError::Error)?; + let user_id = + query("INSERT INTO torrust_users (date_registered) VALUES (strftime('%Y-%m-%d %H:%M:%S',DATETIME('now', 'utc')))") + .execute(&mut *tx) + .await + .map(|v| v.last_insert_rowid()) + .map_err(|_| database::Error::Error)?; // add password hash for account let insert_user_auth_result = query("INSERT INTO torrust_user_authentication (user_id, password_hash) VALUES (?, ?)") .bind(user_id) .bind(password_hash) - .execute(&mut tx) + .execute(&mut *tx) .await - .map_err(|_| 
DatabaseError::Error); + .map_err(|_| database::Error::Error); // rollback transaction on error if let Err(e) = insert_user_auth_result { - let _ = tx.rollback().await; - return Err(e) + drop(tx.rollback().await); + return Err(e); } // add account profile details @@ -78,71 +84,71 @@ impl Database for SqliteDatabase { .bind(user_id) .bind(username) .bind(email) - .execute(&mut tx) + .execute(&mut *tx) .await .map_err(|e| match e { sqlx::Error::Database(err) => { if err.message().contains("username") { - DatabaseError::UsernameTaken + database::Error::UsernameTaken } else if err.message().contains("email") { - DatabaseError::EmailTaken + database::Error::EmailTaken } else { - DatabaseError::Error + database::Error::Error } } - _ => DatabaseError::Error + _ => database::Error::Error }); // commit or rollback transaction and return user_id on success match insert_user_profile_result { Ok(_) => { - let _ = tx.commit().await; - Ok(user_id as i64) + drop(tx.commit().await); + Ok(user_id) } Err(e) => { - let _ = tx.rollback().await; + drop(tx.rollback().await); Err(e) } } } - async fn get_user_from_id(&self, user_id: i64) -> Result<User, DatabaseError> { + async fn get_user_from_id(&self, user_id: i64) -> Result<User, database::Error> { query_as::<_, User>("SELECT * FROM torrust_users WHERE user_id = ?") .bind(user_id) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::UserNotFound) + .map_err(|_| database::Error::UserNotFound) } - async fn get_user_authentication_from_id(&self, user_id: i64) -> Result<UserAuthentication, DatabaseError> { + async fn get_user_authentication_from_id(&self, user_id: UserId) -> Result<UserAuthentication, database::Error> { query_as::<_, UserAuthentication>("SELECT * FROM torrust_user_authentication WHERE user_id = ?") .bind(user_id) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::UserNotFound) + .map_err(|_| database::Error::UserNotFound) } - async fn get_user_profile_from_username(&self, username: &str) -> Result<UserProfile, DatabaseError> { + async fn get_user_profile_from_username(&self, username: &str) -> Result<UserProfile, database::Error> { query_as::<_, UserProfile>("SELECT * FROM torrust_user_profiles WHERE username = ?") .bind(username) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::UserNotFound) + .map_err(|_| database::Error::UserNotFound) } - async fn get_user_compact_from_id(&self, user_id: i64) -> Result<UserCompact, DatabaseError> { + async fn get_user_compact_from_id(&self, user_id: i64) -> Result<UserCompact, database::Error> { query_as::<_, UserCompact>("SELECT tu.user_id, tp.username, tu.administrator FROM torrust_users tu INNER JOIN torrust_user_profiles tp ON tu.user_id = tp.user_id WHERE tu.user_id = ?") .bind(user_id) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::UserNotFound) + .map_err(|_| database::Error::UserNotFound) } async fn get_user_tracker_key(&self, user_id: i64) -> Option<TrackerKey> { const HOUR_IN_SECONDS: i64 = 3600; // casting current_time() to i64 will overflow in the year 2262 - let current_time_plus_hour = (current_time() as i64) + HOUR_IN_SECONDS; + let current_time_plus_hour = i64::try_from(clock::now()).unwrap().saturating_add(HOUR_IN_SECONDS); // get tracker key that is valid for at least one hour from now query_as::<_, TrackerKey>("SELECT tracker_key AS key, date_expiry AS valid_until FROM torrust_tracker_keys WHERE user_id = $1 AND date_expiry > $2 ORDER BY date_expiry DESC") @@ -153,15 +159,15 @@ impl Database for SqliteDatabase { .ok() } - async fn count_users(&self) -> Result<i64, DatabaseError> { + async fn count_users(&self) -> Result<i64, database::Error> { query_as("SELECT COUNT(*) FROM torrust_users") .fetch_one(&self.pool) .await .map(|(v,)| v) - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) }
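`insert_user_and_get_id` above shows the acquire → begin → execute → commit-or-rollback shape used by every multi-statement write in this file. A stripped-down, self-contained sketch of the same pattern (placeholder tables, plain `sqlx::Error` instead of `database::Error`):

```rust
use sqlx::{query, Acquire, SqlitePool};

// Minimal sketch of the transaction pattern; `a` and `b` are
// placeholder tables, not part of the real schema.
async fn insert_two_rows(pool: &SqlitePool) -> Result<(), sqlx::Error> {
    let mut conn = pool.acquire().await?;
    let mut tx = conn.begin().await?;

    query("INSERT INTO a (x) VALUES (1)").execute(&mut *tx).await?;

    // An uncommitted transaction rolls back when dropped; the explicit
    // rollback used throughout this file just makes that intent visible.
    if let Err(e) = query("INSERT INTO b (y) VALUES (2)").execute(&mut *tx).await {
        drop(tx.rollback().await);
        return Err(e);
    }

    tx.commit().await
}
```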
- async fn ban_user(&self, user_id: i64, reason: &str, date_expiry: NaiveDateTime) -> Result<(), DatabaseError> { + async fn ban_user(&self, user_id: i64, reason: &str, date_expiry: NaiveDateTime) -> Result<(), database::Error> { // date needs to be in ISO 8601 format let date_expiry_string = date_expiry.format("%Y-%m-%d %H:%M:%S").to_string(); @@ -172,32 +178,34 @@ impl Database for SqliteDatabase { .execute(&self.pool) .await .map(|_| ()) - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) } - async fn grant_admin_role(&self, user_id: i64) -> Result<(), DatabaseError> { + async fn grant_admin_role(&self, user_id: i64) -> Result<(), database::Error> { query("UPDATE torrust_users SET administrator = TRUE WHERE user_id = ?") .bind(user_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::UserNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::UserNotFound) + } }) } - async fn verify_email(&self, user_id: i64) -> Result<(), DatabaseError> { + async fn verify_email(&self, user_id: i64) -> Result<(), database::Error> { query("UPDATE torrust_user_profiles SET email_verified = TRUE WHERE user_id = ?") .bind(user_id) .execute(&self.pool) .await .map(|_| ()) - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) } - async fn add_tracker_key(&self, user_id: i64, tracker_key: &TrackerKey) -> Result<(), DatabaseError> { + async fn add_tracker_key(&self, user_id: i64, tracker_key: &TrackerKey) -> Result<(), database::Error> { let key = tracker_key.key.clone(); query("INSERT INTO torrust_tracker_keys (user_id, tracker_key, date_expiry) VALUES ($1, $2, $3)") @@ -207,23 +215,25 @@ impl Database for SqliteDatabase { .execute(&self.pool) .await .map(|_| ()) - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) } - async fn delete_user(&self, user_id: i64) -> Result<(), DatabaseError> { + async fn delete_user(&self, user_id: i64) -> Result<(), database::Error> { query("DELETE FROM torrust_users WHERE user_id = ?") .bind(user_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::UserNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::UserNotFound) + } }) } - async fn insert_category_and_get_id(&self, category_name: &str) -> Result<i64, DatabaseError> { + async fn insert_category_and_get_id(&self, category_name: &str) -> Result<i64, database::Error> { query("INSERT INTO torrust_categories (name) VALUES (?)") .bind(category_name) .execute(&self.pool) @@ -231,57 +241,69 @@ impl Database for SqliteDatabase { .map(|v| v.last_insert_rowid()) .map_err(|e| match e { sqlx::Error::Database(err) => { - if err.message().contains("UNIQUE") { - DatabaseError::CategoryAlreadyExists + log::error!("DB error: {:?}", err); + if err.message().contains("UNIQUE") && err.message().contains("name") { + database::Error::CategoryAlreadyExists } else { - DatabaseError::Error + database::Error::Error } - }, - _ => DatabaseError::Error + } + _ => database::Error::Error, }) } - async fn get_category_from_id(&self, category_id: i64) -> Result<Category, DatabaseError> { + async fn get_category_from_id(&self, category_id: i64) -> Result<Category, database::Error> { query_as::<_, Category>("SELECT category_id, name, (SELECT COUNT(*) FROM torrust_torrents WHERE torrust_torrents.category_id = 
torrust_categories.category_id) AS num_torrents FROM torrust_categories WHERE category_id = ?") .bind(category_id) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::CategoryNotFound) + .map_err(|_| database::Error::CategoryNotFound) } - async fn get_category_from_name(&self, category_name: &str) -> Result<Category, DatabaseError> { + async fn get_category_from_name(&self, category_name: &str) -> Result<Category, database::Error> { query_as::<_, Category>("SELECT category_id, name, (SELECT COUNT(*) FROM torrust_torrents WHERE torrust_torrents.category_id = torrust_categories.category_id) AS num_torrents FROM torrust_categories WHERE name = ?") .bind(category_name) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::CategoryNotFound) + .map_err(|_| database::Error::CategoryNotFound) } - async fn get_categories(&self) -> Result<Vec<Category>, DatabaseError> { + async fn get_categories(&self) -> Result<Vec<Category>, database::Error> { query_as::<_, Category>("SELECT tc.category_id, tc.name, COUNT(tt.category_id) as num_torrents FROM torrust_categories tc LEFT JOIN torrust_torrents tt on tc.category_id = tt.category_id GROUP BY tc.name") .fetch_all(&self.pool) .await - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) } - async fn delete_category(&self, category_name: &str) -> Result<(), DatabaseError> { + async fn delete_category(&self, category_name: &str) -> Result<(), database::Error> { query("DELETE FROM torrust_categories WHERE name = ?") .bind(category_name) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::CategoryNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::CategoryNotFound) + } }) } - // TODO: refactor this - async fn get_torrents_search_sorted_paginated(&self, search: &Option<String>, categories: &Option<Vec<String>>, sort: &Sorting, offset: u64, page_size: u8) -> Result<TorrentsResponse, DatabaseError> { + // todo: refactor this + #[allow(clippy::too_many_lines)] + async fn get_torrents_search_sorted_paginated( + &self, + search: &Option<String>, + categories: &Option<Vec<String>>, + tags: &Option<Vec<String>>, + sort: &Sorting, + offset: u64, + limit: u8, + ) -> Result<TorrentsResponse, database::Error> { let title = match search { None => "%".to_string(), - Some(v) => format!("%{}%", v) + Some(v) => format!("%{v}%"), }; let sort_query: String = match sort { @@ -300,127 +322,202 @@ impl Database for SqliteDatabase { let category_filter_query = if let Some(c) = categories { let mut i = 0; let mut category_filters = String::new(); - for category in c.iter() { + for category in c { // don't take user input in the db query if let Ok(sanitized_category) = self.get_category_from_name(category).await { let mut str = format!("tc.name = '{}'", sanitized_category.name); - if i > 0 { str = format!(" OR {}", str); } + if i > 0 { + str = format!(" OR {str}"); + } category_filters.push_str(&str); i += 1; } } - if category_filters.len() > 0 { - format!("INNER JOIN torrust_categories tc ON tt.category_id = tc.category_id AND ({}) ", category_filters) + if category_filters.is_empty() { + String::new() } else { + format!("INNER JOIN torrust_categories tc ON tt.category_id = tc.category_id AND ({category_filters}) ") + } + } else { + String::new() + }; + + let tag_filter_query = if let Some(t) = tags { + let mut i = 0; + let mut tag_filters = String::new(); + for tag in t { + // don't take user input in the db query + if let Ok(sanitized_tag) = self.get_tag_from_name(tag).await { + let mut str = format!("tl.tag_id = '{}'", sanitized_tag.tag_id); + if i > 0 { + str 
= format!(" OR {str}"); + } + tag_filters.push_str(&str); + i += 1; + } + } + if tag_filters.is_empty() { String::new() + } else { + format!("INNER JOIN torrust_torrent_tag_links tl ON tt.torrent_id = tl.torrent_id AND ({tag_filters}) ") } } else { String::new() }; let mut query_string = format!( - "SELECT tt.torrent_id, tp.username AS uploader, tt.info_hash, ti.title, ti.description, tt.category_id, tt.date_uploaded, tt.size AS file_size, + "SELECT + tt.torrent_id, + tp.username AS uploader, + tt.info_hash, + ti.title, + ti.description, + tt.category_id, + tt.date_uploaded, + tt.size AS file_size, + tt.name, + tt.comment, CAST(COALESCE(sum(ts.seeders),0) as signed) as seeders, CAST(COALESCE(sum(ts.leechers),0) as signed) as leechers - FROM torrust_torrents tt {} + FROM torrust_torrents tt + {category_filter_query} + {tag_filter_query} INNER JOIN torrust_user_profiles tp ON tt.uploader_id = tp.user_id INNER JOIN torrust_torrent_info ti ON tt.torrent_id = ti.torrent_id LEFT JOIN torrust_torrent_tracker_stats ts ON tt.torrent_id = ts.torrent_id WHERE title LIKE ? - GROUP BY ts.torrent_id", - category_filter_query + GROUP BY tt.torrent_id" ); - let count_query = format!("SELECT COUNT(*) as count FROM ({}) AS count_table", query_string); + let count_query = format!("SELECT COUNT(*) as count FROM ({query_string}) AS count_table"); - let count_result: Result<i64, DatabaseError> = query_as(&count_query) + let count_result: Result<i64, database::Error> = query_as(&count_query) .bind(title.clone()) .fetch_one(&self.pool) .await .map(|(v,)| v) - .map_err(|_| DatabaseError::Error); + .map_err(|_| database::Error::Error); let count = count_result?; - query_string = format!("{} ORDER BY {} LIMIT ?, ?", query_string, sort_query); + query_string = format!("{query_string} ORDER BY {sort_query} LIMIT ?, ?"); let res: Vec<TorrentListing> = sqlx::query_as::<_, TorrentListing>(&query_string) .bind(title) - .bind(offset as i64) - .bind(page_size) + .bind(i64::saturating_add_unsigned(0, offset)) + .bind(limit) .fetch_all(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error)?; Ok(TorrentsResponse { - total: count as u32, - results: res + total: u32::try_from(count).expect("variable `count` is larger than u32"), + results: res, }) }
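The category and tag filters above are built only from names that already made a round trip through the database, so the `format!` interpolation never sees raw user input. A runnable sketch of how the `OR` chain is assembled (category values are illustrative):

```rust
// Mirrors the filter-building loop above with hypothetical categories.
fn main() {
    let sanitized = ["software", "os"];
    let mut filters = String::new();
    for (i, name) in sanitized.iter().enumerate() {
        let mut clause = format!("tc.name = '{name}'");
        if i > 0 {
            clause = format!(" OR {clause}");
        }
        filters.push_str(&clause);
    }
    assert_eq!(filters, "tc.name = 'software' OR tc.name = 'os'");
}
```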
- async fn insert_torrent_and_get_id(&self, torrent: &Torrent, uploader_id: i64, category_id: i64, title: &str, description: &str) -> Result<i64, DatabaseError> { - let info_hash = torrent.info_hash(); + #[allow(clippy::too_many_lines)] + async fn insert_torrent_and_get_id( + &self, + original_info_hash: &InfoHash, + torrent: &Torrent, + uploader_id: UserId, + metadata: &Metadata, + ) -> Result<i64, database::Error> { + let info_hash = torrent.canonical_info_hash_hex(); + let canonical_info_hash = torrent.canonical_info_hash(); // open pool connection - let mut conn = self.pool.acquire() - .await - .map_err(|_| DatabaseError::Error)?; + let mut conn = self.pool.acquire().await.map_err(|_| database::Error::Error)?; // start db transaction - let mut tx = conn.begin() - .await - .map_err(|_| DatabaseError::Error)?; + let mut tx = conn.begin().await.map_err(|_| database::Error::Error)?; // torrent file can only hold a pieces key or a root hash key: http://www.bittorrent.org/beps/bep_0030.html let (pieces, root_hash): (String, bool) = if let Some(pieces) = &torrent.info.pieces { - (bytes_to_hex(pieces.as_ref()), false) + (from_bytes(pieces.as_ref()), false) } else { - let root_hash = torrent.info.root_hash.as_ref().ok_or(DatabaseError::Error)?; + let root_hash = torrent.info.root_hash.as_ref().ok_or(database::Error::Error)?; (root_hash.to_string(), true) }; - let private = torrent.info.private.unwrap_or(0); - // add torrent - let torrent_id = query("INSERT INTO torrust_torrents (uploader_id, category_id, info_hash, size, name, pieces, piece_length, private, root_hash, date_uploaded) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, strftime('%Y-%m-%d %H:%M:%S',DATETIME('now', 'utc')))") - .bind(uploader_id) - .bind(category_id) - .bind(info_hash) - .bind(torrent.file_size()) - .bind(torrent.info.name.to_string()) - .bind(pieces) - .bind(torrent.info.piece_length) - .bind(private) - .bind(root_hash) - .execute(&self.pool) - .await - .map(|v| v.last_insert_rowid() as i64) - .map_err(|e| match e { - sqlx::Error::Database(err) => { - if err.message().contains("info_hash") { - DatabaseError::TorrentAlreadyExists - } else if err.message().contains("title") { - DatabaseError::TorrentTitleAlreadyExists - } else { - DatabaseError::Error - } + let torrent_id = query( + "INSERT INTO torrust_torrents ( + uploader_id, + category_id, + info_hash, + size, + name, + pieces, + piece_length, + private, + root_hash, + `source`, + comment, + date_uploaded + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, strftime('%Y-%m-%d %H:%M:%S',DATETIME('now', 'utc')))", + ) + .bind(uploader_id) + .bind(metadata.category_id) + .bind(info_hash.to_lowercase()) + .bind(torrent.file_size()) + .bind(torrent.info.name.to_string()) + .bind(pieces) + .bind(torrent.info.piece_length) + .bind(torrent.info.private) + .bind(root_hash) + .bind(torrent.info.source.clone()) + .bind(torrent.comment.clone()) + .execute(&mut *tx) + .await + .map(|v| v.last_insert_rowid()) + .map_err(|e| match e { + sqlx::Error::Database(err) => { + log::error!("DB error: {:?}", err); + if err.message().contains("UNIQUE") && err.message().contains("info_hash") { + database::Error::TorrentAlreadyExists + } else { + database::Error::Error } - _ => DatabaseError::Error - })?; + } + _ => database::Error::Error, + })?; + + // add torrent canonical infohash + + let insert_info_hash_result = + query("INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) VALUES (?, ?, ?)") + .bind(original_info_hash.to_hex_string()) + .bind(canonical_info_hash.to_hex_string()) + .bind(true) + .execute(&mut *tx) + .await + .map(|_| ()) + .map_err(|err| { + log::error!("DB error: {:?}", err); + database::Error::Error + }); + + // rollback transaction on error + if let Err(e) = insert_info_hash_result { + drop(tx.rollback().await); + return Err(e); + } let insert_torrent_files_result = if let Some(length) = torrent.info.length { query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, length) VALUES (?, ?, ?)") .bind(torrent.info.md5sum.clone()) .bind(torrent_id) .bind(length) - .execute(&mut tx) + .execute(&mut *tx) .await .map(|_| ()) - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) } else { let files = torrent.info.files.as_ref().unwrap(); - for file in files.iter() { + for file in files { let path = file.path.join("/"); let _ = query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, length, path) VALUES (?, ?, ?, ?)") @@ -428,9 +525,9 @@ impl Database for SqliteDatabase { .bind(torrent_id) .bind(file.length) .bind(path) - .execute(&mut tx) + .execute(&mut *tx) .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error)?; } Ok(()) @@ -438,127 +535,214 @@ impl Database for SqliteDatabase { // rollback transaction on error if let Err(e) = insert_torrent_files_result { - let _ = tx.rollback().await; - return Err(e) + 
drop(tx.rollback().await); + return Err(e); } - let insert_torrent_announce_urls_result: Result<(), DatabaseError> = if let Some(tracker_url) = &torrent.announce { - query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") - .bind(torrent_id) - .bind(tracker_url) - .execute(&mut tx) - .await - .map(|_| ()) - .map_err(|_| DatabaseError::Error) - } else { + let insert_torrent_announce_urls_result: Result<(), database::Error> = if let Some(announce_urls) = &torrent.announce_list + { // flatten the nested vec (this will however remove the tracker tiers) - let announce_urls = torrent.announce_list.clone().unwrap().into_iter().flatten().collect::<Vec<String>>(); + let announce_urls = announce_urls.iter().flatten().collect::<Vec<&String>>(); - for tracker_url in announce_urls.iter() { - let _ = query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") + for tracker_url in &announce_urls { + let () = query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") .bind(torrent_id) .bind(tracker_url) - .execute(&mut tx) + .execute(&mut *tx) .await .map(|_| ()) - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error)?; } Ok(()) + } else { + let tracker_url = torrent.announce.as_ref().unwrap(); + + query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") + .bind(torrent_id) + .bind(tracker_url) + .execute(&mut *tx) + .await + .map(|_| ()) + .map_err(|_| database::Error::Error) }; // rollback transaction on error if let Err(e) = insert_torrent_announce_urls_result { - let _ = tx.rollback().await; - return Err(e) + drop(tx.rollback().await); + return Err(e); } - let insert_torrent_info_result = query(r#"INSERT INTO torrust_torrent_info (torrent_id, title, description) VALUES (?, ?, NULLIF(?, ""))"#) - .bind(torrent_id) - .bind(title) - .bind(description) - .execute(&mut tx) - .await - .map_err(|e| match e { - sqlx::Error::Database(err) => { - if err.message().contains("info_hash") { - DatabaseError::TorrentAlreadyExists - } else if err.message().contains("title") { - DatabaseError::TorrentTitleAlreadyExists - } else { - DatabaseError::Error + // Insert tags + + for tag_id in &metadata.tags { + let insert_torrent_tag_result = query("INSERT INTO torrust_torrent_tag_links (torrent_id, tag_id) VALUES (?, ?)") + .bind(torrent_id) + .bind(tag_id) + .execute(&mut *tx) + .await + .map_err(|err| database::Error::ErrorWithText(err.to_string())); + + // rollback transaction on error + if let Err(e) = insert_torrent_tag_result { + drop(tx.rollback().await); + return Err(e); + } + } + + let insert_torrent_info_result = + query(r#"INSERT INTO torrust_torrent_info (torrent_id, title, description) VALUES (?, ?, NULLIF(?, ""))"#) + .bind(torrent_id) + .bind(metadata.title.clone()) + .bind(metadata.description.clone()) + .execute(&mut *tx) + .await + .map_err(|e| match e { + sqlx::Error::Database(err) => { + log::error!("DB error: {:?}", err); + if err.message().contains("UNIQUE") && err.message().contains("title") { + database::Error::TorrentTitleAlreadyExists + } else { + database::Error::Error + } } - } - _ => DatabaseError::Error - }); + _ => database::Error::Error, + }); // commit or rollback transaction and return torrent_id on success match insert_torrent_info_result { Ok(_) => { - let _ = tx.commit().await; - Ok(torrent_id as i64) + drop(tx.commit().await); + Ok(torrent_id) } Err(e) => { - let _ = tx.rollback().await; + drop(tx.rollback().await); Err(e) } } }
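As the comment in the announce-urls block above notes, `announce_list` is a list of tracker *tiers* (BEP 12), and flattening it discards the tier grouping before the URLs are stored. A self-contained illustration:

```rust
// `announce_list` groups trackers into tiers; flattening keeps the
// URLs but loses which tier each one belonged to.
fn main() {
    let announce_list: Vec<Vec<String>> = vec![
        vec!["udp://tracker-a:6969".into(), "udp://tracker-b:6969".into()], // tier 1
        vec!["udp://backup:6969".into()],                                   // tier 2
    ];
    let flat: Vec<&String> = announce_list.iter().flatten().collect();
    assert_eq!(flat.len(), 3); // three rows in the table, no tier info
}
```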
- async fn get_torrent_from_id(&self, torrent_id: i64) -> Result<Torrent, DatabaseError> { - let torrent_info = self.get_torrent_info_from_id(torrent_id).await?; + async fn get_torrent_canonical_info_hash_group( + &self, + canonical: &InfoHash, + ) -> Result<CanonicalInfoHashGroup, database::Error> { + let db_info_hashes = query_as::<_, DbTorrentInfoHash>( + "SELECT info_hash, canonical_info_hash, original_is_known FROM torrust_torrent_info_hashes WHERE canonical_info_hash = ?", + ) + .bind(canonical.to_hex_string()) + .fetch_all(&self.pool) + .await + .map_err(|err| database::Error::ErrorWithText(err.to_string()))?; + + let info_hashes: Vec<InfoHash> = db_info_hashes + .into_iter() + .map(|db_info_hash| { + InfoHash::from_str(&db_info_hash.info_hash) + .unwrap_or_else(|_| panic!("Invalid info-hash in database: {}", db_info_hash.info_hash)) + }) + .collect(); - let torrent_files = self.get_torrent_files_from_id(torrent_id).await?; + Ok(CanonicalInfoHashGroup { + canonical_info_hash: *canonical, + original_info_hashes: info_hashes, + }) + } - let torrent_announce_urls = self.get_torrent_announce_urls_from_id(torrent_id).await?; + async fn find_canonical_info_hash_for(&self, info_hash: &InfoHash) -> Result<Option<InfoHash>, database::Error> { + let maybe_db_torrent_info_hash = query_as::<_, DbTorrentInfoHash>( + "SELECT info_hash, canonical_info_hash, original_is_known FROM torrust_torrent_info_hashes WHERE info_hash = ?", + ) + .bind(info_hash.to_hex_string()) + .fetch_optional(&self.pool) + .await + .map_err(|err| database::Error::ErrorWithText(err.to_string()))?; + + match maybe_db_torrent_info_hash { + Some(db_torrent_info_hash) => Ok(Some( + InfoHash::from_str(&db_torrent_info_hash.canonical_info_hash) + .unwrap_or_else(|_| panic!("Invalid info-hash in database: {}", db_torrent_info_hash.canonical_info_hash)), + )), + None => Ok(None), + } + } - Ok(Torrent::from_db_info_files_and_announce_urls(torrent_info, torrent_files, torrent_announce_urls)) + async fn add_info_hash_to_canonical_info_hash_group( + &self, + original: &InfoHash, + canonical: &InfoHash, + ) -> Result<(), database::Error> { + query("INSERT INTO torrust_torrent_info_hashes (info_hash, canonical_info_hash, original_is_known) VALUES (?, ?, ?)") + .bind(original.to_hex_string()) + .bind(canonical.to_hex_string()) + .bind(true) + .execute(&self.pool) + .await + .map(|_| ()) + .map_err(|err| database::Error::ErrorWithText(err.to_string())) } - async fn get_torrent_info_from_id(&self, torrent_id: i64) -> Result<DbTorrentInfo, DatabaseError> { - query_as::<_, DbTorrentInfo>( - "SELECT name, pieces, piece_length, private, root_hash FROM torrust_torrents WHERE torrent_id = ?" - ) + async fn get_torrent_info_from_id(&self, torrent_id: i64) -> Result<DbTorrent, database::Error> { + query_as::<_, DbTorrent>("SELECT * FROM torrust_torrents WHERE torrent_id = ?") .bind(torrent_id) .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::TorrentNotFound) + .map_err(|_| database::Error::TorrentNotFound) } - async fn get_torrent_files_from_id(&self, torrent_id: i64) -> Result<Vec<TorrentFile>, DatabaseError> { - let db_torrent_files = query_as::<_, DbTorrentFile>( - "SELECT md5sum, length, path FROM torrust_torrent_files WHERE torrent_id = ?" 
- ) - .bind(torrent_id) - .fetch_all(&self.pool) + async fn get_torrent_info_from_info_hash(&self, info_hash: &InfoHash) -> Result<DbTorrent, database::Error> { + query_as::<_, DbTorrent>("SELECT * FROM torrust_torrents WHERE info_hash = ?") + .bind(info_hash.to_hex_string().to_lowercase()) + .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::TorrentNotFound)?; + .map_err(|_| database::Error::TorrentNotFound) + } - let torrent_files: Vec<TorrentFile> = db_torrent_files.into_iter().map(|tf| { - TorrentFile { - path: tf.path.unwrap_or("".to_string()).split('/').map(|v| v.to_string()).collect(), + async fn get_torrent_files_from_id(&self, torrent_id: i64) -> Result<Vec<TorrentFile>, database::Error> { + let db_torrent_files = + query_as::<_, DbTorrentFile>("SELECT md5sum, length, path FROM torrust_torrent_files WHERE torrent_id = ?") + .bind(torrent_id) + .fetch_all(&self.pool) + .await + .map_err(|_| database::Error::TorrentNotFound)?; + + let torrent_files: Vec<TorrentFile> = db_torrent_files + .into_iter() + .map(|tf| TorrentFile { + path: tf + .path + .unwrap_or_default() + .split('/') + .map(std::string::ToString::to_string) + .collect(), length: tf.length, - md5sum: tf.md5sum - } - }).collect(); + md5sum: tf.md5sum, + }) + .collect(); Ok(torrent_files) } - async fn get_torrent_announce_urls_from_id(&self, torrent_id: i64) -> Result<Vec<Vec<String>>, DatabaseError> { - query_as::<_, DbTorrentAnnounceUrl>( - "SELECT tracker_url FROM torrust_torrent_announce_urls WHERE torrent_id = ?" - ) + async fn get_torrent_announce_urls_from_id(&self, torrent_id: i64) -> Result<Vec<Vec<String>>, database::Error> { + query_as::<_, DbTorrentAnnounceUrl>("SELECT tracker_url FROM torrust_torrent_announce_urls WHERE torrent_id = ?") .bind(torrent_id) .fetch_all(&self.pool) .await .map(|v| v.iter().map(|a| vec![a.tracker_url.to_string()]).collect()) - .map_err(|_| DatabaseError::TorrentNotFound) + .map_err(|_| database::Error::TorrentNotFound) } - async fn get_torrent_listing_from_id(&self, torrent_id: i64) -> Result<TorrentListing, DatabaseError> { + async fn get_torrent_listing_from_id(&self, torrent_id: i64) -> Result<TorrentListing, database::Error> { query_as::<_, TorrentListing>( - "SELECT tt.torrent_id, tp.username AS uploader, tt.info_hash, ti.title, ti.description, tt.category_id, tt.date_uploaded, tt.size AS file_size, + "SELECT + tt.torrent_id, + tp.username AS uploader, + tt.info_hash, ti.title, + ti.description, + tt.category_id, + tt.date_uploaded, + tt.size AS file_size, + tt.name, + tt.comment, CAST(COALESCE(sum(ts.seeders),0) as signed) as seeders, CAST(COALESCE(sum(ts.leechers),0) as signed) as leechers FROM torrust_torrents tt @@ -566,22 +750,49 @@ impl Database for SqliteDatabase { INNER JOIN torrust_torrent_info ti ON tt.torrent_id = ti.torrent_id LEFT JOIN torrust_torrent_tracker_stats ts ON tt.torrent_id = ts.torrent_id WHERE tt.torrent_id = ? 
- GROUP BY ts.torrent_id" + GROUP BY ts.torrent_id", ) - .bind(torrent_id) - .fetch_one(&self.pool) - .await - .map_err(|_| DatabaseError::TorrentNotFound) + .bind(torrent_id) + .fetch_one(&self.pool) + .await + .map_err(|_| database::Error::TorrentNotFound) } - async fn get_all_torrents_compact(&self) -> Result<Vec<TorrentCompact>, DatabaseError> { + async fn get_torrent_listing_from_info_hash(&self, info_hash: &InfoHash) -> Result<TorrentListing, database::Error> { + query_as::<_, TorrentListing>( + "SELECT + tt.torrent_id, + tp.username AS uploader, + tt.info_hash, ti.title, + ti.description, + tt.category_id, + tt.date_uploaded, + tt.size AS file_size, + tt.name, + tt.comment, + CAST(COALESCE(sum(ts.seeders),0) as signed) as seeders, + CAST(COALESCE(sum(ts.leechers),0) as signed) as leechers + FROM torrust_torrents tt + INNER JOIN torrust_user_profiles tp ON tt.uploader_id = tp.user_id + INNER JOIN torrust_torrent_info ti ON tt.torrent_id = ti.torrent_id + LEFT JOIN torrust_torrent_tracker_stats ts ON tt.torrent_id = ts.torrent_id + WHERE tt.info_hash = ? + GROUP BY ts.torrent_id", + ) + .bind(info_hash.to_string().to_lowercase()) + .fetch_one(&self.pool) + .await + .map_err(|_| database::Error::TorrentNotFound) + } + + async fn get_all_torrents_compact(&self) -> Result<Vec<TorrentCompact>, database::Error> { query_as::<_, TorrentCompact>("SELECT torrent_id, info_hash FROM torrust_torrents") .fetch_all(&self.pool) .await - .map_err(|_| DatabaseError::Error) + .map_err(|_| database::Error::Error) } - async fn update_torrent_title(&self, torrent_id: i64, title: &str) -> Result<(), DatabaseError> { + async fn update_torrent_title(&self, torrent_id: i64, title: &str) -> Result<(), database::Error> { query("UPDATE torrust_torrent_info SET title = $1 WHERE torrent_id = $2") .bind(title) .bind(torrent_id) @@ -589,110 +800,202 @@ impl Database for SqliteDatabase { .await .map_err(|e| match e { sqlx::Error::Database(err) => { - if err.message().contains("UNIQUE") { - DatabaseError::TorrentTitleAlreadyExists + log::error!("DB error: {:?}", err); + if err.message().contains("UNIQUE") && err.message().contains("title") { + database::Error::TorrentTitleAlreadyExists } else { - DatabaseError::Error + database::Error::Error } } - _ => DatabaseError::Error + _ => database::Error::Error, }) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::TorrentNotFound) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::TorrentNotFound) + } }) } - async fn update_torrent_description(&self, torrent_id: i64, description: &str) -> Result<(), DatabaseError> { + async fn update_torrent_description(&self, torrent_id: i64, description: &str) -> Result<(), database::Error> { query("UPDATE torrust_torrent_info SET description = $1 WHERE torrent_id = $2") .bind(description) .bind(torrent_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::TorrentNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::TorrentNotFound) + } }) } - async fn update_tracker_info(&self, torrent_id: i64, tracker_url: &str, seeders: i64, leechers: i64) -> Result<(), DatabaseError> { - query("REPLACE INTO torrust_torrent_tracker_stats (torrent_id, tracker_url, seeders, leechers) VALUES ($1, $2, $3, $4)") + async fn update_torrent_category(&self, torrent_id: i64, category_id: CategoryId) -> Result<(), database::Error> { + query("UPDATE torrust_torrents SET category_id = $1 
WHERE torrent_id = $2") + .bind(category_id) .bind(torrent_id) - .bind(tracker_url) - .bind(seeders) - .bind(leechers) .execute(&self.pool) .await - .map(|_| ()) - .map_err(|_| DatabaseError::TorrentNotFound) + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::TorrentNotFound) + } + }) } - async fn delete_torrent(&self, torrent_id: i64) -> Result<(), DatabaseError> { - query("DELETE FROM torrust_torrents WHERE torrent_id = ?") - .bind(torrent_id) + async fn insert_tag_and_get_id(&self, tag_name: &str) -> Result<TagId, database::Error> { + query("INSERT INTO torrust_torrent_tags (name) VALUES (?)") + .bind(tag_name) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error) - .and_then(|v| if v.rows_affected() > 0 { - Ok(()) - } else { - Err(DatabaseError::TorrentNotFound) + .map(|v| v.last_insert_rowid()) + .map_err(|e| match e { + sqlx::Error::Database(err) => { + log::error!("DB error: {:?}", err); + if err.message().contains("UNIQUE") && err.message().contains("name") { + database::Error::TagAlreadyExists + } else { + database::Error::Error + } + } + _ => database::Error::Error, }) } - async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { - query("DELETE FROM torrust_categories;") + async fn delete_tag(&self, tag_id: TagId) -> Result<(), database::Error> { + query("DELETE FROM torrust_torrent_tags WHERE tag_id = ?") + .bind(tag_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map(|_| ()) + .map_err(|err| database::Error::ErrorWithText(err.to_string())) + } - query("DELETE FROM torrust_torrents;") + async fn add_torrent_tag_link(&self, torrent_id: i64, tag_id: TagId) -> Result<(), database::Error> { + query("INSERT INTO torrust_torrent_tag_links (torrent_id, tag_id) VALUES (?, ?)") + .bind(torrent_id) + .bind(tag_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map(|_| ()) + .map_err(|_| database::Error::Error) + } - query("DELETE FROM torrust_tracker_keys;") - .execute(&self.pool) + async fn add_torrent_tag_links(&self, torrent_id: i64, tag_ids: &[TagId]) -> Result<(), database::Error> { + let mut tx = self + .pool + .begin() .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|err| database::Error::ErrorWithText(err.to_string()))?; - query("DELETE FROM torrust_users;") - .execute(&self.pool) + for tag_id in tag_ids { + query("INSERT INTO torrust_torrent_tag_links (torrent_id, tag_id) VALUES (?, ?)") + .bind(torrent_id) + .bind(tag_id) + .execute(&mut *tx) + .await + .map_err(|err| database::Error::ErrorWithText(err.to_string()))?; + } + + tx.commit() .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|err| database::Error::ErrorWithText(err.to_string())) + } - query("DELETE FROM torrust_user_authentication;") + async fn delete_torrent_tag_link(&self, torrent_id: i64, tag_id: TagId) -> Result<(), database::Error> { + query("DELETE FROM torrust_torrent_tag_links WHERE torrent_id = ? 
AND tag_id = ?") + .bind(torrent_id) + .bind(tag_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map(|_| ()) + .map_err(|_| database::Error::Error) + } - query("DELETE FROM torrust_user_bans;") + async fn delete_all_torrent_tag_links(&self, torrent_id: i64) -> Result<(), database::Error> { + query("DELETE FROM torrust_torrent_tag_links WHERE torrent_id = ?") + .bind(torrent_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map(|_| ()) + .map_err(|err| database::Error::ErrorWithText(err.to_string())) + } - query("DELETE FROM torrust_user_invitations;") - .execute(&self.pool) + async fn get_tag_from_name(&self, name: &str) -> Result<TorrentTag, database::Error> { + query_as::<_, TorrentTag>("SELECT tag_id, name FROM torrust_torrent_tags WHERE name = ?") + .bind(name) + .fetch_one(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::TagNotFound) + } - query("DELETE FROM torrust_user_profiles;") - .execute(&self.pool) + async fn get_tags(&self) -> Result<Vec<TorrentTag>, database::Error> { + query_as::<_, TorrentTag>("SELECT tag_id, name FROM torrust_torrent_tags") + .fetch_all(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error) + } - query("DELETE FROM torrust_torrents;") + async fn get_tags_for_torrent_id(&self, torrent_id: i64) -> Result<Vec<TorrentTag>, database::Error> { + query_as::<_, TorrentTag>( + "SELECT torrust_torrent_tags.tag_id, torrust_torrent_tags.name + FROM torrust_torrent_tags + JOIN torrust_torrent_tag_links ON torrust_torrent_tags.tag_id = torrust_torrent_tag_links.tag_id + WHERE torrust_torrent_tag_links.torrent_id = ?", + ) + .bind(torrent_id) + .fetch_all(&self.pool) + .await + .map_err(|_| database::Error::Error) + } + + async fn update_tracker_info( + &self, + torrent_id: i64, + tracker_url: &str, + seeders: i64, + leechers: i64, + ) -> Result<(), database::Error> { + query("REPLACE INTO torrust_torrent_tracker_stats (torrent_id, tracker_url, seeders, leechers) VALUES ($1, $2, $3, $4)") + .bind(torrent_id) + .bind(tracker_url) + .bind(seeders) + .bind(leechers) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map(|_| ()) + .map_err(|_| database::Error::TorrentNotFound) + } - query("DELETE FROM torrust_user_public_keys;") + async fn delete_torrent(&self, torrent_id: i64) -> Result<(), database::Error> { + query("DELETE FROM torrust_torrents WHERE torrent_id = ?") + .bind(torrent_id) .execute(&self.pool) .await - .map_err(|_| DatabaseError::Error)?; + .map_err(|_| database::Error::Error) + .and_then(|v| { + if v.rows_affected() > 0 { + Ok(()) + } else { + Err(database::Error::TorrentNotFound) + } + }) + } + + async fn delete_all_database_rows(&self) -> Result<(), database::Error> { + for table in TABLES_TO_TRUNCATE { + query(&format!("DELETE FROM {table};")) + .execute(&self.pool) + .await + .map_err(|_| database::Error::Error)?; + } Ok(()) } diff --git a/src/errors.rs b/src/errors.rs index eef4f851..c92a0361 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -1,22 +1,22 @@ use std::borrow::Cow; -use derive_more::{Display, Error}; -use actix_web::{ResponseError, HttpResponse, HttpResponseBuilder}; -use actix_web::http::{header, StatusCode}; -use serde::{Deserialize, Serialize}; use std::error; -use crate::databases::database::DatabaseError; + +use derive_more::{Display, Error}; +use hyper::StatusCode; + +use crate::databases::database; +use crate::models::torrent::MetadataError; +use crate::utils::parse_torrent::DecodeTorrentFileError; pub type ServiceResult<V> = Result<V, ServiceError>;
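`ServiceResult<V>` is a shorthand alias for fallible service-layer functions. A hypothetical use (the alias itself comes from src/errors.rs above):

```rust
// Illustrative only; handlers and services return ServiceResult<V>
// instead of spelling out Result<V, ServiceError> each time.
pub type ServiceResult<V> = Result<V, ServiceError>;

fn parse_page_size(raw: &str) -> ServiceResult<u8> {
    raw.parse().map_err(|_| ServiceError::InternalServerError)
}
```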
-#[derive(Debug, Display, PartialEq, Error)] +#[derive(Debug, Display, PartialEq, Eq, Error)] #[allow(dead_code)] pub enum ServiceError { #[display(fmt = "internal server error")] InternalServerError, - #[display( - fmt = "This server is is closed for registration. Contact admin if this is unexpected" - )] + #[display(fmt = "This server is closed for registration. Contact admin if this is unexpected")] ClosedForRegistration, #[display(fmt = "Email is required")] //405j @@ -96,161 +96,206 @@ pub enum ServiceError { #[display(fmt = "Only .torrent files can be uploaded.")] InvalidFileType, - #[display(fmt = "Bad request.")] - BadRequest, + #[display(fmt = "Torrent title is too short.")] + InvalidTorrentTitleLength, + + #[display(fmt = "Some mandatory metadata fields are missing.")] + MissingMandatoryMetadataFields, #[display(fmt = "Selected category does not exist.")] InvalidCategory, + #[display(fmt = "Selected tag does not exist.")] + InvalidTag, + #[display(fmt = "Unauthorized action.")] Unauthorized, #[display(fmt = "This torrent already exists in our database.")] InfoHashAlreadyExists, + #[display(fmt = "A torrent with the same canonical infohash already exists in our database.")] + CanonicalInfoHashAlreadyExists, + #[display(fmt = "This torrent title has already been used.")] TorrentTitleAlreadyExists, #[display(fmt = "Sorry, we have an error with our tracker connection.")] TrackerOffline, + #[display(fmt = "Could not whitelist torrent.")] + WhitelistingError, + #[display(fmt = "Failed to send verification email.")] FailedToSendVerificationEmail, - #[display(fmt = "Category already exists..")] - CategoryExists, -} - -#[derive(Serialize, Deserialize)] -pub struct ErrorToResponse { - pub error: String, -} - -impl ResponseError for ServiceError { - fn status_code(&self) -> StatusCode { - match self { - ServiceError::ClosedForRegistration => StatusCode::FORBIDDEN, - ServiceError::EmailInvalid => StatusCode::BAD_REQUEST, - ServiceError::NotAUrl => StatusCode::BAD_REQUEST, - ServiceError::WrongPasswordOrUsername => StatusCode::FORBIDDEN, - ServiceError::UsernameNotFound => StatusCode::NOT_FOUND, - ServiceError::UserNotFound => StatusCode::NOT_FOUND, - ServiceError::AccountNotFound => StatusCode::NOT_FOUND, - - ServiceError::ProfanityError => StatusCode::BAD_REQUEST, - ServiceError::BlacklistError => StatusCode::BAD_REQUEST, - ServiceError::UsernameCaseMappedError => StatusCode::BAD_REQUEST, - - ServiceError::PasswordTooShort => StatusCode::BAD_REQUEST, - ServiceError::PasswordTooLong => StatusCode::BAD_REQUEST, - ServiceError::PasswordsDontMatch => StatusCode::BAD_REQUEST, - - ServiceError::UsernameTaken => StatusCode::BAD_REQUEST, - ServiceError::UsernameInvalid => StatusCode::BAD_REQUEST, - ServiceError::EmailTaken => StatusCode::BAD_REQUEST, - ServiceError::EmailNotVerified => StatusCode::FORBIDDEN, - - ServiceError::TokenNotFound => StatusCode::UNAUTHORIZED, - ServiceError::TokenExpired => StatusCode::UNAUTHORIZED, - ServiceError::TokenInvalid => StatusCode::UNAUTHORIZED, - - ServiceError::TorrentNotFound => StatusCode::BAD_REQUEST, - - ServiceError::InvalidTorrentFile => StatusCode::BAD_REQUEST, - ServiceError::InvalidTorrentPiecesLength => StatusCode::BAD_REQUEST, - ServiceError::InvalidFileType => StatusCode::BAD_REQUEST, - - ServiceError::BadRequest => StatusCode::BAD_REQUEST, - - ServiceError::InvalidCategory => StatusCode::BAD_REQUEST, - - ServiceError::Unauthorized => StatusCode::FORBIDDEN, + #[display(fmt = "Category already exists.")] + CategoryAlreadyExists, - 
ServiceError::InfoHashAlreadyExists => StatusCode::BAD_REQUEST, + #[display(fmt = "Category name cannot be empty.")] + CategoryNameEmpty, - ServiceError::TorrentTitleAlreadyExists => StatusCode::BAD_REQUEST, + #[display(fmt = "Tag already exists.")] + TagAlreadyExists, - ServiceError::TrackerOffline => StatusCode::INTERNAL_SERVER_ERROR, + #[display(fmt = "Tag name cannot be empty.")] + TagNameEmpty, - ServiceError::CategoryExists => StatusCode::BAD_REQUEST, + #[display(fmt = "Category not found.")] + CategoryNotFound, - _ => StatusCode::INTERNAL_SERVER_ERROR - } - } + #[display(fmt = "Tag not found.")] + TagNotFound, - fn error_response(&self) -> HttpResponse { - HttpResponseBuilder::new(self.status_code()) - .append_header((header::CONTENT_TYPE, "application/json; charset=UTF-8")) - .body( - serde_json::to_string(&ErrorToResponse { - error: self.to_string(), - }) - .unwrap(), - ) - .into() - } + #[display(fmt = "Database error.")] + DatabaseError, } impl From<sqlx::Error> for ServiceError { fn from(e: sqlx::Error) -> Self { - eprintln!("{:?}", e); + eprintln!("{e:?}"); if let Some(err) = e.as_database_error() { return if err.code() == Some(Cow::from("2067")) { if err.message().contains("torrust_torrents.info_hash") { + println!("info_hash already exists {}", err.message()); ServiceError::InfoHashAlreadyExists } else { ServiceError::InternalServerError } } else { ServiceError::TorrentNotFound - } + }; } ServiceError::InternalServerError } } -impl From<DatabaseError> for ServiceError { - fn from(e: DatabaseError) -> Self { - match e { - DatabaseError::Error => ServiceError::InternalServerError, - DatabaseError::UsernameTaken => ServiceError::UsernameTaken, - DatabaseError::EmailTaken => ServiceError::EmailTaken, - DatabaseError::UserNotFound => ServiceError::UserNotFound, - DatabaseError::CategoryAlreadyExists => ServiceError::CategoryExists, - DatabaseError::CategoryNotFound => ServiceError::InvalidCategory, - DatabaseError::TorrentNotFound => ServiceError::TorrentNotFound, - DatabaseError::TorrentAlreadyExists => ServiceError::InfoHashAlreadyExists, - DatabaseError::TorrentTitleAlreadyExists => ServiceError::TorrentTitleAlreadyExists - } +impl From<database::Error> for ServiceError { + fn from(e: database::Error) -> Self { + map_database_error_to_service_error(&e) } } impl From<argon2::password_hash::Error> for ServiceError { fn from(e: argon2::password_hash::Error) -> Self { - eprintln!("{}", e); + eprintln!("{e}"); ServiceError::InternalServerError } } impl From<std::io::Error> for ServiceError { fn from(e: std::io::Error) -> Self { - eprintln!("{}", e); + eprintln!("{e}"); ServiceError::InternalServerError } } impl From<Box<dyn error::Error>> for ServiceError { fn from(e: Box<dyn error::Error>) -> Self { - eprintln!("{}", e); + eprintln!("{e}"); ServiceError::InternalServerError } } impl From<serde_json::Error> for ServiceError { fn from(e: serde_json::Error) -> Self { - eprintln!("{}", e); + eprintln!("{e}"); ServiceError::InternalServerError } } + +impl From<MetadataError> for ServiceError { + fn from(e: MetadataError) -> Self { + eprintln!("{e}"); + match e { + MetadataError::MissingTorrentTitle => ServiceError::MissingMandatoryMetadataFields, + MetadataError::InvalidTorrentTitleLength => ServiceError::InvalidTorrentTitleLength, + } + } +} + +impl From<DecodeTorrentFileError> for ServiceError { + fn from(e: DecodeTorrentFileError) -> Self { + eprintln!("{e}"); + match e { + DecodeTorrentFileError::InvalidTorrentPiecesLength => ServiceError::InvalidTorrentPiecesLength, + DecodeTorrentFileError::CannotBencodeInfoDict + | DecodeTorrentFileError::InvalidInfoDictionary + | DecodeTorrentFileError::InvalidBencodeData => ServiceError::InvalidTorrentFile, + } + } +} + 
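The `From` impls above are what let service code propagate lower-level failures with `?` and deal only in `ServiceError`. A hypothetical service function (the wrapper is illustrative; `update_torrent_title` is the trait method from this patch):

```rust
// Illustrative only; `?` applies `From<database::Error> for ServiceError`.
async fn rename_torrent(db: &dyn Database, torrent_id: i64, title: &str) -> Result<(), ServiceError> {
    db.update_torrent_title(torrent_id, title).await?;
    Ok(())
}
```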
+#[must_use] +pub fn http_status_code_for_service_error(error: &ServiceError) -> StatusCode { + #[allow(clippy::match_same_arms)] + match error { + ServiceError::ClosedForRegistration => StatusCode::FORBIDDEN, + ServiceError::EmailInvalid => StatusCode::BAD_REQUEST, + ServiceError::NotAUrl => StatusCode::BAD_REQUEST, + ServiceError::WrongPasswordOrUsername => StatusCode::FORBIDDEN, + ServiceError::UsernameNotFound => StatusCode::NOT_FOUND, + ServiceError::UserNotFound => StatusCode::NOT_FOUND, + ServiceError::AccountNotFound => StatusCode::NOT_FOUND, + ServiceError::ProfanityError => StatusCode::BAD_REQUEST, + ServiceError::BlacklistError => StatusCode::BAD_REQUEST, + ServiceError::UsernameCaseMappedError => StatusCode::BAD_REQUEST, + ServiceError::PasswordTooShort => StatusCode::BAD_REQUEST, + ServiceError::PasswordTooLong => StatusCode::BAD_REQUEST, + ServiceError::PasswordsDontMatch => StatusCode::BAD_REQUEST, + ServiceError::UsernameTaken => StatusCode::BAD_REQUEST, + ServiceError::UsernameInvalid => StatusCode::BAD_REQUEST, + ServiceError::EmailTaken => StatusCode::BAD_REQUEST, + ServiceError::EmailNotVerified => StatusCode::FORBIDDEN, + ServiceError::TokenNotFound => StatusCode::UNAUTHORIZED, + ServiceError::TokenExpired => StatusCode::UNAUTHORIZED, + ServiceError::TokenInvalid => StatusCode::UNAUTHORIZED, + ServiceError::TorrentNotFound => StatusCode::NOT_FOUND, + ServiceError::InvalidTorrentFile => StatusCode::BAD_REQUEST, + ServiceError::InvalidTorrentPiecesLength => StatusCode::BAD_REQUEST, + ServiceError::InvalidFileType => StatusCode::BAD_REQUEST, + ServiceError::InvalidTorrentTitleLength => StatusCode::BAD_REQUEST, + ServiceError::MissingMandatoryMetadataFields => StatusCode::BAD_REQUEST, + ServiceError::InvalidCategory => StatusCode::BAD_REQUEST, + ServiceError::InvalidTag => StatusCode::BAD_REQUEST, + ServiceError::Unauthorized => StatusCode::FORBIDDEN, + ServiceError::InfoHashAlreadyExists => StatusCode::BAD_REQUEST, + ServiceError::CanonicalInfoHashAlreadyExists => StatusCode::BAD_REQUEST, + ServiceError::TorrentTitleAlreadyExists => StatusCode::BAD_REQUEST, + ServiceError::TrackerOffline => StatusCode::INTERNAL_SERVER_ERROR, + ServiceError::CategoryNameEmpty => StatusCode::BAD_REQUEST, + ServiceError::CategoryAlreadyExists => StatusCode::BAD_REQUEST, + ServiceError::TagNameEmpty => StatusCode::BAD_REQUEST, + ServiceError::TagAlreadyExists => StatusCode::BAD_REQUEST, + ServiceError::InternalServerError => StatusCode::INTERNAL_SERVER_ERROR, + ServiceError::EmailMissing => StatusCode::NOT_FOUND, + ServiceError::FailedToSendVerificationEmail => StatusCode::INTERNAL_SERVER_ERROR, + ServiceError::WhitelistingError => StatusCode::INTERNAL_SERVER_ERROR, + ServiceError::DatabaseError => StatusCode::INTERNAL_SERVER_ERROR, + ServiceError::CategoryNotFound => StatusCode::NOT_FOUND, + ServiceError::TagNotFound => StatusCode::NOT_FOUND, + } +} + +#[must_use] +pub fn map_database_error_to_service_error(error: &database::Error) -> ServiceError { + #[allow(clippy::match_same_arms)] + match error { + database::Error::Error => ServiceError::InternalServerError, + database::Error::ErrorWithText(_) => ServiceError::InternalServerError, + database::Error::UsernameTaken => ServiceError::UsernameTaken, + database::Error::EmailTaken => ServiceError::EmailTaken, + database::Error::UserNotFound => ServiceError::UserNotFound, + database::Error::CategoryAlreadyExists => ServiceError::CategoryAlreadyExists, + database::Error::CategoryNotFound => ServiceError::InvalidCategory, + 
database::Error::TagAlreadyExists => ServiceError::TagAlreadyExists,
+        database::Error::TagNotFound => ServiceError::InvalidTag,
+        database::Error::TorrentNotFound => ServiceError::TorrentNotFound,
+        database::Error::TorrentAlreadyExists => ServiceError::InfoHashAlreadyExists,
+        database::Error::TorrentTitleAlreadyExists => ServiceError::TorrentTitleAlreadyExists,
+        database::Error::UnrecognizedDatabaseDriver => ServiceError::InternalServerError,
+        database::Error::TorrentInfoHashNotFound => ServiceError::TorrentNotFound,
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index a4f34c34..397dc04f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,34 +1,298 @@
-pub mod routes;
-pub mod models;
-pub mod utils;
+//! Documentation for [Torrust Tracker Index](https://github.com/torrust/torrust-index) API.
+//!
+//! This is the index API for [Torrust Tracker Index](https://github.com/torrust/torrust-index).
+//!
+//! It is written in Rust and uses the [Axum](https://github.com/tokio-rs/axum) framework. It is designed to be
+//! used by the [Torrust Tracker Index Gui](https://github.com/torrust/torrust-index-gui).
+//!
+//! If you are looking for information on how to use the API, please see the
+//! [API v1](crate::web::api::v1) section of the documentation.
+//!
+//! # Table of contents
+//!
+//! - [Features](#features)
+//! - [Services](#services)
+//! - [Installation](#installation)
+//!   - [Minimum requirements](#minimum-requirements)
+//!   - [Prerequisites](#prerequisites)
+//!   - [Install from sources](#install-from-sources)
+//!   - [Run with docker](#run-with-docker)
+//!   - [Development](#development)
+//! - [Configuration](#configuration)
+//! - [Usage](#usage)
+//!   - [API](#api)
+//!   - [Tracker Statistics Importer](#tracker-statistics-importer)
+//!   - [Upgrader](#upgrader)
+//! - [Contributing](#contributing)
+//! - [Documentation](#documentation)
+//!
+//! # Features
+//!
+//! - Torrent categories
+//! - Image proxy cache for torrent descriptions
+//! - User registration and authentication
+//! - DB support for `SQLite` and `MySQL`
+//!
+//! # Services
+//!
+//! From the end-user perspective, the Torrust Index exposes a single service:
+//!
+//! - A REST [API](crate::web::api::v1)
+//!
+//! From the administrator perspective, the Torrust Index exposes:
+//!
+//! - A console command to update torrents statistics from the associated tracker
+//! - A console command to upgrade the database schema from version `1.0.0` to `2.0.0`
+//!
+//! # Installation
+//!
+//! ## Minimum requirements
+//!
+//! - Rust Stable `1.68`
+//!
+//! ## Prerequisites
+//!
+//! In order to run the index you will need a running torrust tracker. In the
+//! configuration you need to fill the `tracker` section with the following:
+//!
+//! ```toml
+//! [tracker]
+//! url = "udp://localhost:6969"
+//! mode = "Public"
+//! api_url = "http://localhost:1212"
+//! token = "MyAccessToken"
+//! token_valid_seconds = 7257600
+//! ```
+//!
+//! Refer to the [`config::tracker`](crate::config::Tracker) documentation for more information.
+//!
+//! You can follow the tracker installation instructions [here](https://docs.rs/torrust-tracker)
+//! or you can use docker to run both the tracker and the index. Refer to the
+//! [Run with docker](#run-with-docker) section for more information.
+//!
+//! If you are using `SQLite3` as database driver, you will need to install the
+//! following dependency:
+//!
+//! ```text
+//! sudo apt-get install libsqlite3-dev
+//! ```
+//!
+//! > **NOTICE**: those are the commands for `Ubuntu`.
If you are using a +//! different OS, you will need to install the equivalent packages. Please +//! refer to the documentation of your OS. +//! +//! With the default configuration you will need to create the `storage` directory: +//! +//! ```text +//! storage/ +//! └── database +//!    └── data.db +//! ``` +//! +//! The default configuration expects a directory `./storage/database` to be writable by the app process. +//! +//! By default the index uses `SQLite` and the database file name `data.db`. +//! +//! ## Install from sources +//! +//! ```text +//! git clone git@github.com:torrust/torrust-index.git \ +//! && cd torrust-index \ +//! && cargo build --release \ +//! && mkdir -p ./storage/database +//! ``` +//! +//! Then you can run it with: `./target/release/main` +//! +//! ## Run with docker +//! +//! You can run the index with a pre-built docker image: +//! +//! ```text +//! mkdir -p ./storage/database \ +//! && export TORRUST_IDX_BACK_USER_UID=1000 \ +//! && docker run -it \ +//! --user="$TORRUST_IDX_BACK_USER_UID" \ +//! --publish 3001:3001/tcp \ +//! --volume "$(pwd)/storage":"/app/storage" \ +//! torrust/index +//! ``` +//! +//! For more information about using docker visit the [tracker docker documentation](https://github.com/torrust/torrust-index/tree/develop/docker). +//! +//! ## Development +//! +//! We are using the [The Rust SQL Toolkit](https://github.com/launchbadge/sqlx) +//! [(sqlx)](https://github.com/launchbadge/sqlx) for database migrations. +//! +//! You can install it with: +//! +//! ```text +//! cargo install sqlx-cli +//! ``` +//! +//! To initialize the database run: +//! +//! ```text +//! echo "DATABASE_URL=sqlite://data.db?mode=rwc" > .env +//! sqlx db setup +//! ``` +//! +//! The `sqlx db setup` command will create the database specified in your +//! `DATABASE_URL` and run any pending migrations. +//! +//! > **WARNING**: The `.env` file is also used by docker-compose. +//! +//! > **NOTICE**: Refer to the [sqlx-cli](https://github.com/launchbadge/sqlx/tree/main/sqlx-cli) +//! documentation for other commands to create new migrations or run them. +//! +//! > **NOTICE**: You can run the index with [tmux](https://github.com/tmux/tmux/wiki) with `tmux new -s torrust-index`. +//! +//! # Configuration +//! In order to run the index you need to provide the configuration. If you run the index without providing the configuration, +//! the tracker will generate the default configuration the first time you run it. It will generate a `config.toml` file with +//! in the root directory. +//! +//! The default configuration is: +//! +//! ```toml +//! [website] +//! name = "Torrust" +//! +//! [tracker] +//! url = "udp://localhost:6969" +//! mode = "Public" +//! api_url = "http://localhost:1212" +//! token = "MyAccessToken" +//! token_valid_seconds = 7257600 +//! +//! [net] +//! port = 3001 +//! +//! [auth] +//! email_on_signup = "Optional" +//! min_password_length = 6 +//! max_password_length = 64 +//! secret_key = "MaxVerstappenWC2021" +//! +//! [database] +//! connect_url = "sqlite://data.db?mode=rwc" +//! +//! [mail] +//! email_verification_enabled = false +//! from = "example@email.com" +//! reply_to = "noreply@email.com" +//! username = "" +//! password = "" +//! server = "" +//! port = 25 +//! +//! [image_cache] +//! max_request_timeout_ms = 1000 +//! capacity = 128000000 +//! entry_size_limit = 4000000 +//! user_quota_period_seconds = 3600 +//! user_quota_bytes = 64000000 +//! +//! [api] +//! default_torrent_page_size = 10 +//! max_torrent_page_size = 30 +//! +//! 
[tracker_statistics_importer]
+//! torrent_info_update_interval = 3600
+//! ```
+//!
+//! For more information about configuration you can visit the documentation for the [`config`] module.
+//!
+//! As an alternative to the `config.toml` file, you can use the environment variable `TORRUST_IDX_BACK_CONFIG` to pass the configuration to the index:
+//!
+//! ```text
+//! TORRUST_IDX_BACK_CONFIG=$(cat config.toml)
+//! cargo run
+//! ```
+//!
+//! In the previous example you are just setting the env var with the contents of the `config.toml` file.
+//!
+//! The env var contains the same data as the `config.toml`. It's particularly useful if you are [running the index with docker](https://github.com/torrust/torrust-index/tree/develop/docker).
+//!
+//! > **NOTICE**: The `TORRUST_IDX_BACK_CONFIG` env var has priority over the `config.toml` file.
+//!
+//! > **NOTICE**: You can also change the location for the configuration file with the `TORRUST_IDX_BACK_CONFIG_PATH` env var.
+//!
+//! # Usage
+//!
+//! ## API
+//!
+//! Running the index with the default configuration will expose the REST API on port 3001.
+//!
+//! ## Tracker Statistics Importer
+//!
+//! This console command allows you to manually import the tracker statistics.
+//!
+//! For more information about this command you can visit the documentation for
+//! the [`Import tracker statistics`](crate::console::commands::import_tracker_statistics) module.
+//!
+//! ## Upgrader
+//!
+//! This console command allows you to manually upgrade the application from one
+//! version to another.
+//!
+//! For more information about this command you can visit the documentation for
+//! the [`Upgrade app from version 1.0.0 to 2.0.0`](crate::upgrades::from_v1_0_0_to_v2_0_0::upgrader) module.
+//!
+//! # Contributing
+//!
+//! If you want to contribute to this documentation you can:
+//!
+//! - [Open a new discussion](https://github.com/torrust/torrust-index/discussions)
+//! - [Open a new issue](https://github.com/torrust/torrust-index/issues).
+//! - [Open a new pull request](https://github.com/torrust/torrust-index/pulls).
+//!
+//! # Documentation
+//!
+//! You can find this documentation on [docs.rs](https://docs.rs/torrust-index/).
+//!
+//! If you want to contribute to this documentation you can [open a new pull request](https://github.com/torrust/torrust-index/pulls).
+//!
+//! In addition to the production code documentation you can find a lot of
+//! examples in the [tests](https://github.com/torrust/torrust-index/tree/develop/tests/e2e/contexts) directory.
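+//!
+//! As a quick orientation, this is how a binary wires the index together. It
+//! mirrors the `src/main.rs` included in this patch; treat it as a sketch, not
+//! a drop-in file:
+//!
+//! ```rust,ignore
+//! use torrust_index::app;
+//! use torrust_index::bootstrap::config::initialize_configuration;
+//! use torrust_index::web::api::Version;
+//!
+//! #[tokio::main]
+//! async fn main() -> Result<(), std::io::Error> {
+//!     // Loads `config.toml` or the `TORRUST_IDX_BACK_CONFIG` env var.
+//!     let configuration = initialize_configuration();
+//!
+//!     // Starts all services and waits for the V1 API server to finish.
+//!     let app = app::run(configuration, &Version::V1).await;
+//!     app.api_server.unwrap().await.expect("the API server was dropped")
+//! }
+//! ```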
+pub mod app; +pub mod bootstrap; +pub mod cache; +pub mod common; pub mod config; +pub mod console; +pub mod databases; pub mod errors; -pub mod common; -pub mod auth; -pub mod tracker; pub mod mailer; -pub mod databases; +pub mod models; +pub mod services; +pub mod tracker; +pub mod ui; +pub mod upgrades; +pub mod utils; +pub mod web; trait AsCSV { fn as_csv(&self) -> Result>, ()> - where - T: std::str::FromStr; + where + T: std::str::FromStr; } impl AsCSV for Option - where - S: AsRef, +where + S: AsRef, { fn as_csv(&self) -> Result>, ()> - where - T: std::str::FromStr, + where + T: std::str::FromStr, { match self { Some(ref s) if !s.as_ref().trim().is_empty() => { let mut acc = vec![]; for s in s.as_ref().split(',') { let item = s.trim().parse::().map_err(|_| ())?; - acc.push(item) + acc.push(item); } if acc.is_empty() { Ok(None) diff --git a/src/mailer.rs b/src/mailer.rs index e71f213c..0c48acd6 100644 --- a/src/mailer.rs +++ b/src/mailer.rs @@ -1,17 +1,56 @@ -use crate::config::Configuration; +use std::collections::HashMap; use std::sync::Arc; -use crate::errors::ServiceError; -use serde::{Serialize, Deserialize}; -use lettre::{AsyncSmtpTransport, Tokio1Executor, Message, AsyncTransport}; -use lettre::transport::smtp::authentication::{Credentials, Mechanism}; + +use jsonwebtoken::{encode, EncodingKey, Header}; +use lazy_static::lazy_static; use lettre::message::{MessageBuilder, MultiPart, SinglePart}; -use jsonwebtoken::{encode, Header, EncodingKey}; -use sailfish::TemplateOnce; -use crate::utils::time::current_time; +use lettre::transport::smtp::authentication::{Credentials, Mechanism}; +use lettre::{AsyncSmtpTransport, AsyncTransport, Message, Tokio1Executor}; +use serde::{Deserialize, Serialize}; +use serde_json::value::{to_value, Value}; +use tera::{try_get_value, Context, Tera}; + +use crate::config::Configuration; +use crate::errors::ServiceError; +use crate::utils::clock; +use crate::web::api::v1::routes::API_VERSION_URL_PREFIX; + +lazy_static! { + pub static ref TEMPLATES: Tera = { + let mut tera = Tera::default(); + + match tera.add_template_file("templates/verify.html", Some("html_verify_email")) { + Ok(()) => {} + Err(e) => { + println!("Parsing error(s): {e}"); + ::std::process::exit(1); + } + }; -pub struct MailerService { + tera.autoescape_on(vec![".html", ".sql"]); + tera.register_filter("do_nothing", do_nothing_filter); + tera + }; +} + +/// This function is a dummy filter for tera. +/// +/// # Panics +/// +/// Panics if unable to convert values. +/// +/// # Errors +/// +/// This function will return an error if... 
+#[allow(clippy::implicit_hasher)] +pub fn do_nothing_filter(value: &Value, _: &HashMap) -> tera::Result { + let s = try_get_value!("do_nothing_filter", "value", String, value); + Ok(to_value(s).unwrap()) +} + +pub struct Service { cfg: Arc, - mailer: Arc + mailer: Arc, } #[derive(Debug, Serialize, Deserialize)] @@ -21,85 +60,60 @@ pub struct VerifyClaims { pub exp: u64, } -#[derive(TemplateOnce)] -#[template(path = "../templates/verify.html")] -struct VerifyTemplate { - username: String, - verification_url: String, -} - - -impl MailerService { - pub async fn new(cfg: Arc) -> MailerService { +impl Service { + pub async fn new(cfg: Arc) -> Service { let mailer = Arc::new(Self::get_mailer(&cfg).await); - Self { - cfg, - mailer, - } + Self { cfg, mailer } } async fn get_mailer(cfg: &Configuration) -> Mailer { let settings = cfg.settings.read().await; - let creds = Credentials::new(settings.mail.username.to_owned(), settings.mail.password.to_owned()); - - AsyncSmtpTransport::::builder_dangerous(&settings.mail.server) - .port(settings.mail.port) - .credentials(creds) - .authentication(vec![ - Mechanism::Login, - Mechanism::Xoauth2, - Mechanism::Plain, - ]) - .build() + if !settings.mail.username.is_empty() && !settings.mail.password.is_empty() { + // SMTP authentication + let creds = Credentials::new(settings.mail.username.clone(), settings.mail.password.clone()); + + AsyncSmtpTransport::::builder_dangerous(&settings.mail.server) + .port(settings.mail.port) + .credentials(creds) + .authentication(vec![Mechanism::Login, Mechanism::Xoauth2, Mechanism::Plain]) + .build() + } else { + // SMTP without authentication + AsyncSmtpTransport::::builder_dangerous(&settings.mail.server) + .port(settings.mail.port) + .build() + } } - pub async fn send_verification_mail(&self, to: &str, username: &str, user_id: i64, base_url: &str) -> Result<(), ServiceError> { + /// Send Verification Email. + /// + /// # Errors + /// + /// This function will return an error if unable to send an email. + /// + /// # Panics + /// + /// This function will panic if the multipart builder had an error. + pub async fn send_verification_mail( + &self, + to: &str, + username: &str, + user_id: i64, + base_url: &str, + ) -> Result<(), ServiceError> { let builder = self.get_builder(to).await; let verification_url = self.get_verification_url(user_id, base_url).await; - let mail_body = format!( - r#" - Welcome to Torrust, {}! - - Please click the confirmation link below to verify your account. - {} - - If this account wasn't made by you, you can ignore this email. 
- "#, - username, - verification_url - ); - - let ctx = VerifyTemplate { - username: String::from(username), - verification_url, - }; - - let mail = builder - .subject("Torrust - Email verification") - .multipart( - MultiPart::alternative() - .singlepart( - SinglePart::builder() - .header(lettre::message::header::ContentType::TEXT_PLAIN) - .body(mail_body) - ) - .singlepart( - SinglePart::builder() - .header(lettre::message::header::ContentType::TEXT_HTML) - .body(ctx.render_once().unwrap()) - ) - ) - .unwrap(); + let mail = build_letter(verification_url.as_str(), username, builder)?; match self.mailer.send(mail).await { Ok(_res) => Ok(()), Err(e) => { - eprintln!("Failed to send email: {}", e); + eprintln!("Failed to send email: {e}"); Err(ServiceError::FailedToSendVerificationEmail) - }, + } } } @@ -122,23 +136,84 @@ impl MailerService { let claims = VerifyClaims { iss: String::from("email-verification"), sub: user_id, - exp: current_time() + 315_569_260 // 10 years from now + exp: clock::now() + 315_569_260, // 10 years from now }; - let token = encode( - &Header::default(), - &claims, - &EncodingKey::from_secret(key), - ) - .unwrap(); + let token = encode(&Header::default(), &claims, &EncodingKey::from_secret(key)).unwrap(); - let mut base_url = base_url.clone(); + let mut base_url = &base_url.to_string(); if let Some(cfg_base_url) = &settings.net.base_url { base_url = cfg_base_url; } - format!("{}/user/email/verify/{}", base_url, token) + format!("{base_url}/{API_VERSION_URL_PREFIX}/user/email/verify/{token}") } } +fn build_letter(verification_url: &str, username: &str, builder: MessageBuilder) -> Result { + let (plain_body, html_body) = build_content(verification_url, username).map_err(|e| { + log::error!("{e}"); + ServiceError::InternalServerError + })?; + + Ok(builder + .subject("Torrust - Email verification") + .multipart( + MultiPart::alternative() + .singlepart( + SinglePart::builder() + .header(lettre::message::header::ContentType::TEXT_PLAIN) + .body(plain_body), + ) + .singlepart( + SinglePart::builder() + .header(lettre::message::header::ContentType::TEXT_HTML) + .body(html_body), + ), + ) + .expect("the `multipart` builder had an error")) +} + +fn build_content(verification_url: &str, username: &str) -> Result<(String, String), tera::Error> { + let plain_body = format!( + r#" + Welcome to Torrust, {username}! + + Please click the confirmation link below to verify your account. + {verification_url} + + If this account wasn't made by you, you can ignore this email. 
+ "# + ); + let mut context = Context::new(); + context.insert("verification", &verification_url); + context.insert("username", &username); + let html_body = TEMPLATES.render("html_verify_email", &context)?; + Ok((plain_body, html_body)) +} + pub type Mailer = AsyncSmtpTransport; + +#[cfg(test)] +mod tests { + use lettre::Message; + + use super::{build_content, build_letter}; + + #[test] + fn it_should_build_a_letter() { + let builder = Message::builder() + .from("from@a.b.c".parse().unwrap()) + .reply_to("reply@a.b.c".parse().unwrap()) + .to("to@a.b.c".parse().unwrap()); + + let _letter = build_letter("https://a.b.c/", "user", builder).unwrap(); + } + + #[test] + fn it_should_build_content() { + let (plain_body, html_body) = build_content("https://a.b.c/", "user").unwrap(); + assert_ne!(plain_body, ""); + assert_ne!(html_body, ""); + } +} diff --git a/src/main.rs b/src/main.rs index 06304307..b09eedb6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,71 +1,16 @@ -use std::sync::Arc; -use actix_web::{App, HttpServer, middleware, web}; -use actix_cors::Cors; -use torrust_index_backend::{routes}; -use torrust_index_backend::config::{Configuration}; -use torrust_index_backend::common::AppData; -use torrust_index_backend::auth::AuthorizationService; -use torrust_index_backend::databases::database::connect_database; -use torrust_index_backend::tracker::TrackerService; -use torrust_index_backend::mailer::MailerService; +use torrust_index::app; +use torrust_index::bootstrap::config::initialize_configuration; +use torrust_index::web::api::Version; -#[actix_web::main] -async fn main() -> std::io::Result<()> { - let cfg = match Configuration::load_from_file().await { - Ok(config) => Arc::new(config), - Err(error) => { - panic!("{}", error) - } - }; +#[tokio::main] +async fn main() -> Result<(), std::io::Error> { + let configuration = initialize_configuration(); - let settings = cfg.settings.read().await; + let api_version = Version::V1; - let database = Arc::new(connect_database(&settings.database.db_driver, &settings.database.connect_url).await); - let auth = Arc::new(AuthorizationService::new(cfg.clone(), database.clone())); - let tracker_service = Arc::new(TrackerService::new(cfg.clone(), database.clone())); - let mailer_service = Arc::new(MailerService::new(cfg.clone()).await); - let app_data = Arc::new( - AppData::new( - cfg.clone(), - database.clone(), - auth.clone(), - tracker_service.clone(), - mailer_service.clone(), - ) - ); + let app = app::run(configuration, &api_version).await; - let interval = settings.database.torrent_info_update_interval; - let weak_tracker_service = Arc::downgrade(&tracker_service); - - // repeating task, update all seeders and leechers info - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval); - let mut interval = tokio::time::interval(interval); - interval.tick().await; // first tick is immediate... - loop { - interval.tick().await; - if let Some(tracker) = weak_tracker_service.upgrade() { - let _ = tracker.update_torrents().await; - } else { - break; - } - } - }); - - let port = settings.net.port; - - drop(settings); - - println!("Listening on 0.0.0.0:{}", port); - - HttpServer::new(move || { - App::new() - .wrap(Cors::permissive()) - .app_data(web::Data::new(app_data.clone())) - .wrap(middleware::Logger::default()) - .configure(routes::init_routes) - }) - .bind(("0.0.0.0", port))? 
- .run() - .await + match api_version { + Version::V1 => app.api_server.unwrap().await.expect("the API server was dropped"), + } } diff --git a/src/models/category.rs b/src/models/category.rs new file mode 100644 index 00000000..76b74f20 --- /dev/null +++ b/src/models/category.rs @@ -0,0 +1,2 @@ +#[allow(clippy::module_name_repetitions)] +pub type CategoryId = i64; diff --git a/src/models/info_hash.rs b/src/models/info_hash.rs new file mode 100644 index 00000000..0b111031 --- /dev/null +++ b/src/models/info_hash.rs @@ -0,0 +1,487 @@ +//! A `BitTorrent` `InfoHash`. It's a unique identifier for a `BitTorrent` torrent. +//! +//! "The 20-byte sha1 hash of the bencoded form of the info value +//! from the metainfo file." +//! +//! See [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! for the official specification. +//! +//! This modules provides a type that can be used to represent info-hashes. +//! +//! > **NOTICE**: It only supports Info Hash v1. +//! +//! Typically info-hashes are represented as hex strings, but internally they are +//! a 20-byte array. +//! +//! # Calculating the info-hash of a torrent file +//! +//! A sample torrent: +//! +//! - Torrent file: `mandelbrot_2048x2048_infohash_v1.png.torrent` +//! - File: `mandelbrot_2048x2048.png` +//! - Info Hash v1: `5452869be36f9f3350ccee6b4544e7e76caaadab` +//! - Sha1 hash of the info dictionary: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` +//! +//! A torrent file is a binary file encoded with [Bencode encoding](https://en.wikipedia.org/wiki/Bencode): +//! +//! ```text +//! 0000000: 6431 303a 6372 6561 7465 6420 6279 3138 d10:created by18 +//! 0000010: 3a71 4269 7474 6f72 7265 6e74 2076 342e :qBittorrent v4. +//! 0000020: 342e 3131 333a 6372 6561 7469 6f6e 2064 4.113:creation d +//! 0000030: 6174 6569 3136 3739 3637 3436 3238 6534 atei1679674628e4 +//! 0000040: 3a69 6e66 6f64 363a 6c65 6e67 7468 6931 :infod6:lengthi1 +//! 0000050: 3732 3230 3465 343a 6e61 6d65 3234 3a6d 72204e4:name24:m +//! 0000060: 616e 6465 6c62 726f 745f 3230 3438 7832 andelbrot_2048x2 +//! 0000070: 3034 382e 706e 6731 323a 7069 6563 6520 048.png12:piece +//! 0000080: 6c65 6e67 7468 6931 3633 3834 6536 3a70 lengthi16384e6:p +//! 0000090: 6965 6365 7332 3230 3a7d 9171 0d9d 4dba ieces220:}.q..M. +//! 00000a0: 889b 5420 54d5 2672 8d5a 863f e121 df77 ..T T.&r.Z.?.!.w +//! 00000b0: c7f7 bb6c 7796 2166 2538 c5d9 cdab 8b08 ...lw.!f%8...... +//! 00000c0: ef8c 249b b2f5 c4cd 2adf 0bc0 0cf0 addf ..$.....*....... +//! 00000d0: 7290 e5b6 414c 236c 479b 8e9f 46aa 0c0d r...AL#lG...F... +//! 00000e0: 8ed1 97ff ee68 8b5f 34a3 87d7 71c5 a6f9 .....h._4...q... +//! 00000f0: 8e2e a631 7cbd f0f9 e223 f9cc 80af 5400 ...1|....#....T. +//! 0000100: 04f9 8569 1c77 89c1 764e d6aa bf61 a6c2 ...i.w..vN...a.. +//! 0000110: 8099 abb6 5f60 2f40 a825 be32 a33d 9d07 ...._`/@.%.2.=.. +//! 0000120: 0c79 6898 d49d 6349 af20 5866 266f 986b .yh...cI. Xf&o.k +//! 0000130: 6d32 34cd 7d08 155e 1ad0 0009 57ab 303b m24.}..^....W.0; +//! 0000140: 2060 c1dc 1287 d6f3 e745 4f70 6709 3631 `.......EOpg.61 +//! 0000150: 55f2 20f6 6ca5 156f 2c89 9569 1653 817d U. .l..o,..i.S.} +//! 0000160: 31f1 b6bd 3742 cc11 0bb2 fc2b 49a5 85b6 1...7B.....+I... +//! 0000170: fc76 7444 9365 65 .vtD.ee +//! ``` +//! +//! You can generate that output with the command: +//! +//! ```text +//! xxd mandelbrot_2048x2048_infohash_v1.png.torrent +//! ``` +//! +//! And you can show only the bytes (hexadecimal): +//! +//! ```text +//! 
6431303a6372656174656420627931383a71426974746f7272656e742076 +//! 342e342e3131333a6372656174696f6e2064617465693136373936373436 +//! 323865343a696e666f64363a6c656e6774686931373232303465343a6e61 +//! 6d6532343a6d616e64656c62726f745f3230343878323034382e706e6731 +//! 323a7069656365206c656e67746869313633383465363a70696563657332 +//! 32303a7d91710d9d4dba889b542054d526728d5a863fe121df77c7f7bb6c +//! 779621662538c5d9cdab8b08ef8c249bb2f5c4cd2adf0bc00cf0addf7290 +//! e5b6414c236c479b8e9f46aa0c0d8ed197ffee688b5f34a387d771c5a6f9 +//! 8e2ea6317cbdf0f9e223f9cc80af540004f985691c7789c1764ed6aabf61 +//! a6c28099abb65f602f40a825be32a33d9d070c796898d49d6349af205866 +//! 266f986b6d3234cd7d08155e1ad0000957ab303b2060c1dc1287d6f3e745 +//! 4f706709363155f220f66ca5156f2c8995691653817d31f1b6bd3742cc11 +//! 0bb2fc2b49a585b6fc767444936565 +//! ``` +//! +//! You can generate that output with the command: +//! +//! ```text +//! `xxd -ps mandelbrot_2048x2048_infohash_v1.png.torrent`. +//! ``` +//! +//! The same data can be represented in a JSON format: +//! +//! ```json +//! { +//! "created by": "qBittorrent v4.4.1", +//! "creation date": 1679674628, +//! "info": { +//! "length": 172204, +//! "name": "mandelbrot_2048x2048.png", +//! "piece length": 16384, +//! "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93" +//! } +//! } +//! ``` +//! +//! The JSON object was generated with: +//! +//! As you can see, there is a `info` attribute: +//! +//! ```json +//! { +//! "length": 172204, +//! "name": "mandelbrot_2048x2048.png", +//! "piece length": 16384, +//! "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93" +//! } +//! ``` +//! +//! The info-hash is the [SHA1](https://en.wikipedia.org/wiki/SHA-1) hash +//! of the `info` attribute. That is, the SHA1 hash of: +//! +//! ```text +//! 64363a6c656e6774686931373232303465343a6e61 +//! d6532343a6d616e64656c62726f745f3230343878323034382e706e6731 +//! 23a7069656365206c656e67746869313633383465363a70696563657332 +//! 2303a7d91710d9d4dba889b542054d526728d5a863fe121df77c7f7bb6c +//! 79621662538c5d9cdab8b08ef8c249bb2f5c4cd2adf0bc00cf0addf7290 +//! 5b6414c236c479b8e9f46aa0c0d8ed197ffee688b5f34a387d771c5a6f9 +//! e2ea6317cbdf0f9e223f9cc80af540004f985691c7789c1764ed6aabf61 +//! 6c28099abb65f602f40a825be32a33d9d070c796898d49d6349af205866 +//! 
66f986b6d3234cd7d08155e1ad0000957ab303b2060c1dc1287d6f3e745
+//! f706709363155f220f66ca5156f2c8995691653817d31f1b6bd3742cc11
+//! bb2fc2b49a585b6fc7674449365
+//! ```
+//!
+//! You can hash that byte string with any SHA-1 tool:
+//!
+//! > NOTICE: you need to remove the line breaks from the byte string before hashing.
+//!
+//! ```text
+//! 64363a6c656e6774686931373232303465343a6e616d6532343a6d616e64656c62726f745f3230343878323034382e706e6731323a7069656365206c656e67746869313633383465363a7069656365733232303a7d91710d9d4dba889b542054d526728d5a863fe121df77c7f7bb6c779621662538c5d9cdab8b08ef8c249bb2f5c4cd2adf0bc00cf0addf7290e5b6414c236c479b8e9f46aa0c0d8ed197ffee688b5f34a387d771c5a6f98e2ea6317cbdf0f9e223f9cc80af540004f985691c7789c1764ed6aabf61a6c28099abb65f602f40a825be32a33d9d070c796898d49d6349af205866266f986b6d3234cd7d08155e1ad0000957ab303b2060c1dc1287d6f3e7454f706709363155f220f66ca5156f2c8995691653817d31f1b6bd3742cc110bb2fc2b49a585b6fc7674449365
+//! ```
+//!
+//! The result is the 20-byte hash, shown here as a 40-char hex string: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB`
+//!
+//! The `info` dictionary can contain more fields, like the following example:
+//!
+//! ```json
+//! {
+//!   "length": 172204,
+//!   "name": "mandelbrot_2048x2048.png",
+//!   "piece length": 16384,
+//!   "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93",
+//!   "private": 1,
+//!   "md5sum": "e2ea6317cbdf0f9e223f9cc80af54000",
+//!   "source": "GGn"
+//! }
+//! ```
+//!
+//! Refer to the struct [`TorrentInfoDictionary`](crate::models::torrent_file::TorrentInfoDictionary) for more info.
+//!
+//! Regarding the `source` field, it is not clear what the initial intention
+//! for that field was. It could be a string to identify the source of the
+//! torrent, but it has been used by private trackers to identify the tracker
+//! that created the torrent, and it's usually a three-char string.
+//!
+//! The `md5sum` field is a string with the MD5 hash of the file. It does not
+//! seem to be used by the protocol.
+//!
+//! Some fields are exclusive to `BitTorrent` v2.
+//!
+//! For the [`BitTorrent` Version 1 specification](https://www.bittorrent.org/beps/bep_0003.html) there are two types of torrent
+//! files: single file and multiple files. Some fields are only valid for one
+//! type of torrent file.
+//!
+//! An example for a single-file torrent info dictionary:
+//!
+//! ```json
+//! {
+//!   "length": 11,
+//!   "name": "sample.txt",
+//!   "piece length": 16384,
+//!   "pieces": "D4 91 58 7F 1C 42 DF F0 CB 0F F5 C2 B8 CE FE 22 B3 AD 31 0A"
+//! }
+//! ```
+//!
+//! An example for a multi-file torrent info dictionary:
+//!
+//! ```json
+//! {
+//!   "files": [
+//!     {
+//!       "length": 11,
+//!       "path": [
+//!         "sample.txt"
+//!       ]
+//!     }
+//!   ],
+//!   "name": "sample",
+//!   "piece length": 16384,
+//!   "pieces": "D4 91 58 7F 1C 42 DF F0 CB 0F F5 C2 B8 CE FE 22 B3 AD 31 0A"
+//! }
+//! ```
+//!
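+//! In code, the `InfoHash` type in this module covers the representation side
+//! of all this: parsing a 40-char hex string yields the 20-byte form, and
+//! formatting goes back to lowercased hex. A small sketch using this module's
+//! own API:
+//!
+//! ```rust,ignore
+//! use std::str::FromStr;
+//! use torrust_index::models::info_hash::InfoHash;
+//!
+//! // The sample torrent's info-hash, parsed from its hex representation.
+//! let info_hash = InfoHash::from_str("5452869BE36F9F3350CCEE6B4544E7E76CAAADAB").unwrap();
+//!
+//! // Internally it is a 20-byte array...
+//! assert_eq!(info_hash.bytes().len(), 20);
+//!
+//! // ...and it is displayed as a lowercased 40-char hex string.
+//! assert_eq!(info_hash.to_hex_string(), "5452869be36f9f3350ccee6b4544e7e76caaadab");
+//! ```
+//!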
+//! An example torrent creator implementation can be found [here](https://www.bittorrent.org/beps/bep_0052_torrent_creator.py).
+use std::panic::Location;
+
+use thiserror::Error;
+
+/// `BitTorrent` Info Hash v1
+#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
+pub struct InfoHash(pub [u8; 20]);
+
+const INFO_HASH_BYTES_LEN: usize = 20;
+
+impl InfoHash {
+    /// Create a new `InfoHash` from a byte slice.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the byte slice does not contain the exact number of bytes needed for the `InfoHash`.
+    #[must_use]
+    pub fn from_bytes(bytes: &[u8]) -> Self {
+        assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN);
+        let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]);
+        ret.0.clone_from_slice(bytes);
+        ret
+    }
+
+    /// Returns the `InfoHash` internal byte array.
+    #[must_use]
+    pub fn bytes(&self) -> [u8; 20] {
+        self.0
+    }
+
+    /// Returns the `InfoHash` as a hex string.
+    #[must_use]
+    pub fn to_hex_string(&self) -> String {
+        self.to_string()
+    }
+}
+
+impl std::fmt::Display for InfoHash {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let mut chars = [0u8; 40];
+        binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify");
+        write!(f, "{}", std::str::from_utf8(&chars).unwrap())
+    }
+}
+
+impl std::str::FromStr for InfoHash {
+    type Err = binascii::ConvertError;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let mut i = Self([0u8; 20]);
+        if s.len() != 40 {
+            return Err(binascii::ConvertError::InvalidInputLength);
+        }
+        binascii::hex2bin(s.as_bytes(), &mut i.0)?;
+        Ok(i)
+    }
+}
+
+impl Ord for InfoHash {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.0.cmp(&other.0)
+    }
+}
+
+impl std::cmp::PartialOrd for InfoHash {
+    fn partial_cmp(&self, other: &InfoHash) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl std::convert::From<&[u8]> for InfoHash {
+    fn from(data: &[u8]) -> InfoHash {
+        assert_eq!(data.len(), 20);
+        let mut ret = InfoHash([0u8; 20]);
+        ret.0.clone_from_slice(data);
+        ret
+    }
+}
+
+impl std::convert::From<[u8; 20]> for InfoHash {
+    fn from(val: [u8; 20]) -> Self {
+        InfoHash(val)
+    }
+}
+
+/// Errors that can occur when converting from a `Vec<u8>` to an `InfoHash`.
+#[derive(Error, Debug)]
+pub enum ConversionError {
+    /// Not enough bytes for info-hash. An info-hash is 20 bytes.
+    #[error("not enough bytes for info-hash: {message} {location}")]
+    NotEnoughBytes {
+        location: &'static Location<'static>,
+        message: String,
+    },
+    /// Too many bytes for info-hash. An info-hash is 20 bytes.
+    #[error("too many bytes for info-hash: {message} {location}")]
+    TooManyBytes {
+        location: &'static Location<'static>,
+        message: String,
+    },
+}
+
+impl TryFrom<Vec<u8>> for InfoHash {
+    type Error = ConversionError;
+
+    fn try_from(bytes: Vec<u8>) -> Result<Self, Self::Error> {
+        if bytes.len() < INFO_HASH_BYTES_LEN {
+            return Err(ConversionError::NotEnoughBytes {
+                location: Location::caller(),
+                message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN},
+            });
+        }
+        if bytes.len() > INFO_HASH_BYTES_LEN {
+            return Err(ConversionError::TooManyBytes {
+                location: Location::caller(),
+                message: format!
{"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + +impl serde::ser::Serialize for InfoHash { + fn serialize(&self, serializer: S) -> Result { + let mut buffer = [0u8; 40]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + let str_out = std::str::from_utf8(bytes_out).unwrap(); + serializer.serialize_str(str_out) + } +} + +impl<'de> serde::de::Deserialize<'de> for InfoHash { + fn deserialize>(des: D) -> Result { + des.deserialize_str(InfoHashVisitor) + } +} + +struct InfoHashVisitor; + +impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { + type Value = InfoHash; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "a 40 character long hash") + } + + fn visit_str(self, v: &str) -> Result { + if v.len() != 40 { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a 40 character long string", + )); + } + + let mut res = InfoHash([0u8; 20]); + + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a hexadecimal string", + )); + }; + Ok(res) + } +} + +#[cfg(test)] +mod tests { + + use std::str::FromStr; + + use serde::{Deserialize, Serialize}; + use serde_json::json; + + use super::InfoHash; + + #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] + struct ContainingInfoHash { + pub info_hash: InfoHash, + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); + assert!(info_hash.is_ok()); + } + + #[test] + fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { + let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { + let info_hash = InfoHash::from_str(&"F".repeat(39)); + assert!(info_hash.is_err()); + + let info_hash = InfoHash::from_str(&"F".repeat(41)); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + let output = format!("{info_hash}"); + + assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); + } + + #[test] + fn an_info_hash_should_return_its_a_40_utf8_lowercased_char_hex_representations_as_string() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + assert_eq!(info_hash.to_hex_string(), "ffffffffffffffffffffffffffffffffffffffff"); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { + let info_hash: InfoHash = [255u8; 20].as_slice().into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { + let info_hash: InfoHash = [255u8; 20].into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_created_from_a_byte_vector() { + let info_hash: InfoHash = [255u8; 20].to_vec().try_into().unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + 
#[test] + fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_less_than_20_bytes() { + assert!(InfoHash::try_from([255u8; 19].to_vec()).is_err()); + } + + #[test] + fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_more_than_20_bytes() { + assert!(InfoHash::try_from([255u8; 21].to_vec()).is_err()); + } + + #[test] + fn an_info_hash_can_be_serialized() { + let s = ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + }; + + let json_serialized_value = serde_json::to_string(&s).unwrap(); + + assert_eq!( + json_serialized_value, + r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# + ); + } + + #[test] + fn an_info_hash_can_be_deserialized() { + let json = json!({ + "info_hash": "ffffffffffffffffffffffffffffffffffffffff", + }); + + let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); + + assert_eq!( + s, + ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + } + ); + } +} diff --git a/src/models/mod.rs b/src/models/mod.rs index cb31379a..754bfe80 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -1,5 +1,8 @@ -pub mod user; +pub mod category; +pub mod info_hash; +pub mod response; pub mod torrent; pub mod torrent_file; -pub mod response; +pub mod torrent_tag; pub mod tracker_key; +pub mod user; diff --git a/src/models/response.rs b/src/models/response.rs index ac34a34b..7d408b79 100644 --- a/src/models/response.rs +++ b/src/models/response.rs @@ -1,22 +1,28 @@ use serde::{Deserialize, Serialize}; + +use super::torrent::TorrentId; use crate::databases::database::Category; use crate::models::torrent::TorrentListing; use crate::models::torrent_file::TorrentFile; +use crate::models::torrent_tag::TorrentTag; pub enum OkResponses { - TokenResponse(TokenResponse) + TokenResponse(TokenResponse), } +#[allow(clippy::module_name_repetitions)] #[derive(Serialize, Deserialize, Debug)] pub struct OkResponse { - pub data: T + pub data: T, } +#[allow(clippy::module_name_repetitions)] #[derive(Serialize, Deserialize, Debug)] pub struct ErrorResponse { - pub errors: Vec + pub errors: Vec, } +#[allow(clippy::module_name_repetitions)] #[derive(Serialize, Deserialize, Debug)] pub struct TokenResponse { pub token: String, @@ -24,11 +30,21 @@ pub struct TokenResponse { pub admin: bool, } +#[allow(clippy::module_name_repetitions)] #[derive(Serialize, Deserialize, Debug)] pub struct NewTorrentResponse { - pub torrent_id: i64, + pub torrent_id: TorrentId, + pub info_hash: String, +} + +#[allow(clippy::module_name_repetitions)] +#[derive(Serialize, Deserialize, Debug)] +pub struct DeletedTorrentResponse { + pub torrent_id: TorrentId, + pub info_hash: String, } +#[allow(clippy::module_name_repetitions)] #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] pub struct TorrentResponse { pub torrent_id: i64, @@ -36,7 +52,7 @@ pub struct TorrentResponse { pub info_hash: String, pub title: String, pub description: Option, - pub category: Category, + pub category: Option, pub upload_date: String, pub file_size: i64, pub seeders: i64, @@ -44,28 +60,36 @@ pub struct TorrentResponse { pub files: Vec, pub trackers: Vec, pub magnet_link: String, + pub tags: Vec, + pub name: String, + pub comment: Option, } impl TorrentResponse { - pub fn from_listing(torrent_listing: TorrentListing) -> TorrentResponse { + #[must_use] + pub fn from_listing(torrent_listing: TorrentListing, category: Option) -> TorrentResponse { TorrentResponse { torrent_id: 
torrent_listing.torrent_id, uploader: torrent_listing.uploader, info_hash: torrent_listing.info_hash, title: torrent_listing.title, description: torrent_listing.description, - category: Category { category_id: 0, name: "".to_string(), num_torrents: 0 }, + category, upload_date: torrent_listing.date_uploaded, file_size: torrent_listing.file_size, seeders: torrent_listing.seeders, leechers: torrent_listing.leechers, files: vec![], trackers: vec![], - magnet_link: "".to_string(), + magnet_link: String::new(), + tags: vec![], + name: torrent_listing.name, + comment: torrent_listing.comment, } } } +#[allow(clippy::module_name_repetitions)] #[derive(Serialize, Deserialize, Debug, sqlx::FromRow)] pub struct TorrentsResponse { pub total: u32, diff --git a/src/models/torrent.rs b/src/models/torrent.rs index 0395b985..1c2d10cc 100644 --- a/src/models/torrent.rs +++ b/src/models/torrent.rs @@ -1,24 +1,93 @@ +use derive_more::{Display, Error}; use serde::{Deserialize, Serialize}; -use crate::models::torrent_file::Torrent; -use crate::routes::torrent::CreateTorrent; +use super::category::CategoryId; +use super::torrent_tag::TagId; + +const MIN_TORRENT_TITLE_LENGTH: usize = 3; + +#[allow(clippy::module_name_repetitions)] +pub type TorrentId = i64; + +#[allow(clippy::module_name_repetitions)] #[allow(dead_code)] -#[derive(Debug, PartialEq, Serialize, Deserialize, sqlx::FromRow)] +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, sqlx::FromRow)] pub struct TorrentListing { - pub torrent_id: i64, + pub torrent_id: TorrentId, pub uploader: String, pub info_hash: String, pub title: String, pub description: Option, - pub category_id: i64, + pub category_id: Option, pub date_uploaded: String, pub file_size: i64, pub seeders: i64, pub leechers: i64, + pub name: String, + pub comment: Option, } -#[derive(Debug)] -pub struct TorrentRequest { - pub fields: CreateTorrent, - pub torrent: Torrent, +#[derive(Debug, Display, PartialEq, Eq, Error)] +pub enum MetadataError { + #[display(fmt = "Missing mandatory torrent title.")] + MissingTorrentTitle, + + #[display(fmt = "Torrent title is too short.")] + InvalidTorrentTitleLength, +} + +#[derive(Debug, Deserialize)] +pub struct Metadata { + pub title: String, + pub description: String, + pub category_id: CategoryId, + pub tags: Vec, +} + +impl Metadata { + /// Create a new struct. + /// + /// # Errors + /// + /// This function will return an error if the metadata fields do not have a + /// valid format. + pub fn new(title: &str, description: &str, category_id: CategoryId, tag_ids: &[TagId]) -> Result { + Self::validate_format(title, description, category_id, tag_ids)?; + + Ok(Self { + title: title.to_owned(), + description: description.to_owned(), + category_id, + tags: tag_ids.to_vec(), + }) + } + + /// It validates the format of the metadata fields. + /// + /// It does not validate domain rules, like: + /// + /// - Duplicate titles. + /// - Non-existing categories. + /// - ... + /// + /// # Errors + /// + /// This function will return an error if any of the metadata fields does + /// not have a valid format. 
+ fn validate_format( + title: &str, + _description: &str, + _category_id: CategoryId, + _tag_ids: &[TagId], + ) -> Result<(), MetadataError> { + if title.is_empty() { + return Err(MetadataError::MissingTorrentTitle); + } + + if title.len() < MIN_TORRENT_TITLE_LENGTH { + return Err(MetadataError::InvalidTorrentTitleLength); + } + + Ok(()) + } } diff --git a/src/models/torrent_file.rs b/src/models/torrent_file.rs index c659b67f..98e57b92 100644 --- a/src/models/torrent_file.rs +++ b/src/models/torrent_file.rs @@ -1,27 +1,44 @@ use serde::{Deserialize, Serialize}; -use crate::config::Configuration; use serde_bencode::ser; use serde_bytes::ByteBuf; use sha1::{Digest, Sha1}; -use crate::utils::hex::{bytes_to_hex, hex_to_bytes}; -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] -pub struct TorrentNode(String, i64); +use super::info_hash::InfoHash; +use crate::utils::hex::{from_bytes, into_bytes}; #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] -pub struct TorrentFile { - pub path: Vec, - pub length: i64, +pub struct Torrent { + pub info: TorrentInfoDictionary, // #[serde(default)] - pub md5sum: Option, + pub announce: Option, + #[serde(default)] + pub nodes: Option>, + #[serde(default)] + pub encoding: Option, + #[serde(default)] + pub httpseeds: Option>, + #[serde(default)] + #[serde(rename = "announce-list")] + pub announce_list: Option>>, + #[serde(default)] + #[serde(rename = "creation date")] + pub creation_date: Option, + #[serde(default)] + pub comment: Option, + #[serde(default)] + #[serde(rename = "created by")] + pub created_by: Option, } -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] -pub struct TorrentInfo { +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +pub struct TorrentNode(String, i64); + +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +pub struct TorrentInfoDictionary { pub name: String, #[serde(default)] pub pieces: Option, - #[serde(rename="piece length")] + #[serde(rename = "piece length")] pub piece_length: i64, #[serde(default)] pub md5sum: Option, @@ -34,113 +51,84 @@ pub struct TorrentInfo { #[serde(default)] pub path: Option>, #[serde(default)] - #[serde(rename="root hash")] + #[serde(rename = "root hash")] pub root_hash: Option, + #[serde(default)] + pub source: Option, } -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] -pub struct Torrent { - pub info: TorrentInfo, // - #[serde(default)] - pub announce: Option, - #[serde(default)] - pub nodes: Option>, - #[serde(default)] - pub encoding: Option, - #[serde(default)] - pub httpseeds: Option>, - #[serde(default)] - #[serde(rename="announce-list")] - pub announce_list: Option>>, - #[serde(default)] - #[serde(rename="creation date")] - pub creation_date: Option, - #[serde(rename="comment")] - pub comment: Option, +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +pub struct TorrentFile { + pub path: Vec, + pub length: i64, #[serde(default)] - #[serde(rename="created by")] - pub created_by: Option, + pub md5sum: Option, } impl Torrent { - pub fn from_db_info_files_and_announce_urls(torrent_info: DbTorrentInfo, torrent_files: Vec, torrent_announce_urls: Vec>) -> Self { - let private = if let Some(private_i64) = torrent_info.private { - // must fit in a byte - let private = if (0..256).contains(&private_i64) { private_i64 } else { 0 }; - Some(private as u8) - } else { - None - }; - - // the info part of the torrent file - let mut info = TorrentInfo { - name: torrent_info.name.to_string(), - pieces: None, - piece_length: 
torrent_info.piece_length, - md5sum: None, - length: None, - files: None, - private, - path: None, - root_hash: None - }; - - // a torrent file has a root hash or a pieces key, but not both. - if torrent_info.root_hash > 0 { - info.root_hash = Some(torrent_info.pieces); - } else { - let pieces = hex_to_bytes(&torrent_info.pieces).unwrap(); - info.pieces = Some(ByteBuf::from(pieces)); - } - - // either set the single file or the multiple files information - if torrent_files.len() == 1 { - // can safely unwrap because we know there is 1 element - let torrent_file = torrent_files.first().unwrap(); - - info.md5sum = torrent_file.md5sum.clone(); - - info.length = Some(torrent_file.length); - - let path = if torrent_file.path.first().as_ref().unwrap().is_empty() { - None - } else { - Some(torrent_file.path.clone()) - }; - - info.path = path; - } else { - info.files = Some(torrent_files); - } + /// It hydrates a `Torrent` struct from the database data. + /// + /// # Panics + /// + /// This function will panic if the `torrent_info.pieces` is not a valid + /// hex string. + #[must_use] + pub fn from_database( + db_torrent: &DbTorrent, + torrent_files: &Vec, + torrent_announce_urls: Vec>, + ) -> Self { + let info_dict = TorrentInfoDictionary::with( + &db_torrent.name, + db_torrent.piece_length, + db_torrent.private, + db_torrent.root_hash, + &db_torrent.pieces, + torrent_files, + ); Self { - info, + info: info_dict, announce: None, nodes: None, encoding: None, httpseeds: None, announce_list: Some(torrent_announce_urls), creation_date: None, - comment: None, - created_by: None + comment: db_torrent.comment.clone(), + created_by: None, } } - pub async fn set_torrust_config(&mut self, cfg: &Configuration) { - let settings = cfg.settings.read().await; + /// Sets the announce url to the tracker url. + pub fn set_announce_to(&mut self, tracker_url: &str) { + self.announce = Some(tracker_url.to_owned()); + } - self.announce = Some(settings.tracker.url.clone()); + /// Removes all other trackers if the torrent is private. + pub fn reset_announce_list_if_private(&mut self) { + if self.is_private() { + self.announce_list = None; + } + } - // if torrent is private, remove all other trackers + fn is_private(&self) -> bool { if let Some(private) = self.info.private { if private == 1 { - self.announce_list = None; + return true; } } + false } + /// It calculates the info hash of the torrent file. + /// + /// # Panics + /// + /// This function will panic if the `info` part of the torrent file cannot be serialized. 
+ #[must_use] pub fn calculate_info_hash_as_bytes(&self) -> [u8; 20] { - let info_bencoded = ser::to_bytes(&self.info).unwrap(); + let info_bencoded = ser::to_bytes(&self.info).expect("variable `info` was not able to be serialized."); let mut hasher = Sha1::new(); hasher.update(info_bencoded); let sum_hex = hasher.finalize(); @@ -149,47 +137,415 @@ impl Torrent { sum_bytes } - pub fn info_hash(&self) -> String { - bytes_to_hex(&self.calculate_info_hash_as_bytes()) + #[must_use] + pub fn canonical_info_hash(&self) -> InfoHash { + self.calculate_info_hash_as_bytes().into() } + #[must_use] + pub fn canonical_info_hash_hex(&self) -> String { + self.canonical_info_hash().to_hex_string() + } + + #[must_use] pub fn file_size(&self) -> i64 { - if self.info.length.is_some() { - return self.info.length.unwrap() - } else { - match &self.info.files { + match self.info.length { + Some(length) => length, + None => match &self.info.files { None => 0, Some(files) => { let mut file_size = 0; - for file in files.iter() { + for file in files { file_size += file.length; } file_size } - } + }, + } + } + + /// It returns the announce urls of the torrent file. + /// + /// # Panics + /// + /// This function will panic if both the `announce_list` and the `announce` are `None`. + #[must_use] + pub fn announce_urls(&self) -> Vec { + match &self.announce_list { + Some(list) => list.clone().into_iter().flatten().collect::>(), + None => vec![self.announce.clone().expect("variable `announce` should not be None")], } } + + #[must_use] + pub fn is_a_single_file_torrent(&self) -> bool { + self.info.is_a_single_file_torrent() + } + + #[must_use] + pub fn is_a_multiple_file_torrent(&self) -> bool { + self.info.is_a_multiple_file_torrent() + } } -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] -pub struct DbTorrentFile { - pub path: Option, - pub length: i64, - #[serde(default)] - pub md5sum: Option, +impl TorrentInfoDictionary { + /// Constructor. + /// + /// # Panics + /// + /// This function will panic if: + /// + /// - The `pieces` field is not a valid hex string. + /// - For single files torrents the `TorrentFile` path is empty. + #[must_use] + pub fn with( + name: &str, + piece_length: i64, + private: Option, + root_hash: i64, + pieces: &str, + files: &Vec, + ) -> Self { + let mut info_dict = Self { + name: name.to_string(), + pieces: None, + piece_length, + md5sum: None, + length: None, + files: None, + private, + path: None, + root_hash: None, + source: None, + }; + + // a torrent file has a root hash or a pieces key, but not both. 
+ if root_hash > 0 { + // If `root_hash` is true the `pieces` field contains the `root hash` + info_dict.root_hash = Some(pieces.to_owned()); + } else { + let buffer = into_bytes(pieces).expect("variable `torrent_info.pieces` is not a valid hex string"); + info_dict.pieces = Some(ByteBuf::from(buffer)); + } + + // either set the single file or the multiple files information + if files.len() == 1 { + let torrent_file = files + .first() + .expect("vector `torrent_files` should have at least one element"); + + info_dict.md5sum = torrent_file.md5sum.clone(); + + info_dict.length = Some(torrent_file.length); + + let path = if torrent_file + .path + .first() + .as_ref() + .expect("the vector for the `path` should have at least one element") + .is_empty() + { + None + } else { + Some(torrent_file.path.clone()) + }; + + info_dict.path = path; + } else { + info_dict.files = Some(files.clone()); + } + + info_dict + } + + /// torrent file can only hold a pieces key or a root hash key: + /// [BEP 39](http://www.bittorrent.org/beps/bep_0030.html) + #[must_use] + pub fn get_pieces_as_string(&self) -> String { + match &self.pieces { + None => String::new(), + Some(byte_buf) => from_bytes(byte_buf.as_ref()), + } + } + + /// It returns the root hash as a `i64` value. + /// + /// # Panics + /// + /// This function will panic if the root hash cannot be converted into a + /// `i64` value. + #[must_use] + pub fn get_root_hash_as_i64(&self) -> i64 { + match &self.root_hash { + None => 0i64, + Some(root_hash) => root_hash + .parse::() + .expect("variable `root_hash` cannot be converted into a `i64`"), + } + } + + #[must_use] + pub fn is_a_single_file_torrent(&self) -> bool { + self.length.is_some() + } + + #[must_use] + pub fn is_a_multiple_file_torrent(&self) -> bool { + self.files.is_some() + } } -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] -pub struct DbTorrentInfo { +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] +pub struct DbTorrent { + pub torrent_id: i64, + pub info_hash: String, pub name: String, pub pieces: String, pub piece_length: i64, #[serde(default)] - pub private: Option, + pub private: Option, pub root_hash: i64, + pub comment: Option, +} + +#[allow(clippy::module_name_repetitions)] +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] +pub struct DbTorrentFile { + pub path: Option, + pub length: i64, + #[serde(default)] + pub md5sum: Option, } -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] pub struct DbTorrentAnnounceUrl { pub tracker_url: String, } + +#[cfg(test)] +mod tests { + + mod info_hash_calculation_for_version_v1 { + + use serde_bytes::ByteBuf; + + use crate::models::torrent_file::{Torrent, TorrentInfoDictionary}; + + #[test] + fn the_parsed_torrent_file_should_calculated_the_torrent_info_hash() { + /* The sample.txt content (`mandelbrot`): + + ``` + 6d616e64656c62726f740a + ``` + + The sample.txt.torrent content: + + ``` + 6431303a6372656174656420627931383a71426974746f7272656e742076 + 342e352e3431333a6372656174696f6e2064617465693136393131343935 + 373265343a696e666f64363a6c656e67746869313165343a6e616d653130 + 3a73616d706c652e74787431323a7069656365206c656e67746869313633 + 383465363a70696563657332303ad491587f1c42dff0cb0ff5c2b8cefe22 + b3ad310a6565 + ``` + + ```json + { + "created by": "qBittorrent v4.5.4", + "creation date": 1691149572, + "info": { + "length": 11, + "name": "sample.txt", + "piece 
length": 16384, + "pieces": "D4 91 58 7F 1C 42 DF F0 CB 0F F5 C2 B8 CE FE 22 B3 AD 31 0A" + } + } + ``` + */ + + let sample_data_in_txt_file = "mandelbrot\n"; + + let info = TorrentInfoDictionary { + name: "sample.txt".to_string(), + pieces: Some(ByteBuf::from(vec![ + // D4 91 58 7F 1C 42 DF F0 CB 0F F5 C2 B8 CE FE 22 B3 AD 31 0A // hex + 212, 145, 88, 127, 28, 66, 223, 240, 203, 15, 245, 194, 184, 206, 254, 34, 179, 173, 49, 10, // dec + ])), + piece_length: 16384, + md5sum: None, + length: Some(sample_data_in_txt_file.len().try_into().unwrap()), + files: None, + private: None, + path: None, + root_hash: None, + source: None, + }; + + let torrent = Torrent { + info: info.clone(), + announce: None, + announce_list: Some(vec![]), + creation_date: None, + comment: None, + created_by: None, + nodes: None, + encoding: None, + httpseeds: None, + }; + + assert_eq!(torrent.canonical_info_hash_hex(), "79fa9e4a2927804fe4feab488a76c8c2d3d1cdca"); + } + + mod infohash_should_be_calculated_for { + + use serde_bytes::ByteBuf; + + use crate::models::torrent_file::{Torrent, TorrentFile, TorrentInfoDictionary}; + + #[test] + fn a_simple_single_file_torrent() { + let sample_data_in_txt_file = "mandelbrot\n"; + + let info = TorrentInfoDictionary { + name: "sample.txt".to_string(), + pieces: Some(ByteBuf::from(vec![ + // D4 91 58 7F 1C 42 DF F0 CB 0F F5 C2 B8 CE FE 22 B3 AD 31 0A // hex + 212, 145, 88, 127, 28, 66, 223, 240, 203, 15, 245, 194, 184, 206, 254, 34, 179, 173, 49, 10, // dec + ])), + piece_length: 16384, + md5sum: None, + length: Some(sample_data_in_txt_file.len().try_into().unwrap()), + files: None, + private: None, + path: None, + root_hash: None, + source: None, + }; + + let torrent = Torrent { + info: info.clone(), + announce: None, + announce_list: Some(vec![]), + creation_date: None, + comment: None, + created_by: None, + nodes: None, + encoding: None, + httpseeds: None, + }; + + assert_eq!(torrent.canonical_info_hash_hex(), "79fa9e4a2927804fe4feab488a76c8c2d3d1cdca"); + } + + #[test] + fn a_simple_multi_file_torrent() { + let sample_data_in_txt_file = "mandelbrot\n"; + + let info = TorrentInfoDictionary { + name: "sample".to_string(), + pieces: Some(ByteBuf::from(vec![ + // D4 91 58 7F 1C 42 DF F0 CB 0F F5 C2 B8 CE FE 22 B3 AD 31 0A // hex + 212, 145, 88, 127, 28, 66, 223, 240, 203, 15, 245, 194, 184, 206, 254, 34, 179, 173, 49, 10, // dec + ])), + piece_length: 16384, + md5sum: None, + length: None, + files: Some(vec![TorrentFile { + path: vec!["sample.txt".to_string()], + length: sample_data_in_txt_file.len().try_into().unwrap(), + md5sum: None, + }]), + private: None, + path: None, + root_hash: None, + source: None, + }; + + let torrent = Torrent { + info: info.clone(), + announce: None, + announce_list: Some(vec![]), + creation_date: None, + comment: None, + created_by: None, + nodes: None, + encoding: None, + httpseeds: None, + }; + + assert_eq!(torrent.canonical_info_hash_hex(), "aa2aca91ab650c4d249c475ca3fa604f2ccb0d2a"); + } + + #[test] + fn a_simple_single_file_torrent_with_a_source() { + let sample_data_in_txt_file = "mandelbrot\n"; + + let info = TorrentInfoDictionary { + name: "sample.txt".to_string(), + pieces: Some(ByteBuf::from(vec![ + // D4 91 58 7F 1C 42 DF F0 CB 0F F5 C2 B8 CE FE 22 B3 AD 31 0A // hex + 212, 145, 88, 127, 28, 66, 223, 240, 203, 15, 245, 194, 184, 206, 254, 34, 179, 173, 49, 10, // dec + ])), + piece_length: 16384, + md5sum: None, + length: Some(sample_data_in_txt_file.len().try_into().unwrap()), + files: None, + private: None, + path: None, + root_hash: 
None, + source: Some("ABC".to_string()), // The tracker three-letter code + }; + + let torrent = Torrent { + info: info.clone(), + announce: None, + announce_list: Some(vec![]), + creation_date: None, + comment: None, + created_by: None, + nodes: None, + encoding: None, + httpseeds: None, + }; + + assert_eq!(torrent.canonical_info_hash_hex(), "ccc1cf4feb59f3fa85c96c9be1ebbafcfe8a9cc8"); + } + + #[test] + fn a_simple_single_file_private_torrent() { + let sample_data_in_txt_file = "mandelbrot\n"; + + let info = TorrentInfoDictionary { + name: "sample.txt".to_string(), + pieces: Some(ByteBuf::from(vec![ + // D4 91 58 7F 1C 42 DF F0 CB 0F F5 C2 B8 CE FE 22 B3 AD 31 0A // hex + 212, 145, 88, 127, 28, 66, 223, 240, 203, 15, 245, 194, 184, 206, 254, 34, 179, 173, 49, 10, // dec + ])), + piece_length: 16384, + md5sum: None, + length: Some(sample_data_in_txt_file.len().try_into().unwrap()), + files: None, + private: Some(1), + path: None, + root_hash: None, + source: None, + }; + + let torrent = Torrent { + info: info.clone(), + announce: None, + announce_list: Some(vec![]), + creation_date: None, + comment: None, + created_by: None, + nodes: None, + encoding: None, + httpseeds: None, + }; + + assert_eq!(torrent.canonical_info_hash_hex(), "d3a558d0a19aaa23ba6f9f430f40924d10fefa86"); + } + } + } +} diff --git a/src/models/torrent_tag.rs b/src/models/torrent_tag.rs new file mode 100644 index 00000000..2da97303 --- /dev/null +++ b/src/models/torrent_tag.rs @@ -0,0 +1,10 @@ +use serde::{Deserialize, Serialize}; +use sqlx::FromRow; + +pub type TagId = i64; + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, FromRow)] +pub struct TorrentTag { + pub tag_id: TagId, + pub name: String, +} diff --git a/src/models/tracker_key.rs b/src/models/tracker_key.rs index d130cd6d..60993c61 100644 --- a/src/models/tracker_key.rs +++ b/src/models/tracker_key.rs @@ -1,4 +1,4 @@ -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use sqlx::FromRow; #[derive(Debug, Serialize, Deserialize, FromRow)] @@ -6,3 +6,9 @@ pub struct TrackerKey { pub key: String, pub valid_until: i64, } + +#[derive(Debug, Serialize, Deserialize)] +pub struct Duration { + pub secs: i64, + pub nanos: i64, +} diff --git a/src/models/user.rs b/src/models/user.rs index 3885aaa2..b115e10c 100644 --- a/src/models/user.rs +++ b/src/models/user.rs @@ -1,21 +1,27 @@ -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; + +#[allow(clippy::module_name_repetitions)] +pub type UserId = i64; #[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct User { - pub user_id: i64, - pub date_registered: String, + pub user_id: UserId, + pub date_registered: Option, + pub date_imported: Option, pub administrator: bool, } +#[allow(clippy::module_name_repetitions)] #[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct UserAuthentication { - pub user_id: i64, + pub user_id: UserId, pub password_hash: String, } -#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, sqlx::FromRow)] +#[allow(clippy::module_name_repetitions)] +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct UserProfile { - pub user_id: i64, + pub user_id: UserId, pub username: String, pub email: String, pub email_verified: bool, @@ -23,17 +29,20 @@ pub struct UserProfile { pub avatar: String, } +#[allow(clippy::module_name_repetitions)] #[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct UserCompact { - pub user_id: i64, + pub user_id: 
UserId, pub username: String, pub administrator: bool, } +#[allow(clippy::module_name_repetitions)] #[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct UserFull { - pub user_id: i64, - pub date_registered: String, + pub user_id: UserId, + pub date_registered: Option, + pub date_imported: Option, pub administrator: bool, pub username: String, pub email: String, @@ -42,6 +51,7 @@ pub struct UserFull { pub avatar: String, } +#[allow(clippy::module_name_repetitions)] #[derive(Debug, Serialize, Deserialize, Clone)] pub struct UserClaims { pub user: UserCompact, diff --git a/src/routes/about.rs b/src/routes/about.rs deleted file mode 100644 index 9f23c4d1..00000000 --- a/src/routes/about.rs +++ /dev/null @@ -1,76 +0,0 @@ -use actix_web::{Responder, web, HttpResponse}; -use actix_web::http::StatusCode; - -use crate::errors::ServiceResult; - -pub fn init_routes(cfg: &mut web::ServiceConfig) { - cfg.service( - web::scope("/about") - .service(web::resource("") - .route(web::get().to(get_about)) - ) - .service(web::resource("/license") - .route(web::get().to(get_license)) - ) - ); -} - -const ABOUT: &str = r#" - - - About - - -

-        <h1>Torrust Index Backend</h1>
-
-        <h2>About</h2>
-
-        <p>Hi! This is a running torrust-index-backend.</p>
- - - -"#; - -pub async fn get_about() -> ServiceResult { - Ok(HttpResponse::build(StatusCode::OK) - .content_type("text/html; charset=utf-8") - .body(ABOUT) - ) -} - -const LICENSE: &str = r#" - - - Licensing - - -

-        <h1>Torrust Index Backend</h1>
-
-        <h2>Licensing</h2>
-
-        <h3>Multiple Licenses</h3>
-
-        <p>This repository has multiple licenses depending on the content type, the date of contributions, or stemming from external component licenses that were not developed by any of the Torrust team members or Torrust repository contributors.</p>
-
-        <p>The two main licenses applicable to most of its content are:</p>
-
-        <p>- For Code -- agpl-3.0</p>
-
-        <p>- For Media (Images, etc.) -- cc-by-sa</p>
-
-        <p>If you want to read more about all the licenses and how they apply, please refer to the contributor agreement.</p>
- - - -"#; - -pub async fn get_license() -> ServiceResult { - Ok(HttpResponse::build(StatusCode::OK) - .content_type("text/html; charset=utf-8") - .body(LICENSE) - ) -} diff --git a/src/routes/category.rs b/src/routes/category.rs deleted file mode 100644 index 34db369d..00000000 --- a/src/routes/category.rs +++ /dev/null @@ -1,59 +0,0 @@ -use actix_web::{HttpRequest, HttpResponse, Responder, web}; -use serde::{Serialize, Deserialize}; - -use crate::common::WebAppData; -use crate::errors::{ServiceError, ServiceResult}; -use crate::models::response::{OkResponse}; - -pub fn init_routes(cfg: &mut web::ServiceConfig) { - cfg.service( - web::scope("/category") - .service(web::resource("") - .route(web::get().to(get_categories)) - .route(web::post().to(add_category)) - .route(web::delete().to(delete_category)) - ) - ); -} - -pub async fn get_categories(app_data: WebAppData) -> ServiceResult { - let categories = app_data.database.get_categories().await?; - - Ok(HttpResponse::Ok().json(OkResponse { - data: categories - })) -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct Category { - pub name: String, - pub icon: Option -} - -pub async fn add_category(req: HttpRequest, payload: web::Json, app_data: WebAppData) -> ServiceResult { - // check for user - let user = app_data.auth.get_user_compact_from_request(&req).await?; - - // check if user is administrator - if !user.administrator { return Err(ServiceError::Unauthorized) } - - let _ = app_data.database.insert_category_and_get_id(&payload.name).await?; - - Ok(HttpResponse::Ok().json(OkResponse { - data: payload.name.clone() - })) -} - -pub async fn delete_category(req: HttpRequest, payload: web::Json, app_data: WebAppData) -> ServiceResult { - // check for user - let user = app_data.auth.get_user_compact_from_request(&req).await?; - - // check if user is administrator - if !user.administrator { return Err(ServiceError::Unauthorized) } - - let _ = app_data.database.delete_category(&payload.name).await?; - - Ok(HttpResponse::Ok().json(OkResponse { - data: payload.name.clone() - })) -} diff --git a/src/routes/mod.rs b/src/routes/mod.rs deleted file mode 100644 index dbbcc31a..00000000 --- a/src/routes/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -use actix_web::web; - -pub mod user; -pub mod torrent; -pub mod category; -pub mod settings; -pub mod about; -pub mod root; - -pub fn init_routes(cfg: &mut web::ServiceConfig) { - user::init_routes(cfg); - torrent::init_routes(cfg); - category::init_routes(cfg); - settings::init_routes(cfg); - about::init_routes(cfg); - root::init_routes(cfg); -} diff --git a/src/routes/root.rs b/src/routes/root.rs deleted file mode 100644 index 9ae00e4e..00000000 --- a/src/routes/root.rs +++ /dev/null @@ -1,11 +0,0 @@ -use actix_web::web; -use crate::routes::about; - -pub fn init_routes(cfg: &mut web::ServiceConfig) { - cfg.service( - web::scope("/") - .service(web::resource("") - .route(web::get().to(about::get_about)) - ) - ); -} diff --git a/src/routes/settings.rs b/src/routes/settings.rs deleted file mode 100644 index fc48a7e1..00000000 --- a/src/routes/settings.rs +++ /dev/null @@ -1,67 +0,0 @@ -use actix_web::{HttpRequest, HttpResponse, Responder, web}; -use crate::common::WebAppData; -use crate::config::{TorrustConfig}; -use crate::errors::{ServiceError, ServiceResult}; -use crate::models::response::{OkResponse}; - -pub fn init_routes(cfg: &mut web::ServiceConfig) { - cfg.service( - web::scope("/settings") - .service(web::resource("") - .route(web::get().to(get_settings)) - .route(web::post().to(update_settings)) - ) 
- .service(web::resource("/name") - .route(web::get().to(get_site_name)) - ) - .service(web::resource("/public") - .route(web::get().to(get_public_settings)) - ) - ); -} - -pub async fn get_settings(req: HttpRequest, app_data: WebAppData) -> ServiceResult { - // check for user - let user = app_data.auth.get_user_compact_from_request(&req).await?; - - // check if user is administrator - if !user.administrator { return Err(ServiceError::Unauthorized) } - - let settings = app_data.cfg.settings.read().await; - - Ok(HttpResponse::Ok().json(OkResponse { - data: &*settings - })) -} - -pub async fn get_public_settings(app_data: WebAppData) -> ServiceResult { - let public_settings = app_data.cfg.get_public().await; - - Ok(HttpResponse::Ok().json(OkResponse { - data: public_settings - })) -} - -pub async fn get_site_name(app_data: WebAppData) -> ServiceResult { - let settings = app_data.cfg.settings.read().await; - - Ok(HttpResponse::Ok().json(OkResponse { - data: &settings.website.name - })) -} - -pub async fn update_settings(req: HttpRequest, payload: web::Json, app_data: WebAppData) -> ServiceResult { - // check for user - let user = app_data.auth.get_user_compact_from_request(&req).await?; - - // check if user is administrator - if !user.administrator { return Err(ServiceError::Unauthorized) } - - let _ = app_data.cfg.update_settings(payload.into_inner()).await; - - let settings = app_data.cfg.settings.read().await; - - Ok(HttpResponse::Ok().json(OkResponse { - data: &*settings - })) -} diff --git a/src/routes/torrent.rs b/src/routes/torrent.rs deleted file mode 100644 index ea43b3d9..00000000 --- a/src/routes/torrent.rs +++ /dev/null @@ -1,364 +0,0 @@ -use actix_multipart::Multipart; -use actix_web::{HttpRequest, HttpResponse, Responder, web}; -use actix_web::web::{Query}; -use futures::{StreamExt, TryStreamExt}; -use serde::{Deserialize}; -use std::io::Cursor; -use std::io::{Write}; -use sqlx::{FromRow}; - -use crate::AsCSV; -use crate::databases::database::Sorting; -use crate::errors::{ServiceError, ServiceResult}; -use crate::models::response::{NewTorrentResponse, OkResponse, TorrentResponse}; -use crate::models::torrent::TorrentRequest; -use crate::utils::parse_torrent; -use crate::common::{WebAppData}; - -pub fn init_routes(cfg: &mut web::ServiceConfig) { - cfg.service( - web::scope("/torrent") - .service(web::resource("/upload") - .route(web::post().to(upload_torrent))) - .service(web::resource("/download/{id}") - .route(web::get().to(download_torrent))) - .service(web::resource("/{id}") - .route(web::get().to(get_torrent)) - .route(web::put().to(update_torrent)) - .route(web::delete().to(delete_torrent))) - ); - cfg.service( - web::scope("/torrents") - .service(web::resource("") - .route(web::get().to(get_torrents))) - ); -} - -#[derive(FromRow)] -pub struct TorrentCount { - pub count: i32, -} - -#[derive(Debug, Deserialize)] -pub struct CreateTorrent { - pub title: String, - pub description: String, - pub category: String, -} - -impl CreateTorrent { - pub fn verify(&self) -> Result<(), ServiceError>{ - if !self.title.is_empty() && !self.category.is_empty() { - return Ok(()) - } - - Err(ServiceError::BadRequest) - } -} - -#[derive(Debug, Deserialize)] -pub struct TorrentSearch { - page_size: Option, - page: Option, - sort: Option, - // expects comma separated string, eg: "?categories=movie,other,app" - categories: Option, - search: Option, -} - -#[derive(Debug, Deserialize)] -pub struct TorrentUpdate { - title: Option, - description: Option -} - -pub async fn upload_torrent(req: 
HttpRequest, payload: Multipart, app_data: WebAppData) -> ServiceResult { - let user = app_data.auth.get_user_compact_from_request(&req).await?; - - // get torrent and fields from request - let mut torrent_request = get_torrent_request_from_payload(payload).await?; - - // update announce url to our own tracker url - torrent_request.torrent.set_torrust_config(&app_data.cfg).await; - - // get the correct category name from database - let category = app_data.database.get_category_from_name(&torrent_request.fields.category).await - .map_err(|_| ServiceError::InvalidCategory)?; - - // insert entire torrent in database - let torrent_id = app_data.database.insert_torrent_and_get_id( - &torrent_request.torrent, - user.user_id, - category.category_id, - &torrent_request.fields.title, - &torrent_request.fields.description - ).await?; - - // whitelist info hash on tracker - if let Err(e) = app_data.tracker.whitelist_info_hash(torrent_request.torrent.info_hash()).await { - // if the torrent can't be whitelisted somehow, remove the torrent from database - let _ = app_data.database.delete_torrent(torrent_id).await; - return Err(e); - } - - // respond with the newly uploaded torrent id - Ok(HttpResponse::Ok().json(OkResponse { - data: NewTorrentResponse { - torrent_id - } - })) -} - -pub async fn download_torrent(req: HttpRequest, app_data: WebAppData) -> ServiceResult { - let torrent_id = get_torrent_id_from_request(&req)?; - - // optional - let user = app_data.auth.get_user_compact_from_request(&req).await; - - let mut torrent = app_data.database.get_torrent_from_id(torrent_id).await?; - - let settings = app_data.cfg.settings.read().await; - - let tracker_url = settings.tracker.url.clone(); - - drop(settings); - - // add personal tracker url or default tracker url - match user { - Ok(user) => { - let personal_announce_url = app_data.tracker.get_personal_announce_url(user.user_id).await.unwrap_or(tracker_url); - torrent.announce = Some(personal_announce_url.clone()); - if let Some(list) = &mut torrent.announce_list { - let vec = vec![personal_announce_url]; - list.insert(0, vec); - } - } - Err(_) => { - torrent.announce = Some(tracker_url); - } - } - - let buffer = parse_torrent::encode_torrent(&torrent).map_err(|_| ServiceError::InternalServerError)?; - - Ok(HttpResponse::Ok() - .content_type("application/x-bittorrent") - .body(buffer) - ) -} - -pub async fn get_torrent(req: HttpRequest, app_data: WebAppData) -> ServiceResult { - // optional - let user = app_data.auth.get_user_compact_from_request(&req).await; - - let settings = app_data.cfg.settings.read().await; - - let torrent_id = get_torrent_id_from_request(&req)?; - - let torrent_listing = app_data.database.get_torrent_listing_from_id(torrent_id).await?; - - let category = app_data.database.get_category_from_id(torrent_listing.category_id).await?; - - let mut torrent_response = TorrentResponse::from_listing(torrent_listing); - - torrent_response.category = category; - - let tracker_url = settings.tracker.url.clone(); - - drop(settings); - - torrent_response.files = app_data.database.get_torrent_files_from_id(torrent_id).await?; - - if torrent_response.files.len() == 1 { - let torrent_info = app_data.database.get_torrent_info_from_id(torrent_id).await?; - - torrent_response.files.iter_mut().for_each(|v| v.path = vec![torrent_info.name.to_string()]); - } - - torrent_response.trackers = app_data.database.get_torrent_announce_urls_from_id(torrent_id) - .await - .map(|v| v.into_iter().flatten().collect())?; - - // add tracker url - match user { - 
Ok(user) => { - // if no user owned tracker key can be found, use default tracker url - let personal_announce_url = app_data.tracker.get_personal_announce_url(user.user_id).await.unwrap_or(tracker_url); - // add personal tracker url to front of vec - torrent_response.trackers.insert(0, personal_announce_url); - }, - Err(_) => { - torrent_response.trackers.insert(0, tracker_url); - } - } - - // add magnet link - let mut magnet = format!("magnet:?xt=urn:btih:{}&dn={}", torrent_response.info_hash, urlencoding::encode(&torrent_response.title)); - - // add trackers from torrent file to magnet link - for tracker in &torrent_response.trackers { - magnet.push_str(&format!("&tr={}", urlencoding::encode(tracker))); - } - - torrent_response.magnet_link = magnet; - - // get realtime seeders and leechers - if let Ok(torrent_info) = app_data.tracker.get_torrent_info(torrent_response.torrent_id, &torrent_response.info_hash).await { - torrent_response.seeders = torrent_info.seeders; - torrent_response.leechers = torrent_info.leechers; - } - - Ok(HttpResponse::Ok().json(OkResponse { - data: torrent_response - })) -} - -pub async fn update_torrent(req: HttpRequest, payload: web::Json, app_data: WebAppData) -> ServiceResult { - let user = app_data.auth.get_user_compact_from_request(&req).await?; - - let torrent_id = get_torrent_id_from_request(&req)?; - - let torrent_listing = app_data.database.get_torrent_listing_from_id(torrent_id).await?; - - // check if user is owner or administrator - if torrent_listing.uploader != user.username && !user.administrator { return Err(ServiceError::Unauthorized) } - - // update torrent title - if let Some(title) = &payload.title { - let _res = app_data.database.update_torrent_title(torrent_id, title).await?; - } - - // update torrent description - if let Some(description) = &payload.description { - let _res = app_data.database.update_torrent_description(torrent_id, description).await?; - } - - let torrent_listing = app_data.database.get_torrent_listing_from_id(torrent_id).await?; - - let torrent_response = TorrentResponse::from_listing(torrent_listing); - - Ok(HttpResponse::Ok().json(OkResponse { - data: torrent_response - })) -} - -pub async fn delete_torrent(req: HttpRequest, app_data: WebAppData) -> ServiceResult { - let user = app_data.auth.get_user_compact_from_request(&req).await?; - - // check if user is administrator - if !user.administrator { return Err(ServiceError::Unauthorized) } - - let torrent_id = get_torrent_id_from_request(&req)?; - - // needed later for removing torrent from tracker whitelist - let torrent_listing = app_data.database.get_torrent_listing_from_id(torrent_id).await?; - - let _res = app_data.database.delete_torrent(torrent_id).await?; - - // remove info_hash from tracker whitelist - let _ = app_data.tracker.remove_info_hash_from_whitelist(torrent_listing.info_hash).await; - - Ok(HttpResponse::Ok().json(OkResponse { - data: NewTorrentResponse { - torrent_id - } - })) -} - -// eg: /torrents?categories=music,other,movie&search=bunny&sort=size_DESC -pub async fn get_torrents(params: Query, app_data: WebAppData) -> ServiceResult { - let sort = params.sort.unwrap_or(Sorting::UploadedDesc); - - let page = params.page.unwrap_or(0); - - // make sure the min page size = 10 - let page_size = match params.page_size.unwrap_or(30) { - 0 ..= 9 => 10, - v => v - }; - - let offset = (page * page_size as u32) as u64; - - let categories = params.categories.as_csv::().unwrap_or(None); - - let torrents_response = 
app_data.database.get_torrents_search_sorted_paginated(¶ms.search, &categories, &sort, offset, page_size as u8).await?; - - Ok(HttpResponse::Ok().json(OkResponse { - data: torrents_response - })) -} - -fn get_torrent_id_from_request(req: &HttpRequest) -> Result { - match req.match_info().get("id") { - None => Err(ServiceError::BadRequest), - Some(torrent_id) => { - match torrent_id.parse() { - Err(_) => Err(ServiceError::BadRequest), - Ok(v) => Ok(v) - } - } - } -} - -async fn get_torrent_request_from_payload(mut payload: Multipart) -> Result { - let torrent_buffer = vec![0u8]; - let mut torrent_cursor = Cursor::new(torrent_buffer); - - let mut title = "".to_string(); - let mut description = "".to_string(); - let mut category = "".to_string(); - - while let Ok(Some(mut field)) = payload.try_next().await { - let content_type = field.content_disposition().unwrap(); - let name = content_type.get_name().unwrap(); - - match name { - "title" | "description" | "category" => { - let data = field.next().await; - if data.is_none() { continue } - let wrapped_data = &data.unwrap().unwrap(); - let parsed_data = std::str::from_utf8(&wrapped_data).unwrap(); - - match name { - "title" => { title = parsed_data.to_string() } - "description" => { description = parsed_data.to_string() } - "category" => { category = parsed_data.to_string() } - _ => {} - } - } - "torrent" => { - if *field.content_type() != "application/x-bittorrent" { - return Err(ServiceError::InvalidFileType) - } - - while let Some(chunk) = field.next().await { - let data = chunk.unwrap(); - torrent_cursor.write_all(&data)?; - } - } - _ => {} - } - } - - let fields = CreateTorrent { - title, - description, - category, - }; - - fields.verify()?; - - let position = torrent_cursor.position() as usize; - let inner = torrent_cursor.get_ref(); - - let torrent = parse_torrent::decode_torrent(&inner[..position]).map_err(|_| ServiceError::InvalidTorrentFile)?; - - // make sure that the pieces key has a length that is a multiple of 20 - if let Some(pieces) = torrent.info.pieces.as_ref() { - if pieces.as_ref().len() % 20 != 0 { return Err(ServiceError::InvalidTorrentPiecesLength); } - } - - Ok(TorrentRequest { - fields, - torrent, - }) -} diff --git a/src/routes/user.rs b/src/routes/user.rs deleted file mode 100644 index a8c5b20c..00000000 --- a/src/routes/user.rs +++ /dev/null @@ -1,254 +0,0 @@ -use actix_web::{web, Responder, HttpResponse, HttpRequest}; -use argon2::{Argon2, PasswordHash, PasswordHasher, PasswordVerifier}; -use argon2::password_hash::SaltString; -use serde::{Deserialize, Serialize}; -use jsonwebtoken::{DecodingKey, decode, Validation, Algorithm}; -use rand_core::OsRng; - -use crate::errors::{ServiceResult, ServiceError}; -use crate::common::WebAppData; -use crate::config::EmailOnSignup; -use crate::models::response::OkResponse; -use crate::models::response::TokenResponse; -use crate::mailer::VerifyClaims; -use crate::utils::regex::validate_email_address; -use crate::utils::time::current_time; - -pub fn init_routes(cfg: &mut web::ServiceConfig) { - cfg.service( - web::scope("/user") - .service(web::resource("/register") - .route(web::post().to(register))) - .service(web::resource("/login") - .route(web::post().to(login))) - .service(web::resource("/ban/{user}") - .route(web::delete().to(ban_user))) - .service(web::resource("/token/verify") - .route(web::post().to(verify_token))) - .service(web::resource("/token/renew") - .route(web::post().to(renew_token))) - .service(web::resource("/email/verify/{token}") - 
.route(web::get().to(verify_email))) - ); -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct Register { - pub username: String, - pub email: Option, - pub password: String, - pub confirm_password: String, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct Login { - pub login: String, - pub password: String, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct Token { - pub token: String, -} - -pub async fn register(req: HttpRequest, mut payload: web::Json, app_data: WebAppData) -> ServiceResult { - let settings = app_data.cfg.settings.read().await; - - match settings.auth.email_on_signup { - EmailOnSignup::Required => { - if payload.email.is_none() { return Err(ServiceError::EmailMissing) } - } - EmailOnSignup::None => { - payload.email = None - } - _ => {} - } - - if let Some(email) = &payload.email { - // check if email address is valid - if !validate_email_address(email) { - return Err(ServiceError::EmailInvalid) - } - } - - if payload.password != payload.confirm_password { - return Err(ServiceError::PasswordsDontMatch) - } - - let password_length = payload.password.len(); - - if password_length <= settings.auth.min_password_length { - return Err(ServiceError::PasswordTooShort) - } - - if password_length >= settings.auth.max_password_length { - return Err(ServiceError::PasswordTooLong) - } - - let salt = SaltString::generate(&mut OsRng); - - // Argon2 with default params (Argon2id v19) - let argon2 = Argon2::default(); - - // Hash password to PHC string ($argon2id$v=19$...) - let password_hash = argon2.hash_password(payload.password.as_bytes(), &salt)?.to_string(); - - if payload.username.contains('@') { - return Err(ServiceError::UsernameInvalid) - } - - let email = payload.email.as_ref().unwrap_or(&"".to_string()).to_string(); - - let user_id = app_data.database.insert_user_and_get_id(&payload.username, &email, &password_hash).await?; - - // if this is the first created account, give administrator rights - if user_id == 1 { - let _ = app_data.database.grant_admin_role(user_id).await; - } - - let conn_info = req.connection_info(); - - if settings.mail.email_verification_enabled && payload.email.is_some() { - let mail_res = app_data.mailer.send_verification_mail( - payload.email.as_ref().unwrap(), - &payload.username, - user_id, - format!("{}://{}", conn_info.scheme(), conn_info.host()).as_str() - ) - .await; - - if mail_res.is_err() { - let _ = app_data.database.delete_user(user_id).await; - return Err(ServiceError::FailedToSendVerificationEmail) - } - } - - Ok(HttpResponse::Ok()) -} - -pub async fn login(payload: web::Json, app_data: WebAppData) -> ServiceResult { - // get the user profile from database - let user_profile = app_data.database.get_user_profile_from_username(&payload.login) - .await - .map_err(|_| ServiceError::WrongPasswordOrUsername)?; - - // should not be able to fail if user_profile succeeded - let user_authentication = app_data.database.get_user_authentication_from_id(user_profile.user_id) - .await - .map_err(|_| ServiceError::InternalServerError)?; - - // wrap string of the hashed password into a PasswordHash struct for verification - let parsed_hash = PasswordHash::new(&user_authentication.password_hash)?; - - // verify if the user supplied and the database supplied passwords match - if Argon2::default().verify_password(payload.password.as_bytes(), &parsed_hash).is_err() { - return Err(ServiceError::WrongPasswordOrUsername) - } - - let settings = app_data.cfg.settings.read().await; - - // fail login if email verification 
is required and this email is not verified - if settings.mail.email_verification_enabled && !user_profile.email_verified { - return Err(ServiceError::EmailNotVerified) - } - - // drop read lock on settings - drop(settings); - - let user_compact = app_data.database.get_user_compact_from_id(user_profile.user_id).await?; - - // sign jwt with compact user details as payload - let token = app_data.auth.sign_jwt(user_compact.clone()).await; - - - Ok(HttpResponse::Ok().json(OkResponse { - data: TokenResponse { - token, - username: user_compact.username, - admin: user_compact.administrator - } - })) -} - -pub async fn verify_token(payload: web::Json, app_data: WebAppData) -> ServiceResult { - // verify if token is valid - let _claims = app_data.auth.verify_jwt(&payload.token).await?; - - Ok(HttpResponse::Ok().json(OkResponse { - data: format!("Token is valid.") - })) -} - -pub async fn renew_token(payload: web::Json, app_data: WebAppData) -> ServiceResult { - // verify if token is valid - let claims = app_data.auth.verify_jwt(&payload.token).await?; - - let user_compact = app_data.database.get_user_compact_from_id(claims.user.user_id).await?; - - const ONE_WEEK_IN_SECONDS: u64 = 604_800; - - // renew token if it is valid for less than one week - let token = match claims.exp - current_time() { - x if x < ONE_WEEK_IN_SECONDS => app_data.auth.sign_jwt(user_compact.clone()).await, - _ => payload.token.clone() - }; - - Ok(HttpResponse::Ok().json(OkResponse { - data: TokenResponse { - token, - username: user_compact.username, - admin: user_compact.administrator - } - })) -} - -pub async fn verify_email(req: HttpRequest, app_data: WebAppData) -> String { - let settings = app_data.cfg.settings.read().await; - let token = req.match_info().get("token").unwrap(); - - let token_data = match decode::( - token, - &DecodingKey::from_secret(settings.auth.secret_key.as_bytes()), - &Validation::new(Algorithm::HS256), - ) { - Ok(token_data) => { - if !token_data.claims.iss.eq("email-verification") { - return ServiceError::TokenInvalid.to_string() - } - - token_data.claims - }, - Err(_) => return ServiceError::TokenInvalid.to_string() - }; - - drop(settings); - - if app_data.database.verify_email(token_data.sub).await.is_err() { - return ServiceError::InternalServerError.to_string() - }; - - String::from("Email verified, you can close this page.") -} - -// TODO: add reason and date_expiry parameters to request -pub async fn ban_user(req: HttpRequest, app_data: WebAppData) -> ServiceResult { - let user = app_data.auth.get_user_compact_from_request(&req).await?; - - // check if user is administrator - if !user.administrator { return Err(ServiceError::Unauthorized) } - - let to_be_banned_username = req.match_info().get("user").unwrap(); - - let user_profile = app_data.database.get_user_profile_from_username(to_be_banned_username).await?; - - let reason = "no reason".to_string(); - - // user will be banned until the year 9999 - let date_expiry = chrono::NaiveDateTime::parse_from_str("9999-01-01 00:00:00", "%Y-%m-%d %H:%M:%S").expect("Could not parse date from 9999-01-01 00:00:00."); - - let _ = app_data.database.ban_user(user_profile.user_id, &reason, date_expiry).await?; - - Ok(HttpResponse::Ok().json(OkResponse { - data: format!("Banned user: {}", to_be_banned_username) - })) -} diff --git a/src/services/about.rs b/src/services/about.rs new file mode 100644 index 00000000..82175bf6 --- /dev/null +++ b/src/services/about.rs @@ -0,0 +1,63 @@ +//! Templates for "about" static pages. 
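The templates below are plain `format!` calls over raw strings, with the imported `API_VERSION_URL_PREFIX` interpolated into links. A minimal sketch of that pattern (the function name and the exact link target here are illustrative assumptions, not the project's):

```rust
// Sketch: building an HTML page from a raw string with `format!`.
// Named placeholders such as `{prefix}` capture variables in scope.
fn license_link(prefix: &str) -> String {
    format!(
        r#"<html>
    <body>
        <a href="/{prefix}/about/license">license</a>
    </body>
</html>"#
    )
}

fn main() {
    assert!(license_link("v1").contains("/v1/about/license"));
}
```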
+use crate::web::api::v1::routes::API_VERSION_URL_PREFIX; + +#[must_use] +pub fn index_page() -> String { + page() +} + +#[must_use] +pub fn page() -> String { + format!( + r#" + + + About + + +

+        <h1>Torrust Index</h1>
+
+        <h2>About</h2>
+
+        <p>Hi! This is a running torrust-index.</p>
+ + + +"# + ) +} + +#[must_use] +pub fn license_page() -> String { + format!( + r#" + + + Licensing + + +

+        <h1>Torrust Index</h1>
+
+        <h2>Licensing</h2>
+
+        <h3>Multiple Licenses</h3>
+
+        <p>This repository has multiple licenses depending on the content type, the date of contributions, or stemming from external component licenses that were not developed by any of the Torrust team members or Torrust repository contributors.</p>
+
+        <p>The two main licenses applicable to most of its content are:</p>
+
+        <p>- For Code -- agpl-3.0</p>
+
+        <p>- For Media (Images, etc.) -- cc-by-sa</p>
+
+        <p>If you want to read more about all the licenses and how they apply, please refer to the contributor agreement.</p>
+ + + +"# + ) +} diff --git a/src/services/authentication.rs b/src/services/authentication.rs new file mode 100644 index 00000000..e04342a4 --- /dev/null +++ b/src/services/authentication.rs @@ -0,0 +1,248 @@ +//! Authentication services. +use std::sync::Arc; + +use argon2::{Argon2, PasswordHash, PasswordVerifier}; +use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation}; +use pbkdf2::Pbkdf2; + +use super::user::{DbUserProfileRepository, DbUserRepository}; +use crate::config::Configuration; +use crate::databases::database::{Database, Error}; +use crate::errors::ServiceError; +use crate::models::user::{UserAuthentication, UserClaims, UserCompact, UserId}; +use crate::utils::clock; + +pub struct Service { + configuration: Arc, + json_web_token: Arc, + user_repository: Arc, + user_profile_repository: Arc, + user_authentication_repository: Arc, +} + +impl Service { + pub fn new( + configuration: Arc, + json_web_token: Arc, + user_repository: Arc, + user_profile_repository: Arc, + user_authentication_repository: Arc, + ) -> Self { + Self { + configuration, + json_web_token, + user_repository, + user_profile_repository, + user_authentication_repository, + } + } + + /// Authenticate user with username and password. + /// It returns a JWT token and a compact user profile. + /// + /// # Errors + /// + /// It returns: + /// + /// * A `ServiceError::WrongPasswordOrUsername` if unable to get user profile. + /// * A `ServiceError::InternalServerError` if unable to get user authentication data from the user id. + /// * A `ServiceError::EmailNotVerified` if the email should be, but is not verified. + /// * An error if unable to verify the password. + /// * An error if unable to get the user data from the database. + pub async fn login(&self, username: &str, password: &str) -> Result<(String, UserCompact), ServiceError> { + // Get the user profile from database + let user_profile = self + .user_profile_repository + .get_user_profile_from_username(username) + .await + .map_err(|_| ServiceError::WrongPasswordOrUsername)?; + + // Should not be able to fail if user_profile succeeded + let user_authentication = self + .user_authentication_repository + .get_user_authentication_from_id(&user_profile.user_id) + .await + .map_err(|_| ServiceError::InternalServerError)?; + + verify_password(password.as_bytes(), &user_authentication)?; + + let settings = self.configuration.settings.read().await; + + // Fail login if email verification is required and this email is not verified + if settings.mail.email_verification_enabled && !user_profile.email_verified { + return Err(ServiceError::EmailNotVerified); + } + + // Drop read lock on settings + drop(settings); + + let user_compact = self.user_repository.get_compact(&user_profile.user_id).await?; + + // Sign JWT with compact user details as payload + let token = self.json_web_token.sign(user_compact.clone()).await; + + Ok((token, user_compact)) + } + + /// Renew a supplied JWT. + /// + /// # Errors + /// + /// This function will return an error if: + /// + /// * Unable to verify the supplied payload as a valid jwt. + /// * Unable to get user data from the database. 
+ pub async fn renew_token(&self, token: &str) -> Result<(String, UserCompact), ServiceError> { + const ONE_WEEK_IN_SECONDS: u64 = 604_800; + + // Verify if token is valid + let claims = self.json_web_token.verify(token).await?; + + let user_compact = self.user_repository.get_compact(&claims.user.user_id).await?; + + // Renew token if it is valid for less than one week + let token = match claims.exp - clock::now() { + x if x < ONE_WEEK_IN_SECONDS => self.json_web_token.sign(user_compact.clone()).await, + _ => token.to_string(), + }; + + Ok((token, user_compact)) + } +} + +pub struct JsonWebToken { + cfg: Arc<Configuration>, +} + +impl JsonWebToken { + pub fn new(cfg: Arc<Configuration>) -> Self { + Self { cfg } + } + + /// Create Json Web Token. + /// + /// # Panics + /// + /// This function will panic if the default encoding algorithm does not + /// match the encoding key. + pub async fn sign(&self, user: UserCompact) -> String { + let settings = self.cfg.settings.read().await; + + // Create JWT that expires in two weeks + let key = settings.auth.secret_key.as_bytes(); + + // todo: create config option for setting the token validity in seconds. + let exp_date = clock::now() + 1_209_600; // two weeks from now + + let claims = UserClaims { user, exp: exp_date }; + + encode(&Header::default(), &claims, &EncodingKey::from_secret(key)).expect("argument `Header` should match `EncodingKey`") + } + + /// Verify Json Web Token. + /// + /// # Errors + /// + /// This function will return an error if the JWT is invalid or expired. + pub async fn verify(&self, token: &str) -> Result<UserClaims, ServiceError> { + let settings = self.cfg.settings.read().await; + + match decode::<UserClaims>( + token, + &DecodingKey::from_secret(settings.auth.secret_key.as_bytes()), + &Validation::new(Algorithm::HS256), + ) { + Ok(token_data) => { + if token_data.claims.exp < clock::now() { + return Err(ServiceError::TokenExpired); + } + Ok(token_data.claims) + } + Err(_) => Err(ServiceError::TokenInvalid), + } + } +} + +pub struct DbUserAuthenticationRepository { + database: Arc<Box<dyn Database>>, +} + +impl DbUserAuthenticationRepository { + #[must_use] + pub fn new(database: Arc<Box<dyn Database>>) -> Self { + Self { database } + } + + /// Get user authentication data from user id. + /// + /// # Errors + /// + /// This function will return an error if unable to get the user + /// authentication data from the database. + pub async fn get_user_authentication_from_id(&self, user_id: &UserId) -> Result<UserAuthentication, Error> { + self.database.get_user_authentication_from_id(*user_id).await + } +} + +/// Verify that the user-supplied and the database-stored passwords match. +/// +/// # Errors +/// +/// This function will return an error if unable to parse the password hash from the stored user authentication value. +/// This function will return a `ServiceError::WrongPasswordOrUsername` if unable to match the password with either `argon2id` or `pbkdf2-sha256`. 
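Both supported hashes are stored as PHC strings, whose first `$`-delimited segment names the algorithm; that segment is what the dispatch in `verify_password` below keys on. A small sketch using the same `PasswordHash` parser the service imports (the helper itself is made up for illustration):

```rust
use argon2::PasswordHash; // re-export of the `password_hash` crate's parser

// Illustrative helper: extract the algorithm identifier from a PHC string.
fn algorithm_of(phc: &str) -> Option<String> {
    PasswordHash::new(phc)
        .ok()
        .map(|hash| hash.algorithm.as_str().to_string())
}

fn main() {
    // PHC string taken from the pbkdf2 test fixture below.
    let phc = "$pbkdf2-sha256$i=10000,l=32$pZIh8nilm+cg6fk5Ubf2zQ$AngLuZ+sGUragqm4bIae/W+ior0TWxYFFaTx8CulqtY";
    assert_eq!(algorithm_of(phc).as_deref(), Some("pbkdf2-sha256"));
}
```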
+fn verify_password(password: &[u8], user_authentication: &UserAuthentication) -> Result<(), ServiceError> { + // Wrap the hashed-password string in a `PasswordHash` struct for verification + let parsed_hash = PasswordHash::new(&user_authentication.password_hash)?; + + match parsed_hash.algorithm.as_str() { + "argon2id" => { + if Argon2::default().verify_password(password, &parsed_hash).is_err() { + return Err(ServiceError::WrongPasswordOrUsername); + } + + Ok(()) + } + "pbkdf2-sha256" => { + if Pbkdf2.verify_password(password, &parsed_hash).is_err() { + return Err(ServiceError::WrongPasswordOrUsername); + } + + Ok(()) + } + _ => Err(ServiceError::WrongPasswordOrUsername), + } +} + +#[cfg(test)] +mod tests { + use super::verify_password; + use crate::models::user::UserAuthentication; + + #[test] + fn password_hashed_with_pbkdf2_sha256_should_be_verified() { + let password = "12345678".as_bytes(); + let password_hash = + "$pbkdf2-sha256$i=10000,l=32$pZIh8nilm+cg6fk5Ubf2zQ$AngLuZ+sGUragqm4bIae/W+ior0TWxYFFaTx8CulqtY".to_string(); + let user_authentication = UserAuthentication { + user_id: 1i64, + password_hash, + }; + + assert!(verify_password(password, &user_authentication).is_ok()); + assert!(verify_password("incorrect password".as_bytes(), &user_authentication).is_err()); + } + + #[test] + fn password_hashed_with_argon2_should_be_verified() { + let password = "87654321".as_bytes(); + let password_hash = + "$argon2id$v=19$m=4096,t=3,p=1$ycK5lJ4xmFBnaJ51M1j1eA$kU3UlNiSc3JDbl48TCj7JBDKmrT92DOUAgo4Yq0+nMw".to_string(); + let user_authentication = UserAuthentication { + user_id: 1i64, + password_hash, + }; + + assert!(verify_password(password, &user_authentication).is_ok()); + assert!(verify_password("incorrect password".as_bytes(), &user_authentication).is_err()); + } +} diff --git a/src/services/category.rs b/src/services/category.rs new file mode 100644 index 00000000..5abe8aa6 --- /dev/null +++ b/src/services/category.rs @@ -0,0 +1,137 @@ +//! Category service. +use std::sync::Arc; + +use super::user::DbUserRepository; +use crate::databases::database::{Category, Database, Error as DatabaseError}; +use crate::errors::ServiceError; +use crate::models::category::CategoryId; +use crate::models::user::UserId; + +pub struct Service { + category_repository: Arc<DbCategoryRepository>, + user_repository: Arc<DbUserRepository>, +} + +impl Service { + #[must_use] + pub fn new(category_repository: Arc<DbCategoryRepository>, user_repository: Arc<DbUserRepository>) -> Service { + Service { + category_repository, + user_repository, + } + } + + /// Adds a new category. + /// + /// # Errors + /// + /// It returns an error if: + /// + /// * The user does not have the required permissions. + /// * There is a database error. + pub async fn add_category(&self, category_name: &str, user_id: &UserId) -> Result<CategoryId, ServiceError> { + let user = self.user_repository.get_compact(user_id).await?; + + // Check if user is administrator + // todo: extract authorization service + if !user.administrator { + return Err(ServiceError::Unauthorized); + } + + let trimmed_name = category_name.trim(); + + if trimmed_name.is_empty() { + return Err(ServiceError::CategoryNameEmpty); + } + + match self.category_repository.add(trimmed_name).await { + Ok(id) => Ok(id), + Err(e) => match e { + DatabaseError::CategoryAlreadyExists => Err(ServiceError::CategoryAlreadyExists), + _ => Err(ServiceError::DatabaseError), + }, + } + } + + /// Deletes a category. + /// + /// # Errors + /// + /// It returns an error if: + /// + /// * The user does not have the required permissions. + /// * There is a database error. 
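The trim-then-validate step in `add_category` above is repeated by the tag service later in this diff; as a self-contained sketch of that rule (the error type is a stand-in, not the project's `ServiceError`):

```rust
// Self-contained sketch of the name validation performed by `add_category`
// and `add_tag`: trim first, then reject empty names.
#[derive(Debug, PartialEq)]
enum NameError {
    Empty,
}

fn normalized_name(raw: &str) -> Result<&str, NameError> {
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        Err(NameError::Empty)
    } else {
        Ok(trimmed)
    }
}

fn main() {
    assert_eq!(normalized_name("  Movies  "), Ok("Movies"));
    assert_eq!(normalized_name("   "), Err(NameError::Empty));
}
```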
+ pub async fn delete_category(&self, category_name: &str, user_id: &UserId) -> Result<(), ServiceError> { + let user = self.user_repository.get_compact(user_id).await?; + + // Check if user is administrator + // todo: extract authorization service + if !user.administrator { + return Err(ServiceError::Unauthorized); + } + + match self.category_repository.delete(category_name).await { + Ok(()) => Ok(()), + Err(e) => match e { + DatabaseError::CategoryNotFound => Err(ServiceError::CategoryNotFound), + _ => Err(ServiceError::DatabaseError), + }, + } + } +} + +pub struct DbCategoryRepository { + database: Arc<Box<dyn Database>>, +} + +impl DbCategoryRepository { + #[must_use] + pub fn new(database: Arc<Box<dyn Database>>) -> Self { + Self { database } + } + + /// It returns the categories. + /// + /// # Errors + /// + /// It returns an error if there is a database error. + pub async fn get_all(&self) -> Result<Vec<Category>, DatabaseError> { + self.database.get_categories().await + } + + /// Adds a new category. + /// + /// # Errors + /// + /// It returns an error if there is a database error. + pub async fn add(&self, category_name: &str) -> Result<CategoryId, DatabaseError> { + self.database.insert_category_and_get_id(category_name).await + } + + /// Deletes a category. + /// + /// # Errors + /// + /// It returns an error if there is a database error. + pub async fn delete(&self, category_name: &str) -> Result<(), DatabaseError> { + self.database.delete_category(category_name).await + } + + /// It finds a category by name. + /// + /// # Errors + /// + /// It returns an error if there is a database error. + pub async fn get_by_name(&self, category_name: &str) -> Result<Category, DatabaseError> { + self.database.get_category_from_name(category_name).await + } + + /// It finds a category by id. + /// + /// # Errors + /// + /// It returns an error if there is a database error. + pub async fn get_by_id(&self, category_id: &CategoryId) -> Result<Category, DatabaseError> { + self.database.get_category_from_id(*category_id).await + } +} diff --git a/src/services/hasher.rs b/src/services/hasher.rs new file mode 100644 index 00000000..3ee4f6e8 --- /dev/null +++ b/src/services/hasher.rs @@ -0,0 +1,28 @@ +//! Hashing service. +use sha1::{Digest, Sha1}; + +// Calculate the SHA-1 hash of a string +#[must_use] +pub fn sha1(data: &str) -> String { + // Create a Sha1 object + let mut hasher = Sha1::new(); + + // Write input message + hasher.update(data.as_bytes()); + + // Read hash digest and consume hasher + let result = hasher.finalize(); + + // Convert the hash (a byte array) to a string of hex characters + hex::encode(result) +} + +#[cfg(test)] +mod tests { + use crate::services::hasher::sha1; + + #[test] + fn it_should_hash_a_string() { + assert_eq!(sha1("hello world"), "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed"); + } +} diff --git a/src/services/mod.rs b/src/services/mod.rs new file mode 100644 index 00000000..b2431aec --- /dev/null +++ b/src/services/mod.rs @@ -0,0 +1,11 @@ +//! App services. +pub mod about; +pub mod authentication; +pub mod category; +pub mod hasher; +pub mod proxy; +pub mod settings; +pub mod tag; +pub mod torrent; +pub mod torrent_file; +pub mod user; diff --git a/src/services/proxy.rs b/src/services/proxy.rs new file mode 100644 index 00000000..9ea5ef3d --- /dev/null +++ b/src/services/proxy.rs @@ -0,0 +1,46 @@ +//! Image cache proxy. +//! +//! The image cache proxy is a service that allows users to proxy images +//! through the server. +//! +//! Sample URL: +//! 
+use std::sync::Arc; + +use bytes::Bytes; + +use super::user::DbUserRepository; +use crate::cache::image::manager::{Error, ImageCacheService}; +use crate::models::user::UserId; + +pub struct Service { + image_cache_service: Arc, + user_repository: Arc, +} + +impl Service { + #[must_use] + pub fn new(image_cache_service: Arc, user_repository: Arc) -> Self { + Self { + image_cache_service, + user_repository, + } + } + + /// It gets image by URL and caches it. + /// + /// # Errors + /// + /// It returns an error if: + /// + /// * The image URL is unreachable. + /// * The image URL is not an image. + /// * The image is too big. + /// * The user quota is met. + pub async fn get_image_by_url(&self, url: &str, user_id: &UserId) -> Result { + let user = self.user_repository.get_compact(user_id).await.ok(); + + self.image_cache_service.get_image_by_url(url, user).await + } +} diff --git a/src/services/settings.rs b/src/services/settings.rs new file mode 100644 index 00000000..5cfe9baf --- /dev/null +++ b/src/services/settings.rs @@ -0,0 +1,57 @@ +//! Settings service. +use std::sync::Arc; + +use super::user::DbUserRepository; +use crate::config::{Configuration, ConfigurationPublic, TorrustIndex}; +use crate::errors::ServiceError; +use crate::models::user::UserId; + +pub struct Service { + configuration: Arc, + user_repository: Arc, +} + +impl Service { + #[must_use] + pub fn new(configuration: Arc, user_repository: Arc) -> Service { + Service { + configuration, + user_repository, + } + } + + /// It gets all the settings. + /// + /// # Errors + /// + /// It returns an error if the user does not have the required permissions. + pub async fn get_all(&self, user_id: &UserId) -> Result { + let user = self.user_repository.get_compact(user_id).await?; + + // Check if user is administrator + // todo: extract authorization service + if !user.administrator { + return Err(ServiceError::Unauthorized); + } + + Ok(self.configuration.get_all().await) + } + + /// It gets only the public settings. + /// + /// # Errors + /// + /// It returns an error if the user does not have the required permissions. + pub async fn get_public(&self) -> ConfigurationPublic { + self.configuration.get_public().await + } + + /// It gets the site name from the settings. + /// + /// # Errors + /// + /// It returns an error if the user does not have the required permissions. + pub async fn get_site_name(&self) -> String { + self.configuration.get_site_name().await + } +} diff --git a/src/services/tag.rs b/src/services/tag.rs new file mode 100644 index 00000000..fcbf56c3 --- /dev/null +++ b/src/services/tag.rs @@ -0,0 +1,119 @@ +//! Tag service. +use std::sync::Arc; + +use super::user::DbUserRepository; +use crate::databases::database::{Database, Error as DatabaseError, Error}; +use crate::errors::ServiceError; +use crate::models::torrent_tag::{TagId, TorrentTag}; +use crate::models::user::UserId; + +pub struct Service { + tag_repository: Arc, + user_repository: Arc, +} + +impl Service { + #[must_use] + pub fn new(tag_repository: Arc, user_repository: Arc) -> Service { + Service { + tag_repository, + user_repository, + } + } + + /// Adds a new tag. + /// + /// # Errors + /// + /// It returns an error if: + /// + /// * The user does not have the required permissions. + /// * There is a database error. 
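The administrator check is repeated verbatim in the category, settings, tag, and torrent services, each marked with a `todo: extract authorization service`. A minimal, self-contained sketch of what that extracted guard could look like (the function and the type stand-ins are hypothetical):

```rust
// Stand-ins for the project's types, so the sketch is self-contained.
struct UserCompact {
    administrator: bool,
}

#[derive(Debug)]
enum ServiceError {
    Unauthorized,
}

// Hypothetical extracted guard, mirroring the inline check repeated above:
// `if !user.administrator { return Err(ServiceError::Unauthorized); }`
fn ensure_administrator(user: &UserCompact) -> Result<(), ServiceError> {
    if user.administrator {
        Ok(())
    } else {
        Err(ServiceError::Unauthorized)
    }
}

fn main() {
    assert!(ensure_administrator(&UserCompact { administrator: true }).is_ok());
    assert!(ensure_administrator(&UserCompact { administrator: false }).is_err());
}
```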
+ pub async fn add_tag(&self, tag_name: &str, user_id: &UserId) -> Result { + let user = self.user_repository.get_compact(user_id).await?; + + // Check if user is administrator + // todo: extract authorization service + if !user.administrator { + return Err(ServiceError::Unauthorized); + } + + let trimmed_name = tag_name.trim(); + + if trimmed_name.is_empty() { + return Err(ServiceError::TagNameEmpty); + } + + match self.tag_repository.add(trimmed_name).await { + Ok(id) => Ok(id), + Err(e) => match e { + DatabaseError::TagAlreadyExists => Err(ServiceError::TagAlreadyExists), + _ => Err(ServiceError::DatabaseError), + }, + } + } + + /// Deletes a tag. + /// + /// # Errors + /// + /// It returns an error if: + /// + /// * The user does not have the required permissions. + /// * There is a database error. + pub async fn delete_tag(&self, tag_id: &TagId, user_id: &UserId) -> Result<(), ServiceError> { + let user = self.user_repository.get_compact(user_id).await?; + + // Check if user is administrator + // todo: extract authorization service + if !user.administrator { + return Err(ServiceError::Unauthorized); + } + + match self.tag_repository.delete(tag_id).await { + Ok(()) => Ok(()), + Err(e) => match e { + DatabaseError::TagNotFound => Err(ServiceError::TagNotFound), + _ => Err(ServiceError::DatabaseError), + }, + } + } +} + +pub struct DbTagRepository { + database: Arc>, +} + +impl DbTagRepository { + #[must_use] + pub fn new(database: Arc>) -> Self { + Self { database } + } + + /// It adds a new tag and returns the newly created tag. + /// + /// # Errors + /// + /// It returns an error if there is a database error. + pub async fn add(&self, tag_name: &str) -> Result { + self.database.insert_tag_and_get_id(tag_name).await + } + + /// It returns all the tags. + /// + /// # Errors + /// + /// It returns an error if there is a database error. + pub async fn get_all(&self) -> Result, Error> { + self.database.get_tags().await + } + + /// It removes a tag and returns it. + /// + /// # Errors + /// + /// It returns an error if there is a database error. + pub async fn delete(&self, tag_id: &TagId) -> Result<(), Error> { + self.database.delete_tag(*tag_id).await + } +} diff --git a/src/services/torrent.rs b/src/services/torrent.rs new file mode 100644 index 00000000..0a1f6b94 --- /dev/null +++ b/src/services/torrent.rs @@ -0,0 +1,861 @@ +//! Torrent service. 
+use std::sync::Arc; + +use log::debug; +use serde_derive::{Deserialize, Serialize}; + +use super::category::DbCategoryRepository; +use super::user::DbUserRepository; +use crate::config::Configuration; +use crate::databases::database::{Database, Error, Sorting}; +use crate::errors::ServiceError; +use crate::models::category::CategoryId; +use crate::models::info_hash::InfoHash; +use crate::models::response::{DeletedTorrentResponse, TorrentResponse, TorrentsResponse}; +use crate::models::torrent::{Metadata, TorrentId, TorrentListing}; +use crate::models::torrent_file::{DbTorrent, Torrent, TorrentFile}; +use crate::models::torrent_tag::{TagId, TorrentTag}; +use crate::models::user::UserId; +use crate::tracker::statistics_importer::StatisticsImporter; +use crate::utils::parse_torrent::decode_and_validate_torrent_file; +use crate::{tracker, AsCSV}; + +pub struct Index { + configuration: Arc, + tracker_statistics_importer: Arc, + tracker_service: Arc, + user_repository: Arc, + category_repository: Arc, + torrent_repository: Arc, + torrent_info_hash_repository: Arc, + torrent_info_repository: Arc, + torrent_file_repository: Arc, + torrent_announce_url_repository: Arc, + torrent_tag_repository: Arc, + torrent_listing_generator: Arc, +} + +pub struct AddTorrentRequest { + pub title: String, + pub description: String, + pub category_name: String, + pub tags: Vec, + pub torrent_buffer: Vec, +} + +pub struct AddTorrentResponse { + pub torrent_id: TorrentId, + pub info_hash: String, + pub original_info_hash: String, +} + +/// User request to generate a torrent listing. +#[derive(Debug, Deserialize)] +pub struct ListingRequest { + pub page_size: Option, + pub page: Option, + pub sort: Option, + /// Expects comma separated string, eg: "?categories=movie,other,app" + pub categories: Option, + /// Expects comma separated string, eg: "?tags=Linux,Ubuntu" + pub tags: Option, + pub search: Option, +} + +/// Internal specification for torrent listings. +#[derive(Debug, Deserialize)] +pub struct ListingSpecification { + pub search: Option, + pub categories: Option>, + pub tags: Option>, + pub sort: Sorting, + pub offset: u64, + pub page_size: u8, +} + +impl Index { + #[allow(clippy::too_many_arguments)] + #[must_use] + pub fn new( + configuration: Arc, + tracker_statistics_importer: Arc, + tracker_service: Arc, + user_repository: Arc, + category_repository: Arc, + torrent_repository: Arc, + torrent_info_hash_repository: Arc, + torrent_info_repository: Arc, + torrent_file_repository: Arc, + torrent_announce_url_repository: Arc, + torrent_tag_repository: Arc, + torrent_listing_repository: Arc, + ) -> Self { + Self { + configuration, + tracker_statistics_importer, + tracker_service, + user_repository, + category_repository, + torrent_repository, + torrent_info_hash_repository, + torrent_info_repository, + torrent_file_repository, + torrent_announce_url_repository, + torrent_tag_repository, + torrent_listing_generator: torrent_listing_repository, + } + } + + /// Adds a torrent to the index. + /// + /// # Errors + /// + /// This function will return an error if: + /// + /// * Unable to get the user from the database. + /// * Unable to get torrent request from payload. + /// * Unable to get the category from the database. + /// * Unable to insert the torrent into the database. + /// * Unable to add the torrent to the whitelist. + /// * Torrent title is too short. + /// + /// # Panics + /// + /// This function will panic if: + /// + /// * Unable to parse the torrent info-hash. 
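Several `.torrent` files that differ only in fields the index rewrites (for example, the announce data) collapse to the same canonical info-hash, so one canonical hash can own several original info-hashes. A self-contained model of the grouping rule enforced in `add_torrent` below (the types are simplified stand-ins, not the repository's):

```rust
use std::collections::HashMap;

// Simplified stand-in: one canonical info-hash maps to many originals.
#[derive(Default)]
struct CanonicalGroups {
    groups: HashMap<String, Vec<String>>, // canonical -> originals
}

impl CanonicalGroups {
    /// Mirrors the service rule: re-uploading a known original is rejected,
    /// a new original for a known canonical hash is recorded but rejected,
    /// and a brand new canonical hash starts a new group.
    fn try_add(&mut self, original: &str, canonical: &str) -> Result<(), &'static str> {
        match self.groups.get_mut(canonical) {
            Some(originals) if originals.iter().any(|o| o.as_str() == original) => {
                Err("original info-hash already uploaded")
            }
            Some(originals) => {
                originals.push(original.to_string());
                Err("canonical info-hash group already exists")
            }
            None => {
                self.groups.insert(canonical.to_string(), vec![original.to_string()]);
                Ok(())
            }
        }
    }
}

fn main() {
    let mut groups = CanonicalGroups::default();
    assert!(groups.try_add("aaa", "ccc").is_ok()); // brand new torrent
    assert!(groups.try_add("bbb", "ccc").is_err()); // new original, same canonical
    assert!(groups.try_add("aaa", "ccc").is_err()); // exact duplicate
}
```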
+    pub async fn add_torrent(
+        &self,
+        add_torrent_req: AddTorrentRequest,
+        user_id: UserId,
+    ) -> Result<AddTorrentResponse, ServiceError> {
+        // Guard that the user exists
+        let _user = self.user_repository.get_compact(&user_id).await?;
+
+        let metadata = self.validate_and_build_metadata(&add_torrent_req).await?;
+
+        let (mut torrent, original_info_hash) = decode_and_validate_torrent_file(&add_torrent_req.torrent_buffer)?;
+
+        self.customize_announcement_info_for(&mut torrent).await;
+
+        self.canonical_info_hash_group_checks(&original_info_hash, &torrent.canonical_info_hash())
+            .await?;
+
+        let torrent_id = self
+            .torrent_repository
+            .add(&original_info_hash, &torrent, &metadata, user_id)
+            .await?;
+
+        // Synchronous secondary tasks
+
+        // code-review: consider moving this to a background task
+        self.import_torrent_statistics_from_tracker(torrent_id, &torrent.canonical_info_hash())
+            .await;
+
+        // We always whitelist the torrent on the tracker because
+        // even if the tracker mode is `public` it could be changed to `private`
+        // later on.
+        //
+        // code-review: maybe we should consider adding a new feature to
+        // whitelist all torrents from the admin panel if that change happens.
+        if let Err(e) = self
+            .tracker_service
+            .whitelist_info_hash(torrent.canonical_info_hash_hex())
+            .await
+        {
+            // If the torrent can't be whitelisted somehow, remove the torrent from database
+            drop(self.torrent_repository.delete(&torrent_id).await);
+            return Err(e);
+        }
+
+        // Build response
+
+        Ok(AddTorrentResponse {
+            torrent_id,
+            info_hash: torrent.canonical_info_hash_hex(),
+            original_info_hash: original_info_hash.to_string(),
+        })
+    }
+
+    async fn validate_and_build_metadata(&self, add_torrent_req: &AddTorrentRequest) -> Result<Metadata, ServiceError> {
+        if add_torrent_req.category_name.is_empty() {
+            return Err(ServiceError::MissingMandatoryMetadataFields);
+        }
+
+        let category = self
+            .category_repository
+            .get_by_name(&add_torrent_req.category_name)
+            .await
+            .map_err(|_| ServiceError::InvalidCategory)?;
+
+        let metadata = Metadata::new(
+            &add_torrent_req.title,
+            &add_torrent_req.description,
+            category.category_id,
+            &add_torrent_req.tags,
+        )?;
+
+        Ok(metadata)
+    }
+
+    async fn canonical_info_hash_group_checks(
+        &self,
+        original_info_hash: &InfoHash,
+        canonical_info_hash: &InfoHash,
+    ) -> Result<(), ServiceError> {
+        let original_info_hashes = self
+            .torrent_info_hash_repository
+            .get_canonical_info_hash_group(canonical_info_hash)
+            .await?;
+
+        if !original_info_hashes.is_empty() {
+            // Torrent with the same canonical infohash was already uploaded
+            debug!("Canonical infohash found: {:?}", canonical_info_hash.to_hex_string());
+
+            if let Some(original_info_hash) = original_info_hashes.find(original_info_hash) {
+                // The exact original infohash was already uploaded
+                debug!("Original infohash found: {:?}", original_info_hash.to_hex_string());
+
+                return Err(ServiceError::InfoHashAlreadyExists);
+            }
+
+            // A new original infohash is being uploaded with a canonical infohash that already exists.
+            debug!("Original infohash not found: {:?}", original_info_hash.to_hex_string());
+
+            // Add the new associated original infohash to the canonical one.
+            self.torrent_info_hash_repository
+                .add_info_hash_to_canonical_info_hash_group(original_info_hash, canonical_info_hash)
+                .await?;
+            return Err(ServiceError::CanonicalInfoHashAlreadyExists);
+        }
+
+        Ok(())
+    }
+
+    async fn customize_announcement_info_for(&self, torrent: &mut Torrent) {
+        let settings = self.configuration.settings.read().await;
+        let tracker_url = settings.tracker.url.clone();
+        torrent.set_announce_to(&tracker_url);
+        torrent.reset_announce_list_if_private();
+    }
+
+    async fn import_torrent_statistics_from_tracker(&self, torrent_id: TorrentId, canonical_info_hash: &InfoHash) {
+        drop(
+            self.tracker_statistics_importer
+                .import_torrent_statistics(torrent_id, &canonical_info_hash.to_hex_string())
+                .await,
+        );
+    }
+
+    /// Gets a torrent from the Index.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if unable to get the torrent from the
+    /// database.
+    pub async fn get_torrent(&self, info_hash: &InfoHash, opt_user_id: Option<UserId>) -> Result<Torrent, ServiceError> {
+        let mut torrent = self.torrent_repository.get_by_info_hash(info_hash).await?;
+
+        let tracker_url = self.get_tracker_url().await;
+
+        // Add personal tracker url or default tracker url
+        match opt_user_id {
+            Some(user_id) => {
+                let personal_announce_url = self
+                    .tracker_service
+                    .get_personal_announce_url(user_id)
+                    .await
+                    .unwrap_or(tracker_url);
+                torrent.announce = Some(personal_announce_url.clone());
+                if let Some(list) = &mut torrent.announce_list {
+                    let vec = vec![personal_announce_url];
+                    list.insert(0, vec);
+                }
+            }
+            None => {
+                torrent.announce = Some(tracker_url);
+            }
+        }
+
+        Ok(torrent)
+    }
+
+    /// Delete a Torrent from the Index.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if:
+    ///
+    /// * Unable to get the user who is deleting the torrent (logged-in user).
+    /// * The user does not have permission to delete the torrent.
+    /// * Unable to get the torrent listing from its ID.
+    /// * Unable to delete the torrent from the database.
+    pub async fn delete_torrent(&self, info_hash: &InfoHash, user_id: &UserId) -> Result<DeletedTorrentResponse, ServiceError> {
+        let user = self.user_repository.get_compact(user_id).await?;
+
+        // Only administrators can delete torrents.
+        // todo: move this to an authorization service.
+        if !user.administrator {
+            return Err(ServiceError::Unauthorized);
+        }
+
+        let torrent_listing = self.torrent_listing_generator.one_torrent_by_info_hash(info_hash).await?;
+
+        self.torrent_repository.delete(&torrent_listing.torrent_id).await?;
+
+        // Remove info-hash from tracker whitelist
+        let _ = self
+            .tracker_service
+            .remove_info_hash_from_whitelist(info_hash.to_string())
+            .await;
+
+        Ok(DeletedTorrentResponse {
+            torrent_id: torrent_listing.torrent_id,
+            info_hash: torrent_listing.info_hash,
+        })
+    }
+
+    /// Get torrent info from the Index.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if:
+    /// * Unable to get torrent ID.
+    /// * Unable to get torrent listing from id.
+    /// * Unable to get torrent category from id.
+    /// * Unable to get torrent files from id.
+    /// * Unable to get torrent info from id.
+    /// * Unable to get torrent announce url(s) from id.
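+    ///
+    /// A short, assumed usage sketch (the info-hash is parsed from its 40-char
+    /// hex form; `index` and the optional user ID come from the request
+    /// context):
+    ///
+    /// ```ignore
+    /// let info_hash: InfoHash = "443c7602b4fde83d1154d6d9da48808418b181b6".parse()?;
+    /// let info = index.get_torrent_info(&info_hash, Some(user_id)).await?;
+    /// println!("{} has {} seeders", info.title, info.seeders);
+    /// ```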
+    pub async fn get_torrent_info(
+        &self,
+        info_hash: &InfoHash,
+        opt_user_id: Option<UserId>,
+    ) -> Result<TorrentResponse, ServiceError> {
+        let torrent_listing = self.torrent_listing_generator.one_torrent_by_info_hash(info_hash).await?;
+
+        let torrent_id = torrent_listing.torrent_id;
+
+        let category = match torrent_listing.category_id {
+            Some(category_id) => Some(self.category_repository.get_by_id(&category_id).await?),
+            None => None,
+        };
+
+        let mut torrent_response = TorrentResponse::from_listing(torrent_listing, category);
+
+        // Add files
+
+        torrent_response.files = self.torrent_file_repository.get_by_torrent_id(&torrent_id).await?;
+
+        if torrent_response.files.len() == 1 {
+            let torrent_info = self.torrent_info_repository.get_by_info_hash(info_hash).await?;
+
+            torrent_response
+                .files
+                .iter_mut()
+                .for_each(|v| v.path = vec![torrent_info.name.to_string()]);
+        }
+
+        // Add trackers
+
+        torrent_response.trackers = self.torrent_announce_url_repository.get_by_torrent_id(&torrent_id).await?;
+
+        let tracker_url = self.get_tracker_url().await;
+
+        // add tracker url
+        match opt_user_id {
+            Some(user_id) => {
+                // if no user owned tracker key can be found, use default tracker url
+                let personal_announce_url = self
+                    .tracker_service
+                    .get_personal_announce_url(user_id)
+                    .await
+                    .unwrap_or(tracker_url);
+                // add personal tracker url to front of vec
+                torrent_response.trackers.insert(0, personal_announce_url);
+            }
+            None => {
+                torrent_response.trackers.insert(0, tracker_url);
+            }
+        }
+
+        // Add magnet link
+
+        // todo: extract a struct or function to build the magnet links
+        let mut magnet = format!(
+            "magnet:?xt=urn:btih:{}&dn={}",
+            torrent_response.info_hash,
+            urlencoding::encode(&torrent_response.title)
+        );
+
+        // Add trackers from torrent file to magnet link
+        for tracker in &torrent_response.trackers {
+            magnet.push_str(&format!("&tr={}", urlencoding::encode(tracker)));
+        }
+
+        torrent_response.magnet_link = magnet;
+
+        // Get realtime seeders and leechers
+        if let Ok(torrent_info) = self
+            .tracker_statistics_importer
+            .import_torrent_statistics(torrent_response.torrent_id, &torrent_response.info_hash)
+            .await
+        {
+            torrent_response.seeders = torrent_info.seeders;
+            torrent_response.leechers = torrent_info.leechers;
+        }
+
+        torrent_response.tags = self.torrent_tag_repository.get_tags_for_torrent(&torrent_id).await?;
+
+        Ok(torrent_response)
+    }
+
+    /// It returns a list of torrents matching the search criteria.
+    ///
+    /// # Errors
+    ///
+    /// Returns a `ServiceError::DatabaseError` if the database query fails.
+    pub async fn generate_torrent_info_listing(&self, request: &ListingRequest) -> Result<TorrentsResponse, ServiceError> {
+        let torrent_listing_specification = self.listing_specification_from_user_request(request).await;
+
+        let torrents_response = self
+            .torrent_listing_generator
+            .generate_listing(&torrent_listing_specification)
+            .await?;
+
+        Ok(torrents_response)
+    }
+
+    /// It converts the user listing request into an internal listing
+    /// specification.
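+    ///
+    /// For example, a query string such as
+    /// `?page=2&page_size=10&sort=UploadedDesc&tags=Linux,Ubuntu` would map to
+    /// a specification with `offset = 20`, `page_size = 10` and
+    /// `tags = Some(vec!["Linux", "Ubuntu"])`, assuming `10` does not exceed
+    /// the configured maximum page size.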
+    async fn listing_specification_from_user_request(&self, request: &ListingRequest) -> ListingSpecification {
+        let settings = self.configuration.settings.read().await;
+        let default_torrent_page_size = settings.api.default_torrent_page_size;
+        let max_torrent_page_size = settings.api.max_torrent_page_size;
+        drop(settings);
+
+        let sort = request.sort.unwrap_or(Sorting::UploadedDesc);
+        let page = request.page.unwrap_or(0);
+        let page_size = request.page_size.unwrap_or(default_torrent_page_size);
+
+        // Guard that page size does not exceed the maximum
+        let page_size = if page_size > max_torrent_page_size {
+            max_torrent_page_size
+        } else {
+            page_size
+        };
+
+        let offset = u64::from(page * u32::from(page_size));
+
+        let categories = request.categories.as_csv::<String>().unwrap_or(None);
+
+        let tags = request.tags.as_csv::<String>().unwrap_or(None);
+
+        ListingSpecification {
+            search: request.search.clone(),
+            categories,
+            tags,
+            sort,
+            offset,
+            page_size,
+        }
+    }
+
+    /// Update the torrent info on the Index.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if:
+    ///
+    /// * Unable to get the user.
+    /// * Unable to get listing from id.
+    /// * Unable to update the torrent title or description.
+    /// * User does not have the permissions to update the torrent.
+    pub async fn update_torrent_info(
+        &self,
+        info_hash: &InfoHash,
+        title: &Option<String>,
+        description: &Option<String>,
+        category_id: &Option<CategoryId>,
+        tags: &Option<Vec<TagId>>,
+        user_id: &UserId,
+    ) -> Result<TorrentResponse, ServiceError> {
+        let updater = self.user_repository.get_compact(user_id).await?;
+
+        let torrent_listing = self.torrent_listing_generator.one_torrent_by_info_hash(info_hash).await?;
+
+        // Check if user is owner or administrator
+        // todo: move this to an authorization service.
+        if !(torrent_listing.uploader == updater.username || updater.administrator) {
+            return Err(ServiceError::Unauthorized);
+        }
+
+        self.torrent_info_repository
+            .update(&torrent_listing.torrent_id, title, description, category_id, tags)
+            .await?;
+
+        let torrent_listing = self
+            .torrent_listing_generator
+            .one_torrent_by_torrent_id(&torrent_listing.torrent_id)
+            .await?;
+
+        let category = match torrent_listing.category_id {
+            Some(category_id) => Some(self.category_repository.get_by_id(&category_id).await?),
+            None => None,
+        };
+
+        let torrent_response = TorrentResponse::from_listing(torrent_listing, category);
+
+        Ok(torrent_response)
+    }
+
+    async fn get_tracker_url(&self) -> String {
+        let settings = self.configuration.settings.read().await;
+        settings.tracker.url.clone()
+    }
+}
+
+pub struct DbTorrentRepository {
+    database: Arc<Box<dyn Database>>,
+}
+
+impl DbTorrentRepository {
+    #[must_use]
+    pub fn new(database: Arc<Box<dyn Database>>) -> Self {
+        Self { database }
+    }
+
+    /// It finds the torrent by info-hash.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if there is a database error.
+    pub async fn get_by_info_hash(&self, info_hash: &InfoHash) -> Result<Torrent, Error> {
+        self.database.get_torrent_from_info_hash(info_hash).await
+    }
+
+    /// Inserts the entire torrent in the database.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if there is a database error.
+    pub async fn add(
+        &self,
+        original_info_hash: &InfoHash,
+        torrent: &Torrent,
+        metadata: &Metadata,
+        user_id: UserId,
+    ) -> Result<TorrentId, Error> {
+        self.database
+            .insert_torrent_and_get_id(original_info_hash, torrent, user_id, metadata)
+            .await
+    }
+
+    /// Deletes the entire torrent in the database.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if there is a database error.
+    pub async fn delete(&self, torrent_id: &TorrentId) -> Result<(), Error> {
+        self.database.delete_torrent(*torrent_id).await
+    }
+}
+
+#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
+pub struct DbTorrentInfoHash {
+    pub info_hash: String,
+    pub canonical_info_hash: String,
+    pub original_is_known: bool,
+}
+
+/// All the infohashes associated to a canonical one.
+///
+/// When you upload a torrent the info-hash might change because the Index
+/// removes the non-standard fields in the `info` dictionary. That makes the
+/// infohash change. The canonical infohash is the resulting infohash.
+/// This struct groups the original infohashes of a canonical infohash.
+///
+/// The relationship is 1 canonical infohash -> N original infohashes.
+pub struct CanonicalInfoHashGroup {
+    pub canonical_info_hash: InfoHash,
+    /// The list of original infohashes associated to the canonical one.
+    pub original_info_hashes: Vec<InfoHash>,
+}
+pub struct DbCanonicalInfoHashGroupRepository {
+    database: Arc<Box<dyn Database>>,
+}
+
+impl CanonicalInfoHashGroup {
+    #[must_use]
+    pub fn is_empty(&self) -> bool {
+        self.original_info_hashes.is_empty()
+    }
+
+    #[must_use]
+    pub fn find(&self, original_info_hash: &InfoHash) -> Option<&InfoHash> {
+        self.original_info_hashes.iter().find(|&hash| *hash == *original_info_hash)
+    }
+}
+
+impl DbCanonicalInfoHashGroupRepository {
+    #[must_use]
+    pub fn new(database: Arc<Box<dyn Database>>) -> Self {
+        Self { database }
+    }
+
+    /// It returns all the infohashes associated to the canonical one.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if there was a problem with the database.
+    pub async fn get_canonical_info_hash_group(&self, info_hash: &InfoHash) -> Result<CanonicalInfoHashGroup, Error> {
+        self.database.get_torrent_canonical_info_hash_group(info_hash).await
+    }
+
+    /// It returns the canonical infohash associated with the supplied original
+    /// infohash, if the original infohash belongs to any canonical infohash
+    /// group.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if there was a problem with the database.
+    pub async fn find_canonical_info_hash_for(&self, info_hash: &InfoHash) -> Result<Option<InfoHash>, Error> {
+        self.database.find_canonical_info_hash_for(info_hash).await
+    }
+
+    /// It adds a new original infohash to a canonical infohash group.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if there was a problem with the database.
+    pub async fn add_info_hash_to_canonical_info_hash_group(
+        &self,
+        original_info_hash: &InfoHash,
+        canonical_info_hash: &InfoHash,
+    ) -> Result<(), Error> {
+        self.database
+            .add_info_hash_to_canonical_info_hash_group(original_info_hash, canonical_info_hash)
+            .await
+    }
+}
+
+pub struct DbTorrentInfoRepository {
+    database: Arc<Box<dyn Database>>,
+}
+
+impl DbTorrentInfoRepository {
+    #[must_use]
+    pub fn new(database: Arc<Box<dyn Database>>) -> Self {
+        Self { database }
+    }
+
+    /// It finds the torrent info by info-hash.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if there is a database error.
+    pub async fn get_by_info_hash(&self, info_hash: &InfoHash) -> Result<DbTorrent, Error> {
+        self.database.get_torrent_info_from_info_hash(info_hash).await
+    }
+
+    /// It updates the torrent title and/or description by torrent ID.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if there is a database error.
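+    ///
+    /// A sketch of a partial update (only fields wrapped in `Some` are
+    /// touched; the values shown are illustrative):
+    ///
+    /// ```ignore
+    /// repo.update(
+    ///     &torrent_id,
+    ///     &Some("New title".to_string()),
+    ///     &None, // keep the current description
+    ///     &None, // keep the current category
+    ///     &Some(vec![tag_id]),
+    /// )
+    /// .await?;
+    /// ```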
+    pub async fn update(
+        &self,
+        torrent_id: &TorrentId,
+        opt_title: &Option<String>,
+        opt_description: &Option<String>,
+        opt_category_id: &Option<CategoryId>,
+        opt_tags: &Option<Vec<TagId>>,
+    ) -> Result<(), Error> {
+        if let Some(title) = &opt_title {
+            self.database.update_torrent_title(*torrent_id, title).await?;
+        }
+
+        if let Some(description) = &opt_description {
+            self.database.update_torrent_description(*torrent_id, description).await?;
+        }
+
+        if let Some(category_id) = &opt_category_id {
+            self.database.update_torrent_category(*torrent_id, *category_id).await?;
+        }
+
+        if let Some(tags) = opt_tags {
+            let mut current_tags: Vec<TagId> = self
+                .database
+                .get_tags_for_torrent_id(*torrent_id)
+                .await?
+                .iter()
+                .map(|tag| tag.tag_id)
+                .collect();
+
+            let mut new_tags = tags.clone();
+
+            current_tags.sort_unstable();
+            new_tags.sort_unstable();
+
+            if new_tags != current_tags {
+                self.database.delete_all_torrent_tag_links(*torrent_id).await?;
+                self.database.add_torrent_tag_links(*torrent_id, tags).await?;
+            }
+        }
+
+        Ok(())
+    }
+}
+
+pub struct DbTorrentFileRepository {
+    database: Arc<Box<dyn Database>>,
+}
+
+impl DbTorrentFileRepository {
+    #[must_use]
+    pub fn new(database: Arc<Box<dyn Database>>) -> Self {
+        Self { database }
+    }
+
+    /// It finds the torrent files by torrent ID.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn get_by_torrent_id(&self, torrent_id: &TorrentId) -> Result<Vec<TorrentFile>, Error> {
+        self.database.get_torrent_files_from_id(*torrent_id).await
+    }
+}
+
+pub struct DbTorrentAnnounceUrlRepository {
+    database: Arc<Box<dyn Database>>,
+}
+
+impl DbTorrentAnnounceUrlRepository {
+    #[must_use]
+    pub fn new(database: Arc<Box<dyn Database>>) -> Self {
+        Self { database }
+    }
+
+    /// It finds the announce URLs by torrent ID.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn get_by_torrent_id(&self, torrent_id: &TorrentId) -> Result<Vec<String>, Error> {
+        self.database
+            .get_torrent_announce_urls_from_id(*torrent_id)
+            .await
+            .map(|v| v.into_iter().flatten().collect())
+    }
+}
+
+pub struct DbTorrentTagRepository {
+    database: Arc<Box<dyn Database>>,
+}
+
+impl DbTorrentTagRepository {
+    #[must_use]
+    pub fn new(database: Arc<Box<dyn Database>>) -> Self {
+        Self { database }
+    }
+
+    /// It adds a new torrent tag link.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn link_torrent_to_tag(&self, torrent_id: &TorrentId, tag_id: &TagId) -> Result<(), Error> {
+        self.database.add_torrent_tag_link(*torrent_id, *tag_id).await
+    }
+
+    /// It adds multiple torrent tag links at once.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn link_torrent_to_tags(&self, torrent_id: &TorrentId, tag_ids: &[TagId]) -> Result<(), Error> {
+        self.database.add_torrent_tag_links(*torrent_id, tag_ids).await
+    }
+
+    /// It returns all the tags linked to a certain torrent ID.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn get_tags_for_torrent(&self, torrent_id: &TorrentId) -> Result<Vec<TorrentTag>, Error> {
+        self.database.get_tags_for_torrent_id(*torrent_id).await
+    }
+
+    /// It removes a torrent tag link.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn unlink_torrent_from_tag(&self, torrent_id: &TorrentId, tag_id: &TagId) -> Result<(), Error> {
+        self.database.delete_torrent_tag_link(*torrent_id, *tag_id).await
+    }
+
+    /// It removes all tags for a certain torrent.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn unlink_all_tags_for_torrent(&self, torrent_id: &TorrentId) -> Result<(), Error> {
+        self.database.delete_all_torrent_tag_links(*torrent_id).await
+    }
+}
+
+pub struct DbTorrentListingGenerator {
+    database: Arc<Box<dyn Database>>,
+}
+
+impl DbTorrentListingGenerator {
+    #[must_use]
+    pub fn new(database: Arc<Box<dyn Database>>) -> Self {
+        Self { database }
+    }
+
+    /// It finds the torrent listing by info-hash.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn one_torrent_by_info_hash(&self, info_hash: &InfoHash) -> Result<TorrentListing, Error> {
+        self.database.get_torrent_listing_from_info_hash(info_hash).await
+    }
+
+    /// It finds the torrent listing by torrent ID.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn one_torrent_by_torrent_id(&self, torrent_id: &TorrentId) -> Result<TorrentListing, Error> {
+        self.database.get_torrent_listing_from_id(*torrent_id).await
+    }
+
+    /// It generates a paginated torrent listing matching the given
+    /// specification.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn generate_listing(&self, specification: &ListingSpecification) -> Result<TorrentsResponse, Error> {
+        self.database
+            .get_torrents_search_sorted_paginated(
+                &specification.search,
+                &specification.categories,
+                &specification.tags,
+                &specification.sort,
+                specification.offset,
+                specification.page_size,
+            )
+            .await
+    }
+}
diff --git a/src/services/torrent_file.rs b/src/services/torrent_file.rs
new file mode 100644
index 00000000..338ba6e6
--- /dev/null
+++ b/src/services/torrent_file.rs
@@ -0,0 +1,142 @@
+//! This module contains the services related to torrent file management.
+use uuid::Uuid;
+
+use crate::models::torrent_file::{Torrent, TorrentFile, TorrentInfoDictionary};
+use crate::services::hasher::sha1;
+
+/// It contains the information required to create a new torrent file.
+///
+/// It's not the full in-memory representation of a torrent file. The full
+/// in-memory representation is the `Torrent` struct.
+pub struct CreateTorrentRequest {
+    // The `info` dictionary fields
+    pub name: String,
+    pub pieces: String,
+    pub piece_length: i64,
+    pub private: Option<u8>,
+    pub root_hash: i64, // True (1) if it's a BEP 30 torrent.
+    pub files: Vec<TorrentFile>,
+    // Other fields of the root level metainfo dictionary
+    pub announce_urls: Vec<Vec<String>>,
+    pub comment: Option<String>,
+}
+
+impl CreateTorrentRequest {
+    /// It builds a `Torrent` from a request.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the `torrent_info.pieces` is not a valid hex string.
+    #[must_use]
+    pub fn build_torrent(&self) -> Torrent {
+        let info_dict = self.build_info_dictionary();
+
+        Torrent {
+            info: info_dict,
+            announce: None,
+            nodes: None,
+            encoding: None,
+            httpseeds: None,
+            announce_list: Some(self.announce_urls.clone()),
+            creation_date: None,
+            comment: self.comment.clone(),
+            created_by: None,
+        }
+    }
+
+    /// It builds a `TorrentInfoDictionary` from the current torrent request.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the `pieces` field is not a valid hex string.
+    #[must_use]
+    fn build_info_dictionary(&self) -> TorrentInfoDictionary {
+        TorrentInfoDictionary::with(
+            &self.name,
+            self.piece_length,
+            self.private,
+            self.root_hash,
+            &self.pieces,
+            &self.files,
+        )
+    }
+}
+
+/// It generates a random single-file torrent for testing purposes.
+///
+/// The torrent will contain a single text file with the UUID as its content.
+///
+/// # Panics
+///
+/// This function will panic if the sample file contents length in bytes is
+/// greater than `i64::MAX`.
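+///
+/// A sketch of the expected usage (the UUID below is arbitrary):
+///
+/// ```ignore
+/// use uuid::Uuid;
+///
+/// let torrent = generate_random_torrent(Uuid::new_v4());
+/// assert!(torrent.info.name.starts_with("file-"));
+/// ```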
+#[must_use]
+pub fn generate_random_torrent(id: Uuid) -> Torrent {
+    // Content of the file from which the torrent will be generated.
+    // We use the UUID as the content of the file.
+    let file_contents = format!("{id}\n");
+
+    let torrent_files: Vec<TorrentFile> = vec![TorrentFile {
+        path: vec![String::new()],
+        length: i64::try_from(file_contents.len()).expect("file contents size in bytes cannot exceed i64::MAX"),
+        md5sum: None,
+    }];
+
+    let torrent_announce_urls: Vec<Vec<String>> = vec![];
+
+    let create_torrent_req = CreateTorrentRequest {
+        name: format!("file-{id}.txt"),
+        pieces: sha1(&file_contents),
+        piece_length: 16384,
+        private: None,
+        root_hash: 0,
+        files: torrent_files,
+        announce_urls: torrent_announce_urls,
+        comment: None,
+    };
+
+    create_torrent_req.build_torrent()
+}
+
+#[cfg(test)]
+mod tests {
+    use serde_bytes::ByteBuf;
+    use uuid::Uuid;
+
+    use crate::models::torrent_file::{Torrent, TorrentInfoDictionary};
+    use crate::services::torrent_file::generate_random_torrent;
+
+    #[test]
+    fn it_should_generate_a_random_meta_info_file() {
+        let uuid = Uuid::parse_str("d6170378-2c14-4ccc-870d-2a8e15195e23").unwrap();
+
+        let torrent = generate_random_torrent(uuid);
+
+        let expected_torrent = Torrent {
+            info: TorrentInfoDictionary {
+                name: "file-d6170378-2c14-4ccc-870d-2a8e15195e23.txt".to_string(),
+                pieces: Some(ByteBuf::from(vec![
+                    62, 231, 243, 51, 234, 165, 204, 209, 51, 132, 163, 133, 249, 50, 107, 46, 24, 15, 251, 32,
+                ])),
+                piece_length: 16384,
+                md5sum: None,
+                length: Some(37),
+                files: None,
+                private: None,
+                path: None,
+                root_hash: None,
+                source: None,
+            },
+            announce: None,
+            announce_list: Some(vec![]),
+            creation_date: None,
+            comment: None,
+            created_by: None,
+            nodes: None,
+            encoding: None,
+            httpseeds: None,
+        };
+
+        assert_eq!(torrent, expected_torrent);
+    }
+}
diff --git a/src/services/user.rs b/src/services/user.rs
new file mode 100644
index 00000000..358e7431
--- /dev/null
+++ b/src/services/user.rs
@@ -0,0 +1,351 @@
+//! User services.
+use std::sync::Arc;
+
+use argon2::password_hash::SaltString;
+use argon2::{Argon2, PasswordHasher};
+use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
+use log::{debug, info};
+use pbkdf2::password_hash::rand_core::OsRng;
+
+use crate::config::{Configuration, EmailOnSignup};
+use crate::databases::database::{Database, Error};
+use crate::errors::ServiceError;
+use crate::mailer;
+use crate::mailer::VerifyClaims;
+use crate::models::user::{UserCompact, UserId, UserProfile};
+use crate::utils::validation::validate_email_address;
+use crate::web::api::v1::contexts::user::forms::RegistrationForm;
+
+/// Since user email could be optional, we need a way to represent "no email"
+/// in the database. This function returns the string that should be used for
+/// that purpose.
+fn no_email() -> String {
+    String::new()
+}
+
+pub struct RegistrationService {
+    configuration: Arc<Configuration>,
+    mailer: Arc<mailer::Service>,
+    user_repository: Arc<DbUserRepository>,
+    user_profile_repository: Arc<DbUserProfileRepository>,
+}
+
+impl RegistrationService {
+    #[must_use]
+    pub fn new(
+        configuration: Arc<Configuration>,
+        mailer: Arc<mailer::Service>,
+        user_repository: Arc<DbUserRepository>,
+        user_profile_repository: Arc<DbUserProfileRepository>,
+    ) -> Self {
+        Self {
+            configuration,
+            mailer,
+            user_repository,
+            user_profile_repository,
+        }
+    }
+
+    /// It registers a new user.
+    ///
+    /// # Errors
+    ///
+    /// This function will return a:
+    ///
+    /// * `ServiceError::EmailMissing` if email is required, but missing.
+    /// * `ServiceError::EmailInvalid` if supplied email is badly formatted.
+    /// * `ServiceError::PasswordsDontMatch` if the supplied passwords do not match.
+    /// * `ServiceError::PasswordTooShort` if the supplied password is too short.
+    /// * `ServiceError::PasswordTooLong` if the supplied password is too long.
+    /// * `ServiceError::UsernameInvalid` if the supplied username is badly formatted.
+    /// * `ServiceError::FailedToSendVerificationEmail` if unable to send the required verification email.
+    /// * An error if unable to successfully hash the password.
+    /// * An error if unable to insert user into the database.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if the email is required, but missing.
+    pub async fn register_user(&self, registration_form: &RegistrationForm, api_base_url: &str) -> Result<UserId, ServiceError> {
+        info!("registering user: {}", registration_form.username);
+
+        let settings = self.configuration.settings.read().await;
+
+        let opt_email = match settings.auth.email_on_signup {
+            EmailOnSignup::Required => {
+                if registration_form.email.is_none() {
+                    return Err(ServiceError::EmailMissing);
+                }
+                registration_form.email.clone()
+            }
+            EmailOnSignup::None => None,
+            EmailOnSignup::Optional => registration_form.email.clone(),
+        };
+
+        if let Some(email) = &registration_form.email {
+            if !validate_email_address(email) {
+                return Err(ServiceError::EmailInvalid);
+            }
+        }
+
+        if registration_form.password != registration_form.confirm_password {
+            return Err(ServiceError::PasswordsDontMatch);
+        }
+
+        let password_length = registration_form.password.len();
+
+        if password_length <= settings.auth.min_password_length {
+            return Err(ServiceError::PasswordTooShort);
+        }
+
+        if password_length >= settings.auth.max_password_length {
+            return Err(ServiceError::PasswordTooLong);
+        }
+
+        let salt = SaltString::generate(&mut OsRng);
+
+        // Argon2 with default params (Argon2id v19)
+        let argon2 = Argon2::default();
+
+        // Hash password to PHC string ($argon2id$v=19$...)
+        let password_hash = argon2
+            .hash_password(registration_form.password.as_bytes(), &salt)?
+            .to_string();
+
+        if registration_form.username.contains('@') {
+            return Err(ServiceError::UsernameInvalid);
+        }
+
+        let user_id = self
+            .user_repository
+            .add(
+                &registration_form.username,
+                &opt_email.clone().unwrap_or(no_email()),
+                &password_hash,
+            )
+            .await?;
+
+        // If this is the first created account, give administrator rights
+        if user_id == 1 {
+            drop(self.user_repository.grant_admin_role(&user_id).await);
+        }
+
+        if settings.mail.email_verification_enabled {
+            if let Some(email) = opt_email {
+                let mail_res = self
+                    .mailer
+                    .send_verification_mail(&email, &registration_form.username, user_id, api_base_url)
+                    .await;
+
+                if mail_res.is_err() {
+                    drop(self.user_repository.delete(&user_id).await);
+                    return Err(ServiceError::FailedToSendVerificationEmail);
+                }
+            }
+        }
+
+        Ok(user_id)
+    }
+
+    /// It verifies the email address of a user via the token sent to the
+    /// user's email.
+    ///
+    /// # Errors
+    ///
+    /// This function will return a `ServiceError::DatabaseError` if unable to
+    /// update the user's email verification status.
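+    ///
+    /// Conceptually (a sketch; the token is the JWT that was emailed to the
+    /// user, and `registration_service` is assumed to be available):
+    ///
+    /// ```ignore
+    /// let verified: bool = registration_service.verify_email(&token).await?;
+    /// ```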
+    pub async fn verify_email(&self, token: &str) -> Result<bool, ServiceError> {
+        let settings = self.configuration.settings.read().await;
+
+        let token_data = match decode::<VerifyClaims>(
+            token,
+            &DecodingKey::from_secret(settings.auth.secret_key.as_bytes()),
+            &Validation::new(Algorithm::HS256),
+        ) {
+            Ok(token_data) => {
+                if !token_data.claims.iss.eq("email-verification") {
+                    return Ok(false);
+                }
+
+                token_data.claims
+            }
+            Err(_) => return Ok(false),
+        };
+
+        drop(settings);
+
+        let user_id = token_data.sub;
+
+        if self.user_profile_repository.verify_email(&user_id).await.is_err() {
+            return Err(ServiceError::DatabaseError);
+        };
+
+        Ok(true)
+    }
+}
+
+pub struct BanService {
+    user_repository: Arc<DbUserRepository>,
+    user_profile_repository: Arc<DbUserProfileRepository>,
+    banned_user_list: Arc<DbBannedUserList>,
+}
+
+impl BanService {
+    #[must_use]
+    pub fn new(
+        user_repository: Arc<DbUserRepository>,
+        user_profile_repository: Arc<DbUserProfileRepository>,
+        banned_user_list: Arc<DbBannedUserList>,
+    ) -> Self {
+        Self {
+            user_repository,
+            user_profile_repository,
+            banned_user_list,
+        }
+    }
+
+    /// Ban a user from the Index.
+    ///
+    /// # Errors
+    ///
+    /// This function will return a:
+    ///
+    /// * `ServiceError::InternalServerError` if unable to get the user from the request.
+    /// * An error if unable to get user profile from supplied username.
+    /// * An error if unable to set the ban of the user in the database.
+    pub async fn ban_user(&self, username_to_be_banned: &str, user_id: &UserId) -> Result<(), ServiceError> {
+        debug!("user with ID {user_id} banning username: {username_to_be_banned}");
+
+        let user = self.user_repository.get_compact(user_id).await?;
+
+        // Check if user is administrator
+        if !user.administrator {
+            return Err(ServiceError::Unauthorized);
+        }
+
+        let user_profile = self
+            .user_profile_repository
+            .get_user_profile_from_username(username_to_be_banned)
+            .await?;
+
+        self.banned_user_list.add(&user_profile.user_id).await?;
+
+        Ok(())
+    }
+}
+
+pub struct DbUserRepository {
+    database: Arc<Box<dyn Database>>,
+}
+
+impl DbUserRepository {
+    #[must_use]
+    pub fn new(database: Arc<Box<dyn Database>>) -> Self {
+        Self { database }
+    }
+
+    /// It returns the compact user.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn get_compact(&self, user_id: &UserId) -> Result<UserCompact, ServiceError> {
+        // todo: persistence layer should have its own errors instead of
+        // returning a `ServiceError`.
+        self.database
+            .get_user_compact_from_id(*user_id)
+            .await
+            .map_err(|_| ServiceError::UserNotFound)
+    }
+
+    /// It grants the admin role to the user.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn grant_admin_role(&self, user_id: &UserId) -> Result<(), Error> {
+        self.database.grant_admin_role(*user_id).await
+    }
+
+    /// It deletes the user.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn delete(&self, user_id: &UserId) -> Result<(), Error> {
+        self.database.delete_user(*user_id).await
+    }
+
+    /// It adds a new user.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn add(&self, username: &str, email: &str, password_hash: &str) -> Result<UserId, Error> {
+        self.database.insert_user_and_get_id(username, email, password_hash).await
+    }
+}
+
+pub struct DbUserProfileRepository {
+    database: Arc<Box<dyn Database>>,
+}
+
+impl DbUserProfileRepository {
+    #[must_use]
+    pub fn new(database: Arc<Box<dyn Database>>) -> Self {
+        Self { database }
+    }
+
+    /// It marks the user's email as verified.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn verify_email(&self, user_id: &UserId) -> Result<(), Error> {
+        self.database.verify_email(*user_id).await
+    }
+
+    /// It gets the user profile from the username.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    pub async fn get_user_profile_from_username(&self, username: &str) -> Result<UserProfile, Error> {
+        self.database.get_user_profile_from_username(username).await
+    }
+}
+
+pub struct DbBannedUserList {
+    database: Arc<Box<dyn Database>>,
+}
+
+impl DbBannedUserList {
+    #[must_use]
+    pub fn new(database: Arc<Box<dyn Database>>) -> Self {
+        Self { database }
+    }
+
+    /// It adds a user to the banned users list.
+    ///
+    /// # Errors
+    ///
+    /// It returns an error if there is a database error.
+    ///
+    /// # Panics
+    ///
+    /// It panics if the expiration date cannot be parsed. It should never
+    /// happen as the date is hardcoded for now.
+    pub async fn add(&self, user_id: &UserId) -> Result<(), Error> {
+        // todo: add reason and `date_expiry` parameters to request.
+
+        // code-review: add the user ID of the user who banned the user.
+
+        // For the time being, we will not use a reason for banning a user.
+        let reason = "no reason".to_string();
+
+        // User will be banned until the year 9999
+        let date_expiry = chrono::NaiveDateTime::parse_from_str("9999-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")
+            .expect("Could not parse date from 9999-01-01 00:00:00.");
+
+        self.database.ban_user(*user_id, &reason, date_expiry).await
+    }
+}
diff --git a/src/tracker.rs b/src/tracker.rs
deleted file mode 100644
index 960f4605..00000000
--- a/src/tracker.rs
+++ /dev/null
@@ -1,174 +0,0 @@
-use std::sync::Arc;
-
-use serde::{Serialize, Deserialize};
-
-use crate::config::Configuration;
-use crate::databases::database::Database;
-use crate::models::tracker_key::TrackerKey;
-use crate::errors::ServiceError;
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct TorrentInfo {
-    pub info_hash: String,
-    pub seeders: i64,
-    pub completed: i64,
-    pub leechers: i64,
-    pub peers: Vec<Peer>,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct Peer {
-    pub peer_id: Option<PeerId>,
-    pub peer_addr: Option<String>,
-    pub updated: Option<i64>,
-    pub uploaded: Option<i64>,
-    pub downloaded: Option<i64>,
-    pub left: Option<i64>,
-    pub event: Option<String>,
-}
-
-#[derive(Debug, Serialize, Deserialize)]
-pub struct PeerId {
-    pub id: Option<String>,
-    pub client: Option<String>
-}
-
-pub struct TrackerService {
-    cfg: Arc<Configuration>,
-    database: Arc<Box<dyn Database>>
-}
-
-impl TrackerService {
-    pub fn new(cfg: Arc<Configuration>, database: Arc<Box<dyn Database>>) -> TrackerService {
-        TrackerService {
-            cfg,
-            database
-        }
-    }
-
-    pub async fn whitelist_info_hash(&self, info_hash: String) -> Result<(), ServiceError> {
-        let settings = self.cfg.settings.read().await;
-
-        let request_url = format!("{}/api/whitelist/{}?token={}", settings.tracker.api_url, info_hash, settings.tracker.token);
-
-        drop(settings);
-
-        let client = reqwest::Client::new();
-
-        let response = client.post(request_url).send().await.map_err(|_| ServiceError::TrackerOffline)?;
-
-        if response.status().is_success() {
-            Ok(())
-        } else {
-            Err(ServiceError::InternalServerError)
-        }
-    }
-
-    pub async fn remove_info_hash_from_whitelist(&self, info_hash: String) -> Result<(), ServiceError> {
-        let settings = self.cfg.settings.read().await;
-
-        let request_url =
-            format!("{}/api/whitelist/{}?token={}", settings.tracker.api_url, info_hash, settings.tracker.token);
-
-        drop(settings);
-
-        let client = reqwest::Client::new();
-
-        let response = match client.delete(request_url).send().await {
-            Ok(v) => Ok(v),
-            Err(_) => Err(ServiceError::InternalServerError)
-        }?;
-
-        if response.status().is_success() {
-            return Ok(())
-        }
-
-        Err(ServiceError::InternalServerError)
-    }
-
-    // get personal tracker announce url of a user
-    // Eg: https://tracker.torrust.com/announce/USER_TRACKER_KEY
-    pub async fn get_personal_announce_url(&self, user_id: i64) -> Result<String, ServiceError> {
-        let settings = self.cfg.settings.read().await;
-
-        // get a valid tracker key for this user from database
-        let tracker_key = self.database.get_user_tracker_key(user_id).await;
-
-        match tracker_key {
-            Some(v) => { Ok(format!("{}/{}", settings.tracker.url, v.key)) }
-            None => {
-                match self.retrieve_new_tracker_key(user_id).await {
-                    Ok(v) => { Ok(format!("{}/{}", settings.tracker.url, v.key)) },
-                    Err(_) => { Err(ServiceError::TrackerOffline) }
-                }
-            }
-        }
-    }
-
-    // issue a new tracker key from tracker and save it in database, tied to a user
-    pub async fn retrieve_new_tracker_key(&self, user_id: i64) -> Result<TrackerKey, ServiceError> {
-        let settings = self.cfg.settings.read().await;
-
-        let request_url = format!("{}/api/key/{}?token={}", settings.tracker.api_url, settings.tracker.token_valid_seconds, settings.tracker.token);
-
-        drop(settings);
-
-        let client = reqwest::Client::new();
-
-        // issue new tracker key
-        let response = client.post(request_url).send().await.map_err(|_| ServiceError::InternalServerError)?;
-
-        // get tracker key from response
-        let tracker_key = response.json::<TrackerKey>().await.map_err(|_| ServiceError::InternalServerError)?;
-
-        // add tracker key to database (tied to a user)
-        self.database.add_tracker_key(user_id, &tracker_key).await?;
-
-        // return tracker key
-        Ok(tracker_key)
-    }
-
-    // get torrent info from tracker api
-    pub async fn get_torrent_info(&self, torrent_id: i64, info_hash: &str) -> Result<TorrentInfo, ServiceError> {
-        let settings = self.cfg.settings.read().await;
-
-        let tracker_url = settings.tracker.url.clone();
-
-        let request_url =
-            format!("{}/api/torrent/{}?token={}", settings.tracker.api_url, info_hash, settings.tracker.token);
-
-        drop(settings);
-
-        let client = reqwest::Client::new();
-        let response = match client.get(request_url)
-            .send()
-            .await {
-            Ok(v) => Ok(v),
-            Err(_) => Err(ServiceError::InternalServerError)
-        }?;
-
-        let torrent_info = match response.json::<TorrentInfo>().await {
-            Ok(torrent_info) => {
-                let _ = self.database.update_tracker_info(torrent_id, &tracker_url, torrent_info.seeders, torrent_info.leechers).await;
-                Ok(torrent_info)
-            },
-            Err(_) => {
-                let _ = self.database.update_tracker_info(torrent_id, &tracker_url, 0, 0).await;
-                Err(ServiceError::TorrentNotFound)
-            }
-        }?;
-
-        Ok(torrent_info)
-    }
-
-    pub async fn update_torrents(&self) -> Result<(), ServiceError> {
-        println!("Updating torrents..");
-        let torrents = self.database.get_all_torrents_compact().await?;
-
-        for torrent in torrents {
-            let _ = self.get_torrent_info(torrent.torrent_id, &torrent.info_hash).await;
-        }
-
-        Ok(())
-    }
-}
diff --git a/src/tracker/api.rs b/src/tracker/api.rs
new file mode 100644
index 00000000..d3fa3fcb
--- /dev/null
+++ b/src/tracker/api.rs
@@ -0,0 +1,91 @@
+use reqwest::{Error, Response};
+
+pub struct ConnectionInfo {
+    /// The URL of the tracker API.
+    pub url: String,
+    /// The token used to authenticate with the tracker API.
+    pub token: String,
+}
+
+impl ConnectionInfo {
+    #[must_use]
+    pub fn new(url: String, token: String) -> Self {
+        Self { url, token }
+    }
+}
+
+pub struct Client {
+    pub connection_info: ConnectionInfo,
+    base_url: String,
+}
+
+impl Client {
+    #[must_use]
+    pub fn new(connection_info: ConnectionInfo) -> Self {
+        let base_url = format!("{}/api/v1", connection_info.url);
+        Self {
+            connection_info,
+            base_url,
+        }
+    }
+
+    /// Add a torrent to the tracker whitelist.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if the HTTP request fails.
+    pub async fn whitelist_torrent(&self, info_hash: &str) -> Result<Response, Error> {
+        let request_url = format!(
+            "{}/whitelist/{}?token={}",
+            self.base_url, info_hash, self.connection_info.token
+        );
+
+        let client = reqwest::Client::new();
+
+        client.post(request_url).send().await
+    }
+
+    /// Remove a torrent from the tracker whitelist.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if the HTTP request fails.
+    pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Result<Response, Error> {
+        let request_url = format!(
+            "{}/whitelist/{}?token={}",
+            self.base_url, info_hash, self.connection_info.token
+        );
+
+        let client = reqwest::Client::new();
+
+        client.delete(request_url).send().await
+    }
+
+    /// Retrieve a new tracker key.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if the HTTP request fails.
+    pub async fn retrieve_new_tracker_key(&self, token_valid_seconds: u64) -> Result<Response, Error> {
+        let request_url = format!(
+            "{}/key/{}?token={}",
+            self.base_url, token_valid_seconds, self.connection_info.token
+        );
+
+        let client = reqwest::Client::new();
+
+        client.post(request_url).send().await
+    }
+
+    /// Retrieve the info for a torrent.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if the HTTP request fails.
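+    ///
+    /// A sketch of a full client round trip (the URL, token, and info-hash
+    /// below are placeholders):
+    ///
+    /// ```ignore
+    /// let client = Client::new(ConnectionInfo::new(
+    ///     "http://localhost:1212".to_string(),
+    ///     "MyAccessToken".to_string(),
+    /// ));
+    /// let response = client
+    ///     .get_torrent_info("443c7602b4fde83d1154d6d9da48808418b181b6")
+    ///     .await?;
+    /// ```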
+    pub async fn get_torrent_info(&self, info_hash: &str) -> Result<Response, Error> {
+        let request_url = format!("{}/torrent/{}?token={}", self.base_url, info_hash, self.connection_info.token);
+
+        let client = reqwest::Client::new();
+
+        client.get(request_url).send().await
+    }
+}
diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs
new file mode 100644
index 00000000..5fc5a030
--- /dev/null
+++ b/src/tracker/mod.rs
@@ -0,0 +1,3 @@
+pub mod api;
+pub mod service;
+pub mod statistics_importer;
diff --git a/src/tracker/service.rs b/src/tracker/service.rs
new file mode 100644
index 00000000..e39cf0a6
--- /dev/null
+++ b/src/tracker/service.rs
@@ -0,0 +1,197 @@
+use std::sync::Arc;
+
+use hyper::StatusCode;
+use log::error;
+use serde::{Deserialize, Serialize};
+
+use super::api::{Client, ConnectionInfo};
+use crate::config::Configuration;
+use crate::databases::database::Database;
+use crate::errors::ServiceError;
+use crate::models::tracker_key::TrackerKey;
+use crate::models::user::UserId;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct TorrentInfo {
+    pub info_hash: String,
+    pub seeders: i64,
+    pub completed: i64,
+    pub leechers: i64,
+    pub peers: Vec<Peer>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Peer {
+    pub peer_id: Option<PeerId>,
+    pub peer_addr: Option<String>,
+    pub updated: Option<i64>,
+    pub uploaded: Option<i64>,
+    pub downloaded: Option<i64>,
+    pub left: Option<i64>,
+    pub event: Option<String>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct PeerId {
+    pub id: Option<String>,
+    pub client: Option<String>,
+}
+
+pub struct Service {
+    database: Arc<Box<dyn Database>>,
+    api_client: Client,
+    token_valid_seconds: u64,
+    tracker_url: String,
+}
+
+impl Service {
+    pub async fn new(cfg: Arc<Configuration>, database: Arc<Box<dyn Database>>) -> Service {
+        let settings = cfg.settings.read().await;
+        let api_client = Client::new(ConnectionInfo::new(
+            settings.tracker.api_url.clone(),
+            settings.tracker.token.clone(),
+        ));
+        let token_valid_seconds = settings.tracker.token_valid_seconds;
+        let tracker_url = settings.tracker.url.clone();
+        drop(settings);
+        Service {
+            database,
+            api_client,
+            token_valid_seconds,
+            tracker_url,
+        }
+    }
+
+    /// Add a torrent to the tracker whitelist.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if the HTTP request failed (for example if the
+    /// tracker API is offline) or if the tracker API returned an error.
+    pub async fn whitelist_info_hash(&self, info_hash: String) -> Result<(), ServiceError> {
+        let response = self.api_client.whitelist_torrent(&info_hash).await;
+
+        match response {
+            Ok(response) => {
+                if response.status().is_success() {
+                    Ok(())
+                } else {
+                    Err(ServiceError::WhitelistingError)
+                }
+            }
+            Err(_) => Err(ServiceError::TrackerOffline),
+        }
+    }
+
+    /// Remove a torrent from the tracker whitelist.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if the HTTP request failed (for example if the
+    /// tracker API is offline) or if the tracker API returned an error.
+    pub async fn remove_info_hash_from_whitelist(&self, info_hash: String) -> Result<(), ServiceError> {
+        let response = self.api_client.remove_torrent_from_whitelist(&info_hash).await;
+
+        match response {
+            Ok(response) => {
+                if response.status().is_success() {
+                    Ok(())
+                } else {
+                    Err(ServiceError::InternalServerError)
+                }
+            }
+            Err(_) => Err(ServiceError::InternalServerError),
+        }
+    }
+
+    /// Get personal tracker announce url of a user.
+    ///
+    /// Eg: `https://tracker.torrust.com/announce/USER_TRACKER_KEY`
+    ///
+    /// If the user doesn't have a valid (non-expired) tracker key, it will
+    /// generate a new one and save it in the database.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if the HTTP request to generate a new user
+    /// tracker key failed.
+    pub async fn get_personal_announce_url(&self, user_id: UserId) -> Result<String, ServiceError> {
+        let tracker_key = self.database.get_user_tracker_key(user_id).await;
+
+        match tracker_key {
+            Some(v) => Ok(self.announce_url_with_key(&v)),
+            None => match self.retrieve_new_tracker_key(user_id).await {
+                Ok(v) => Ok(self.announce_url_with_key(&v)),
+                Err(_) => Err(ServiceError::TrackerOffline),
+            },
+        }
+    }
+
+    /// Get torrent info from tracker.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if the HTTP request to get torrent info fails or
+    /// if the response cannot be parsed.
+    pub async fn get_torrent_info(&self, info_hash: &str) -> Result<TorrentInfo, ServiceError> {
+        let response = self
+            .api_client
+            .get_torrent_info(info_hash)
+            .await
+            .map_err(|_| ServiceError::InternalServerError)?;
+
+        if response.status() == StatusCode::NOT_FOUND {
+            return Err(ServiceError::TorrentNotFound);
+        }
+
+        let body = response.text().await;
+
+        if let Ok(body) = body {
+            if body == *"torrent not known" {
+                // todo: temporary fix. the service should return a 404 (StatusCode::NOT_FOUND).
+                return Err(ServiceError::TorrentNotFound);
+            }
+
+            let torrent_info = serde_json::from_str(&body);
+
+            if let Ok(torrent_info) = torrent_info {
+                Ok(torrent_info)
+            } else {
+                error!("Failed to parse torrent info from tracker response. Body: {}", body);
+                Err(ServiceError::InternalServerError)
+            }
+        } else {
+            error!("Tracker API response without body");
+            Err(ServiceError::InternalServerError)
+        }
+    }
+
+    /// It builds the announce url appending the user tracker key.
+    /// Eg: `https://tracker.torrust.com/announce/USER_TRACKER_KEY`
+    fn announce_url_with_key(&self, tracker_key: &TrackerKey) -> String {
+        format!("{}/{}", self.tracker_url, tracker_key.key)
+    }
+
+    /// Issue a new tracker key from tracker and save it in database,
+    /// tied to a user.
+    async fn retrieve_new_tracker_key(&self, user_id: i64) -> Result<TrackerKey, ServiceError> {
+        // Request new tracker key from tracker
+        let response = self
+            .api_client
+            .retrieve_new_tracker_key(self.token_valid_seconds)
+            .await
+            .map_err(|_| ServiceError::InternalServerError)?;
+
+        // Parse tracker key from response
+        let tracker_key = response
+            .json::<TrackerKey>()
+            .await
+            .map_err(|_| ServiceError::InternalServerError)?;
+
+        // Add tracker key to database (tied to a user)
+        self.database.add_tracker_key(user_id, &tracker_key).await?;
+
+        // return tracker key
+        Ok(tracker_key)
+    }
+}
diff --git a/src/tracker/statistics_importer.rs b/src/tracker/statistics_importer.rs
new file mode 100644
index 00000000..128cce12
--- /dev/null
+++ b/src/tracker/statistics_importer.rs
@@ -0,0 +1,83 @@
+use std::sync::Arc;
+
+use log::{error, info};
+
+use super::service::{Service, TorrentInfo};
+use crate::config::Configuration;
+use crate::databases::database::{self, Database};
+use crate::errors::ServiceError;
+
+pub struct StatisticsImporter {
+    database: Arc<Box<dyn Database>>,
+    tracker_service: Arc<Service>,
+    tracker_url: String,
+}
+
+impl StatisticsImporter {
+    pub async fn new(cfg: Arc<Configuration>, tracker_service: Arc<Service>, database: Arc<Box<dyn Database>>) -> Self {
+        let settings = cfg.settings.read().await;
+        let tracker_url = settings.tracker.url.clone();
+        drop(settings);
+        Self {
+            database,
+            tracker_service,
+            tracker_url,
+        }
+    }
+
+    /// Import torrents statistics from tracker and update them in database.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if the database query failed.
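+    ///
+    /// This is typically driven by a periodic job; a minimal sketch (the
+    /// `importer` value is assumed to be built via `StatisticsImporter::new`):
+    ///
+    /// ```ignore
+    /// importer.import_all_torrents_statistics().await?;
+    /// ```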
+    pub async fn import_all_torrents_statistics(&self) -> Result<(), database::Error> {
+        info!("Importing torrents statistics from tracker ...");
+        let torrents = self.database.get_all_torrents_compact().await?;
+
+        for torrent in torrents {
+            info!("Updating torrent {} ...", torrent.torrent_id);
+
+            let ret = self.import_torrent_statistics(torrent.torrent_id, &torrent.info_hash).await;
+
+            // code-review: should we treat each case differently? The
+            // tracker API could be temporarily offline, or there could be a
+            // tracker misconfiguration.
+            //
+            // This is the log when the torrent is not found in the tracker:
+            //
+            // ```
+            // 2023-05-09T13:31:24.497465723+00:00 [torrust_index::tracker::statistics_importer][ERROR] Error updating torrent tracker stats for torrent with id 140: TorrentNotFound
+            // ```
+
+            if let Some(err) = ret.err() {
+                let message = format!(
+                    "Error updating torrent tracker stats for torrent with id {}: {:?}",
+                    torrent.torrent_id, err
+                );
+                error!("{}", message);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Import torrent statistics from tracker and update them in database.
+    ///
+    /// # Errors
+    ///
+    /// Will return an error if the HTTP request failed or the torrent is not
+    /// found.
+    pub async fn import_torrent_statistics(&self, torrent_id: i64, info_hash: &str) -> Result<TorrentInfo, ServiceError> {
+        if let Ok(torrent_info) = self.tracker_service.get_torrent_info(info_hash).await {
+            drop(
+                self.database
+                    .update_tracker_info(torrent_id, &self.tracker_url, torrent_info.seeders, torrent_info.leechers)
+                    .await,
+            );
+            Ok(torrent_info)
+        } else {
+            drop(self.database.update_tracker_info(torrent_id, &self.tracker_url, 0, 0).await);
+            Err(ServiceError::TorrentNotFound)
+        }
+    }
+}
diff --git a/src/ui/mod.rs b/src/ui/mod.rs
new file mode 100644
index 00000000..143a6381
--- /dev/null
+++ b/src/ui/mod.rs
@@ -0,0 +1,2 @@
+//! User interface module. Presentation layer.
+pub mod proxy;
diff --git a/src/ui/proxy.rs b/src/ui/proxy.rs
new file mode 100644
index 00000000..78dd2fc2
--- /dev/null
+++ b/src/ui/proxy.rs
@@ -0,0 +1,57 @@
+use std::sync::Once;
+
+use bytes::Bytes;
+use text_to_png::TextRenderer;
+
+use crate::cache::image::manager::Error;
+
+pub static ERROR_IMAGE_LOADER: Once = Once::new();
+
+static mut ERROR_IMAGE_URL_IS_UNREACHABLE: Bytes = Bytes::new();
+static mut ERROR_IMAGE_URL_IS_NOT_AN_IMAGE: Bytes = Bytes::new();
+static mut ERROR_IMAGE_TOO_BIG: Bytes = Bytes::new();
+static mut ERROR_IMAGE_USER_QUOTA_MET: Bytes = Bytes::new();
+static mut ERROR_IMAGE_UNAUTHENTICATED: Bytes = Bytes::new();
+
+const ERROR_IMG_FONT_SIZE: u8 = 16;
+const ERROR_IMG_COLOR: &str = "Red";
+
+const ERROR_IMAGE_URL_IS_UNREACHABLE_TEXT: &str = "Could not find image.";
+const ERROR_IMAGE_URL_IS_NOT_AN_IMAGE_TEXT: &str = "Invalid image.";
+const ERROR_IMAGE_TOO_BIG_TEXT: &str = "Image is too big.";
+const ERROR_IMAGE_USER_QUOTA_MET_TEXT: &str = "Image proxy quota met.";
+const ERROR_IMAGE_UNAUTHENTICATED_TEXT: &str = "Sign in to see image.";
+
+pub fn load_error_images() {
+    ERROR_IMAGE_LOADER.call_once(|| unsafe {
+        ERROR_IMAGE_URL_IS_UNREACHABLE = generate_img_from_text(ERROR_IMAGE_URL_IS_UNREACHABLE_TEXT);
+        ERROR_IMAGE_URL_IS_NOT_AN_IMAGE = generate_img_from_text(ERROR_IMAGE_URL_IS_NOT_AN_IMAGE_TEXT);
+        ERROR_IMAGE_TOO_BIG = generate_img_from_text(ERROR_IMAGE_TOO_BIG_TEXT);
+        ERROR_IMAGE_USER_QUOTA_MET = generate_img_from_text(ERROR_IMAGE_USER_QUOTA_MET_TEXT);
+        ERROR_IMAGE_UNAUTHENTICATED = generate_img_from_text(ERROR_IMAGE_UNAUTHENTICATED_TEXT);
+    });
+}
+
+pub fn map_error_to_image(error: &Error) -> Bytes {
+    load_error_images();
+    unsafe {
+        match error {
+            Error::UrlIsUnreachable => ERROR_IMAGE_URL_IS_UNREACHABLE.clone(),
+            Error::UrlIsNotAnImage => ERROR_IMAGE_URL_IS_NOT_AN_IMAGE.clone(),
+            Error::ImageTooBig => ERROR_IMAGE_TOO_BIG.clone(),
+            Error::UserQuotaMet => ERROR_IMAGE_USER_QUOTA_MET.clone(),
+            Error::Unauthenticated => ERROR_IMAGE_UNAUTHENTICATED.clone(),
+        }
+    }
+}
+
+fn generate_img_from_text(text: &str) -> Bytes {
+    let renderer = TextRenderer::default();
+
+    Bytes::from(
+        renderer
+            .render_text_to_png_data(text, ERROR_IMG_FONT_SIZE, ERROR_IMG_COLOR)
+            .unwrap()
+            .data,
+    )
+}
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs
new file mode 100644
index 00000000..44af94f9
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs
@@ -0,0 +1,35 @@
+use std::sync::Arc;
+
+use self::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use self::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+
+pub mod sqlite_v1_0_0;
+pub mod sqlite_v2_0_0;
+
+pub async fn current_db(db_filename: &str) -> Arc<SqliteDatabaseV1_0_0> {
+    let source_database_connect_url = format!("sqlite://{db_filename}?mode=ro");
+    Arc::new(SqliteDatabaseV1_0_0::new(&source_database_connect_url).await)
+}
+
+pub async fn new_db(db_filename: &str) -> Arc<SqliteDatabaseV2_0_0> {
+    let target_database_connect_url = format!("sqlite://{db_filename}?mode=rwc");
+    Arc::new(SqliteDatabaseV2_0_0::new(&target_database_connect_url).await)
+}
+
+pub async fn migrate_target_database(target_database: Arc<SqliteDatabaseV2_0_0>) {
+    println!("Running migrations in the target database...");
+    target_database.migrate().await;
+}
+
+/// It truncates all tables in the target database.
+///
+/// # Panics
+///
+/// It panics if it cannot truncate the tables.
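+///
+/// A sketch of how these helpers chain together during an upgrade (the file
+/// paths are illustrative):
+///
+/// ```ignore
+/// let source = current_db("./data.db").await;
+/// let target = new_db("./data_v2.db").await;
+/// migrate_target_database(target.clone()).await;
+/// truncate_target_database(target.clone()).await;
+/// ```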
+pub async fn truncate_target_database(target_database: Arc<SqliteDatabaseV2_0_0>) {
+    println!("Truncating all tables in target database ...");
+    target_database
+        .delete_all_database_rows()
+        .await
+        .expect("Can't reset the target database.");
+}
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs
new file mode 100644
index 00000000..f1a410d1
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs
@@ -0,0 +1,111 @@
+#![allow(clippy::missing_errors_doc)]
+
+use serde::{Deserialize, Serialize};
+use sqlx::sqlite::SqlitePoolOptions;
+use sqlx::{query_as, SqlitePool};
+
+use crate::databases::database;
+
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct CategoryRecordV1 {
+    pub category_id: i64,
+    pub name: String,
+}
+
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow, Clone)]
+pub struct UserRecordV1 {
+    pub user_id: i64,
+    pub username: String,
+    pub email: String,
+    pub email_verified: bool,
+    pub password: String,
+    pub administrator: bool,
+}
+
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct TrackerKeyRecordV1 {
+    pub key_id: i64,
+    pub user_id: i64,
+    pub key: String,
+    pub valid_until: i64,
+}
+
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct TorrentRecordV1 {
+    pub torrent_id: i64,
+    pub uploader: String,
+    pub info_hash: String,
+    pub title: String,
+    pub category_id: i64,
+    pub description: Option<String>,
+    pub upload_date: i64,
+    pub file_size: i64,
+    pub seeders: i64,
+    pub leechers: i64,
+}
+
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct TorrentFileRecordV1 {
+    pub file_id: i64,
+    pub torrent_uid: i64,
+    pub number: i64,
+    pub path: String,
+    pub length: i64,
+}
+
+pub struct SqliteDatabaseV1_0_0 {
+    pub pool: SqlitePool,
+}
+
+impl SqliteDatabaseV1_0_0 {
+    /// It creates a new instance of the `SqliteDatabaseV1_0_0`.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if it is unable to create the database pool.
+    pub async fn new(database_url: &str) -> Self {
+        let db = SqlitePoolOptions::new()
+            .connect(database_url)
+            .await
+            .expect("Unable to create database pool.");
+        Self { pool: db }
+    }
+
+    pub async fn get_categories_order_by_id(&self) -> Result<Vec<CategoryRecordV1>, database::Error> {
+        query_as::<_, CategoryRecordV1>("SELECT category_id, name FROM torrust_categories ORDER BY category_id ASC")
+            .fetch_all(&self.pool)
+            .await
+            .map_err(|_| database::Error::Error)
+    }
+
+    pub async fn get_users(&self) -> Result<Vec<UserRecordV1>, sqlx::Error> {
+        query_as::<_, UserRecordV1>("SELECT * FROM torrust_users ORDER BY user_id ASC")
+            .fetch_all(&self.pool)
+            .await
+    }
+
+    pub async fn get_user_by_username(&self, username: &str) -> Result<UserRecordV1, sqlx::Error> {
+        query_as::<_, UserRecordV1>("SELECT * FROM torrust_users WHERE username = ?")
+            .bind(username)
+            .fetch_one(&self.pool)
+            .await
+    }
+
+    pub async fn get_tracker_keys(&self) -> Result<Vec<TrackerKeyRecordV1>, sqlx::Error> {
+        query_as::<_, TrackerKeyRecordV1>("SELECT * FROM torrust_tracker_keys ORDER BY key_id ASC")
+            .fetch_all(&self.pool)
+            .await
+    }
+
+    pub async fn get_torrents(&self) -> Result<Vec<TorrentRecordV1>, sqlx::Error> {
+        query_as::<_, TorrentRecordV1>("SELECT * FROM torrust_torrents ORDER BY torrent_id ASC")
+            .fetch_all(&self.pool)
+            .await
+    }
+
+    pub async fn get_torrent_files(&self) -> Result<Vec<TorrentFileRecordV1>, sqlx::Error> {
+        query_as::<_, TorrentFileRecordV1>("SELECT * FROM torrust_torrent_files ORDER BY file_id ASC")
+            .fetch_all(&self.pool)
+            .await
+    }
+}
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs
new file mode 100644
index 00000000..37a06d5e
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs
@@ -0,0 +1,288 @@
+#![allow(clippy::missing_errors_doc)]
+
+use chrono::{DateTime, NaiveDateTime, Utc};
+use serde::{Deserialize, Serialize};
+use sqlx::sqlite::{SqlitePoolOptions, SqliteQueryResult};
+use sqlx::{query, query_as, SqlitePool};
+
+use super::sqlite_v1_0_0::{TorrentRecordV1, UserRecordV1};
+use crate::databases::database::{self, TABLES_TO_TRUNCATE};
+use crate::models::torrent_file::{TorrentFile, TorrentInfoDictionary};
+
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct CategoryRecordV2 {
+    pub category_id: i64,
+    pub name: String,
+}
+
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct TorrentRecordV2 {
+    pub torrent_id: i64,
+    pub uploader_id: i64,
+    pub category_id: i64,
+    pub info_hash: String,
+    pub size: i64,
+    pub name: String,
+    pub pieces: String,
+    pub piece_length: i64,
+    pub private: Option<u8>,
+    pub root_hash: i64,
+    pub date_uploaded: String,
+}
+
+impl TorrentRecordV2 {
+    #[must_use]
+    pub fn from_v1_data(torrent: &TorrentRecordV1, torrent_info: &TorrentInfoDictionary, uploader: &UserRecordV1) -> Self {
+        Self {
+            torrent_id: torrent.torrent_id,
+            uploader_id: uploader.user_id,
+            category_id: torrent.category_id,
+            info_hash: torrent.info_hash.clone(),
+            size: torrent.file_size,
+            name: torrent_info.name.clone(),
+            pieces: torrent_info.get_pieces_as_string(),
+            piece_length: torrent_info.piece_length,
+            private: torrent_info.private,
+            root_hash: torrent_info.get_root_hash_as_i64(),
+            date_uploaded: convert_timestamp_to_datetime(torrent.upload_date),
+        }
+    }
+}
+
+/// It converts a timestamp in seconds to a datetime string.
+///
+/// # Panics
+///
+/// It panics if the timestamp is too big and overflows `i64`. Very future!
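+///
+/// For example, the Unix timestamp `1667555637` maps to the string
+/// `2022-11-04 09:53:57` (UTC), matching the `DATETIME`/`TEXT` format the
+/// database expects.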
+#[must_use]
+pub fn convert_timestamp_to_datetime(timestamp: i64) -> String {
+    // The expected format in database is: 2022-11-04 09:53:57
+    // MySQL uses a DATETIME column and SQLite uses a TEXT column.
+
+    let naive_datetime = NaiveDateTime::from_timestamp_opt(timestamp, 0).expect("Overflow of i64 seconds, very future!");
+    let datetime_again: DateTime<Utc> = DateTime::from_naive_utc_and_offset(naive_datetime, Utc);
+
+    // Format without timezone
+    datetime_again.format("%Y-%m-%d %H:%M:%S").to_string()
+}
+
+pub struct SqliteDatabaseV2_0_0 {
+    pub pool: SqlitePool,
+}
+
+impl SqliteDatabaseV2_0_0 {
+    /// Creates a new instance of the database.
+    ///
+    /// # Panics
+    ///
+    /// It panics if it cannot create the database pool.
+    pub async fn new(database_url: &str) -> Self {
+        let db = SqlitePoolOptions::new()
+            .connect(database_url)
+            .await
+            .expect("Unable to create database pool.");
+        Self { pool: db }
+    }
+
+    /// It migrates the database to the latest version.
+    ///
+    /// # Panics
+    ///
+    /// It panics if it cannot run the migrations.
+    pub async fn migrate(&self) {
+        sqlx::migrate!("migrations/sqlite3")
+            .run(&self.pool)
+            .await
+            .expect("Could not run database migrations.");
+    }
+
+    pub async fn reset_categories_sequence(&self) -> Result<SqliteQueryResult, database::Error> {
+        query("DELETE FROM `sqlite_sequence` WHERE `name` = 'torrust_categories'")
+            .execute(&self.pool)
+            .await
+            .map_err(|_| database::Error::Error)
+    }
+
+    pub async fn get_categories(&self) -> Result<Vec<CategoryRecordV2>, database::Error> {
+        query_as::<_, CategoryRecordV2>("SELECT tc.category_id, tc.name, COUNT(tt.category_id) as num_torrents FROM torrust_categories tc LEFT JOIN torrust_torrents tt on tc.category_id = tt.category_id GROUP BY tc.name")
+            .fetch_all(&self.pool)
+            .await
+            .map_err(|_| database::Error::Error)
+    }
+
+    pub async fn insert_category_and_get_id(&self, category_name: &str) -> Result<i64, database::Error> {
+        query("INSERT INTO torrust_categories (name) VALUES (?)")
+            .bind(category_name)
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+            .map_err(|e| match e {
+                sqlx::Error::Database(err) => {
+                    if err.message().contains("UNIQUE") && err.message().contains("name") {
+                        database::Error::CategoryAlreadyExists
+                    } else {
+                        database::Error::Error
+                    }
+                }
+                _ => database::Error::Error,
+            })
+    }
+
+    pub async fn insert_category(&self, category: &CategoryRecordV2) -> Result<i64, sqlx::Error> {
+        query("INSERT INTO torrust_categories (category_id, name) VALUES (?, ?)")
+            .bind(category.category_id)
+            .bind(category.name.clone())
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+    }
+
+    pub async fn insert_imported_user(&self, user_id: i64, date_imported: &str, administrator: bool) -> Result<i64, sqlx::Error> {
+        query("INSERT INTO torrust_users (user_id, date_imported, administrator) VALUES (?, ?, ?)")
+            .bind(user_id)
+            .bind(date_imported)
+            .bind(administrator)
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+    }
+
+    pub async fn insert_user_profile(
+        &self,
+        user_id: i64,
+        username: &str,
+        email: &str,
+        email_verified: bool,
+    ) -> Result<i64, sqlx::Error> {
+        query(
+            "INSERT INTO torrust_user_profiles (user_id, username, email, email_verified, bio, avatar) VALUES (?, ?, ?, ?, ?, ?)",
+        )
+        .bind(user_id)
+        .bind(username)
+        .bind(email)
+        .bind(email_verified)
+        // `bio` and `avatar` do not exist in v1, so they are bound to the
+        // documented default: an empty string.
+        .bind("")
+        .bind("")
+        .execute(&self.pool)
+        .await
+        .map(|v| v.last_insert_rowid())
+    }
+
+    pub async fn insert_user_password_hash(&self, user_id: i64, password_hash: &str) -> Result<i64, sqlx::Error> {
+        query("INSERT INTO torrust_user_authentication (user_id, password_hash) VALUES (?, ?)")
+            .bind(user_id)
+            .bind(password_hash)
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+    }
+
+    pub async fn insert_tracker_key(
+        &self,
+        tracker_key_id: i64,
+        user_id: i64,
+        tracker_key: &str,
+        date_expiry: i64,
+    ) -> Result<i64, sqlx::Error> {
+        query("INSERT INTO torrust_tracker_keys (tracker_key_id, user_id, tracker_key, date_expiry) VALUES (?, ?, ?, ?)")
+            .bind(tracker_key_id)
+            .bind(user_id)
+            .bind(tracker_key)
+            .bind(date_expiry)
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+    }
+
+    pub async fn insert_torrent(&self, torrent: &TorrentRecordV2) -> Result<i64, sqlx::Error> {
+        query(
+            "
+            INSERT INTO torrust_torrents (
+                torrent_id,
+                uploader_id,
+                category_id,
+                info_hash,
+                size,
+                name,
+                pieces,
+                piece_length,
+                private,
+                root_hash,
+                date_uploaded
+            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
+        )
+        .bind(torrent.torrent_id)
+        .bind(torrent.uploader_id)
+        .bind(torrent.category_id)
+        .bind(torrent.info_hash.clone())
+        .bind(torrent.size)
+        .bind(torrent.name.clone())
+        .bind(torrent.pieces.clone())
+        .bind(torrent.piece_length)
+        .bind(torrent.private.unwrap_or(0))
+        .bind(torrent.root_hash)
+        .bind(torrent.date_uploaded.clone())
+        .execute(&self.pool)
+        .await
+        .map(|v| v.last_insert_rowid())
+    }
+
+    pub async fn insert_torrent_file_for_torrent_with_one_file(
+        &self,
+        torrent_id: i64,
+        md5sum: &Option<String>,
+        length: i64,
+    ) -> Result<i64, sqlx::Error> {
+        query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH) VALUES (?, ?, ?)")
+            .bind(md5sum)
+            .bind(torrent_id)
+            .bind(length)
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+    }
+
+    pub async fn insert_torrent_file_for_torrent_with_multiple_files(
+        &self,
+        torrent: &TorrentRecordV1,
+        file: &TorrentFile,
+    ) -> Result<i64, sqlx::Error> {
+        query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH, PATH) VALUES (?, ?, ?, ?)")
+            .bind(file.md5sum.clone())
+            .bind(torrent.torrent_id)
+            .bind(file.length)
+            .bind(file.path.join("/"))
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+    }
+
+    pub async fn insert_torrent_info(&self, torrent: &TorrentRecordV1) -> Result<i64, sqlx::Error> {
+        query("INSERT INTO torrust_torrent_info (torrent_id, title, description) VALUES (?, ?, ?)")
+            .bind(torrent.torrent_id)
+            .bind(torrent.title.clone())
+            .bind(torrent.description.clone())
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+    }
+
+    pub async fn insert_torrent_announce_url(&self, torrent_id: i64, tracker_url: &str) -> Result<i64, sqlx::Error> {
+        query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)")
+            .bind(torrent_id)
+            .bind(tracker_url)
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+    }
+
+    #[allow(clippy::missing_panics_doc)]
+    pub async fn delete_all_database_rows(&self) -> Result<(), database::Error> {
+        for table in TABLES_TO_TRUNCATE {
+            query(&format!("DELETE FROM {table};"))
+                .execute(&self.pool)
+                .await
+                .unwrap_or_else(|_| panic!("table {table} should be deleted"));
+        }
+
+        Ok(())
+    }
+}
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs
new file mode 100644
index 00000000..afb35f90
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs
@@ -0,0 +1,3 @@
+pub mod databases;
+pub mod transferrers;
+pub mod upgrader;
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs
new file mode 100644
index 00000000..269f26b8
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs
@@ -0,0 +1,37 @@
+use std::sync::Arc;
+
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{CategoryRecordV2, SqliteDatabaseV2_0_0};
+
+#[allow(clippy::missing_panics_doc)]
+pub async fn transfer_categories(source_database: Arc<SqliteDatabaseV1_0_0>, target_database: Arc<SqliteDatabaseV2_0_0>) {
+    println!("Transferring categories ...");
+
+    let source_categories = source_database.get_categories_order_by_id().await.unwrap();
+    println!("[v1] categories: {:?}", &source_categories);
+
+    let result = target_database.reset_categories_sequence().await.unwrap();
+    println!("[v2] reset categories sequence result: {result:?}");
+
+    for cat in &source_categories {
+        println!("[v2] adding category {:?} with id {:?} ...", &cat.name, &cat.category_id);
+        let id = target_database
+            .insert_category(&CategoryRecordV2 {
+                category_id: cat.category_id,
+                name: cat.name.clone(),
+            })
+            .await
+            .unwrap();
+
+        assert!(
+            id == cat.category_id,
+            "Error copying category {:?} from source DB to the target DB",
+            &cat.category_id
+        );
+
+        println!("[v2] category: {:?} {:?} added.", id, &cat.name);
+    }
+
+    let target_categories = target_database.get_categories().await.unwrap();
+    println!("[v2] categories: {:?}", &target_categories);
+}
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/mod.rs
new file mode 100644
index 00000000..94eaac75
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/mod.rs
@@ -0,0 +1,4 @@
+pub mod category_transferrer;
+pub mod torrent_transferrer;
+pub mod tracker_key_transferrer;
+pub mod user_transferrer;
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs
new file mode 100644
index 00000000..271331e4
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs
@@ -0,0 +1,195 @@
+#![allow(clippy::missing_errors_doc)]
+
+use std::sync::Arc;
+use std::{error, fs};
+
+use crate::models::torrent_file::Torrent;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{SqliteDatabaseV2_0_0, TorrentRecordV2};
+use crate::utils::parse_torrent::decode_torrent;
+
+#[allow(clippy::missing_panics_doc)]
+#[allow(clippy::too_many_lines)]
+pub async fn transfer_torrents(
+    source_database: Arc<SqliteDatabaseV1_0_0>,
+    target_database: Arc<SqliteDatabaseV2_0_0>,
+    upload_path: &str,
+) {
+    println!("Transferring torrents ...");
+
+    // Transfer table `torrust_torrents_files`
+
+    // Although the table `torrust_torrents_files` existed in version v1.0.0,
+    // it was not used.
+
+    // Transfer table `torrust_torrents`
+
+    let torrents = source_database.get_torrents().await.unwrap();
+
+    for torrent in &torrents {
+        // [v2] table torrust_torrents
+
+        println!("[v2][torrust_torrents] adding the torrent: {:?} ...", &torrent.torrent_id);
+
+        let uploader = source_database.get_user_by_username(&torrent.uploader).await.unwrap();
+
+        assert!(
+            uploader.username == torrent.uploader,
+            "Error copying torrent with id {:?}.
+            Username (`uploader`) in `torrust_torrents` table does not match `username` in `torrust_users` table",
+            &torrent.torrent_id
+        );
+
+        let filepath = format!("{}/{}.torrent", upload_path, &torrent.torrent_id);
+
+        let torrent_from_file_result = read_torrent_from_file(&filepath);
+
+        assert!(
+            torrent_from_file_result.is_ok(),
+            "Error: torrent file not found: {:?}",
+            &filepath
+        );
+
+        let torrent_from_file = torrent_from_file_result.unwrap();
+
+        let id = target_database
+            .insert_torrent(&TorrentRecordV2::from_v1_data(torrent, &torrent_from_file.info, &uploader))
+            .await
+            .unwrap();
+
+        assert!(
+            id == torrent.torrent_id,
+            "Error copying torrent {:?} from source DB to the target DB",
+            &torrent.torrent_id
+        );
+
+        println!("[v2][torrust_torrents] torrent with id {:?} added.", &torrent.torrent_id);
+
+        // [v2] table torrust_torrent_files
+
+        println!("[v2][torrust_torrent_files] adding torrent files");
+
+        if torrent_from_file.is_a_single_file_torrent() {
+            // The torrent contains only one file, so:
+            // - "path" is NULL
+            // - "md5sum" can be NULL
+
+            println!(
+                "[v2][torrust_torrent_files][single-file-torrent] adding torrent file {:?} with length {:?} ...",
+                &torrent_from_file.info.name, &torrent_from_file.info.length,
+            );
+
+            let file_id = target_database
+                .insert_torrent_file_for_torrent_with_one_file(
+                    torrent.torrent_id,
+                    // TODO: it seems md5sum can be None. Why? When?
+                    &torrent_from_file.info.md5sum.clone(),
+                    torrent_from_file.info.length.unwrap(),
+                )
+                .await;
+
+            println!(
+                "[v2][torrust_torrent_files][single-file-torrent] torrent file insert result: {:?}",
+                &file_id
+            );
+        } else {
+            // Multiple files are being shared
+            let files = torrent_from_file.info.files.as_ref().unwrap();
+
+            for file in files {
+                println!(
+                    "[v2][torrust_torrent_files][multiple-file-torrent] adding torrent file: {:?} ...",
+                    &file
+                );
+
+                let file_id = target_database
+                    .insert_torrent_file_for_torrent_with_multiple_files(torrent, file)
+                    .await;
+
+                println!(
+                    "[v2][torrust_torrent_files][multiple-file-torrent] torrent file insert result: {:?}",
+                    &file_id
+                );
+            }
+        }
+
+        // [v2] table torrust_torrent_info
+
+        println!(
+            "[v2][torrust_torrent_info] adding the torrent info for torrent id {:?} ...",
+            &torrent.torrent_id
+        );
+
+        let id = target_database.insert_torrent_info(torrent).await;
+
+        println!("[v2][torrust_torrents] torrent info insert result: {:?}.", &id);
+
+        // [v2] table torrust_torrent_announce_urls
+
+        println!(
+            "[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent id {:?} ...",
+            &torrent.torrent_id
+        );
+
+        if torrent_from_file.announce_list.is_some() {
+            // BEP-0012. Multiple trackers.
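+            //
+            // The `announce-list` key is a list of tiers, and each tier is in
+            // turn a list of tracker URLs, for example:
+            //
+            //   [["udp://tracker-a:6969"], ["udp://tracker-b:6969", "udp://tracker-c:6969"]]
+            //
+            // (The URLs above are only illustrative.) The upgrader flattens
+            // all tiers into a single list of announce URLs, as shown below.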
+
+            println!(
+                "[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...",
+                &torrent.torrent_id
+            );
+
+            // Flatten the nested vec (this will, however, remove the tier
+            // structure of the announce list).
+            let announce_urls = torrent_from_file
+                .announce_list
+                .clone()
+                .unwrap()
+                .into_iter()
+                .flatten()
+                .collect::<Vec<String>>();
+
+            for tracker_url in &announce_urls {
+                println!(
+                    "[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...",
+                    &torrent.torrent_id
+                );
+
+                let announce_url_id = target_database
+                    .insert_torrent_announce_url(torrent.torrent_id, tracker_url)
+                    .await;
+
+                println!(
+                    "[v2][torrust_torrent_announce_urls][announce-list] torrent announce url insert result {:?} ...",
+                    &announce_url_id
+                );
+            }
+        } else if torrent_from_file.announce.is_some() {
+            println!(
+                "[v2][torrust_torrent_announce_urls][announce] adding the torrent announce url for torrent id {:?} ...",
+                &torrent.torrent_id
+            );
+
+            let announce_url_id = target_database
+                .insert_torrent_announce_url(torrent.torrent_id, &torrent_from_file.announce.unwrap())
+                .await;
+
+            println!(
+                "[v2][torrust_torrent_announce_urls][announce] torrent announce url insert result {:?} ...",
+                &announce_url_id
+            );
+        }
+    }
+    println!("Torrents transferred");
+}
+
+pub fn read_torrent_from_file(path: &str) -> Result<Torrent, Box<dyn error::Error>> {
+    let contents = match fs::read(path) {
+        Ok(contents) => contents,
+        Err(e) => return Err(e.into()),
+    };
+
+    match decode_torrent(&contents) {
+        Ok(torrent) => Ok(torrent),
+        Err(e) => Err(e),
+    }
+}
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs
new file mode 100644
index 00000000..88e8a1a2
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs
@@ -0,0 +1,43 @@
+use std::sync::Arc;
+
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+
+#[allow(clippy::missing_panics_doc)]
+pub async fn transfer_tracker_keys(source_database: Arc<SqliteDatabaseV1_0_0>, target_database: Arc<SqliteDatabaseV2_0_0>) {
+    println!("Transferring tracker keys ...");
+
+    // Transfer table `torrust_tracker_keys`
+
+    let tracker_keys = source_database.get_tracker_keys().await.unwrap();
+
+    for tracker_key in &tracker_keys {
+        // [v2] table torrust_tracker_keys
+
+        println!(
+            "[v2][torrust_tracker_keys] adding the tracker key with id {:?} ...",
+            &tracker_key.key_id
+        );
+
+        let id = target_database
+            .insert_tracker_key(
+                tracker_key.key_id,
+                tracker_key.user_id,
+                &tracker_key.key,
+                tracker_key.valid_until,
+            )
+            .await
+            .unwrap();
+
+        assert!(
+            id == tracker_key.key_id,
+            "Error copying tracker key {:?} from source DB to the target DB",
+            &tracker_key.key_id
+        );
+
+        println!(
+            "[v2][torrust_tracker_keys] tracker key with id {:?} added.",
+            &tracker_key.key_id
+        );
+    }
+}
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs
new file mode 100644
index 00000000..ca127f5a
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs
@@ -0,0 +1,73 @@
+use std::sync::Arc;
+
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+
+#[allow(clippy::missing_panics_doc)]
+pub async fn transfer_users(
+    source_database: Arc<SqliteDatabaseV1_0_0>,
+    target_database: Arc<SqliteDatabaseV2_0_0>,
+    date_imported: &str,
+) {
+    println!("Transferring users ...");
+
+    // Transfer table `torrust_users`
+
+    let users = source_database.get_users().await.unwrap();
+
+    for user in &users {
+        // [v2] table torrust_users
+
+        println!(
+            "[v2][torrust_users] adding user with username {:?} and id {:?} ...",
+            &user.username, &user.user_id
+        );
+
+        let id = target_database
+            .insert_imported_user(user.user_id, date_imported, user.administrator)
+            .await
+            .unwrap();
+
+        assert!(
+            id == user.user_id,
+            "Error copying user {:?} from source DB to the target DB",
+            &user.user_id
+        );
+
+        println!("[v2][torrust_users] user: {:?} {:?} added.", &user.user_id, &user.username);
+
+        // [v2] table torrust_user_profiles
+
+        println!(
+            "[v2][torrust_user_profiles] adding user profile for user with username {:?} and id {:?} ...",
+            &user.username, &user.user_id
+        );
+
+        target_database
+            .insert_user_profile(user.user_id, &user.username, &user.email, user.email_verified)
+            .await
+            .unwrap();
+
+        println!(
+            "[v2][torrust_user_profiles] user profile added for user with username {:?} and id {:?}.",
+            &user.username, &user.user_id
+        );
+
+        // [v2] table torrust_user_authentication
+
+        println!(
+            "[v2][torrust_user_authentication] adding password hash ({:?}) for user id ({:?}) ...",
+            &user.password, &user.user_id
+        );
+
+        target_database
+            .insert_user_password_hash(user.user_id, &user.password)
+            .await
+            .unwrap();
+
+        println!(
+            "[v2][torrust_user_authentication] password hash ({:?}) added for user id ({:?}).",
+            &user.password, &user.user_id
+        );
+    }
+}
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
new file mode 100644
index 00000000..71e58413
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
@@ -0,0 +1,147 @@
+//! A console command to upgrade the application from version `v1.0.0` to `v2.0.0`.
+//!
+//! # Usage
+//!
+//! ```bash
+//! cargo run --bin upgrade SOURCE_DB_FILE TARGET_DB_FILE TORRENT_UPLOAD_DIR
+//! ```
+//!
+//! Where:
+//!
+//! - `SOURCE_DB_FILE` is the source database in version `v1.0.0` we want to migrate.
+//! - `TARGET_DB_FILE` is the new migrated database in version `v2.0.0`.
+//! - `TORRENT_UPLOAD_DIR` is the relative dir where torrent files are stored.
+//!
+//! For example:
+//!
+//! ```bash
+//! cargo run --bin upgrade ./data.db ./data_v2.db ./uploads
+//! ```
+//!
+//! This command was created to help users migrate from version `v1.0.0` to
+//! `v2.0.0`. The main changes in version `v2.0.0` were:
+//!
+//! - The database schema was changed.
+//! - The torrents are now stored entirely in the database. The torrent files
+//!   are not stored in the filesystem anymore. This command reads the torrent
+//!   files from the filesystem and stores them in the database.
+//!
+//! We recommend downloading your production database and the torrent files dir,
+//! and running the command in a local environment with version `v2.0.0`. Then,
+//! you can run the app locally and make sure all the data was migrated
+//! correctly.
+//!
+//! # Notes
+//!
+//! NOTES for `torrust_users` table transfer:
+//!
+//! - In v2, the table `torrust_users` contains a field `date_registered` which
+//!   does not exist in v1. We changed that column to allow `NULL`. We also added
+//!   the new column `date_imported` with the datetime when the upgrader was
+//!   executed.
+//!
+//! NOTES for `torrust_user_profiles` table transfer:
+//!
+//! - In v2, the table `torrust_user_profiles` contains two new fields: `bio`
+//!   and `avatar`. An empty string is used as the default value.
+//!
+//!
+//! If you want more information about this command you can read the [issue 56](https://github.com/torrust/torrust-index/issues/56).
+use std::env;
+use std::time::SystemTime;
+
+use chrono::prelude::{DateTime, Utc};
+use text_colorizer::Colorize;
+
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::{current_db, migrate_target_database, new_db, truncate_target_database};
+use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::category_transferrer::transfer_categories;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::transfer_torrents;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::tracker_key_transferrer::transfer_tracker_keys;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::user_transferrer::transfer_users;
+
+const NUMBER_OF_ARGUMENTS: usize = 3;
+
+#[derive(Debug)]
+pub struct Arguments {
+    /// The source database in version v1.0.0 we want to migrate
+    pub source_database_file: String,
+    /// The new migrated database in version v2.0.0
+    pub target_database_file: String,
+    /// The relative dir where torrent files are stored
+    pub upload_path: String,
+}
+
+fn print_usage() {
+    eprintln!(
+        "{} - migrates data from version v1.0.0 to v2.0.0.
+
+        cargo run --bin upgrade SOURCE_DB_FILE TARGET_DB_FILE TORRENT_UPLOAD_DIR
+
+        For example:
+
+        cargo run --bin upgrade ./data.db ./data_v2.db ./uploads
+
+        ",
+        "Upgrader".green()
+    );
+}
+
+fn parse_args() -> Arguments {
+    let args: Vec<String> = env::args().skip(1).collect();
+
+    if args.len() != NUMBER_OF_ARGUMENTS {
+        eprintln!(
+            "{} wrong number of arguments: expected {}, got {}",
+            "Error".red().bold(),
+            NUMBER_OF_ARGUMENTS,
+            args.len()
+        );
+        print_usage();
+        // Abort here instead of panicking below with an index-out-of-bounds
+        // error when accessing the missing arguments.
+        std::process::exit(1);
+    }
+
+    Arguments {
+        source_database_file: args[0].clone(),
+        target_database_file: args[1].clone(),
+        upload_path: args[2].clone(),
+    }
+}
+
+pub async fn run() {
+    let now = datetime_iso_8601();
+    upgrade(&parse_args(), &now).await;
+}
+
+pub async fn upgrade(args: &Arguments, date_imported: &str) {
+    // Get connection to the source database (current DB in settings)
+    let source_database = current_db(&args.source_database_file).await;
+
+    // Get connection to the target database (new DB we want to migrate the data)
+    let target_database = new_db(&args.target_database_file).await;
+
+    println!("Upgrading data from version v1.0.0 to v2.0.0 ...");
+
+    migrate_target_database(target_database.clone()).await;
+    truncate_target_database(target_database.clone()).await;
+
+    transfer_categories(source_database.clone(), target_database.clone()).await;
+    transfer_users(source_database.clone(), target_database.clone(), date_imported).await;
+    transfer_tracker_keys(source_database.clone(), target_database.clone()).await;
+    transfer_torrents(source_database.clone(), target_database.clone(), &args.upload_path).await;
+
+    println!("Upgrade data from version v1.0.0 to v2.0.0 finished!\n");
+
+    eprintln!(
+        "{}\nWe recommend you run the command to import torrent statistics for all torrents manually. \
+        If you do not, the statistics will be imported anyway during the normal execution of the program. \
+        You can import statistics manually with:\n {}",
+        "SUGGESTION: \n".yellow(),
+        "cargo run --bin import_tracker_statistics".yellow()
+    );
+}
+
+/// Current datetime in ISO8601 without time zone.
+/// For example: `2022-11-10 10:35:15`
+#[must_use]
+pub fn datetime_iso_8601() -> String {
+    let dt: DateTime<Utc> = SystemTime::now().into();
+    format!("{}", dt.format("%Y-%m-%d %H:%M:%S"))
+}
diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs
new file mode 100644
index 00000000..e22b19a7
--- /dev/null
+++ b/src/upgrades/mod.rs
@@ -0,0 +1 @@
+pub mod from_v1_0_0_to_v2_0_0;
diff --git a/src/utils/clock.rs b/src/utils/clock.rs
new file mode 100644
index 00000000..b17ee48b
--- /dev/null
+++ b/src/utils/clock.rs
@@ -0,0 +1,10 @@
+/// Returns the current timestamp in seconds.
+///
+/// # Panics
+///
+/// This function should never panic unless the current timestamp from the
+/// time library is negative, which should never happen.
+#[must_use]
+pub fn now() -> u64 {
+    u64::try_from(chrono::prelude::Utc::now().timestamp()).expect("timestamp should be positive")
+}
diff --git a/src/utils/hex.rs b/src/utils/hex.rs
index 1432a474..be8e82f5 100644
--- a/src/utils/hex.rs
+++ b/src/utils/hex.rs
@@ -1,17 +1,23 @@
 use std::fmt::Write;
 use std::num::ParseIntError;
 
-pub fn bytes_to_hex(bytes: &[u8]) -> String {
+#[must_use]
+pub fn from_bytes(bytes: &[u8]) -> String {
     let mut s = String::with_capacity(2 * bytes.len());
 
     for byte in bytes {
-        write!(s, "{:02X}", byte).unwrap();
-    };
+        write!(s, "{byte:02X}").unwrap();
+    }
 
     s
 }
 
-pub fn hex_to_bytes(s: &str) -> Result<Vec<u8>, ParseIntError> {
+/// Decodes a hexadecimal string into a vector of bytes.
+///
+/// # Errors
+///
+/// This function will return an error if the string is not a valid
+/// hexadecimal representation.
+pub fn into_bytes(s: &str) -> Result<Vec<u8>, ParseIntError> {
     (0..s.len())
         .step_by(2)
         .map(|i| u8::from_str_radix(&s[i..i + 2], 16))
diff --git a/src/utils/mod.rs b/src/utils/mod.rs
index 7226920a..ebb62358 100644
--- a/src/utils/mod.rs
+++ b/src/utils/mod.rs
@@ -1,4 +1,4 @@
-pub mod parse_torrent;
-pub mod time;
+pub mod clock;
 pub mod hex;
-pub mod regex;
+pub mod parse_torrent;
+pub mod validation;
diff --git a/src/utils/parse_torrent.rs b/src/utils/parse_torrent.rs
index 3b4df6f4..69e69011 100644
--- a/src/utils/parse_torrent.rs
+++ b/src/utils/parse_torrent.rs
@@ -1,23 +1,154 @@
-use std::{error};
+use std::error;
+
+use derive_more::{Display, Error};
+use serde::{self, Deserialize, Serialize};
+use serde_bencode::value::Value;
 use serde_bencode::{de, Error};
+use sha1::{Digest, Sha1};
+
+use crate::models::info_hash::InfoHash;
 use crate::models::torrent_file::Torrent;
 
+#[derive(Debug, Display, PartialEq, Eq, Error)]
+pub enum DecodeTorrentFileError {
+    #[display(fmt = "Torrent data could not be decoded from the bencoded format.")]
+    InvalidBencodeData,
+
+    #[display(fmt = "Torrent info dictionary key could not be decoded from the bencoded format.")]
+    InvalidInfoDictionary,
+
+    #[display(fmt = "Torrent has an invalid pieces key length. It should be a multiple of 20.")]
+    InvalidTorrentPiecesLength,
+
+    #[display(fmt = "Cannot bencode the parsed `info` dictionary again to generate the info-hash.")]
+    CannotBencodeInfoDict,
+}
+
+/// It decodes and validates an array of bytes containing a torrent file.
+///
+/// It returns a tuple containing the decoded torrent and the original info hash.
+/// The original info-hash might not match the new one in the `Torrent` because
+/// the info dictionary might have been modified. For example, ignoring some
+/// non-standard fields.
+///
+/// # Errors
+///
+/// This function will return an error if
+///
+/// - The torrent file is not a valid bencoded file.
+/// - The pieces key has a length that is not a multiple of 20.
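+///
+/// # Example
+///
+/// An illustrative sketch (`example.torrent` is a hypothetical file path):
+///
+/// ```text
+/// let bytes = std::fs::read("example.torrent")?;
+/// let (torrent, original_info_hash) = decode_and_validate_torrent_file(&bytes)?;
+/// ```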
+pub fn decode_and_validate_torrent_file(bytes: &[u8]) -> Result<(Torrent, InfoHash), DecodeTorrentFileError> {
+    let original_info_hash = calculate_info_hash(bytes)?;
+
+    let torrent = decode_torrent(bytes).map_err(|_| DecodeTorrentFileError::InvalidBencodeData)?;
+
+    // Make sure that the pieces key has a length that is a multiple of 20
+    if let Some(pieces) = torrent.info.pieces.as_ref() {
+        if pieces.as_ref().len() % 20 != 0 {
+            return Err(DecodeTorrentFileError::InvalidTorrentPiecesLength);
+        }
+    }
+
+    Ok((torrent, original_info_hash))
+}
+
+/// Decode a Torrent from Bencoded Bytes.
+///
+/// # Errors
+///
+/// This function will return an error if unable to parse bytes into torrent.
+pub fn decode_torrent(bytes: &[u8]) -> Result<Torrent, Box<dyn error::Error>> {
+    match de::from_bytes::<Torrent>(bytes) {
+        Ok(torrent) => Ok(torrent),
+        Err(e) => {
+            println!("{e:?}");
+            Err(e.into())
+        }
+    }
+}
+
+/// Encode a Torrent into Bencoded Bytes.
+///
+/// # Errors
+///
+/// This function will return an error if unable to bencode torrent.
+pub fn encode_torrent(torrent: &Torrent) -> Result<Vec<u8>, Error> {
+    match serde_bencode::to_bytes(torrent) {
+        Ok(bencode_bytes) => Ok(bencode_bytes),
+        Err(e) => {
+            eprintln!("{e:?}");
+            Err(e)
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq)]
+struct ParsedInfoDictFromMetainfoFile {
+    pub info: Value,
+}
+
+/// Calculates the `InfoHash` from the torrent file binary data.
+///
+/// # Errors
+///
+/// This function will return an error if:
+///
+/// - The torrent file is not a valid bencoded torrent file containing an `info`
+///   dictionary key.
+/// - The parsed `info` dictionary cannot be bencoded again to calculate the
+///   original info-hash.
+pub fn calculate_info_hash(bytes: &[u8]) -> Result<InfoHash, DecodeTorrentFileError> {
+    // Extract the info dictionary
+    let metainfo: ParsedInfoDictFromMetainfoFile =
+        serde_bencode::from_bytes(bytes).map_err(|_| DecodeTorrentFileError::InvalidInfoDictionary)?;
+
+    // Bencode the info dictionary
+    let info_dict_bytes = serde_bencode::to_bytes(&metainfo.info).map_err(|_| DecodeTorrentFileError::CannotBencodeInfoDict)?;
+
+    // Calculate the SHA-1 hash of the bencoded info dictionary
+    let mut hasher = Sha1::new();
+    hasher.update(&info_dict_bytes);
+    let result = hasher.finalize();
+
+    Ok(InfoHash::from_bytes(&result))
+}
+
+#[cfg(test)]
+mod tests {
+    use std::path::Path;
+    use std::str::FromStr;
+
+    use crate::models::info_hash::InfoHash;
+
+    #[test]
+    fn it_should_calculate_the_original_info_hash_using_all_fields_in_the_info_key_dictionary() {
+        let torrent_path = Path::new(
+            // cspell:disable-next-line
+            "tests/fixtures/torrents/6c690018c5786dbbb00161f62b0712d69296df97_with_custom_info_dict_key.torrent",
+        );
+
+        let original_info_hash = super::calculate_info_hash(&std::fs::read(torrent_path).unwrap()).unwrap();
+
+        assert_eq!(
+            original_info_hash,
+            InfoHash::from_str("6c690018c5786dbbb00161f62b0712d69296df97").unwrap()
+        );
+    }
+
+    #[test]
+    fn it_should_calculate_the_new_info_hash_ignoring_non_standard_fields_in_the_info_key_dictionary() {
+        let torrent_path = Path::new(
+            // cspell:disable-next-line
+            "tests/fixtures/torrents/6c690018c5786dbbb00161f62b0712d69296df97_with_custom_info_dict_key.torrent",
+        );
+
+        let torrent = super::decode_torrent(&std::fs::read(torrent_path).unwrap()).unwrap();
+
+        // The infohash is not the original infohash of the torrent file,
+        // but the infohash of the info dictionary without the custom keys.
+ assert_eq!( + torrent.canonical_info_hash_hex(), + "8aa01a4c816332045ffec83247ccbc654547fedf".to_string() + ); + } +} diff --git a/src/utils/regex.rs b/src/utils/regex.rs deleted file mode 100644 index 4c5b55ff..00000000 --- a/src/utils/regex.rs +++ /dev/null @@ -1,31 +0,0 @@ -use regex::Regex; - -pub fn validate_email_address(email_address_to_be_checked: &str) -> bool { - let email_regex = Regex::new(r"^([a-z\d_+]([a-z\d_+.]*[a-z\d_+])?)@([a-z\d]+([\-.][a-z\d]+)*\.[a-z]{2,6})").unwrap(); - - email_regex.is_match(email_address_to_be_checked) -} - -#[cfg(test)] -mod tests { - use crate::utils::regex::validate_email_address; - - #[test] - fn validate_email_address_test() { - assert!(!validate_email_address("test")); - - assert!(!validate_email_address("test@")); - - assert!(!validate_email_address("test@torrust")); - - assert!(!validate_email_address("test@torrust.")); - - assert!(!validate_email_address("test@.")); - - assert!(!validate_email_address("test@.com")); - - assert!(validate_email_address("test@torrust.com")); - - assert!(validate_email_address("t@torrust.org")) - } -} diff --git a/src/utils/time.rs b/src/utils/time.rs deleted file mode 100644 index 45f60cb4..00000000 --- a/src/utils/time.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub fn current_time() -> u64 { - chrono::prelude::Utc::now().timestamp() as u64 -} diff --git a/src/utils/validation.rs b/src/utils/validation.rs new file mode 100644 index 00000000..9c4eeb8a --- /dev/null +++ b/src/utils/validation.rs @@ -0,0 +1,79 @@ +use std::str::FromStr; + +use email_address::EmailAddress; +use regex::Regex; + +const MIN_DOMAIN_LENGTH: usize = 4; + +/// Validates an email address. +/// +/// # Panics +/// +/// It panics if the email address is invalid. This should not happen +/// because the email address is previously validated. +#[must_use] +pub fn validate_email_address(email_address_to_be_checked: &str) -> bool { + if !EmailAddress::is_valid(email_address_to_be_checked) { + return false; + } + + let email = EmailAddress::from_str(email_address_to_be_checked).expect("Invalid email address"); + + // We reject anyway the email if it's a dotless domain name. + domain_has_extension(email.domain()) +} + +/// Returns true if the string representing a domain has an extension. +/// +/// It does not check if the extension is valid. 
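+///
+/// For example, `torrust.com` has an extension, while `localhost` does not
+/// (and would therefore be rejected).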
+fn domain_has_extension(domain: &str) -> bool {
+    if domain.len() < MIN_DOMAIN_LENGTH {
+        return false;
+    }
+
+    Regex::new(r".*\..*").expect("Invalid regex").is_match(domain)
+}
+
+#[cfg(test)]
+mod tests {
+
+    mod for_email_validation {
+        use crate::utils::validation::validate_email_address;
+
+        #[test]
+        fn it_should_accept_valid_email_addresses() {
+            assert!(validate_email_address("test@torrust.com"));
+            assert!(validate_email_address("t@torrust.org"));
+        }
+
+        #[test]
+        fn it_should_not_accept_invalid_email_addresses() {
+            assert!(!validate_email_address("test"));
+            assert!(!validate_email_address("test@"));
+            assert!(!validate_email_address("test@torrust."));
+            assert!(!validate_email_address("test@."));
+            assert!(!validate_email_address("test@.com"));
+
+            // Notice that local domain names with no TLD are valid email
+            // domains, although ICANN highly discourages dotless email
+            // addresses, so we reject them.
+            assert!(!validate_email_address("test@torrust"));
+        }
+    }
+
+    mod for_domain_validation {
+        use crate::utils::validation::domain_has_extension;
+
+        #[test]
+        fn it_should_accept_valid_domain_with_extension() {
+            assert!(domain_has_extension("a.io"));
+            assert!(domain_has_extension("a.com"));
+        }
+
+        #[test]
+        fn it_should_not_accept_dotless_domains() {
+            assert!(!domain_has_extension(""));
+            assert!(!domain_has_extension("."));
+            assert!(!domain_has_extension("a."));
+        }
+    }
+}
diff --git a/src/web/api/mod.rs b/src/web/api/mod.rs
new file mode 100644
index 00000000..749008f1
--- /dev/null
+++ b/src/web/api/mod.rs
@@ -0,0 +1,42 @@
+//! The Torrust Index API.
+//!
+//! Currently, the API has only one version: `v1`.
+//!
+//! Refer to the [`v1`] module for more information.
+pub mod server;
+pub mod v1;
+
+use std::net::SocketAddr;
+use std::sync::Arc;
+
+use tokio::task::JoinHandle;
+
+use crate::common::AppData;
+use crate::web::api;
+
+/// API versions.
+pub enum Version {
+    V1,
+}
+
+/// The running API server.
+pub struct Running {
+    /// The socket address the API server is listening on.
+    pub socket_addr: SocketAddr,
+    /// The handle for the running API server.
+    pub api_server: Option<JoinHandle<Result<(), std::io::Error>>>,
+}
+
+#[must_use]
+#[derive(Debug)]
+pub struct ServerStartedMessage {
+    pub socket_addr: SocketAddr,
+}
+
+/// Starts the API server.
+#[must_use]
+pub async fn start(app_data: Arc<AppData>, net_ip: &str, net_port: u16, implementation: &Version) -> api::Running {
+    match implementation {
+        Version::V1 => server::start(app_data, net_ip, net_port).await,
+    }
+}
diff --git a/src/web/api/server.rs b/src/web/api/server.rs
new file mode 100644
index 00000000..8fa1e704
--- /dev/null
+++ b/src/web/api/server.rs
@@ -0,0 +1,75 @@
+use std::net::SocketAddr;
+use std::sync::Arc;
+
+use futures::Future;
+use log::info;
+use tokio::sync::oneshot::{self, Sender};
+
+use super::v1::routes::router;
+use super::{Running, ServerStartedMessage};
+use crate::common::AppData;
+
+/// Starts the API server.
+///
+/// # Panics
+///
+/// Panics if the API server can't be started.
+pub async fn start(app_data: Arc<AppData>, net_ip: &str, net_port: u16) -> Running {
+    let config_socket_addr: SocketAddr = format!("{net_ip}:{net_port}")
+        .parse()
+        .expect("API server socket address to be valid.");
+
+    let (tx, rx) = oneshot::channel::<ServerStartedMessage>();
+
+    // Run the API server
+    let join_handle = tokio::spawn(async move {
+        info!("Starting API server with net config: {} ...", config_socket_addr);
+
+        let handle = start_server(config_socket_addr, app_data.clone(), tx);
+
+        if let Ok(()) = handle.await {
+            info!("API server stopped");
+        }
+
+        Ok(())
+    });
+
+    // Wait until the API server is running
+    let bound_addr = match rx.await {
+        Ok(msg) => msg.socket_addr,
+        Err(e) => panic!("API server start failed. The API server was dropped: {e}"),
+    };
+
+    Running {
+        socket_addr: bound_addr,
+        api_server: Some(join_handle),
+    }
+}
+
+fn start_server(
+    config_socket_addr: SocketAddr,
+    app_data: Arc<AppData>,
+    tx: Sender<ServerStartedMessage>,
+) -> impl Future<Output = Result<(), hyper::Error>> {
+    let tcp_listener = std::net::TcpListener::bind(config_socket_addr).expect("tcp listener to bind to a socket address");
+
+    let bound_addr = tcp_listener
+        .local_addr()
+        .expect("tcp listener to be bound to a socket address.");
+
+    info!("API server listening on http://{}", bound_addr);
+
+    let app = router(app_data);
+
+    let server = axum::Server::from_tcp(tcp_listener)
+        .expect("a new server from the previously created tcp listener.")
+        .serve(app.into_make_service_with_connect_info::<SocketAddr>());
+
+    tx.send(ServerStartedMessage { socket_addr: bound_addr })
+        .expect("the API server should not be dropped");
+
+    server.with_graceful_shutdown(async move {
+        tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal.");
+        info!("Stopping API server on http://{} ...", bound_addr);
+    })
+}
diff --git a/src/web/api/v1/auth.rs b/src/web/api/v1/auth.rs
new file mode 100644
index 00000000..e52542cc
--- /dev/null
+++ b/src/web/api/v1/auth.rs
@@ -0,0 +1,175 @@
+//! API authentication.
+//!
+//! The API uses a [bearer token authentication scheme](https://datatracker.ietf.org/doc/html/rfc6750).
+//!
+//! API clients must have an account on the website to be able to use the API.
+//!
+//! # Authentication flow
+//!
+//! - [Registration](#registration)
+//! - [Login](#login)
+//! - [Using the token](#using-the-token)
+//!
+//! ## Registration
+//!
+//! ```bash
+//! curl \
+//!   --header "Content-Type: application/json" \
+//!   --request POST \
+//!   --data '{"username":"indexadmin","email":"indexadmin@torrust.com","password":"BenoitMandelbrot1924","confirm_password":"BenoitMandelbrot1924"}' \
+//!   http://127.0.0.1:3001/v1/user/register
+//! ```
+//!
+//! **NOTICE**: The first user is automatically an administrator. Currently,
+//! there is no way to change this. There is one administrator per instance.
+//! And you cannot delete the administrator account or make another user an
+//! administrator. For testing purposes, you can create a new administrator
+//! account by creating a new user and then manually changing the `administrator`
+//! field in the `torrust_users` table to `1`.
+//!
+//! ## Login
+//!
+//! ```bash
+//! curl \
+//!   --header "Content-Type: application/json" \
+//!   --request POST \
+//!   --data '{"login":"indexadmin","password":"BenoitMandelbrot1924"}' \
+//!   http://127.0.0.1:3001/v1/user/login
+//! ```
+//!
+//! **Response**
+//!
+//! ```json
+//! {
+//!   "data":{
"token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI", +//! "username":"indexadmin", +//! "admin":true +//! } +//! } +//! ``` +//! +//! **NOTICE**: The token is valid for 2 weeks (`1_209_600` seconds). After that, +//! you will have to renew the token. +//! +//! **NOTICE**: The token is associated with the user role. If you change the +//! user's role, you will have to log in again to get a new token with the new +//! role. +//! +//! ## Using the token +//! +//! Some endpoints require authentication. To use the token, you must add the +//! `Authorization` header to your request. For example, if you want to add a +//! new category, you must do the following: +//! +//! ```bash +//! curl \ +//! --header "Content-Type: application/json" \ +//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \ +//! --request POST \ +//! --data '{"name":"new category","icon":null}' \ +//! http://127.0.0.1:3001/v1/category +//! ``` +//! +//! **Response** +//! +//! ```json +//! { +//! "data": "new category" +//! } +//! ``` +use std::sync::Arc; + +use hyper::http::HeaderValue; + +use crate::common::AppData; +use crate::errors::ServiceError; +use crate::models::user::{UserClaims, UserCompact, UserId}; +use crate::services::authentication::JsonWebToken; +use crate::web::api::v1::extractors::bearer_token::BearerToken; + +pub struct Authentication { + json_web_token: Arc, +} + +impl Authentication { + #[must_use] + pub fn new(json_web_token: Arc) -> Self { + Self { json_web_token } + } + + /// Create Json Web Token + pub async fn sign_jwt(&self, user: UserCompact) -> String { + self.json_web_token.sign(user).await + } + + /// Verify Json Web Token + /// + /// # Errors + /// + /// This function will return an error if the JWT is not good or expired. + pub async fn verify_jwt(&self, token: &str) -> Result { + self.json_web_token.verify(token).await + } + + /// Get logged-in user ID from bearer token + /// + /// # Errors + /// + /// This function will return an error if it can get claims from the request + pub async fn get_user_id_from_bearer_token(&self, maybe_token: &Option) -> Result { + let claims = self.get_claims_from_bearer_token(maybe_token).await?; + Ok(claims.user.user_id) + } + + /// Get Claims from bearer token + /// + /// # Errors + /// + /// This function will: + /// + /// - Return an `ServiceError::TokenNotFound` if `HeaderValue` is `None`. + /// - Pass through the `ServiceError::TokenInvalid` if unable to verify the JWT. + async fn get_claims_from_bearer_token(&self, maybe_token: &Option) -> Result { + match maybe_token { + Some(token) => match self.verify_jwt(&token.value()).await { + Ok(claims) => Ok(claims), + Err(e) => Err(e), + }, + None => Err(ServiceError::TokenNotFound), + } + } +} + +/// Parses the token from the `Authorization` header. +/// +/// # Panics +/// +/// This function will panic if the `Authorization` header is not a valid `String`. +pub fn parse_token(authorization: &HeaderValue) -> String { + let split: Vec<&str> = authorization + .to_str() + .expect("variable `auth` contains data that is not visible ASCII chars.") + .split("Bearer") + .collect(); + let token = split[1].trim(); + token.to_string() +} + +/// If the user is logged in, returns the user's ID. 
+///
+/// # Errors
+///
+/// It returns an error if we cannot get the user from the bearer token.
+pub async fn get_optional_logged_in_user(
+    maybe_bearer_token: Option<BearerToken>,
+    app_data: Arc<AppData>,
+) -> Result<Option<UserId>, ServiceError> {
+    match maybe_bearer_token {
+        Some(bearer_token) => match app_data.auth.get_user_id_from_bearer_token(&Some(bearer_token)).await {
+            Ok(user_id) => Ok(Some(user_id)),
+            Err(error) => Err(error),
+        },
+        None => Ok(None),
+    }
+}
diff --git a/src/web/api/v1/contexts/about/handlers.rs b/src/web/api/v1/contexts/about/handlers.rs
new file mode 100644
index 00000000..07d5977b
--- /dev/null
+++ b/src/web/api/v1/contexts/about/handlers.rs
@@ -0,0 +1,30 @@
+//! API handlers for the [`about`](crate::web::api::v1::contexts::about) API
+//! context.
+use std::sync::Arc;
+
+use axum::extract::State;
+use axum::http::{header, StatusCode};
+use axum::response::{IntoResponse, Response};
+
+use crate::common::AppData;
+use crate::services::about;
+
+#[allow(clippy::unused_async)]
+pub async fn about_page_handler(State(_app_data): State<Arc<AppData>>) -> Response {
+    (
+        StatusCode::OK,
+        [(header::CONTENT_TYPE, "text/html; charset=utf-8")],
+        about::page(),
+    )
+        .into_response()
+}
+
+#[allow(clippy::unused_async)]
+pub async fn license_page_handler(State(_app_data): State<Arc<AppData>>) -> Response {
+    (
+        StatusCode::OK,
+        [(header::CONTENT_TYPE, "text/html; charset=utf-8")],
+        about::license_page(),
+    )
+        .into_response()
+}
diff --git a/src/web/api/v1/contexts/about/mod.rs b/src/web/api/v1/contexts/about/mod.rs
new file mode 100644
index 00000000..ef4668d1
--- /dev/null
+++ b/src/web/api/v1/contexts/about/mod.rs
@@ -0,0 +1,88 @@
+//! API context: `about`.
+//!
+//! This API context is responsible for providing metadata about the API.
+//!
+//! # Endpoints
+//!
+//! - [About](#about)
+//! - [License](#license)
+//!
+//! # About
+//!
+//! `GET /v1/about`
+//!
+//! Returns a html page with information about the API.
+//!
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl "http://127.0.0.1:3001/v1/about"
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! ```html
+//! <html>
+//!     <head>
+//!         <title>About</title>
+//!     </head>
+//!     <body>
+//!         <h1>Torrust Index</h1>
+//!
+//!         <h2>About</h2>
+//!
+//!         <p>Hi! This is a running torrust-index.</p>
+//!     </body>
+//! </html>
+//! ```
+//!
+//! # License
+//!
+//! `GET /v1/about/license`
+//!
+//! Returns the API license.
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl "http://127.0.0.1:3001/v1/about/license"
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! ```html
+//! <html>
+//!     <head>
+//!         <title>Licensing</title>
+//!     </head>
+//!     <body>
+//!         <h1>Torrust Index</h1>
+//!
+//!         <h2>Licensing</h2>
+//!
+//!         <h3>Multiple Licenses</h3>
+//!
+//!         <p>
+//!         This repository has multiple licenses depending on the content type, the date of contributions or stemming from external component licenses that were not developed by any of Torrust team members or Torrust repository
+//!         contributors.
+//!         </p>
+//!
+//!         <p>The two main applicable licenses to most of its content are:</p>
+//!
+//!         <p>- For Code -- agpl-3.0</p>
+//!
+//!         <p>- For Media (Images, etc.) -- cc-by-sa</p>
+//!
+//!         <p>If you want to read more about all the licenses and how they apply please refer to the contributor agreement.</p>
+//!     </body>
+//! </html>
+//! ```
+pub mod handlers;
+pub mod routes;
diff --git a/src/web/api/v1/contexts/about/routes.rs b/src/web/api/v1/contexts/about/routes.rs
new file mode 100644
index 00000000..d3877a3b
--- /dev/null
+++ b/src/web/api/v1/contexts/about/routes.rs
@@ -0,0 +1,17 @@
+//! API routes for the [`about`](crate::web::api::v1::contexts::about) API context.
+//!
+//! Refer to the [API endpoint documentation](crate::web::api::v1::contexts::about).
+use std::sync::Arc;
+
+use axum::routing::get;
+use axum::Router;
+
+use super::handlers::{about_page_handler, license_page_handler};
+use crate::common::AppData;
+
+/// Routes for the [`about`](crate::web::api::v1::contexts::about) API context.
+pub fn router(app_data: Arc<AppData>) -> Router {
+    Router::new()
+        .route("/", get(about_page_handler).with_state(app_data.clone()))
+        .route("/license", get(license_page_handler).with_state(app_data))
+}
diff --git a/src/web/api/v1/contexts/category/forms.rs b/src/web/api/v1/contexts/category/forms.rs
new file mode 100644
index 00000000..1ad7767a
--- /dev/null
+++ b/src/web/api/v1/contexts/category/forms.rs
@@ -0,0 +1,9 @@
+use serde::{Deserialize, Serialize};
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AddCategoryForm {
+    pub name: String,
+    pub icon: Option<String>,
+}
+
+pub type DeleteCategoryForm = AddCategoryForm;
diff --git a/src/web/api/v1/contexts/category/handlers.rs b/src/web/api/v1/contexts/category/handlers.rs
new file mode 100644
index 00000000..da0c1209
--- /dev/null
+++ b/src/web/api/v1/contexts/category/handlers.rs
@@ -0,0 +1,87 @@
+//! API handlers for the [`category`](crate::web::api::v1::contexts::category) API
+//! context.
+use std::sync::Arc;
+
+use axum::extract::{self, State};
+use axum::response::{IntoResponse, Json, Response};
+
+use super::forms::{AddCategoryForm, DeleteCategoryForm};
+use super::responses::{added_category, deleted_category};
+use crate::common::AppData;
+use crate::web::api::v1::extractors::bearer_token::Extract;
+use crate::web::api::v1::responses::{self};
+
+/// It handles the request to get all the categories.
+///
+/// It returns:
+///
+/// - `200` response with a json containing the category list [`Vec<Category>`](crate::databases::database::Category).
+/// - Other error status codes if there is a database error.
+///
+/// Refer to the [API endpoint documentation](crate::web::api::v1::contexts::category)
+/// for more information about this endpoint.
+///
+/// # Errors
+///
+/// It returns an error if there is a database error.
+#[allow(clippy::unused_async)]
+pub async fn get_all_handler(State(app_data): State<Arc<AppData>>) -> Response {
+    match app_data.category_repository.get_all().await {
+        Ok(categories) => Json(responses::OkResponseData { data: categories }).into_response(),
+        Err(error) => error.into_response(),
+    }
+}
+
+/// It adds a new category.
+///
+/// # Errors
+///
+/// It returns an error if:
+///
+/// - The user does not have permissions to create a new category.
+/// - There is a database error.
+#[allow(clippy::unused_async)]
+pub async fn add_handler(
+    State(app_data): State<Arc<AppData>>,
+    Extract(maybe_bearer_token): Extract,
+    extract::Json(category_form): extract::Json<AddCategoryForm>,
+) -> Response {
+    let user_id = match app_data.auth.get_user_id_from_bearer_token(&maybe_bearer_token).await {
+        Ok(user_id) => user_id,
+        Err(error) => return error.into_response(),
+    };
+
+    match app_data.category_service.add_category(&category_form.name, &user_id).await {
+        Ok(_) => added_category(&category_form.name).into_response(),
+        Err(error) => error.into_response(),
+    }
+}
+
+/// It deletes a category.
+///
+/// # Errors
+///
+/// It returns an error if:
+///
+/// - The user does not have permissions to delete a category.
+/// - There is a database error.
+#[allow(clippy::unused_async)]
+pub async fn delete_handler(
+    State(app_data): State<Arc<AppData>>,
+    Extract(maybe_bearer_token): Extract,
+    extract::Json(category_form): extract::Json<DeleteCategoryForm>,
+) -> Response {
+    // code-review: why do we need to send the whole category object to delete it?
+    // And we should use the ID instead of the name, because the name could change
+    // or we could add support for multiple languages.
+
+    let user_id = match app_data.auth.get_user_id_from_bearer_token(&maybe_bearer_token).await {
+        Ok(user_id) => user_id,
+        Err(error) => return error.into_response(),
+    };
+
+    match app_data.category_service.delete_category(&category_form.name, &user_id).await {
+        Ok(()) => deleted_category(&category_form.name).into_response(),
+        Err(error) => error.into_response(),
+    }
+}
diff --git a/src/web/api/v1/contexts/category/mod.rs b/src/web/api/v1/contexts/category/mod.rs
new file mode 100644
index 00000000..c6ed8a71
--- /dev/null
+++ b/src/web/api/v1/contexts/category/mod.rs
@@ -0,0 +1,144 @@
+//! API context: `category`.
+//!
+//! This API context is responsible for handling torrent categories.
+//!
+//! # Endpoints
+//!
+//! - [Get all categories](#get-all-categories)
+//! - [Add a category](#add-a-category)
+//! - [Delete a category](#delete-a-category)
+//!
+//! **NOTICE**: We don't support multiple languages yet, so the category name
+//! is always in English.
+//!
+//! # Get all categories
+//!
+//! `GET /v1/category`
+//!
+//! Returns all torrent categories.
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl "http://127.0.0.1:3001/v1/category"
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! ```json
+//! {
+//!   "data": [
+//!     {
+//!       "category_id": 3,
+//!       "name": "games",
+//!       "num_torrents": 0
+//!     },
+//!     {
+//!       "category_id": 1,
+//!       "name": "movies",
+//!       "num_torrents": 0
+//!     },
+//!     {
+//!       "category_id": 4,
+//!       "name": "music",
+//!       "num_torrents": 0
+//!     },
+//!     {
+//!       "category_id": 5,
+//!       "name": "software",
+//!       "num_torrents": 0
+//!     },
+//!     {
+//!       "category_id": 2,
+//!       "name": "tv shows",
+//!       "num_torrents": 0
+//!     }
+//!   ]
+//! }
+//! ```
+//! **Resource**
+//!
+//! Refer to the [`Category`](crate::databases::database::Category)
+//! struct for more information about the response attributes.
+//!
+//! # Add a category
+//!
+//! `POST /v1/category`
+//!
+//! It adds a new category.
+//!
+//! **POST params**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//! `name` | `String` | The name of the category | Yes | `new category`
+//! `icon` | `Option<String>` | Icon representing the category | No |
+//!
+//! **Notice**: the `icon` field is not implemented yet.
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//!   --header "Content-Type: application/json" \
--header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \ +//! --request POST \ +//! --data '{"name":"new category","icon":null}' \ +//! http://127.0.0.1:3001/v1/category +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "data": "new category" +//! } +//! ``` +//! +//! **Resource** +//! +//! Refer to [`OkResponse`](crate::models::response::OkResponse) for more +//! information about the response attributes. The response contains only the +//! name of the newly created category. +//! +//! # Delete a category +//! +//! `DELETE /v1/category` +//! +//! It deletes a category. +//! +//! **POST params** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `name` | `String` | The name of the category | Yes | `new category` +//! `icon` | `Option` | Icon representing the category | No | +//! +//! **Example request** +//! +//! ```bash +//! curl \ +//! --header "Content-Type: application/json" \ +//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \ +//! --request DELETE \ +//! --data '{"name":"new category","icon":null}' \ +//! http://127.0.0.1:3001/v1/category +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "data": "new category" +//! } +//! ``` +//! +//! **Resource** +//! +//! Refer to [`OkResponse`](crate::models::response::OkResponse) for more +//! information about the response attributes. The response contains only the +//! name of the deleted category. +pub mod forms; +pub mod handlers; +pub mod responses; +pub mod routes; diff --git a/src/web/api/v1/contexts/category/responses.rs b/src/web/api/v1/contexts/category/responses.rs new file mode 100644 index 00000000..b1e20d19 --- /dev/null +++ b/src/web/api/v1/contexts/category/responses.rs @@ -0,0 +1,19 @@ +//! API responses for the the [`category`](crate::web::api::v1::contexts::category) API +//! context. +use axum::Json; + +use crate::web::api::v1::responses::OkResponseData; + +/// Response after successfully creating a new category. +pub fn added_category(category_name: &str) -> Json> { + Json(OkResponseData { + data: category_name.to_string(), + }) +} + +/// Response after successfully deleting a new category. +pub fn deleted_category(category_name: &str) -> Json> { + Json(OkResponseData { + data: category_name.to_string(), + }) +} diff --git a/src/web/api/v1/contexts/category/routes.rs b/src/web/api/v1/contexts/category/routes.rs new file mode 100644 index 00000000..2d762c47 --- /dev/null +++ b/src/web/api/v1/contexts/category/routes.rs @@ -0,0 +1,18 @@ +//! API routes for the [`category`](crate::web::api::v1::contexts::category) API context. +//! +//! Refer to the [API endpoint documentation](crate::web::api::v1::contexts::category). +use std::sync::Arc; + +use axum::routing::{delete, get, post}; +use axum::Router; + +use super::handlers::{add_handler, delete_handler, get_all_handler}; +use crate::common::AppData; + +/// Routes for the [`category`](crate::web::api::v1::contexts::category) API context. 
+pub fn router(app_data: Arc<AppData>) -> Router {
+    Router::new()
+        .route("/", get(get_all_handler).with_state(app_data.clone()))
+        .route("/", post(add_handler).with_state(app_data.clone()))
+        .route("/", delete(delete_handler).with_state(app_data))
+}
diff --git a/src/web/api/v1/contexts/mod.rs b/src/web/api/v1/contexts/mod.rs
new file mode 100644
index 00000000..f6ef4069
--- /dev/null
+++ b/src/web/api/v1/contexts/mod.rs
@@ -0,0 +1,19 @@
+//! The API is organized in the following contexts:
+//!
+//! Context | Description | Version
+//! ---|---|---
+//! `About` | Metadata about the API | [`v1`](crate::web::api::v1::contexts::about)
+//! `Category` | Torrent categories | [`v1`](crate::web::api::v1::contexts::category)
+//! `Proxy` | Image proxy cache | [`v1`](crate::web::api::v1::contexts::proxy)
+//! `Settings` | Index settings | [`v1`](crate::web::api::v1::contexts::settings)
+//! `Tag` | Torrent tags | [`v1`](crate::web::api::v1::contexts::tag)
+//! `Torrent` | Indexed torrents | [`v1`](crate::web::api::v1::contexts::torrent)
+//! `User` | Users | [`v1`](crate::web::api::v1::contexts::user)
+//!
+pub mod about;
+pub mod category;
+pub mod proxy;
+pub mod settings;
+pub mod tag;
+pub mod torrent;
+pub mod user;
diff --git a/src/web/api/v1/contexts/proxy/handlers.rs b/src/web/api/v1/contexts/proxy/handlers.rs
new file mode 100644
index 00000000..1e5105ee
--- /dev/null
+++ b/src/web/api/v1/contexts/proxy/handlers.rs
@@ -0,0 +1,49 @@
+//! API handlers for the [`proxy`](crate::web::api::v1::contexts::proxy) API
+//! context.
+use std::sync::Arc;
+
+use axum::extract::{Path, State};
+use axum::response::Response;
+
+use super::responses::png_image;
+use crate::cache::image::manager::Error;
+use crate::common::AppData;
+use crate::ui::proxy::map_error_to_image;
+use crate::web::api::v1::extractors::bearer_token::Extract;
+
+/// Get the remote image. It uses the cached image if available.
+#[allow(clippy::unused_async)]
+pub async fn get_proxy_image_handler(
+    State(app_data): State<Arc<AppData>>,
+    Extract(maybe_bearer_token): Extract,
+    Path(url): Path<String>,
+) -> Response {
+    if maybe_bearer_token.is_none() {
+        return png_image(map_error_to_image(&Error::Unauthenticated));
+    }
+
+    let Ok(user_id) = app_data.auth.get_user_id_from_bearer_token(&maybe_bearer_token).await else {
+        return png_image(map_error_to_image(&Error::Unauthenticated));
+    };
+
+    // code-review: Handling status codes other than OK in the index-gui is quite a pain.
+    // Return OK for now.
+
+    // todo: it also works for other image types but we are always returning the
+    // same content type: `image/png`. If we only support PNG images we should
+    // change the documentation and return an error for other image types.
+
+    // Get image URL from URL path parameter.
+    let image_url = urlencoding::decode(&url).unwrap_or_default().into_owned();
+
+    match app_data.proxy_service.get_image_by_url(&image_url, &user_id).await {
+        Ok(image_bytes) => {
+            // Returns the cached image.
+            png_image(image_bytes)
+        }
+        Err(e) => {
+            // Returns an error image.
+            png_image(map_error_to_image(&e))
+        }
+    }
+}
diff --git a/src/web/api/v1/contexts/proxy/mod.rs b/src/web/api/v1/contexts/proxy/mod.rs
new file mode 100644
index 00000000..745d9340
--- /dev/null
+++ b/src/web/api/v1/contexts/proxy/mod.rs
@@ -0,0 +1,58 @@
+//! API context: `proxy`.
+//!
+//! This context contains the API routes for the proxy service.
+//!
+//! The torrent descriptions can contain images. These images are proxied
+//! through the index to:
+//!
+//! - Prevent leaking the user's IP address.
- Avoid storing images on the server.
+//!
+//! The proxy service is a simple cache that stores the images in memory.
+//!
+//! **NOTICE:** For now, it only supports PNG images.
+//!
+//! **NOTICE:** The proxy service is not intended to be used as a general
+//! purpose proxy. It is only intended to be used for the images in the
+//! torrent descriptions.
+//!
+//! **NOTICE:** Unauthorized users can't see images. They will get an image
+//! with the text "Sign in to see image" instead.
+//!
+//! # Example
+//!
+//! The PNG image:
+//!
+//! <https://raw.githubusercontent.com/torrust/torrust-index/develop/docs/media/torrust_logo.png>
+//!
+//! The percent encoded image URL:
+//!
+//! ```text
+//! https%3A%2F%2Fraw.githubusercontent.com%2Ftorrust%2Ftorrust-index%2Fdevelop%2Fdocs%2Fmedia%2Ftorrust_logo.png
+//! ```
+//!
+//! For unauthenticated clients:
+//!
+//! ```bash
+//! curl \
+//! --header "cache-control: no-cache" \
+//! --header "pragma: no-cache" \
+//! --output torrust_logo.png \
+//! http://0.0.0.0:3001/v1/proxy/image/https%3A%2F%2Fraw.githubusercontent.com%2Ftorrust%2Ftorrust-index%2Fdevelop%2Fdocs%2Fmedia%2Ftorrust_logo.png
+//! ```
+//!
+//! You will receive an image with the text "Sign in to see image" instead.
+//!
+//! For authenticated clients:
+//!
+//! ```bash
+//! curl \
+//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \
+//! --header "cache-control: no-cache" \
+//! --header "pragma: no-cache" \
+//! --output torrust_logo.png \
+//! http://0.0.0.0:3001/v1/proxy/image/https%3A%2F%2Fraw.githubusercontent.com%2Ftorrust%2Ftorrust-index%2Fdevelop%2Fdocs%2Fmedia%2Ftorrust_logo.png
+//! ```
+pub mod handlers;
+pub mod responses;
+pub mod routes;
diff --git a/src/web/api/v1/contexts/proxy/responses.rs b/src/web/api/v1/contexts/proxy/responses.rs new file mode 100644 index 00000000..1ce9730c --- /dev/null +++ b/src/web/api/v1/contexts/proxy/responses.rs @@ -0,0 +1,8 @@
+use axum::response::{IntoResponse, Response};
+use bytes::Bytes;
+use hyper::{header, StatusCode};
+
+#[must_use]
+pub fn png_image(bytes: Bytes) -> Response {
+ (StatusCode::OK, [(header::CONTENT_TYPE, "image/png")], bytes).into_response()
+}
diff --git a/src/web/api/v1/contexts/proxy/routes.rs b/src/web/api/v1/contexts/proxy/routes.rs new file mode 100644 index 00000000..e6bd7bef --- /dev/null +++ b/src/web/api/v1/contexts/proxy/routes.rs @@ -0,0 +1,15 @@
+//! API routes for the [`proxy`](crate::web::api::v1::contexts::proxy) API context.
+//!
+//! Refer to the [API endpoint documentation](crate::web::api::v1::contexts::proxy).
+use std::sync::Arc;
+
+use axum::routing::get;
+use axum::Router;
+
+use super::handlers::get_proxy_image_handler;
+use crate::common::AppData;
+
+/// Routes for the [`proxy`](crate::web::api::v1::contexts::proxy) API context.
+pub fn router(app_data: Arc<AppData>) -> Router {
+ Router::new().route("/image/:url", get(get_proxy_image_handler).with_state(app_data))
+}
diff --git a/src/web/api/v1/contexts/settings/handlers.rs b/src/web/api/v1/contexts/settings/handlers.rs new file mode 100644 index 00000000..f4d94541 --- /dev/null +++ b/src/web/api/v1/contexts/settings/handlers.rs @@ -0,0 +1,47 @@
+//! API handlers for the [`settings`](crate::web::api::v1::contexts::settings) API
+//! context.
+use std::sync::Arc;
+
+use axum::extract::State;
+use axum::response::{IntoResponse, Json, Response};
+
+use crate::common::AppData;
+use crate::web::api::v1::extractors::bearer_token::Extract;
+use crate::web::api::v1::responses;
+
+/// Get all settings.
+///
+/// # Errors
+///
+/// This function will return an error if the user does not have permission to
+/// view all the settings.
+#[allow(clippy::unused_async)]
+pub async fn get_all_handler(State(app_data): State<Arc<AppData>>, Extract(maybe_bearer_token): Extract) -> Response {
+ let user_id = match app_data.auth.get_user_id_from_bearer_token(&maybe_bearer_token).await {
+ Ok(user_id) => user_id,
+ Err(error) => return error.into_response(),
+ };
+
+ let all_settings = match app_data.settings_service.get_all(&user_id).await {
+ Ok(all_settings) => all_settings,
+ Err(error) => return error.into_response(),
+ };
+
+ Json(responses::OkResponseData { data: all_settings }).into_response()
+}
+
+/// Get the public settings.
+#[allow(clippy::unused_async)]
+pub async fn get_public_handler(State(app_data): State<Arc<AppData>>) -> Response {
+ let public_settings = app_data.settings_service.get_public().await;
+
+ Json(responses::OkResponseData { data: public_settings }).into_response()
+}
+
+/// Get the website name.
+#[allow(clippy::unused_async)]
+pub async fn get_site_name_handler(State(app_data): State<Arc<AppData>>) -> Response {
+ let site_name = app_data.settings_service.get_site_name().await;
+
+ Json(responses::OkResponseData { data: site_name }).into_response()
+}
diff --git a/src/web/api/v1/contexts/settings/mod.rs b/src/web/api/v1/contexts/settings/mod.rs new file mode 100644 index 00000000..5bb35151 --- /dev/null +++ b/src/web/api/v1/contexts/settings/mod.rs @@ -0,0 +1,171 @@
+//! API context: `settings`.
+//!
+//! This API context is responsible for handling the application settings.
+//!
+//! # Endpoints
+//!
+//! - [Get all settings](#get-all-settings)
+//! - [Update all settings](#update-all-settings)
+//! - [Get site name](#get-site-name)
+//! - [Get public settings](#get-public-settings)
+//!
+//! # Get all settings
+//!
+//! `GET /v1/settings`
+//!
+//! Returns all settings.
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//! --header "Content-Type: application/json" \
+//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \
+//! --request GET \
+//! "http://127.0.0.1:3001/v1/settings"
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! ```json
+//! {
+//! "data": {
+//! "website": {
+//! "name": "Torrust"
+//! },
+//! "tracker": {
+//! "url": "udp://localhost:6969",
+//! "mode": "Public",
+//! "api_url": "http://localhost:1212",
+//! "token": "MyAccessToken",
+//! "token_valid_seconds": 7257600
+//! },
+//! "net": {
+//! "port": 3001,
+//! "base_url": null
+//! },
+//! "auth": {
+//! "email_on_signup": "Optional",
+//! "min_password_length": 6,
+//! "max_password_length": 64,
+//! "secret_key": "MaxVerstappenWC2021"
+//! },
+//! "database": {
+//! "connect_url": "sqlite://./storage/database/data.db?mode=rwc"
+//! },
+//! "mail": {
+//! "email_verification_enabled": false,
+//! "from": "example@email.com",
+//! "reply_to": "noreply@email.com",
+//! "username": "",
+//! "password": "",
+//! "server": "",
+//! "port": 25
+//! },
+//! "image_cache": {
+//! "max_request_timeout_ms": 1000,
+//! "capacity": 128000000,
+//! "entry_size_limit": 4000000,
+//!
"user_quota_period_seconds": 3600, +//! "user_quota_bytes": 64000000 +//! }, +//! "api": { +//! "default_torrent_page_size": 10, +//! "max_torrent_page_size": 30 +//! }, +//! "tracker_statistics_importer": { +//! "torrent_info_update_interval": 3600 +//! } +//! } +//! } +//! ``` +//! **Resource** +//! +//! Refer to the [`TorrustIndex`](crate::config::TorrustIndex) +//! struct for more information about the response attributes. +//! +//! # Update all settings +//! +//! **NOTICE**: This endpoint to update the settings does not work when you use +//! environment variables to configure the application. You need to use a +//! configuration file instead. Because settings are persisted in that file. +//! Refer to the issue [#144](https://github.com/torrust/torrust-index/issues/144) +//! for more information. +//! +//! `POST /v1/settings` +//! +//! **Example request** +//! +//! ```bash +//! curl \ +//! --header "Content-Type: application/json" \ +//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \ +//! --request POST \ +//! --data '{"website":{"name":"Torrust"},"tracker":{"url":"udp://localhost:6969","mode":"Public","api_url":"http://localhost:1212","token":"MyAccessToken","token_valid_seconds":7257600},"net":{"port":3001,"base_url":null},"auth":{"email_on_signup":"Optional","min_password_length":6,"max_password_length":64,"secret_key":"MaxVerstappenWC2021"},"database":{"connect_url":"sqlite://./storage/database/data.db?mode=rwc"},"mail":{"email_verification_enabled":false,"from":"example@email.com","reply_to":"noreply@email.com","username":"","password":"","server":"","port":25},"image_cache":{"max_request_timeout_ms":1000,"capacity":128000000,"entry_size_limit":4000000,"user_quota_period_seconds":3600,"user_quota_bytes":64000000},"api":{"default_torrent_page_size":10,"max_torrent_page_size":30},"tracker_statistics_importer":{"torrent_info_update_interval":3600}}' \ +//! "http://127.0.0.1:3001/v1/settings" +//! ``` +//! +//! The response contains the settings that were updated. +//! +//! **Resource** +//! +//! Refer to the [`TorrustIndex`](crate::config::TorrustIndex) +//! struct for more information about the response attributes. +//! +//! # Get site name +//! +//! `GET /v1/settings/name` +//! +//! It returns the name of the site. +//! +//! **Example request** +//! +//! ```bash +//! curl \ +//! --header "Content-Type: application/json" \ +//! --request GET \ +//! "http://127.0.0.1:3001/v1/settings/name" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "data":"Torrust" +//! } +//! ``` +//! +//! # Get public settings +//! +//! `GET /v1/settings/public` +//! +//! It returns all the public settings. +//! +//! **Example request** +//! +//! ```bash +//! curl \ +//! --header "Content-Type: application/json" \ +//! --request GET \ +//! "http://127.0.0.1:3001/v1/settings/public" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "data": { +//! "website_name": "Torrust", +//! "tracker_url": "udp://localhost:6969", +//! "tracker_mode": "Public", +//! "email_on_signup": "Optional" +//! } +//! } +//! ``` +//! +//! **Resource** +//! +//! Refer to the [`ConfigurationPublic`](crate::config::ConfigurationPublic) +//! struct for more information about the response attributes. 
+pub mod handlers;
+pub mod routes;
diff --git a/src/web/api/v1/contexts/settings/routes.rs b/src/web/api/v1/contexts/settings/routes.rs new file mode 100644 index 00000000..e0990f52 --- /dev/null +++ b/src/web/api/v1/contexts/settings/routes.rs @@ -0,0 +1,18 @@
+//! API routes for the [`settings`](crate::web::api::v1::contexts::settings) API context.
+//!
+//! Refer to the [API endpoint documentation](crate::web::api::v1::contexts::settings).
+use std::sync::Arc;
+
+use axum::routing::get;
+use axum::Router;
+
+use super::handlers::{get_all_handler, get_public_handler, get_site_name_handler};
+use crate::common::AppData;
+
+/// Routes for the [`settings`](crate::web::api::v1::contexts::settings) API context.
+pub fn router(app_data: Arc<AppData>) -> Router {
+ Router::new()
+ .route("/", get(get_all_handler).with_state(app_data.clone()))
+ .route("/name", get(get_site_name_handler).with_state(app_data.clone()))
+ .route("/public", get(get_public_handler).with_state(app_data))
+}
diff --git a/src/web/api/v1/contexts/tag/forms.rs b/src/web/api/v1/contexts/tag/forms.rs new file mode 100644 index 00000000..12c751ad --- /dev/null +++ b/src/web/api/v1/contexts/tag/forms.rs @@ -0,0 +1,15 @@
+//! API forms for the [`tag`](crate::web::api::v1::contexts::tag) API
+//! context.
+use serde::{Deserialize, Serialize};
+
+use crate::models::torrent_tag::TagId;
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct AddTagForm {
+ pub name: String,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct DeleteTagForm {
+ pub tag_id: TagId,
+}
diff --git a/src/web/api/v1/contexts/tag/handlers.rs b/src/web/api/v1/contexts/tag/handlers.rs new file mode 100644 index 00000000..f750c385 --- /dev/null +++ b/src/web/api/v1/contexts/tag/handlers.rs @@ -0,0 +1,83 @@
+//! API handlers for the [`tag`](crate::web::api::v1::contexts::tag) API
+//! context.
+use std::sync::Arc;
+
+use axum::extract::{self, State};
+use axum::response::{IntoResponse, Json, Response};
+
+use super::forms::{AddTagForm, DeleteTagForm};
+use super::responses::{added_tag, deleted_tag};
+use crate::common::AppData;
+use crate::web::api::v1::extractors::bearer_token::Extract;
+use crate::web::api::v1::responses::{self};
+
+/// It handles the request to get all the tags.
+///
+/// It returns:
+///
+/// - `200` response with a json containing the tag list [`Vec<TorrentTag>`](crate::models::torrent_tag::TorrentTag).
+/// - Other error status codes if there is a database error.
+///
+/// Refer to the [API endpoint documentation](crate::web::api::v1::contexts::tag)
+/// for more information about this endpoint.
+///
+/// # Errors
+///
+/// It returns an error if there is a database error.
+#[allow(clippy::unused_async)]
+pub async fn get_all_handler(State(app_data): State<Arc<AppData>>) -> Response {
+ match app_data.tag_repository.get_all().await {
+ Ok(tags) => Json(responses::OkResponseData { data: tags }).into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+/// It adds a new tag.
+///
+/// # Errors
+///
+/// It returns an error if:
+///
+/// - The user does not have permissions to create a new tag.
+/// - There is a database error.
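+///
+/// The expected JSON body matches [`AddTagForm`](crate::web::api::v1::contexts::tag::forms::AddTagForm),
+/// which has a single `name` field. A minimal, illustrative sketch of building
+/// that body with `serde_json`:
+///
+/// ```rust,ignore
+/// // Produces `{"name":"new tag"}`, the body shown in the module docs.
+/// let body = serde_json::json!({ "name": "new tag" }).to_string();
+/// ```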
+#[allow(clippy::unused_async)]
+pub async fn add_handler(
+ State(app_data): State<Arc<AppData>>,
+ Extract(maybe_bearer_token): Extract,
+ extract::Json(add_tag_form): extract::Json<AddTagForm>,
+) -> Response {
+ let user_id = match app_data.auth.get_user_id_from_bearer_token(&maybe_bearer_token).await {
+ Ok(user_id) => user_id,
+ Err(error) => return error.into_response(),
+ };
+
+ match app_data.tag_service.add_tag(&add_tag_form.name, &user_id).await {
+ Ok(_) => added_tag(&add_tag_form.name).into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+/// It deletes a tag.
+///
+/// # Errors
+///
+/// It returns an error if:
+///
+/// - The user does not have permissions to delete tags.
+/// - There is a database error.
+#[allow(clippy::unused_async)]
+pub async fn delete_handler(
+ State(app_data): State<Arc<AppData>>,
+ Extract(maybe_bearer_token): Extract,
+ extract::Json(delete_tag_form): extract::Json<DeleteTagForm>,
+) -> Response {
+ let user_id = match app_data.auth.get_user_id_from_bearer_token(&maybe_bearer_token).await {
+ Ok(user_id) => user_id,
+ Err(error) => return error.into_response(),
+ };
+
+ match app_data.tag_service.delete_tag(&delete_tag_form.tag_id, &user_id).await {
+ Ok(()) => deleted_tag(delete_tag_form.tag_id).into_response(),
+ Err(error) => error.into_response(),
+ }
+}
diff --git a/src/web/api/v1/contexts/tag/mod.rs b/src/web/api/v1/contexts/tag/mod.rs new file mode 100644 index 00000000..eb4dd68d --- /dev/null +++ b/src/web/api/v1/contexts/tag/mod.rs @@ -0,0 +1,123 @@
+//! API context: `tag`.
+//!
+//! This API context is responsible for handling torrent tags.
+//!
+//! # Endpoints
+//!
+//! - [Get all tags](#get-all-tags)
+//! - [Add a tag](#add-a-tag)
+//! - [Delete a tag](#delete-a-tag)
+//!
+//! **NOTICE**: We don't support multiple languages yet, so the tag is always
+//! in English.
+//!
+//! # Get all tags
+//!
+//! `GET /v1/tags`
+//!
+//! Returns all torrent tags.
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl "http://127.0.0.1:3001/v1/tags"
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! ```json
+//! {
+//! "data": [
+//! {
+//! "tag_id": 1,
+//! "name": "anime"
+//! },
+//! {
+//! "tag_id": 2,
+//! "name": "manga"
+//! }
+//! ]
+//! }
+//! ```
+//! **Resource**
+//!
+//! Refer to the [`Tag`](crate::models::torrent_tag::TorrentTag)
+//! struct for more information about the response attributes.
+//!
+//! # Add a tag
+//!
+//! `POST /v1/tag`
+//!
+//! It adds a new tag.
+//!
+//! **POST params**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//! `name` | `String` | The tag name | Yes | `new tag`
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//! --header "Content-Type: application/json" \
+//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \
+//! --request POST \
+//! --data '{"name":"new tag"}' \
+//! http://127.0.0.1:3001/v1/tag
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! ```json
+//! {
+//! "data": "new tag"
+//! }
+//! ```
+//!
+//! **Resource**
+//!
+//! Refer to [`OkResponse`](crate::models::response::OkResponse) for more
+//! information about the response attributes. The response contains only the
+//! name of the newly created tag.
+//!
+//! # Delete a tag
+//!
+//! `DELETE /v1/tag`
+//!
+//! It deletes a tag.
+//!
+//! **DELETE params**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//!
`tag_id` | `i64` | The internal tag ID | Yes | `1`
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//! --header "Content-Type: application/json" \
+//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \
+//! --request DELETE \
+//! --data '{"tag_id":1}' \
+//! http://127.0.0.1:3001/v1/tag
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! ```json
+//! {
+//! "data": 1
+//! }
+//! ```
+//!
+//! **Resource**
+//!
+//! Refer to [`OkResponse`](crate::models::response::OkResponse) for more
+//! information about the response attributes. The response contains only the
+//! ID of the deleted tag.
pub mod forms;
pub mod handlers;
pub mod responses;
pub mod routes;
diff --git a/src/web/api/v1/contexts/tag/responses.rs b/src/web/api/v1/contexts/tag/responses.rs new file mode 100644 index 00000000..a1645994 --- /dev/null +++ b/src/web/api/v1/contexts/tag/responses.rs @@ -0,0 +1,18 @@
+//! API responses for the [`tag`](crate::web::api::v1::contexts::tag) API
+//! context.
+use axum::Json;
+
+use crate::models::torrent_tag::TagId;
+use crate::web::api::v1::responses::OkResponseData;
+
+/// Response after successfully creating a new tag.
+pub fn added_tag(tag_name: &str) -> Json<OkResponseData<String>> {
+ Json(OkResponseData {
+ data: tag_name.to_string(),
+ })
+}
+
+/// Response after successfully deleting a tag.
+pub fn deleted_tag(tag_id: TagId) -> Json<OkResponseData<TagId>> {
+ Json(OkResponseData { data: tag_id })
+}
diff --git a/src/web/api/v1/contexts/tag/routes.rs b/src/web/api/v1/contexts/tag/routes.rs new file mode 100644 index 00000000..4d72970a --- /dev/null +++ b/src/web/api/v1/contexts/tag/routes.rs @@ -0,0 +1,24 @@
+//! API routes for the [`tag`](crate::web::api::v1::contexts::tag) API context.
+//!
+//! Refer to the [API endpoint documentation](crate::web::api::v1::contexts::tag).
+use std::sync::Arc;
+
+use axum::routing::{delete, get, post};
+use axum::Router;
+
+use super::handlers::{add_handler, delete_handler, get_all_handler};
+use crate::common::AppData;
+
+// code-review: should we use `tags` also for single resources?
+
+/// Routes for the [`tag`](crate::web::api::v1::contexts::tag) API context.
+pub fn router_for_single_resources(app_data: Arc<AppData>) -> Router {
+ Router::new()
+ .route("/", post(add_handler).with_state(app_data.clone()))
+ .route("/", delete(delete_handler).with_state(app_data))
+}
+
+/// Routes for the [`tag`](crate::web::api::v1::contexts::tag) API context.
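+///
+/// A sketch of how the two tag routers might be mounted. The `/tag` and
+/// `/tags` paths are taken from the endpoint documentation above, but the
+/// actual nesting is done by the `v1` root router, not by this module:
+///
+/// ```rust,ignore
+/// use axum::Router;
+///
+/// let v1: Router = Router::new()
+///     // Single-resource actions: `POST /v1/tag`, `DELETE /v1/tag`.
+///     .nest("/tag", router_for_single_resources(app_data.clone()))
+///     // Collection action: `GET /v1/tags`.
+///     .nest("/tags", router_for_multiple_resources(app_data));
+/// ```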
+pub fn router_for_multiple_resources(app_data: Arc<AppData>) -> Router {
+ Router::new().route("/", get(get_all_handler).with_state(app_data))
+}
diff --git a/src/web/api/v1/contexts/torrent/errors.rs b/src/web/api/v1/contexts/torrent/errors.rs new file mode 100644 index 00000000..9bf24d48 --- /dev/null +++ b/src/web/api/v1/contexts/torrent/errors.rs @@ -0,0 +1,61 @@
+use axum::response::{IntoResponse, Response};
+use derive_more::{Display, Error};
+use hyper::StatusCode;
+
+use crate::web::api::v1::responses::{json_error_response, ErrorResponseData};
+
+#[derive(Debug, Display, PartialEq, Eq, Error)]
+pub enum Request {
+ #[display(fmt = "torrent title bytes are not a valid UTF8 string.")]
+ TitleIsNotValidUtf8,
+
+ #[display(fmt = "torrent description bytes are not a valid UTF8 string.")]
+ DescriptionIsNotValidUtf8,
+
+ #[display(fmt = "torrent category bytes are not a valid UTF8 string.")]
+ CategoryIsNotValidUtf8,
+
+ #[display(fmt = "torrent tags array bytes are not a valid UTF8 string array.")]
+ TagsArrayIsNotValidUtf8,
+
+ #[display(fmt = "torrent tags string is not a valid JSON.")]
+ TagsArrayIsNotValidJson,
+
+ #[display(fmt = "upload torrent request header `content-type` should be `application/x-bittorrent`.")]
+ InvalidFileType,
+
+ #[display(fmt = "cannot write uploaded torrent bytes (binary file) into memory.")]
+ CannotWriteChunkFromUploadedBinary,
+
+ #[display(fmt = "cannot read a chunk of bytes from the uploaded torrent file. Review the request body size limit.")]
+ CannotReadChunkFromUploadedBinary,
+
+ #[display(fmt = "provided path param for Info-hash is not valid.")]
+ InvalidInfoHashParam,
+}
+
+impl IntoResponse for Request {
+ fn into_response(self) -> Response {
+ json_error_response(
+ http_status_code_for_handler_error(&self),
+ &ErrorResponseData { error: self.to_string() },
+ )
+ }
+}
+
+#[must_use]
+pub fn http_status_code_for_handler_error(error: &Request) -> StatusCode {
+ #[allow(clippy::match_same_arms)]
+ match error {
+ Request::TitleIsNotValidUtf8 => StatusCode::BAD_REQUEST,
+ Request::DescriptionIsNotValidUtf8 => StatusCode::BAD_REQUEST,
+ Request::CategoryIsNotValidUtf8 => StatusCode::BAD_REQUEST,
+ Request::TagsArrayIsNotValidUtf8 => StatusCode::BAD_REQUEST,
+ Request::TagsArrayIsNotValidJson => StatusCode::BAD_REQUEST,
+ Request::InvalidFileType => StatusCode::BAD_REQUEST,
+ Request::InvalidInfoHashParam => StatusCode::BAD_REQUEST,
+ // Internal errors processing the request
+ Request::CannotWriteChunkFromUploadedBinary => StatusCode::INTERNAL_SERVER_ERROR,
+ Request::CannotReadChunkFromUploadedBinary => StatusCode::INTERNAL_SERVER_ERROR,
+ }
+}
diff --git a/src/web/api/v1/contexts/torrent/forms.rs b/src/web/api/v1/contexts/torrent/forms.rs new file mode 100644 index 00000000..fbe9f12c --- /dev/null +++ b/src/web/api/v1/contexts/torrent/forms.rs @@ -0,0 +1,12 @@
+use serde::Deserialize;
+
+use crate::models::category::CategoryId;
+use crate::models::torrent_tag::TagId;
+
+#[derive(Debug, Deserialize)]
+pub struct UpdateTorrentInfoForm {
+ pub title: Option<String>,
+ pub description: Option<String>,
+ pub category: Option<CategoryId>,
+ pub tags: Option<Vec<TagId>>,
+}
diff --git a/src/web/api/v1/contexts/torrent/handlers.rs b/src/web/api/v1/contexts/torrent/handlers.rs new file mode 100644 index 00000000..bab51443 --- /dev/null +++ b/src/web/api/v1/contexts/torrent/handlers.rs @@ -0,0 +1,403 @@
+//! API handlers for the [`torrent`](crate::web::api::v1::contexts::torrent) API
+//! context.
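+//!
+//! Handlers that take an info-hash path parameter lowercase it before parsing
+//! (see `InfoHashParam` below). A minimal sketch of the parsing step, assuming
+//! a valid 40-character hex string:
+//!
+//! ```rust,ignore
+//! use std::str::FromStr;
+//!
+//! use crate::models::info_hash::InfoHash;
+//!
+//! // Invalid strings make `from_str` return an error, which the handlers map
+//! // to `errors::Request::InvalidInfoHashParam`.
+//! let info_hash = InfoHash::from_str("5452869be36f9f3350ccee6b4544e7e76caaadab")
+//!     .expect("a 40-character hex string should parse");
+//! ```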
+use std::io::{Cursor, Write};
+use std::str::FromStr;
+use std::sync::Arc;
+
+use axum::extract::{self, Multipart, Path, Query, State};
+use axum::response::{IntoResponse, Redirect, Response};
+use axum::Json;
+use log::debug;
+use serde::Deserialize;
+use uuid::Uuid;
+
+use super::errors;
+use super::forms::UpdateTorrentInfoForm;
+use super::responses::{new_torrent_response, torrent_file_response};
+use crate::common::AppData;
+use crate::errors::ServiceError;
+use crate::models::info_hash::InfoHash;
+use crate::models::torrent_tag::TagId;
+use crate::services::torrent::{AddTorrentRequest, ListingRequest};
+use crate::services::torrent_file::generate_random_torrent;
+use crate::utils::parse_torrent;
+use crate::web::api::v1::auth::get_optional_logged_in_user;
+use crate::web::api::v1::extractors::bearer_token::Extract;
+use crate::web::api::v1::responses::OkResponseData;
+use crate::web::api::v1::routes::API_VERSION_URL_PREFIX;
+
+/// Upload a new torrent file to the Index
+///
+/// # Errors
+///
+/// This function will return an error if
+///
+/// - The user does not have permission to upload the torrent file.
+/// - The submitted torrent file is not a valid torrent file.
+#[allow(clippy::unused_async)]
+pub async fn upload_torrent_handler(
+ State(app_data): State<Arc<AppData>>,
+ Extract(maybe_bearer_token): Extract,
+ multipart: Multipart,
+) -> Response {
+ let user_id = match app_data.auth.get_user_id_from_bearer_token(&maybe_bearer_token).await {
+ Ok(user_id) => user_id,
+ Err(error) => return error.into_response(),
+ };
+
+ let add_torrent_form = match build_add_torrent_request_from_payload(multipart).await {
+ Ok(torrent_request) => torrent_request,
+ Err(error) => return error.into_response(),
+ };
+
+ match app_data.torrent_service.add_torrent(add_torrent_form, user_id).await {
+ Ok(response) => new_torrent_response(&response).into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+#[derive(Deserialize)]
+pub struct InfoHashParam(pub String);
+
+impl InfoHashParam {
+ fn lowercase(&self) -> String {
+ self.0.to_lowercase()
+ }
+}
+
+/// Returns the torrent as a byte stream `application/x-bittorrent`.
+///
+/// # Errors
+///
+/// Returns an error if the torrent info-hash is invalid.
+#[allow(clippy::unused_async)]
+pub async fn download_torrent_handler(
+ State(app_data): State<Arc<AppData>>,
+ Extract(maybe_bearer_token): Extract,
+ Path(info_hash): Path<InfoHashParam>,
+) -> Response {
+ let Ok(info_hash) = InfoHash::from_str(&info_hash.lowercase()) else {
+ return errors::Request::InvalidInfoHashParam.into_response();
+ };
+
+ debug!("Downloading torrent: {:?}", info_hash.to_hex_string());
+
+ if let Some(redirect_response) = redirect_to_download_url_using_canonical_info_hash_if_needed(&app_data, &info_hash).await {
+ debug!("Redirecting to URL with canonical info-hash");
+ redirect_response
+ } else {
+ let opt_user_id = match get_optional_logged_in_user(maybe_bearer_token, app_data.clone()).await {
+ Ok(opt_user_id) => opt_user_id,
+ Err(error) => return error.into_response(),
+ };
+
+ let torrent = match app_data.torrent_service.get_torrent(&info_hash, opt_user_id).await {
+ Ok(torrent) => torrent,
+ Err(error) => return error.into_response(),
+ };
+
+ let Ok(bytes) = parse_torrent::encode_torrent(&torrent) else {
+ return ServiceError::InternalServerError.into_response();
+ };
+
+ torrent_file_response(
+ bytes,
+ &format!("{}.torrent", torrent.info.name),
+ &torrent.canonical_info_hash_hex(),
+ )
+ }
+}
+
+async fn redirect_to_download_url_using_canonical_info_hash_if_needed(
+ app_data: &Arc<AppData>,
+ info_hash: &InfoHash,
+) -> Option<Response> {
+ match app_data
+ .torrent_info_hash_repository
+ .find_canonical_info_hash_for(info_hash)
+ .await
+ {
+ Ok(Some(canonical_info_hash)) => {
+ if canonical_info_hash != *info_hash {
+ return Some(
+ Redirect::temporary(&format!(
+ "/{API_VERSION_URL_PREFIX}/torrent/download/{}",
+ canonical_info_hash.to_hex_string()
+ ))
+ .into_response(),
+ );
+ }
+ None
+ }
+ Ok(None) => None,
+ Err(error) => Some(error.into_response()),
+ }
+}
+
+/// It returns a list of torrents matching the search criteria.
+///
+/// E.g.: `/torrents?categories=music,other,movie&search=bunny&sort=size_DESC`
+///
+/// # Errors
+///
+/// It returns an error if the database query fails.
+#[allow(clippy::unused_async)]
+pub async fn get_torrents_handler(State(app_data): State<Arc<AppData>>, Query(criteria): Query<ListingRequest>) -> Response {
+ match app_data.torrent_service.generate_torrent_info_listing(&criteria).await {
+ Ok(torrents_response) => Json(OkResponseData { data: torrents_response }).into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+/// Get a torrent from the Index
+///
+/// # Errors
+///
+/// This function returns an error if:
+///
+/// - The info-hash is not valid.
+/// - Or there was a problem getting the torrent info from the database.
+#[allow(clippy::unused_async)]
+pub async fn get_torrent_info_handler(
+ State(app_data): State<Arc<AppData>>,
+ Extract(maybe_bearer_token): Extract,
+ Path(info_hash): Path<InfoHashParam>,
+) -> Response {
+ let Ok(info_hash) = InfoHash::from_str(&info_hash.lowercase()) else {
+ return errors::Request::InvalidInfoHashParam.into_response();
+ };
+
+ if let Some(redirect_response) = redirect_to_details_url_using_canonical_info_hash_if_needed(&app_data, &info_hash).await {
+ redirect_response
+ } else {
+ let opt_user_id = match get_optional_logged_in_user(maybe_bearer_token, app_data.clone()).await {
+ Ok(opt_user_id) => opt_user_id,
+ Err(error) => return error.into_response(),
+ };
+
+ match app_data.torrent_service.get_torrent_info(&info_hash, opt_user_id).await {
+ Ok(torrent_response) => Json(OkResponseData { data: torrent_response }).into_response(),
+ Err(error) => error.into_response(),
+ }
+ }
+}
+
+async fn redirect_to_details_url_using_canonical_info_hash_if_needed(
+ app_data: &Arc<AppData>,
+ info_hash: &InfoHash,
+) -> Option<Response> {
+ match app_data
+ .torrent_info_hash_repository
+ .find_canonical_info_hash_for(info_hash)
+ .await
+ {
+ Ok(Some(canonical_info_hash)) => {
+ if canonical_info_hash != *info_hash {
+ return Some(
+ Redirect::temporary(&format!(
+ "/{API_VERSION_URL_PREFIX}/torrent/{}",
+ canonical_info_hash.to_hex_string()
+ ))
+ .into_response(),
+ );
+ }
+ None
+ }
+ Ok(None) => None,
+ Err(error) => Some(error.into_response()),
+ }
+}
+
+/// Update the torrent info
+///
+/// # Errors
+///
+/// This function will return an error if unable to:
+///
+/// * Get the user id from the request.
+/// * Get the torrent info-hash from the request.
+/// * Update the torrent info.
+#[allow(clippy::unused_async)]
+pub async fn update_torrent_info_handler(
+ State(app_data): State<Arc<AppData>>,
+ Extract(maybe_bearer_token): Extract,
+ Path(info_hash): Path<InfoHashParam>,
+ extract::Json(update_torrent_info_form): extract::Json<UpdateTorrentInfoForm>,
+) -> Response {
+ let Ok(info_hash) = InfoHash::from_str(&info_hash.lowercase()) else {
+ return errors::Request::InvalidInfoHashParam.into_response();
+ };
+
+ let user_id = match app_data.auth.get_user_id_from_bearer_token(&maybe_bearer_token).await {
+ Ok(user_id) => user_id,
+ Err(error) => return error.into_response(),
+ };
+
+ match app_data
+ .torrent_service
+ .update_torrent_info(
+ &info_hash,
+ &update_torrent_info_form.title,
+ &update_torrent_info_form.description,
+ &update_torrent_info_form.category,
+ &update_torrent_info_form.tags,
+ &user_id,
+ )
+ .await
+ {
+ Ok(torrent_response) => Json(OkResponseData { data: torrent_response }).into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+/// Delete a torrent.
+///
+/// # Errors
+///
+/// This function will return an error if unable to:
+///
+/// * Get the user ID from the request.
+/// * Get the torrent info-hash from the request.
+/// * Delete the torrent.
+#[allow(clippy::unused_async)]
+pub async fn delete_torrent_handler(
+ State(app_data): State<Arc<AppData>>,
+ Extract(maybe_bearer_token): Extract,
+ Path(info_hash): Path<InfoHashParam>,
+) -> Response {
+ let Ok(info_hash) = InfoHash::from_str(&info_hash.lowercase()) else {
+ return errors::Request::InvalidInfoHashParam.into_response();
+ };
+
+ let user_id = match app_data.auth.get_user_id_from_bearer_token(&maybe_bearer_token).await {
+ Ok(user_id) => user_id,
+ Err(error) => return error.into_response(),
+ };
+
+ match app_data.torrent_service.delete_torrent(&info_hash, &user_id).await {
+ Ok(deleted_torrent_response) => Json(OkResponseData {
+ data: deleted_torrent_response,
+ })
+ .into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+#[derive(Debug, Deserialize)]
+pub struct UuidParam(pub String);
+
+impl UuidParam {
+ fn value(&self) -> String {
+ self.0.to_lowercase()
+ }
+}
+
+/// Returns a random torrent file as a byte stream `application/x-bittorrent`.
+///
+/// This is useful for testing purposes.
+///
+/// # Errors
+///
+/// Returns an error if the provided UUID is invalid.
+#[allow(clippy::unused_async)]
+pub async fn create_random_torrent_handler(State(_app_data): State<Arc<AppData>>, Path(uuid): Path<UuidParam>) -> Response {
+ let Ok(uuid) = Uuid::parse_str(&uuid.value()) else {
+ return errors::Request::InvalidInfoHashParam.into_response();
+ };
+
+ let torrent = generate_random_torrent(uuid);
+
+ let Ok(bytes) = parse_torrent::encode_torrent(&torrent) else {
+ return ServiceError::InternalServerError.into_response();
+ };
+
+ torrent_file_response(
+ bytes,
+ &format!("{}.torrent", torrent.info.name),
+ &torrent.canonical_info_hash_hex(),
+ )
+}
+
+/// Extracts the [`AddTorrentRequest`] from the multipart form payload.
+///
+/// # Errors
+///
+/// It will return an error if:
+///
+/// - The text fields do not contain a valid UTF8 string.
+/// - The torrent file data is not valid because:
+/// - The content type is not `application/x-bittorrent`.
+/// - The multipart content is invalid.
+/// - The torrent file pieces key has a length that is not a multiple of 20.
+/// - The binary data cannot be decoded as a torrent file.
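+///
+/// A client-side sketch of the multipart payload this function expects, using
+/// the `reqwest` crate (with its `multipart` feature). `reqwest` is an
+/// assumption for illustration only, not a dependency of this crate, and the
+/// field values are hypothetical:
+///
+/// ```rust,ignore
+/// use reqwest::multipart::{Form, Part};
+///
+/// let torrent_bytes: Vec<u8> = std::fs::read("example.torrent")?;
+///
+/// let form = Form::new()
+///     .text("title", "MandelbrotSet")
+///     .text("description", "MandelbrotSet image")
+///     .text("category", "software")
+///     // A JSON array of tag IDs; it is parsed below with `serde_json`.
+///     .text("tags", "[1,2]")
+///     .part(
+///         "torrent",
+///         Part::bytes(torrent_bytes)
+///             .file_name("example.torrent")
+///             // Any other content type is rejected with `InvalidFileType`.
+///             .mime_str("application/x-bittorrent")?,
+///     );
+/// ```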
+async fn build_add_torrent_request_from_payload(mut payload: Multipart) -> Result<AddTorrentRequest, errors::Request> {
+ let torrent_buffer = vec![0u8];
+ let mut torrent_cursor = Cursor::new(torrent_buffer);
+
+ let mut title = String::new();
+ let mut description = String::new();
+ let mut category = String::new();
+ let mut tags: Vec<TagId> = vec![];
+
+ while let Some(mut field) = payload.next_field().await.unwrap() {
+ let name = field.name().unwrap();
+
+ match name {
+ "title" => {
+ let data = field.bytes().await.unwrap();
+ if data.is_empty() {
+ continue;
+ };
+ title = String::from_utf8(data.to_vec()).map_err(|_| errors::Request::TitleIsNotValidUtf8)?;
+ }
+ "description" => {
+ let data = field.bytes().await.unwrap();
+ if data.is_empty() {
+ continue;
+ };
+ description = String::from_utf8(data.to_vec()).map_err(|_| errors::Request::DescriptionIsNotValidUtf8)?;
+ }
+ "category" => {
+ let data = field.bytes().await.unwrap();
+ if data.is_empty() {
+ continue;
+ };
+ category = String::from_utf8(data.to_vec()).map_err(|_| errors::Request::CategoryIsNotValidUtf8)?;
+ }
+ "tags" => {
+ let data = field.bytes().await.unwrap();
+ if data.is_empty() {
+ continue;
+ };
+ let string_data = String::from_utf8(data.to_vec()).map_err(|_| errors::Request::TagsArrayIsNotValidUtf8)?;
+ tags = serde_json::from_str(&string_data).map_err(|_| errors::Request::TagsArrayIsNotValidJson)?;
+ }
+ "torrent" => {
+ let content_type = field.content_type().unwrap();
+
+ if content_type != "application/x-bittorrent" {
+ return Err(errors::Request::InvalidFileType);
+ }
+
+ while let Some(chunk) = field
+ .chunk()
+ .await
+ .map_err(|_| (errors::Request::CannotReadChunkFromUploadedBinary))?
+ {
+ torrent_cursor
+ .write_all(&chunk)
+ .map_err(|_| (errors::Request::CannotWriteChunkFromUploadedBinary))?;
+ }
+ }
+ _ => {}
+ }
+ }
+
+ Ok(AddTorrentRequest {
+ title,
+ description,
+ category_name: category,
+ tags,
+ torrent_buffer: torrent_cursor.into_inner(),
+ })
+}
diff --git a/src/web/api/v1/contexts/torrent/mod.rs b/src/web/api/v1/contexts/torrent/mod.rs new file mode 100644 index 00000000..6b047940 --- /dev/null +++ b/src/web/api/v1/contexts/torrent/mod.rs @@ -0,0 +1,363 @@
+//! API context: `torrent`.
+//!
+//! This API context is responsible for handling all torrent related requests.
+//!
+//! # Original and canonical infohashes
+//!
+//! Uploaded torrents can contain non-standard fields in the `info` dictionary.
+//!
+//! For example, this is a torrent file in JSON format with a "custom" field.
+//!
+//! ```json
+//! {
+//! "info": {
+//! "length": 602515,
+//! "name": "mandelbrot_set_01",
+//! "piece length": 32768,
+//!
"pieces": "8A 88 32 BE ED 05 5F AA C4 AF 4A 90 4B 9A BF 0D EC 83 42 1C 73 39 05 B8 D6 20 2C 1B D1 8A 53 28 1F B5 D4 23 0A 23 C8 DB AC C4 E6 6B 16 12 08 C7 A4 AD 64 45 70 ED 91 0D F1 38 E7 DF 0C 1A D0 C9 23 27 7C D1 F9 D4 E5 A1 5F F5 E5 A0 E4 9E FB B1 43 F5 4B AD 0E D4 9D CB 49 F7 E6 7B BA 30 5F AF F9 88 56 FB 45 9A B4 95 92 3E 2C 7F DA A6 D3 82 E7 63 A3 BB 4B 28 F3 57 C7 CB 7D 8C 06 E3 46 AB D7 E8 8E 8A 8C 9F C7 E6 C5 C5 64 82 ED 47 BB 2A F1 B7 3F A5 3C 5B 9C AF 43 EC 2A E1 08 68 9A 49 C8 BF 1B 07 AD BE E9 2D 7E BE 9C 18 7F 4C A1 97 0E 54 3A 18 94 0E 60 8D 5C 69 0E 41 46 0D 3C 9A 37 F6 81 62 4F 95 C0 73 92 CA 9A D5 A9 89 AC 8B 85 12 53 0B FB E2 96 26 3E 26 A6 5B 70 53 48 65 F3 6C 27 0F 6B BD 1C EE EB 1A 9D 5F 77 A8 D8 AF D8 14 82 4A E0 B4 62 BC F1 A5 F5 F2 C7 60 F8 38 C8 5B 0B A9 07 DD 86 FA C0 7B F0 26 D7 D1 9A 42 C3 1F 9F B9 59 83 10 62 41 E9 06 3C 6D A1 19 75 01 57 25 9E B7 FE DF 91 04 D4 51 4B 6D 44 02 8D 31 8E 84 26 95 0F 30 31 F0 2C 16 39 BD 53 1D CF D3 5E 3E 41 A9 1E 14 3F 73 24 AC 5E 9E FC 4D C5 70 45 0F 45 8B 9B 52 E6 D0 26 47 8F 43 08 9E 2A 7C C5 92 D5 86 36 FE 48 E9 B8 86 84 92 23 49 5B EE C4 31 B2 1D 10 75 8E 4C 07 84 8F", +//! "custom": "custom03" +//! } +//! } +//! ``` +//! +//! When you upload a torrent file with non-standards fields in the `info` +//! dictionary, the Index removes those non-standard fields. That generates a +//! new info-hash because all fields in the `info` key are used to calculate it. +//! +//! The Index stores the original info-hash. The resulting info-hash after +//! removing the non-standard fields is called "canonical" infohash. The Index +//! stores the relationship between the original info-hash and the canonical one. +//! +//! # Endpoints +//! +//! - [Upload new torrent](#upload-new-torrent) +//! - [Download a torrent](#download-a-torrent) +//! - [Get torrent info](#get-torrent-info) +//! - [List torrent infos](#list-torrent-infos) +//! - [Update torrent info](#update-torrent-info) +//! - [Delete a torrent](#delete-a-torrent) +//! +//! # Upload new torrent +//! +//! `POST /v1/torrent/upload` +//! +//! It uploads a new torrent to the index. +//! +//! **Example request** +//! +//! ```bash +//! curl \ +//! --header "Content-Type: multipart/form-data" \ +//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \ +//! --request POST \ +//! --form "title=MandelbrotSet" \ +//! --form "description=MandelbrotSet image" \ +//! --form "category=software" \ +//! --form "torrent=@docs/media/mandelbrot_2048x2048_infohash_v1.png.torrent;type=application/x-bittorrent" \ +//! "http://127.0.0.1:3001/v1/torrent/upload" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "data": { +//! "torrent_id": 2, +//! "info_hash": "5452869BE36F9F3350CCEE6B4544E7E76CAAADAB" +//! } +//! } +//! ``` +//! +//! **NOTICE**: Info-hashes will be lowercase hex-encoded strings in the future +//! and the [internal database ID could be removed from the response](https://github.com/torrust/torrust-index/discussions/149). +//! +//! **Resource** +//! +//! Refer to the [`TorrustIndex`](crate::models::response::NewTorrentResponse) +//! struct for more information about the response attributes. +//! +//! # Download a torrent +//! +//! `GET /v1/torrent/download/{info_hash}` +//! +//! It downloads a new torrent file from the the index. +//! +//! **Example request** +//! +//! ```bash +//! curl \ +//! 
--header "Content-Type: application/x-bittorrent" \ +//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \ +//! --output mandelbrot_2048x2048_infohash_v1.png.torrent \ +//! "http://127.0.0.1:3001/v1/torrent/download/5452869BE36F9F3350CCEE6B4544E7E76CAAADAB" +//! ``` +//! +//! **Example response** `200` +//! +//! The response is a torrent file `mandelbrot_2048x2048_infohash_v1.png.torrent`. +//! +//! ```text +//! $ imdl torrent show mandelbrot_2048x2048_infohash_v1.png.torrent +//! Name mandelbrot_2048x2048.png +//! Info Hash 1a326de411f96bc15622c62358130f0824f561e1 +//! Torrent Size 492 bytes +//! Content Size 168.17 KiB +//! Private no +//! Tracker udp://localhost:6969/eklijkg8901K2Ol6O6CttT1xlUzO4bFD +//! Announce List Tier 1: udp://localhost:6969/eklijkg8901K2Ol6O6CttT1xlUzO4bFD +//! Tier 2: udp://localhost:6969 +//! Piece Size 16 KiB +//! Piece Count 11 +//! File Count 1 +//! Files mandelbrot_2048x2048.png +//! ``` +//! +//! # Get torrent info +//! +//! `GET /v1/torrents/{info_hash}` +//! +//! It returns the torrent info. +//! +//! **Path parameters** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `info_hash` | `InfoHash` | The info-hash | Yes | `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` +//! +//! **Example request** +//! +//! ```bash +//! curl \ +//! --header "Content-Type: application/json" \ +//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \ +//! --request GET \ +//! "http://127.0.0.1:3001/v1/torrent/5452869BE36F9F3350CCEE6B4544E7E76CAAADAB" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "data": { +//! "torrent_id": 2, +//! "uploader": "indexadmin", +//! "info_hash": "5452869BE36F9F3350CCEE6B4544E7E76CAAADAB", +//! "title": "MandelbrotSet", +//! "description": "MandelbrotSet image", +//! "category": { +//! "category_id": 5, +//! "name": "software", +//! "num_torrents": 1 +//! }, +//! "upload_date": "2023-05-25 11:33:02", +//! "file_size": 172204, +//! "seeders": 0, +//! "leechers": 0, +//! "files": [ +//! { +//! "path": [ +//! "mandelbrot_2048x2048.png" +//! ], +//! "length": 172204, +//! "md5sum": null +//! } +//! ], +//! "trackers": [ +//! "udp://localhost:6969/eklijkg8901K2Ol6O6CttT1xlUzO4bFD", +//! "udp://localhost:6969" +//! ], +//! "magnet_link": "magnet:?xt=urn:btih:5452869BE36F9F3350CCEE6B4544E7E76CAAADAB&dn=MandelbrotSet&tr=udp%3A%2F%2Flocalhost%3A6969%2Feklijkg8901K2Ol6O6CttT1xlUzO4bFD&tr=udp%3A%2F%2Flocalhost%3A6969" +//! } +//! } +//! ``` +//! +//! **Resource** +//! +//! Refer to the [`TorrentResponse`](crate::models::response::TorrentResponse) +//! struct for more information about the response attributes. +//! +//! # List torrent infos +//! +//! `GET /v1/torrents` +//! +//! It returns the torrent info for multiple torrents +//! +//! **Get parameters** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `search` | `Option` | A text to search | No | `MandelbrotSet` +//! `categories` | `Option` | A coma-separated category list | No | `music,other,movie,software` +//! +//! **Pagination GET parameters** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! 
`page_size` | `Option` | Number of torrents per page | No | `10`
+//! `page` | `Option` | Page offset, starting at `0` | No | `0`
+//!
+//! Pagination default values can be configured in the server configuration file.
+//!
+//! ```toml
+//! [api]
+//! default_torrent_page_size = 10
+//! max_torrent_page_size = 30
+//! ```
+//!
+//! **Sorting GET parameters**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//! `sort` | `Option<Sorting>` | [Sorting](crate::databases::database::Sorting) options | No | `size_DESC`
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//! --header "Content-Type: application/json" \
+//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \
+//! --request GET \
+//! "http://127.0.0.1:3001/v1/torrents"
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! ```json
+//! {
+//! "data": {
+//! "total": 1,
+//! "results": [
+//! {
+//! "torrent_id": 2,
+//! "uploader": "indexadmin",
+//! "info_hash": "5452869BE36F9F3350CCEE6B4544E7E76CAAADAB",
+//! "title": "MandelbrotSet",
+//! "description": "MandelbrotSet image",
+//! "category_id": 5,
+//! "date_uploaded": "2023-05-25 11:33:02",
+//! "file_size": 172204,
+//! "seeders": 0,
+//! "leechers": 0
+//! }
+//! ]
+//! }
+//! }
+//! ```
+//!
+//! **Resource**
+//!
+//! Refer to the [`TorrentsResponse`](crate::models::response::TorrentsResponse)
+//! struct for more information about the response attributes.
+//!
+//! # Update torrent info
+//!
+//! `PUT /v1/torrent/{info_hash}`
+//!
+//! It updates the torrent info.
+//!
+//! **Path parameters**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//! `info_hash` | `InfoHash` | The info-hash | Yes | `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB`
+//!
+//! **Post parameters**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//! `title` | `Option<String>` | The torrent title | No | `MandelbrotSet`
+//! `description` | `Option<String>` | The torrent description | No | `MandelbrotSet image`
+//! `category` | `Option<CategoryId>` | The torrent category ID | No | `1`
+//! `tags` | `Option<Vec<TagId>>` | The tag ID list | No | `[1,2,3]`
+//!
+//!
+//! Refer to the [`UpdateTorrentInfoForm`](crate::web::api::v1::contexts::torrent::forms::UpdateTorrentInfoForm)
+//! struct for more information about the request attributes.
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//! --header "Content-Type: application/json" \
+//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \
+//! --request PUT \
+//! --data '{"title":"MandelbrotSet", "description":"MandelbrotSet image"}' \
+//! "http://127.0.0.1:3001/v1/torrent/5452869BE36F9F3350CCEE6B4544E7E76CAAADAB"
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! ```json
+//! {
+//! "data": {
+//! "torrent_id": 2,
+//! "uploader": "indexadmin",
+//! "info_hash": "5452869BE36F9F3350CCEE6B4544E7E76CAAADAB",
+//! "title": "MandelbrotSet",
+//! "description": "MandelbrotSet image",
+//! "category": {
+//! "category_id": 5,
+//! "name": "software",
+//! "num_torrents": 1
+//! },
+//! "upload_date": "2023-05-25 11:33:02",
+//! "file_size": 172204,
+//! "seeders": 0,
+//! "leechers": 0,
+//!
"files": [], +//! "trackers": [], +//! "magnet_link": "" +//! } +//! } +//! ``` +//! +//! **NOTICE**: the response is not the same as the `GET /v1/torrents/{info_hash}`. +//! It does not contain the `files`, `trackers` and `magnet_link` attributes. +//! +//! **Resource** +//! +//! Refer to the [`TorrentResponse`](crate::models::response::TorrentResponse) +//! struct for more information about the response attributes. +//! +//! # Delete a torrent +//! +//! `DELETE /v1/torrents/{info_hash}` +//! +//! It deletes a torrent. +//! +//! **Path parameters** +//! +//! Name | Type | Description | Required | Example +//! ---|---|---|---|--- +//! `info_hash` | `InfoHash` | The info-hash | Yes | `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` +//! +//! **Example request** +//! +//! ```bash +//! curl \ +//! --header "Content-Type: application/json" \ +//! --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \ +//! --request DELETE \ +//! "http://127.0.0.1:3001/v1/torrent/5452869BE36F9F3350CCEE6B4544E7E76CAAADAB" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "data": { +//! "torrent_id": 2, +//! "info_hash": "5452869BE36F9F3350CCEE6B4544E7E76CAAADAB", +//! } +//! } +//! ``` +//! +//! **Resource** +//! +//! Refer to the [`DeletedTorrentResponse`](crate::models::response::DeletedTorrentResponse) +//! struct for more information about the response attributes. +pub mod errors; +pub mod forms; +pub mod handlers; +pub mod responses; +pub mod routes; diff --git a/src/web/api/v1/contexts/torrent/responses.rs b/src/web/api/v1/contexts/torrent/responses.rs new file mode 100644 index 00000000..9873b420 --- /dev/null +++ b/src/web/api/v1/contexts/torrent/responses.rs @@ -0,0 +1,58 @@ +use axum::response::{IntoResponse, Response}; +use axum::Json; +use hyper::{header, HeaderMap, StatusCode}; +use serde::{Deserialize, Serialize}; + +use crate::models::torrent::TorrentId; +use crate::services::torrent::AddTorrentResponse; +use crate::web::api::v1::responses::OkResponseData; + +#[allow(clippy::module_name_repetitions)] +#[derive(Serialize, Deserialize, Debug)] +pub struct NewTorrentResponseData { + pub torrent_id: TorrentId, + pub info_hash: String, + pub original_info_hash: String, +} + +/// Response after successfully uploading a new torrent. +pub fn new_torrent_response(add_torrent_response: &AddTorrentResponse) -> Json> { + Json(OkResponseData { + data: NewTorrentResponseData { + torrent_id: add_torrent_response.torrent_id, + info_hash: add_torrent_response.info_hash.clone(), + original_info_hash: add_torrent_response.original_info_hash.clone(), + }, + }) +} + +/// Builds the binary response for a torrent file. +/// +/// # Panics +/// +/// Panics if the filename is not a valid header value for the `content-disposition` +/// header. 
+#[must_use]
+pub fn torrent_file_response(bytes: Vec<u8>, filename: &str, info_hash: &str) -> Response {
+ let mut headers = HeaderMap::new();
+ headers.insert(
+ header::CONTENT_TYPE,
+ "application/x-bittorrent"
+ .parse()
+ .expect("HTTP content type header should be valid"),
+ );
+ headers.insert(
+ header::CONTENT_DISPOSITION,
+ format!("attachment; filename={filename}")
+ .parse()
+ .expect("Torrent filename should be a valid header value for the content disposition header"),
+ );
+ headers.insert(
+ "x-torrust-torrent-infohash",
+ info_hash
+ .parse()
+ .expect("Torrent infohash should be a valid header value"),
+ );
+
+ (StatusCode::OK, headers, bytes).into_response()
+}
diff --git a/src/web/api/v1/contexts/torrent/routes.rs b/src/web/api/v1/contexts/torrent/routes.rs new file mode 100644 index 00000000..1c529599 --- /dev/null +++ b/src/web/api/v1/contexts/torrent/routes.rs @@ -0,0 +1,38 @@
+//! API routes for the [`torrent`](crate::web::api::v1::contexts::torrent) API context.
+//!
+//! Refer to the [API endpoint documentation](crate::web::api::v1::contexts::torrent).
+use std::sync::Arc;
+
+use axum::routing::{delete, get, post, put};
+use axum::Router;
+
+use super::handlers::{
+ create_random_torrent_handler, delete_torrent_handler, download_torrent_handler, get_torrent_info_handler,
+ get_torrents_handler, update_torrent_info_handler, upload_torrent_handler,
+};
+use crate::common::AppData;
+
+/// Routes for the [`torrent`](crate::web::api::v1::contexts::torrent) API context for single resources.
+pub fn router_for_single_resources(app_data: Arc<AppData>) -> Router {
+ let torrent_info_routes = Router::new()
+ .route("/", get(get_torrent_info_handler).with_state(app_data.clone()))
+ .route("/", put(update_torrent_info_handler).with_state(app_data.clone()))
+ .route("/", delete(delete_torrent_handler).with_state(app_data.clone()));
+
+ Router::new()
+ .route("/upload", post(upload_torrent_handler).with_state(app_data.clone()))
+ .route(
+ "/download/:info_hash",
+ get(download_torrent_handler).with_state(app_data.clone()),
+ )
+ .route(
+ "/meta-info/random/:uuid",
+ get(create_random_torrent_handler).with_state(app_data),
+ )
+ .nest("/:info_hash", torrent_info_routes)
+}
+
+/// Routes for the [`torrent`](crate::web::api::v1::contexts::torrent) API context for multiple resources.
+pub fn router_for_multiple_resources(app_data: Arc<AppData>) -> Router {
+ Router::new().route("/", get(get_torrents_handler).with_state(app_data))
+}
diff --git a/src/web/api/v1/contexts/user/forms.rs b/src/web/api/v1/contexts/user/forms.rs new file mode 100644 index 00000000..6365c4da --- /dev/null +++ b/src/web/api/v1/contexts/user/forms.rs @@ -0,0 +1,24 @@
+use serde::{Deserialize, Serialize};
+
+// Registration
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct RegistrationForm {
+ pub username: String,
+ pub email: Option<String>,
+ pub password: String,
+ pub confirm_password: String,
+}
+
+// Authentication
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct LoginForm {
+ pub login: String, // todo: rename to `username`
+ pub password: String,
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct JsonWebToken {
+ pub token: String, // todo: rename to `encoded` or `value`
+}
diff --git a/src/web/api/v1/contexts/user/handlers.rs b/src/web/api/v1/contexts/user/handlers.rs new file mode 100644 index 00000000..170bd073 --- /dev/null +++ b/src/web/api/v1/contexts/user/handlers.rs @@ -0,0 +1,161 @@
+//!
API handlers for the [`user`](crate::web::api::v1::contexts::user) API
+//! context.
+use std::sync::Arc;
+
+use axum::extract::{self, Host, Path, State};
+use axum::response::{IntoResponse, Response};
+use axum::Json;
+use serde::Deserialize;
+
+use super::forms::{JsonWebToken, LoginForm, RegistrationForm};
+use super::responses::{self};
+use crate::common::AppData;
+use crate::web::api::v1::extractors::bearer_token::Extract;
+use crate::web::api::v1::responses::OkResponseData;
+
+// Registration
+
+/// It handles the registration of a new user.
+///
+/// # Errors
+///
+/// It returns an error if the user could not be registered.
+#[allow(clippy::unused_async)]
+pub async fn registration_handler(
+ State(app_data): State<Arc<AppData>>,
+ Host(host_from_header): Host,
+ extract::Json(registration_form): extract::Json<RegistrationForm>,
+) -> Response {
+ let api_base_url = app_data
+ .cfg
+ .get_api_base_url()
+ .await
+ .unwrap_or(api_base_url(&host_from_header));
+
+ match app_data
+ .registration_service
+ .register_user(&registration_form, &api_base_url)
+ .await
+ {
+ Ok(user_id) => responses::added_user(user_id).into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+#[derive(Deserialize)]
+pub struct TokenParam(String);
+
+/// It handles the verification of the email verification token.
+#[allow(clippy::unused_async)]
+pub async fn email_verification_handler(State(app_data): State<Arc<AppData>>, Path(token): Path<TokenParam>) -> String {
+ match app_data.registration_service.verify_email(&token.0).await {
+ Ok(_) => String::from("Email verified, you can close this page."),
+ Err(error) => error.to_string(),
+ }
+}
+
+// Authentication
+
+/// It handles the user login.
+///
+/// # Errors
+///
+/// It returns an error if:
+///
+/// - The supplied login credentials are not valid.
+#[allow(clippy::unused_async)]
+pub async fn login_handler(
+ State(app_data): State<Arc<AppData>>,
+ extract::Json(login_form): extract::Json<LoginForm>,
+) -> Response {
+ match app_data
+ .authentication_service
+ .login(&login_form.login, &login_form.password)
+ .await
+ {
+ Ok((token, user_compact)) => responses::logged_in_user(token, user_compact).into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+/// It verifies a supplied JWT.
+///
+/// # Errors
+///
+/// It returns an error if:
+///
+/// - Unable to verify the supplied payload as a valid JWT.
+/// - The JWT is invalid or expired.
+#[allow(clippy::unused_async)]
+pub async fn verify_token_handler(
+ State(app_data): State<Arc<AppData>>,
+ extract::Json(token): extract::Json<JsonWebToken>,
+) -> Response {
+ match app_data.json_web_token.verify(&token.token).await {
+ Ok(_) => axum::Json(OkResponseData {
+ data: "Token is valid.".to_string(),
+ })
+ .into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+#[derive(Deserialize)]
+pub struct UsernameParam(pub String);
+
+/// It renews the JWT.
+///
+/// # Errors
+///
+/// It returns an error if:
+///
+/// - Unable to parse the supplied payload as a valid JWT.
+/// - The JWT is invalid or expired.
+#[allow(clippy::unused_async)]
+pub async fn renew_token_handler(
+ State(app_data): State<Arc<AppData>>,
+ extract::Json(token): extract::Json<JsonWebToken>,
+) -> Response {
+ match app_data.authentication_service.renew_token(&token.token).await {
+ Ok((token, user_compact)) => responses::renewed_token(token, user_compact).into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+/// It bans a user from the index.
+///
+/// # Errors
+///
+/// This function will return an error if:
+///
+/// - The JWT provided by the banning authority was not valid.
+/// - The user could not be banned: it does not exist, etcetera.
+#[allow(clippy::unused_async)]
+pub async fn ban_handler(
+ State(app_data): State<Arc<AppData>>,
+ Path(to_be_banned_username): Path<UsernameParam>,
+ Extract(maybe_bearer_token): Extract,
+) -> Response {
+ // todo: add reason and `date_expiry` parameters to request
+
+ let user_id = match app_data.auth.get_user_id_from_bearer_token(&maybe_bearer_token).await {
+ Ok(user_id) => user_id,
+ Err(error) => return error.into_response(),
+ };
+
+ match app_data.ban_service.ban_user(&to_be_banned_username.0, &user_id).await {
+ Ok(()) => Json(OkResponseData {
+ data: format!("Banned user: {}", to_be_banned_username.0),
+ })
+ .into_response(),
+ Err(error) => error.into_response(),
+ }
+}
+
+/// It returns the base API URL without the port. For example: `http://localhost`.
+fn api_base_url(host: &str) -> String {
+ // HTTPS is not supported yet.
+ // See https://github.com/torrust/torrust-index/issues/131
+ format!("http://{host}")
+}
diff --git a/src/web/api/v1/contexts/user/mod.rs b/src/web/api/v1/contexts/user/mod.rs new file mode 100644 index 00000000..4f4682e0 --- /dev/null +++ b/src/web/api/v1/contexts/user/mod.rs @@ -0,0 +1,249 @@
+//! API context: `user`.
+//!
+//! This API context is responsible for handling:
+//!
+//! - User registration
+//! - User authentication
+//! - User ban
+//!
+//! For more information about the API authentication, refer to the [`auth`](crate::web::api::v1::auth)
+//! module.
+//!
+//! # Endpoints
+//!
+//! Registration:
+//!
+//! - [Registration](#registration)
+//! - [Email verification](#email-verification)
+//!
+//! Authentication:
+//!
+//! - [Login](#login)
+//! - [Token verification](#token-verification)
+//! - [Token renewal](#token-renewal)
+//!
+//! User ban:
+//!
+//! - [Ban a user](#ban-a-user)
+//!
+//! # Registration
+//!
+//! `POST /v1/user/register`
+//!
+//! It registers a new user.
+//!
+//! **Post parameters**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//! `username` | `String` | The username | Yes | `indexadmin`
+//! `email` | `Option<String>` | The user's email | No | `indexadmin@torrust.com`
+//! `password` | `String` | The password | Yes | `BenoitMandelbrot1924`
+//! `confirm_password` | `String` | Same password again | Yes | `BenoitMandelbrot1924`
+//!
+//! **NOTICE**: Email could be optional, depending on the configuration.
+//!
+//! ```toml
+//! [auth]
+//! email_on_signup = "Optional"
+//! min_password_length = 6
+//! max_password_length = 64
+//! ```
+//!
+//! Refer to the [`RegistrationForm`](crate::web::api::v1::contexts::user::forms::RegistrationForm)
+//! struct for more information about the registration form.
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//! --header "Content-Type: application/json" \
+//! --request POST \
+//! --data '{"username":"indexadmin","email":"indexadmin@torrust.com","password":"BenoitMandelbrot1924","confirm_password":"BenoitMandelbrot1924"}' \
+//! http://127.0.0.1:3001/v1/user/register
+//! ```
+//!
+//! For more information about the registration process, refer to the [`auth`](crate::web::api::v1::auth)
+//! module.
+//!
+//! # Email verification
+//!
+//! `GET /v1/user/email/verify/{token}`
+//!
+//! If email on signup is enabled, the user will receive an email with a
+//! verification link. The link will contain a token that can be used to verify
+//! the email address.
+//!
+//! This endpoint will verify the email address and update the user's email
+//! verification status.
+//!
+//! For more information about the registration process, refer to the [`auth`](crate::web::api::v1::auth)
+//! module.
+//!
+//! # Email verification
+//!
+//! `GET /v1/user/email/verify/{token}`
+//!
+//! If email on signup is enabled, the user will receive an email with a
+//! verification link. The link will contain a token that can be used to verify
+//! the email address.
+//!
+//! This endpoint will verify the email address and update the user's email
+//! verification status. It also shows a text page with the result of the
+//! verification.
+//!
+//! **Example response** `200`
+//!
+//! ```text
+//! Email verified, you can close this page.
+//! ```
+//!
+//! # Login
+//!
+//! `POST /v1/user/login`
+//!
+//! It logs in a user.
+//!
+//! **Post parameters**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//! `login` | `String` | The username | Yes | `indexadmin`
+//! `password` | `String` | The password | Yes | `BenoitMandelbrot1924`
+//!
+//! Refer to the [`LoginForm`](crate::web::api::v1::contexts::user::forms::LoginForm)
+//! struct for more information about the login form.
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//!   --header "Content-Type: application/json" \
+//!   --request POST \
+//!   --data '{"login":"indexadmin","password":"BenoitMandelbrot1924"}' \
+//!   http://127.0.0.1:3001/v1/user/login
+//! ```
+//!
+//! For more information about the login process, refer to the [`auth`](crate::web::api::v1::auth)
+//! module.
+//!
+//! # Token verification
+//!
+//! `POST /v1/user/token/verify`
+//!
+//! It verifies a JWT.
+//!
+//! **Post parameters**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//! `token` | `String` | The token you want to verify | Yes | `eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI`
+//!
+//! Refer to the [`JsonWebToken`](crate::web::api::v1::contexts::user::forms::JsonWebToken)
+//! struct for more information about the token.
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//!   --header "Content-Type: application/json" \
+//!   --request POST \
+//!   --data '{"token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI"}' \
+//!   http://127.0.0.1:3001/v1/user/token/verify
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! For a valid token:
+//!
+//! ```json
+//! {
+//!   "data":"Token is valid."
+//! }
+//! ```
+//!
+//! And for an invalid token:
+//!
+//! ```json
+//! {
+//!   "data":"Token invalid."
+//! }
+//! ```
+//!
+//! # Token renewal
+//!
+//! `POST /v1/user/token/renew`
+//!
+//! It renews a user's token.
+//!
+//! The token must be valid and not expired, and it is only renewed if it
+//! expires within the next week.
+//!
+//! **Post parameters**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//! `token` | `String` | The current valid token | Yes | `eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI`
+//!
+//! Refer to the [`JsonWebToken`](crate::web::api::v1::contexts::user::forms::JsonWebToken)
+//! struct for more information about the token.
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//!   --header "Content-Type: application/json" \
+//!   --request POST \
+//!   --data '{"token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI"}' \
+//!   http://127.0.0.1:3001/v1/user/token/renew
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! If the token is still valid for more than one week, you will get the same
+//! token back:
+//!
+//! ```json
+//! {
+//!   "data": {
+//!     "token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI",
+//!     "username": "indexadmin",
+//!     "admin": true
+//!   }
+//! }
+//! ```
+//!
+//! If a new token is generated, the response has the same shape but contains
+//! the new token.
+//!
+//! **WARNING**: The token is associated with the user's role. The application
+//! does not support changing the role of a user. If you change the user's role
+//! manually in the database, previously issued tokens will still be valid but
+//! will keep the old role. That should only be done for testing purposes.
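+//!
+//! The renewal window rule, sketched in Rust (an editor's illustration of the
+//! rule stated above, not the actual implementation, which lives in the
+//! authentication service):
+//!
+//! ```rust,ignore
+//! const ONE_WEEK_IN_SECONDS: u64 = 7 * 24 * 60 * 60;
+//!
+//! // A new token is issued only when the current one expires within a week;
+//! // otherwise the same, still-valid token is returned.
+//! fn should_renew(expiration_timestamp: u64, now: u64) -> bool {
+//!     expiration_timestamp.saturating_sub(now) < ONE_WEEK_IN_SECONDS
+//! }
+//! ```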
+//!
+//! # Ban a user
+//!
+//! `DELETE /v1/user/ban/{user}`
+//!
+//! It adds a user to the banned user list.
+//!
+//! Only administrators can ban other users.
+//!
+//! **Path parameters**
+//!
+//! Name | Type | Description | Required | Example
+//! ---|---|---|---|---
+//! `user` | `String` | username | Yes | `indexadmin`
+//!
+//! **Example request**
+//!
+//! ```bash
+//! curl \
+//!   --header "Content-Type: application/json" \
+//!   --header "Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyIjp7InVzZXJfaWQiOjEsInVzZXJuYW1lIjoiaW5kZXhhZG1pbiIsImFkbWluaXN0cmF0b3IiOnRydWV9LCJleHAiOjE2ODYyMTU3ODh9.4k8ty27DiWwOk4WVcYEhIrAndhpXMRWnLZ3i_HlJnvI" \
+//!   --request DELETE \
+//!   http://127.0.0.1:3001/v1/user/ban/indexadmin
+//! ```
+//!
+//! **Example response** `200`
+//!
+//! If the user was successfully banned:
+//!
+//! ```json
+//! {
+//!   "data": "Banned user: indexadmin"
+//! }
+//! ```
+//!
+//! **WARNING**: The admin can ban themselves. If they do, they will not be able
+//! to unban themselves; the only way is to manually remove the user from the
+//! banned user list in the database.
+pub mod forms;
+pub mod handlers;
+pub mod responses;
+pub mod routes;
diff --git a/src/web/api/v1/contexts/user/responses.rs b/src/web/api/v1/contexts/user/responses.rs
new file mode 100644
index 00000000..17a06bdf
--- /dev/null
+++ b/src/web/api/v1/contexts/user/responses.rs
@@ -0,0 +1,50 @@
+use axum::Json;
+use serde::{Deserialize, Serialize};
+
+use crate::models::user::{UserCompact, UserId};
+use crate::web::api::v1::responses::OkResponseData;
+
+// Registration
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct NewUser {
+    pub user_id: UserId,
+}
+
+/// Response after successfully creating a new user.
+pub fn added_user(user_id: i64) -> Json<OkResponseData<NewUser>> {
+    Json(OkResponseData {
+        data: NewUser { user_id },
+    })
+}
+
+// Authentication
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct TokenResponse {
+    pub token: String,
+    pub username: String,
+    pub admin: bool,
+}
+
+/// Response after successfully logging in a user.
+pub fn logged_in_user(token: String, user_compact: UserCompact) -> Json<OkResponseData<TokenResponse>> {
+    Json(OkResponseData {
+        data: TokenResponse {
+            token,
+            username: user_compact.username,
+            admin: user_compact.administrator,
+        },
+    })
+}
+
+/// Response after successfully renewing a JWT.
+pub fn renewed_token(token: String, user_compact: UserCompact) -> Json<OkResponseData<TokenResponse>> {
+    Json(OkResponseData {
+        data: TokenResponse {
+            token,
+            username: user_compact.username,
+            admin: user_compact.administrator,
+        },
+    })
+}
diff --git a/src/web/api/v1/contexts/user/routes.rs b/src/web/api/v1/contexts/user/routes.rs
new file mode 100644
index 00000000..b2a21624
--- /dev/null
+++ b/src/web/api/v1/contexts/user/routes.rs
@@ -0,0 +1,34 @@
+//! API routes for the [`user`](crate::web::api::v1::contexts::user) API context.
+//!
+//! Refer to the [API endpoint documentation](crate::web::api::v1::contexts::user).
+use std::sync::Arc;
+
+use axum::routing::{delete, get, post};
+use axum::Router;
+
+use super::handlers::{
+    ban_handler, email_verification_handler, login_handler, registration_handler, renew_token_handler, verify_token_handler,
+};
+use crate::common::AppData;
+
+/// Routes for the [`user`](crate::web::api::v1::contexts::user) API context.
+pub fn router(app_data: Arc<AppData>) -> Router {
+    Router::new()
+        // Registration
+        .route("/register", post(registration_handler).with_state(app_data.clone()))
+        // code-review: should this be part of the REST API?
+        // - This endpoint should only verify the email.
+        // - There should be an independent service (web app) serving the email verification page.
+        //   The web app can use this endpoint to verify the email and render the page accordingly.
+        .route(
+            "/email/verify/:token",
+            get(email_verification_handler).with_state(app_data.clone()),
+        )
+        // Authentication
+        .route("/login", post(login_handler).with_state(app_data.clone()))
+        .route("/token/verify", post(verify_token_handler).with_state(app_data.clone()))
+        .route("/token/renew", post(renew_token_handler).with_state(app_data.clone()))
+        // User ban
+        // code-review: shouldn't this be a POST method? We add the user to the blacklist. We do not delete the user.
+        .route("/ban/:user", delete(ban_handler).with_state(app_data))
+}
diff --git a/src/web/api/v1/extractors/bearer_token.rs b/src/web/api/v1/extractors/bearer_token.rs
new file mode 100644
index 00000000..1c9b5be9
--- /dev/null
+++ b/src/web/api/v1/extractors/bearer_token.rs
@@ -0,0 +1,36 @@
+use axum::async_trait;
+use axum::extract::FromRequestParts;
+use axum::http::request::Parts;
+use axum::response::Response;
+use serde::Deserialize;
+
+use crate::web::api::v1::auth::parse_token;
+
+pub struct Extract(pub Option<BearerToken>);
+
+#[derive(Deserialize, Debug)]
+pub struct BearerToken(String);
+
+impl BearerToken {
+    #[must_use]
+    pub fn value(&self) -> String {
+        self.0.clone()
+    }
+}
+
+#[async_trait]
+impl<S> FromRequestParts<S> for Extract
+where
+    S: Send + Sync,
+{
+    type Rejection = Response;
+
+    async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
+        let header = parts.headers.get("Authorization");
+
+        match header {
+            Some(header_value) => Ok(Extract(Some(BearerToken(parse_token(header_value))))),
+            None => Ok(Extract(None)),
+        }
+    }
+}
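+
+// Usage sketch: a handler opts in to the optional bearer token by adding the
+// extractor to its signature, as `ban_handler` does in
+// `contexts/user/handlers.rs`:
+//
+//     pub async fn ban_handler(
+//         State(app_data): State<Arc<AppData>>,
+//         Path(to_be_banned_username): Path<UsernameParam>,
+//         Extract(maybe_bearer_token): Extract,
+//     ) -> Response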
diff --git a/src/web/api/v1/extractors/mod.rs b/src/web/api/v1/extractors/mod.rs
new file mode 100644
index 00000000..36d737ca
--- /dev/null
+++ b/src/web/api/v1/extractors/mod.rs
@@ -0,0 +1 @@
+pub mod bearer_token;
diff --git a/src/web/api/v1/mod.rs b/src/web/api/v1/mod.rs
new file mode 100644
index 00000000..cd9c4128
--- /dev/null
+++ b/src/web/api/v1/mod.rs
@@ -0,0 +1,11 @@
+//! The Torrust Index API version `v1`.
+//!
+//! The API is organized in contexts.
+//!
+//! Refer to the [`contexts`] module for more
+//! information.
+pub mod auth;
+pub mod contexts;
+pub mod extractors;
+pub mod responses;
+pub mod routes;
diff --git a/src/web/api/v1/responses.rs b/src/web/api/v1/responses.rs
new file mode 100644
index 00000000..397862df
--- /dev/null
+++ b/src/web/api/v1/responses.rs
@@ -0,0 +1,50 @@
+//! Generic responses for the API.
+use axum::response::{IntoResponse, Response};
+use hyper::{header, StatusCode};
+use serde::{Deserialize, Serialize};
+use serde_json::json;
+
+use crate::databases::database;
+use crate::errors::{http_status_code_for_service_error, map_database_error_to_service_error, ServiceError};
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct OkResponseData<T> {
+    pub data: T,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct ErrorResponseData {
+    pub error: String,
+}
+
+impl IntoResponse for ServiceError {
+    fn into_response(self) -> Response {
+        json_error_response(
+            http_status_code_for_service_error(&self),
+            &ErrorResponseData { error: self.to_string() },
+        )
+    }
+}
+
+impl IntoResponse for database::Error {
+    fn into_response(self) -> Response {
+        let service_error = map_database_error_to_service_error(&self);
+
+        json_error_response(
+            http_status_code_for_service_error(&service_error),
+            &ErrorResponseData {
+                error: service_error.to_string(),
+            },
+        )
+    }
+}
+
+#[must_use]
+pub fn json_error_response(status_code: StatusCode, error_response_data: &ErrorResponseData) -> Response {
+    (
+        status_code,
+        [(header::CONTENT_TYPE, "application/json")],
+        json!(error_response_data).to_string(),
+    )
+        .into_response()
+}
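+
+// A minimal test of the JSON shapes the wrappers above serialize to. The field
+// names come from the structs; the sample values are illustrative.
+#[cfg(test)]
+mod tests {
+    use super::{ErrorResponseData, OkResponseData};
+
+    #[test]
+    fn ok_and_error_wrappers_serialize_to_the_documented_shapes() {
+        let ok = OkResponseData {
+            data: "Token is valid.".to_string(),
+        };
+        assert_eq!(serde_json::to_string(&ok).unwrap(), r#"{"data":"Token is valid."}"#);
+
+        let error = ErrorResponseData {
+            error: "Token invalid.".to_string(),
+        };
+        assert_eq!(serde_json::to_string(&error).unwrap(), r#"{"error":"Token invalid."}"#);
+    }
+}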
diff --git a/src/web/api/v1/routes.rs b/src/web/api/v1/routes.rs
new file mode 100644
index 00000000..44098f4c
--- /dev/null
+++ b/src/web/api/v1/routes.rs
@@ -0,0 +1,47 @@
+//! Route initialization for the v1 API.
+use std::env;
+use std::sync::Arc;
+
+use axum::extract::DefaultBodyLimit;
+use axum::routing::get;
+use axum::Router;
+use tower_http::compression::CompressionLayer;
+use tower_http::cors::CorsLayer;
+
+use super::contexts::about::handlers::about_page_handler;
+use super::contexts::{about, category, proxy, settings, tag, torrent, user};
+use crate::bootstrap::config::ENV_VAR_CORS_PERMISSIVE;
+use crate::common::AppData;
+
+pub const API_VERSION_URL_PREFIX: &str = "v1";
+
+/// Add all API routes to the router.
+#[allow(clippy::needless_pass_by_value)]
+pub fn router(app_data: Arc<AppData>) -> Router {
+    // code-review: should we use plural for the resource prefix: `users`, `categories`, `tags`?
+    // See: https://stackoverflow.com/questions/6845772/should-i-use-singular-or-plural-name-convention-for-rest-resources
+
+    let v1_api_routes = Router::new()
+        .route("/", get(about_page_handler).with_state(app_data.clone()))
+        .nest("/user", user::routes::router(app_data.clone()))
+        .nest("/about", about::routes::router(app_data.clone()))
+        .nest("/category", category::routes::router(app_data.clone()))
+        .nest("/tag", tag::routes::router_for_single_resources(app_data.clone()))
+        .nest("/tags", tag::routes::router_for_multiple_resources(app_data.clone()))
+        .nest("/settings", settings::routes::router(app_data.clone()))
+        .nest("/torrent", torrent::routes::router_for_single_resources(app_data.clone()))
+        .nest("/torrents", torrent::routes::router_for_multiple_resources(app_data.clone()))
+        .nest("/proxy", proxy::routes::router(app_data.clone()));
+
+    let router = Router::new()
+        .route("/", get(about_page_handler).with_state(app_data))
+        .nest(&format!("/{API_VERSION_URL_PREFIX}"), v1_api_routes);
+
+    let router = if env::var(ENV_VAR_CORS_PERMISSIVE).is_ok() {
+        router.layer(CorsLayer::permissive())
+    } else {
+        router
+    };
+
+    router.layer(DefaultBodyLimit::max(10_485_760)).layer(CompressionLayer::new())
+}
diff --git a/src/web/mod.rs b/src/web/mod.rs
new file mode 100644
index 00000000..9007e88f
--- /dev/null
+++ b/src/web/mod.rs
@@ -0,0 +1,6 @@
+//! The Torrust Index API.
+//!
+//! Currently, the API has only one version: `v1`.
+//!
+//! Refer to the [`v1`](crate::web::api::v1) module for more information.
+pub mod api;
diff --git a/templates/verify.html b/templates/verify.html
index 27c4b8fb..c54bbfb4 100644
--- a/templates/verify.html
+++ b/templates/verify.html
@@ -1,23 +1,176 @@
[The body of this verify.html hunk is not recoverable: the HTML markup was stripped when the document was extracted. What survives of the content: the old template greeted the user with "Welcome to Torrust, <%= username %>.", and the redesigned email template keeps the same copy with Handlebars-style placeholders: a "Torrust" header, "Welcome to Torrust, {{ username }}.", "Please click the confirmation link below to verify your account.", a "Verify account" button link, "Or copy and paste the following link into your browser:", and a "© Copyright Torrust 2021" footer.]
\ No newline at end of file
diff --git a/tests/README.md b/tests/README.md
deleted file mode 100644
index 81e9d18a..00000000
--- a/tests/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-### Running Tests
-Torrust requires Docker to run different database systems for testing. [install docker here](https://docs.docker.com/engine/).
-
-Start the databases with `docker-compose` before running tests:
-
-    $ docker-compose up
-
-Run all tests using:
-
-    $ cargo test
diff --git a/tests/common/asserts.rs b/tests/common/asserts.rs
new file mode 100644
index 00000000..50a60e26
--- /dev/null
+++ b/tests/common/asserts.rs
@@ -0,0 +1,50 @@
+// Text responses
+
+use torrust_index::web::api::v1::responses::ErrorResponseData;
+
+use super::responses::TextResponse;
+
+pub fn assert_response_title(response: &TextResponse, title: &str) {
+    let title_element = format!("<title>{title}</title>");
+
+    assert!(
+        response.body.contains(&title_element),
+        ":\n response does not contain the title element: `\"{title_element}\"`."
+    );
+}
+
+pub fn assert_text_ok(response: &TextResponse) {
+    assert_eq!(response.status, 200);
+    if let Some(content_type) = &response.content_type {
+        assert_eq!(content_type, "text/html; charset=utf-8");
+    }
+}
+
+pub fn _assert_text_bad_request(response: &TextResponse) {
+    assert_eq!(response.status, 400);
+    if let Some(content_type) = &response.content_type {
+        assert_eq!(content_type, "text/plain; charset=utf-8");
+    }
+}
+
+// JSON responses
+
+pub fn assert_json_ok_response(response: &TextResponse) {
+    if let Some(content_type) = &response.content_type {
+        assert_eq!(content_type, "application/json");
+    }
+    assert_eq!(response.status, 200);
+}
+
+pub fn assert_json_error_response(response: &TextResponse, error: &str) {
+    let error_response_data: ErrorResponseData = serde_json::from_str(&response.body)
+        .unwrap_or_else(|_| panic!("response {:#?} should be an ErrorResponseData", response.body));
+
+    assert_eq!(error_response_data.error, error);
+    if let Some(content_type) = &response.content_type {
+        assert_eq!(content_type, "application/json");
+    }
+    assert_eq!(response.status, 400);
+}
diff --git a/tests/common/client.rs b/tests/common/client.rs
new file mode 100644
index 00000000..97216bfa
--- /dev/null
+++ b/tests/common/client.rs
@@ -0,0 +1,330 @@
+use reqwest::multipart;
+use serde::Serialize;
+
+use super::connection_info::ConnectionInfo;
+use super::contexts::category::forms::{AddCategoryForm, DeleteCategoryForm};
+use super::contexts::tag::forms::{AddTagForm, DeleteTagForm};
+use super::contexts::torrent::forms::UpdateTorrentFrom;
+use super::contexts::torrent::requests::InfoHash;
+use super::contexts::user::forms::{LoginForm, RegistrationForm, TokenRenewalForm, TokenVerificationForm, Username};
+use super::http::{Query, ReqwestQuery};
+use super::responses::{self, BinaryResponse, TextResponse};
+
+/// API Client
+pub struct Client {
+    http_client: Http,
+}
+
+impl Client {
+    // todo: forms in POST requests can be passed by reference.
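+
+    // Usage sketch (the bind address and token are illustrative):
+    //
+    //     let client = Client::authenticated("127.0.0.1:3001", "token");
+    //     let response = client.about().await;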
+
+    fn base_path() -> String {
+        "/v1".to_string()
+    }
+
+    pub fn unauthenticated(bind_address: &str) -> Self {
+        Self::new(ConnectionInfo::anonymous(bind_address, &Self::base_path()))
+    }
+
+    pub fn authenticated(bind_address: &str, token: &str) -> Self {
+        Self::new(ConnectionInfo::new(bind_address, &Self::base_path(), token))
+    }
+
+    pub fn new(connection_info: ConnectionInfo) -> Self {
+        Self {
+            http_client: Http::new(connection_info),
+        }
+    }
+
+    /// It checks if the server is running.
+    pub async fn server_is_running(&self) -> bool {
+        let response = self.http_client.inner_get("").await;
+        response.is_ok()
+    }
+
+    // Context: about
+
+    pub async fn about(&self) -> TextResponse {
+        self.http_client.get("/about", Query::empty()).await
+    }
+
+    pub async fn license(&self) -> TextResponse {
+        self.http_client.get("/about/license", Query::empty()).await
+    }
+
+    // Context: category
+
+    pub async fn get_categories(&self) -> TextResponse {
+        self.http_client.get("/category", Query::empty()).await
+    }
+
+    pub async fn add_category(&self, add_category_form: AddCategoryForm) -> TextResponse {
+        self.http_client.post("/category", &add_category_form).await
+    }
+
+    pub async fn delete_category(&self, delete_category_form: DeleteCategoryForm) -> TextResponse {
+        self.http_client.delete_with_body("/category", &delete_category_form).await
+    }
+
+    // Context: tag
+
+    pub async fn get_tags(&self) -> TextResponse {
+        // code-review: some endpoints are using plural
+        // (for instance, `get_categories`) and some singular.
+        self.http_client.get("/tags", Query::empty()).await
+    }
+
+    pub async fn add_tag(&self, add_tag_form: AddTagForm) -> TextResponse {
+        self.http_client.post("/tag", &add_tag_form).await
+    }
+
+    pub async fn delete_tag(&self, delete_tag_form: DeleteTagForm) -> TextResponse {
+        self.http_client.delete_with_body("/tag", &delete_tag_form).await
+    }
+
+    // Context: root
+
+    pub async fn root(&self) -> TextResponse {
+        self.http_client.get("", Query::empty()).await
+    }
+
+    // Context: settings
+
+    pub async fn get_public_settings(&self) -> TextResponse {
+        self.http_client.get("/settings/public", Query::empty()).await
+    }
+
+    pub async fn get_site_name(&self) -> TextResponse {
+        self.http_client.get("/settings/name", Query::empty()).await
+    }
+
+    pub async fn get_settings(&self) -> TextResponse {
+        self.http_client.get("/settings", Query::empty()).await
+    }
+
+    // Context: torrent
+
+    pub async fn get_torrents(&self, params: Query) -> TextResponse {
+        self.http_client.get("/torrents", params).await
+    }
+
+    pub async fn get_torrent(&self, info_hash: &InfoHash) -> TextResponse {
+        self.http_client.get(&format!("/torrent/{info_hash}"), Query::empty()).await
+    }
+
+    pub async fn delete_torrent(&self, info_hash: &InfoHash) -> TextResponse {
+        self.http_client.delete(&format!("/torrent/{info_hash}")).await
+    }
+
+    pub async fn update_torrent(&self, info_hash: &InfoHash, update_torrent_form: UpdateTorrentFrom) -> TextResponse {
+        self.http_client
+            .put(&format!("/torrent/{info_hash}"), &update_torrent_form)
+            .await
+    }
+
+    pub async fn upload_torrent(&self, form: multipart::Form) -> TextResponse {
+        self.http_client.post_multipart("/torrent/upload", form).await
+    }
+
+    pub async fn download_torrent(&self, info_hash: &InfoHash) -> responses::BinaryResponse {
+        self.http_client
+            .get_binary(&format!("/torrent/download/{info_hash}"), Query::empty())
+            .await
+    }
+
+    // Context: user
+
+    pub async fn register_user(&self, registration_form: RegistrationForm) -> TextResponse {
+        self.http_client.post("/user/register", &registration_form).await
+    }
+
+    pub async fn login_user(&self, login_form: LoginForm) -> TextResponse {
+        self.http_client.post("/user/login", &login_form).await
+    }
+
+    pub async fn verify_token(&self, token_verification_form: TokenVerificationForm) -> TextResponse {
+        self.http_client.post("/user/token/verify", &token_verification_form).await
+    }
+
+    pub async fn renew_token(&self, token_renewal_form: TokenRenewalForm) -> TextResponse {
+        self.http_client.post("/user/token/renew", &token_renewal_form).await
+    }
+
+    pub async fn ban_user(&self, username: Username) -> TextResponse {
+        self.http_client.delete(&format!("/user/ban/{}", &username.value)).await
+    }
+}
+
+/// Generic HTTP Client
+struct Http {
+    connection_info: ConnectionInfo,
+}
+
+impl Http {
+    pub fn new(connection_info: ConnectionInfo) -> Self {
+        Self { connection_info }
+    }
+
+    pub async fn get(&self, path: &str, params: Query) -> TextResponse {
+        let response = match &self.connection_info.token {
+            Some(token) => reqwest::Client::builder()
+                .build()
+                .unwrap()
+                .get(self.base_url(path).clone())
+                .query(&ReqwestQuery::from(params))
+                .bearer_auth(token)
+                .send()
+                .await
+                .unwrap(),
+            None => reqwest::Client::builder()
+                .build()
+                .unwrap()
+                .get(self.base_url(path).clone())
+                .query(&ReqwestQuery::from(params))
+                .send()
+                .await
+                .unwrap(),
+        };
+        TextResponse::from(response).await
+    }
+
+    pub async fn get_binary(&self, path: &str, params: Query) -> BinaryResponse {
+        let response = match &self.connection_info.token {
+            Some(token) => reqwest::Client::builder()
+                .build()
+                .unwrap()
+                .get(self.base_url(path).clone())
+                .query(&ReqwestQuery::from(params))
+                .bearer_auth(token)
+                .send()
+                .await
+                .unwrap(),
+            None => reqwest::Client::builder()
+                .build()
+                .unwrap()
+                .get(self.base_url(path).clone())
+                .query(&ReqwestQuery::from(params))
+                .send()
+                .await
+                .unwrap(),
+        };
+        // todo: If the response is a JSON, it returns the JSON body in a byte
+        // array. This is not the expected behavior.
+        //  - Rename BinaryResponse to BinaryTorrentResponse
+        //  - Return an error if the response is not a bittorrent file
+        BinaryResponse::from(response).await
+    }
+
+    pub async fn inner_get(&self, path: &str) -> Result<reqwest::Response, reqwest::Error> {
+        reqwest::Client::builder()
+            .build()
+            .unwrap()
+            .get(self.base_url(path).clone())
+            .send()
+            .await
+    }
+
+    pub async fn post<T: Serialize>(&self, path: &str, form: &T) -> TextResponse {
+        let response = match &self.connection_info.token {
+            Some(token) => reqwest::Client::new()
+                .post(self.base_url(path).clone())
+                .bearer_auth(token)
+                .json(&form)
+                .send()
+                .await
+                .unwrap(),
+            None => reqwest::Client::new()
+                .post(self.base_url(path).clone())
+                .json(&form)
+                .send()
+                .await
+                .unwrap(),
+        };
+        TextResponse::from(response).await
+    }
+
+    pub async fn post_multipart(&self, path: &str, form: multipart::Form) -> TextResponse {
+        let response = match &self.connection_info.token {
+            Some(token) => reqwest::Client::builder()
+                .build()
+                .unwrap()
+                .post(self.base_url(path).clone())
+                .multipart(form)
+                .bearer_auth(token)
+                .send()
+                .await
+                .expect("failed to send multipart request with token"),
+            None => reqwest::Client::builder()
+                .build()
+                .unwrap()
+                .post(self.base_url(path).clone())
+                .multipart(form)
+                .send()
+                .await
+                .expect("failed to send multipart request without token"),
+        };
+        TextResponse::from(response).await
+    }
+
+    pub async fn put<T: Serialize>(&self, path: &str, form: &T) -> TextResponse {
+        let response = match &self.connection_info.token {
+            Some(token) => reqwest::Client::new()
+                .put(self.base_url(path).clone())
+                .bearer_auth(token)
+                .json(&form)
+                .send()
+                .await
+                .unwrap(),
+            None => reqwest::Client::new()
+                .put(self.base_url(path).clone())
+                .json(&form)
+                .send()
+                .await
+                .unwrap(),
+        };
+        TextResponse::from(response).await
+    }
+
+    async fn delete(&self, path: &str) -> TextResponse {
+        let response = match &self.connection_info.token {
+            Some(token) => reqwest::Client::new()
+                .delete(self.base_url(path).clone())
+                .bearer_auth(token)
+                .send()
+                .await
+                .unwrap(),
+            None => reqwest::Client::new()
+                .delete(self.base_url(path).clone())
+                .send()
+                .await
+                .unwrap(),
+        };
+        TextResponse::from(response).await
+    }
+
+    async fn delete_with_body<T: Serialize>(&self, path: &str, form: &T) -> TextResponse {
+        let response = match &self.connection_info.token {
+            Some(token) => reqwest::Client::new()
+                .delete(self.base_url(path).clone())
+                .bearer_auth(token)
+                .json(&form)
+                .send()
+                .await
+                .unwrap(),
+            None => reqwest::Client::new()
+                .delete(self.base_url(path).clone())
+                .json(&form)
+                .send()
+                .await
+                .unwrap(),
+        };
+        TextResponse::from(response).await
+    }
+
+    fn base_url(&self, path: &str) -> String {
+        format!(
+            "http://{}{}{path}",
+            &self.connection_info.bind_address, &self.connection_info.base_path
+        )
+    }
+}
diff --git a/tests/common/connection_info.rs b/tests/common/connection_info.rs
new file mode 100644
index 00000000..3f6c919e
--- /dev/null
+++ b/tests/common/connection_info.rs
@@ -0,0 +1,24 @@
+#[derive(Clone)]
+pub struct ConnectionInfo {
+    pub bind_address: String,
+    pub base_path: String,
+    pub token: Option<String>,
+}
+
+impl ConnectionInfo {
+    pub fn new(bind_address: &str, base_path: &str, token: &str) -> Self {
+        Self {
+            bind_address: bind_address.to_string(),
+            base_path: base_path.to_string(),
+            token: Some(token.to_string()),
+        }
+    }
+
+    pub fn anonymous(bind_address: &str, base_path: &str) -> Self {
+        Self {
+            bind_address: bind_address.to_string(),
+            base_path: base_path.to_string(),
+            token: None,
+        }
+    }
+}
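+
+// Usage sketch (the bind address is illustrative):
+//
+//     let anonymous = ConnectionInfo::anonymous("127.0.0.1:3001", "/v1");
+//     let authenticated = ConnectionInfo::new("127.0.0.1:3001", "/v1", "token");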
diff --git a/tests/common/contexts/about/mod.rs b/tests/common/contexts/about/mod.rs
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/tests/common/contexts/about/mod.rs
@@ -0,0 +1 @@
+
diff --git a/tests/common/contexts/category/asserts.rs b/tests/common/contexts/category/asserts.rs
new file mode 100644
index 00000000..2568531d
--- /dev/null
+++ b/tests/common/contexts/category/asserts.rs
@@ -0,0 +1,21 @@
+use crate::common::asserts::assert_json_ok_response;
+use crate::common::contexts::category::responses::{AddedCategoryResponse, DeletedCategoryResponse};
+use crate::common::responses::TextResponse;
+
+pub fn assert_added_category_response(response: &TextResponse, category_name: &str) {
+    let added_category_response: AddedCategoryResponse = serde_json::from_str(&response.body)
+        .unwrap_or_else(|_| panic!("response {:#?} should be an AddedCategoryResponse", response.body));
+
+    assert_eq!(added_category_response.data, category_name);
+
+    assert_json_ok_response(response);
+}
+
+pub fn assert_deleted_category_response(response: &TextResponse, category_name: &str) {
+    let deleted_category_response: DeletedCategoryResponse = serde_json::from_str(&response.body)
+        .unwrap_or_else(|_| panic!("response {:#?} should be a DeletedCategoryResponse", response.body));
+
+    assert_eq!(deleted_category_response.data, category_name);
+
+    assert_json_ok_response(response);
+}
diff --git a/tests/common/contexts/category/fixtures.rs b/tests/common/contexts/category/fixtures.rs
new file mode 100644
index 00000000..05ec2c26
--- /dev/null
+++ b/tests/common/contexts/category/fixtures.rs
@@ -0,0 +1,18 @@
+use rand::Rng;
+
+pub fn software_category_name() -> String {
+    "software".to_string()
+}
+
+pub fn software_predefined_category_id() -> i64 {
+    5
+}
+
+pub fn random_category_name() -> String {
+    format!("category name {}", random_id())
+}
+
+fn random_id() -> u64 {
+    let mut rng = rand::thread_rng();
+    rng.gen_range(0..1_000_000)
+}
diff --git a/tests/common/contexts/category/forms.rs b/tests/common/contexts/category/forms.rs
new file mode 100644
index 00000000..ea9cf429
--- /dev/null
+++ b/tests/common/contexts/category/forms.rs
@@ -0,0 +1,9 @@
+use serde::Serialize;
+
+#[derive(Serialize)]
+pub struct AddCategoryForm {
+    pub name: String,
+    pub icon: Option<String>,
+}
+
+pub type DeleteCategoryForm = AddCategoryForm;
diff --git a/tests/common/contexts/category/mod.rs b/tests/common/contexts/category/mod.rs
new file mode 100644
index 00000000..cfe5dd24
--- /dev/null
+++ b/tests/common/contexts/category/mod.rs
@@ -0,0 +1,4 @@
+pub mod asserts;
+pub mod fixtures;
+pub mod forms;
+pub mod responses;
diff --git a/tests/common/contexts/category/responses.rs b/tests/common/contexts/category/responses.rs
new file mode 100644
index 00000000..cbadb631
--- /dev/null
+++ b/tests/common/contexts/category/responses.rs
@@ -0,0 +1,23 @@
+use serde::Deserialize;
+
+#[derive(Deserialize)]
+pub struct AddedCategoryResponse {
+    pub data: String,
+}
+
+#[derive(Deserialize)]
+pub struct DeletedCategoryResponse {
+    pub data: String,
+}
+
+#[derive(Deserialize, Debug)]
+pub struct ListResponse {
+    pub data: Vec<ListItem>,
+}
+
+#[derive(Deserialize, Debug, PartialEq)]
+pub struct ListItem {
+    pub category_id: i64,
+    pub name: String,
+    pub num_torrents: i64,
+}
diff --git a/tests/common/contexts/mod.rs b/tests/common/contexts/mod.rs
new file mode 100644
index 00000000..fa791e5f
--- /dev/null
+++ b/tests/common/contexts/mod.rs
@@ -0,0 +1,7 @@
+pub mod about;
+pub mod category;
+pub mod root;
+pub mod settings;
+pub mod tag;
+pub mod torrent;
+pub mod user;
diff --git a/tests/common/contexts/root/mod.rs b/tests/common/contexts/root/mod.rs
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/tests/common/contexts/root/mod.rs
@@ -0,0 +1 @@
+
diff --git a/tests/common/contexts/settings/mod.rs b/tests/common/contexts/settings/mod.rs
new file mode 100644
index 00000000..5ba7a0db
--- /dev/null
+++ b/tests/common/contexts/settings/mod.rs
@@ -0,0 +1,190 @@
+pub mod responses;
+
+use serde::{Deserialize, Serialize};
+use torrust_index::config::{
+    Api as DomainApi, Auth as DomainAuth, Database as DomainDatabase, ImageCache as DomainImageCache, Mail as DomainMail,
+    Network as DomainNetwork, TorrustIndex as DomainSettings, Tracker as DomainTracker,
+    TrackerStatisticsImporter as DomainTrackerStatisticsImporter, Website as DomainWebsite,
+};
+
+#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
+pub struct Settings {
+    pub website: Website,
+    pub tracker: Tracker,
+    pub net: Network,
+    pub auth: Auth,
+    pub database: Database,
+    pub mail: Mail,
+    pub image_cache: ImageCache,
+    pub api: Api,
+    pub tracker_statistics_importer: TrackerStatisticsImporter,
+}
+
+#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
+pub struct Website {
+    pub name: String,
+}
+
+#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
+pub struct Tracker {
+    pub url: String,
+    pub mode: String,
+    pub api_url: String,
+    pub token: String,
+    pub token_valid_seconds: u64,
+}
+
+#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
+pub struct Network {
+    pub port: u16,
+    pub base_url: Option<String>,
+}
+
+#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
+pub struct Auth {
+    pub email_on_signup: String,
+    pub min_password_length: usize,
+    pub max_password_length: usize,
+    pub secret_key: String,
+}
+
+#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
+pub struct Database {
+    pub connect_url: String,
+}
+
+#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
+pub struct Mail {
+    pub email_verification_enabled: bool,
+    pub from: String,
+    pub reply_to: String,
+    pub username: String,
+    pub password: String,
+    pub server: String,
+    pub port: u16,
+}
+
+#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
+pub struct ImageCache {
+    pub max_request_timeout_ms: u64,
+    pub capacity: usize,
+    pub entry_size_limit: usize,
+    pub user_quota_period_seconds: u64,
+    pub user_quota_bytes: usize,
+}
+
+#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
+pub struct Api {
+    pub default_torrent_page_size: u8,
+    pub max_torrent_page_size: u8,
+}
+
+#[derive(Deserialize, Serialize, PartialEq, Debug, Clone)]
+pub struct TrackerStatisticsImporter {
+    pub torrent_info_update_interval: u64,
+}
+
+impl From<DomainSettings> for Settings {
+    fn from(settings: DomainSettings) -> Self {
+        Settings {
+            website: Website::from(settings.website),
+            tracker: Tracker::from(settings.tracker),
+            net: Network::from(settings.net),
+            auth: Auth::from(settings.auth),
+            database: Database::from(settings.database),
+            mail: Mail::from(settings.mail),
+            image_cache: ImageCache::from(settings.image_cache),
+            api: Api::from(settings.api),
+            tracker_statistics_importer: TrackerStatisticsImporter::from(settings.tracker_statistics_importer),
+        }
+    }
+}
+
+impl From<DomainWebsite> for Website {
+    fn from(website: DomainWebsite) -> Self {
+        Self { name: website.name }
+    }
+}
+
+impl From<DomainTracker> for Tracker {
+    fn from(tracker: DomainTracker) -> Self {
+        Self {
+            url: tracker.url,
+            mode: format!("{:?}", tracker.mode),
+            api_url: tracker.api_url,
+            token: tracker.token,
+            token_valid_seconds: tracker.token_valid_seconds,
+        }
+    }
+}
+
+impl From<DomainNetwork> for Network {
+    fn from(net: DomainNetwork) -> Self {
+        Self {
+            port: net.port,
+            base_url: net.base_url,
+        }
+    }
+}
+
+impl From<DomainAuth> for Auth {
+    fn from(auth: DomainAuth) -> Self {
+        Self {
+            email_on_signup: format!("{:?}", auth.email_on_signup),
+            min_password_length: auth.min_password_length,
+            max_password_length: auth.max_password_length,
+            secret_key: auth.secret_key,
+        }
+    }
+}
+
+impl From<DomainDatabase> for Database {
+    fn from(database: DomainDatabase) -> Self {
+        Self {
+            connect_url: database.connect_url,
+        }
+    }
+}
+
+impl From<DomainMail> for Mail {
+    fn from(mail: DomainMail) -> Self {
+        Self {
+            email_verification_enabled: mail.email_verification_enabled,
+            from: mail.from,
+            reply_to: mail.reply_to,
+            username: mail.username,
+            password: mail.password,
+            server: mail.server,
+            port: mail.port,
+        }
+    }
+}
+
+impl From<DomainImageCache> for ImageCache {
+    fn from(image_cache: DomainImageCache) -> Self {
+        Self {
+            max_request_timeout_ms: image_cache.max_request_timeout_ms,
+            capacity: image_cache.capacity,
+            entry_size_limit: image_cache.entry_size_limit,
+            user_quota_period_seconds: image_cache.user_quota_period_seconds,
+            user_quota_bytes: image_cache.user_quota_bytes,
+        }
+    }
+}
+
+impl From<DomainApi> for Api {
+    fn from(api: DomainApi) -> Self {
+        Self {
+            default_torrent_page_size: api.default_torrent_page_size,
+            max_torrent_page_size: api.max_torrent_page_size,
+        }
+    }
+}
+
+impl From<DomainTrackerStatisticsImporter> for TrackerStatisticsImporter {
+    fn from(tracker_statistics_importer: DomainTrackerStatisticsImporter) -> Self {
+        Self {
+            torrent_info_update_interval: tracker_statistics_importer.torrent_info_update_interval,
+        }
+    }
+}
diff --git a/tests/common/contexts/settings/responses.rs b/tests/common/contexts/settings/responses.rs
new file mode 100644
index 00000000..096ef1f4
--- /dev/null
+++ b/tests/common/contexts/settings/responses.rs
@@ -0,0 +1,26 @@
+use serde::Deserialize;
+
+use super::Settings;
+
+#[derive(Deserialize)]
+pub struct AllSettingsResponse {
+    pub data: Settings,
+}
+
+#[derive(Deserialize)]
+pub struct PublicSettingsResponse {
+    pub data: Public,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct Public {
+    pub website_name: String,
+    pub tracker_url: String,
+    pub tracker_mode: String,
+    pub email_on_signup: String,
+}
+
+#[derive(Deserialize)]
+pub struct SiteNameResponse {
+    pub data: String,
+}
diff --git a/tests/common/contexts/tag/asserts.rs b/tests/common/contexts/tag/asserts.rs
new file mode 100644
index 00000000..a7c40888
--- /dev/null
+++ b/tests/common/contexts/tag/asserts.rs
@@ -0,0 +1,23 @@
+use torrust_index::models::torrent_tag::TagId;
+
+use crate::common::asserts::assert_json_ok_response;
+use crate::common::contexts::tag::responses::{AddedTagResponse, DeletedTagResponse};
+use crate::common::responses::TextResponse;
+
+pub fn assert_added_tag_response(response: &TextResponse, tag_name: &str) {
+    let added_tag_response: AddedTagResponse = serde_json::from_str(&response.body)
+        .unwrap_or_else(|_| panic!("response {:#?} should be an AddedTagResponse", response.body));
+
+    assert_eq!(added_tag_response.data, tag_name);
+
+    assert_json_ok_response(response);
+}
+
+pub fn assert_deleted_tag_response(response: &TextResponse, tag_id: TagId) {
+    let deleted_tag_response: DeletedTagResponse = serde_json::from_str(&response.body)
+        .unwrap_or_else(|_| panic!("response {:#?} should be a DeletedTagResponse", response.body));
+
+    assert_eq!(deleted_tag_response.data, tag_id);
+
+    assert_json_ok_response(response);
+}
diff --git a/tests/common/contexts/tag/fixtures.rs b/tests/common/contexts/tag/fixtures.rs
new file mode 100644
index 00000000..6012497f
--- /dev/null
+++ b/tests/common/contexts/tag/fixtures.rs
@@ -0,0 +1,10 @@
+use rand::Rng;
+
+pub fn random_tag_name() -> String {
+    format!("tag name {}", random_id())
+}
+
+fn random_id() -> u64 {
+    let mut rng = rand::thread_rng();
+    rng.gen_range(0..1_000_000)
+}
diff --git a/tests/common/contexts/tag/forms.rs b/tests/common/contexts/tag/forms.rs
new file mode 100644
index 00000000..26d1395d
--- /dev/null
+++ b/tests/common/contexts/tag/forms.rs
@@ -0,0 +1,11 @@
+use serde::Serialize;
+
+#[derive(Serialize)]
+pub struct AddTagForm {
+    pub name: String,
+}
+
+#[derive(Serialize)]
+pub struct DeleteTagForm {
+    pub tag_id: i64,
+}
diff --git a/tests/common/contexts/tag/mod.rs b/tests/common/contexts/tag/mod.rs
new file mode 100644
index 00000000..cfe5dd24
--- /dev/null
+++ b/tests/common/contexts/tag/mod.rs
@@ -0,0 +1,4 @@
+pub mod asserts;
+pub mod fixtures;
+pub mod forms;
+pub mod responses;
diff --git a/tests/common/contexts/tag/responses.rs b/tests/common/contexts/tag/responses.rs
new file mode 100644
index 00000000..df4cc9ff
--- /dev/null
+++ b/tests/common/contexts/tag/responses.rs
@@ -0,0 +1,42 @@
+use serde::Deserialize;
+
+// code-review: we should always include an API resource in the `data` attribute.
+//
+// ```
+// pub struct DeletedTagResponse {
+//     pub data: DeletedTag,
+// }
+//
+// pub struct DeletedTag {
+//     pub tag_id: i64,
+// }
+// ```
+//
+// This way the API client knows what's the meaning of the `data` attribute.
+
+#[derive(Deserialize)]
+pub struct AddedTagResponse {
+    pub data: String,
+}
+
+#[derive(Deserialize)]
+pub struct DeletedTagResponse {
+    pub data: i64,
+}
+
+#[derive(Deserialize, Debug)]
+pub struct ListResponse {
+    pub data: Vec<ListItem>,
+}
+
+impl ListResponse {
+    pub fn find_tag_id(&self, tag_name: &str) -> i64 {
+        self.data.iter().find(|tag| tag.name == tag_name).unwrap().tag_id
+    }
+}
+
+#[derive(Deserialize, Debug, PartialEq)]
+pub struct ListItem {
+    pub tag_id: i64,
+    pub name: String,
+}
diff --git a/tests/common/contexts/torrent/asserts.rs b/tests/common/contexts/torrent/asserts.rs
new file mode 100644
index 00000000..d0f1a8cf
--- /dev/null
+++ b/tests/common/contexts/torrent/asserts.rs
@@ -0,0 +1,42 @@
+use super::responses::TorrentDetails;
+
+type Check = (&'static str, bool);
+
+/// Assert that the torrent details match the expected ones.
+///
+/// It ignores some fields that are not relevant for the E2E tests
+/// or hard to assert due to the concurrent nature of the tests.
+pub fn assert_expected_torrent_details(torrent: &TorrentDetails, expected_torrent: &TorrentDetails) {
+    let mut discrepancies = Vec::new();
+
+    let checks: Vec<Check> = vec![
+        ("torrent_id", torrent.torrent_id == expected_torrent.torrent_id),
+        ("uploader", torrent.uploader == expected_torrent.uploader),
+        ("info_hash", torrent.info_hash == expected_torrent.info_hash),
+        ("title", torrent.title == expected_torrent.title),
+        ("description", torrent.description == expected_torrent.description),
+        (
+            "category.category_id",
+            torrent.category.category_id == expected_torrent.category.category_id,
+        ),
+        ("category.name", torrent.category.name == expected_torrent.category.name),
+        ("file_size", torrent.file_size == expected_torrent.file_size),
+        ("seeders", torrent.seeders == expected_torrent.seeders),
+        ("leechers", torrent.leechers == expected_torrent.leechers),
+        ("files", torrent.files == expected_torrent.files),
+        ("trackers", torrent.trackers == expected_torrent.trackers),
+        ("magnet_link", torrent.magnet_link == expected_torrent.magnet_link),
+        ("tags", torrent.tags == expected_torrent.tags),
+        ("name", torrent.name == expected_torrent.name),
+    ];
+
+    for (field_name, equals) in &checks {
+        if !equals {
+            discrepancies.push((*field_name).to_string());
+        }
+    }
+
+    let error_message = format!("left:\n{torrent:#?}\nright:\n{expected_torrent:#?}\ndiscrepancies: {discrepancies:#?}");
+
+    assert!(discrepancies.is_empty(), "{}", error_message);
+}
diff --git a/tests/common/contexts/torrent/file.rs b/tests/common/contexts/torrent/file.rs
new file mode 100644
index 00000000..b5f58339
--- /dev/null
+++ b/tests/common/contexts/torrent/file.rs
@@ -0,0 +1,83 @@
+//! Utility functions for torrent files.
+//!
+//! It's a wrapper around the [imdl](https://crates.io/crates/imdl) program.
+use std::path::{Path, PathBuf};
+use std::process::Command;
+
+use serde::Deserialize;
+use which::which;
+
+/// Attributes parsed from a torrent file.
+#[derive(Deserialize, Clone, Debug)]
+pub struct TorrentFileInfo {
+    pub name: String,
+    pub comment: Option<String>,
+    pub creation_date: Option<u64>,
+    pub created_by: Option<String>,
+    pub source: Option<String>,
+    pub info_hash: String,
+    pub torrent_size: u64,
+    pub content_size: u64,
+    pub private: bool,
+    pub tracker: Option<String>,
+    pub announce_list: Option<Vec<Vec<String>>>,
+    pub update_url: Option<String>,
+    pub dht_nodes: Option<Vec<String>>,
+    pub piece_size: u64,
+    pub piece_count: u64,
+    pub file_count: u64,
+    pub files: Vec<String>,
+}
+
+/// Creates a torrent file for the given file.
+/// This function requires the `imdl` program to be installed.
+///
+pub fn create_torrent(dir: &Path, file_name: &str) -> PathBuf {
+    guard_that_torrent_edition_cmd_is_installed();
+
+    let input_file_path = Path::new(dir).join(file_name);
+    let output_file_path = Path::new(dir).join(format!("{file_name}.torrent"));
+
+    let _output = Command::new("imdl")
+        .args(["torrent", "create", "--show"])
+        .args(["--input", &format!("{}", input_file_path.to_string_lossy())])
+        .args(["--output", &format!("{}", output_file_path.to_string_lossy())])
+        .output()
+        .unwrap_or_else(|_| panic!("failed to create torrent file: {:?}", output_file_path.to_string_lossy()));
+
+    //io::stdout().write_all(&output.stdout).unwrap();
+    //io::stderr().write_all(&output.stderr).unwrap();
+
+    output_file_path
+}
+
+/// Parses a torrent file.
+/// This function requires the `imdl` program to be installed.
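+///
+/// Usage sketch (the path is illustrative):
+///
+/// ```rust,ignore
+/// let info = parse_torrent(Path::new("/tmp/file-abc.txt.torrent"));
+/// assert!(!info.info_hash.is_empty());
+/// ```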
+///
+pub fn parse_torrent(torrent_file_path: &Path) -> TorrentFileInfo {
+    guard_that_torrent_edition_cmd_is_installed();
+
+    let output = Command::new("imdl")
+        .args(["torrent", "show", "--json", &torrent_file_path.to_string_lossy()])
+        .output()
+        .unwrap_or_else(|_| panic!("failed to open torrent file: {:?}", &torrent_file_path.to_string_lossy()));
+
+    match std::str::from_utf8(&output.stdout) {
+        Ok(parsed_torrent_json) => {
+            let res: TorrentFileInfo = serde_json::from_str(parsed_torrent_json).unwrap();
+            res
+        }
+        Err(err) => panic!("got non UTF-8 data from 'imdl'. Error: {err}"),
+    }
+}
+
+/// It panics if the `imdl` console application is not installed.
+fn guard_that_torrent_edition_cmd_is_installed() {
+    const IMDL_BINARY: &str = "imdl";
+    match which(IMDL_BINARY) {
+        Ok(_path) => (),
+        Err(err) => {
+            panic!("Can't create torrent with \"imdl\": {err}. Please install it with: `cargo install imdl`");
+        }
+    }
+}
diff --git a/tests/common/contexts/torrent/fixtures.rs b/tests/common/contexts/torrent/fixtures.rs
new file mode 100644
index 00000000..e60b6089
--- /dev/null
+++ b/tests/common/contexts/torrent/fixtures.rs
@@ -0,0 +1,235 @@
+use std::fs::File;
+use std::io::Write;
+use std::path::{Path, PathBuf};
+
+use serde::{Deserialize, Serialize};
+use serde_bytes::ByteBuf;
+use tempfile::{tempdir, TempDir};
+use torrust_index::services::hasher::sha1;
+use torrust_index::utils::hex::into_bytes;
+use uuid::Uuid;
+
+use super::file::{create_torrent, parse_torrent, TorrentFileInfo};
+use super::forms::{BinaryFile, UploadTorrentMultipartForm};
+use super::requests::InfoHash;
+use super::responses::{Id, TagId};
+use crate::common::contexts::category::fixtures::software_category_name;
+
+/// Information about a torrent that is going to be added to the index.
+#[derive(Clone)]
+pub struct TorrentIndexInfo {
+    // Metadata
+    pub title: String,
+    pub description: String,
+    pub category: String,
+    pub tags: Option<Vec<TagId>>,
+    // Other fields
+    pub torrent_file: BinaryFile,
+    pub name: String,
+}
+
+impl From<TorrentIndexInfo> for UploadTorrentMultipartForm {
+    fn from(indexed_torrent: TorrentIndexInfo) -> UploadTorrentMultipartForm {
+        UploadTorrentMultipartForm {
+            title: indexed_torrent.title,
+            description: indexed_torrent.description,
+            category: indexed_torrent.category,
+            torrent_file: indexed_torrent.torrent_file,
+        }
+    }
+}
+
+/// Torrent that has been added to the index.
+pub struct TorrentListedInIndex {
+    pub torrent_id: Id,
+    pub title: String,
+    pub description: String,
+    pub category: String,
+    pub torrent_file: BinaryFile,
+}
+
+impl TorrentListedInIndex {
+    pub fn from(torrent_to_index: TorrentIndexInfo, torrent_id: Id) -> Self {
+        Self {
+            torrent_id,
+            title: torrent_to_index.title,
+            description: torrent_to_index.description,
+            category: torrent_to_index.category,
+            torrent_file: torrent_to_index.torrent_file,
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct TestTorrent {
+    /// Parsed info from torrent file.
+    pub file_info: TorrentFileInfo,
+    /// Torrent info needed to add the torrent to the index.
+    pub index_info: TorrentIndexInfo,
+}
+
+impl TestTorrent {
+    pub fn random() -> Self {
+        let temp_dir = temp_dir();
+
+        let torrents_dir_path = temp_dir.path().to_owned();
+
+        // Random ID to identify all the torrent-related entities: files, fields, ...
+        // That makes it easier to debug the test outputs.
+        let id = Uuid::new_v4();
+
+        // Create a random torrent file
+        let torrent_path = random_torrent_file(&torrents_dir_path, &id);
+
+        Self::build_from_torrent_file(&id, &torrent_path)
+    }
+
+    pub fn with_custom_info_dict_field(id: Uuid, file_contents: &str, custom: &str) -> Self {
+        let temp_dir = temp_dir();
+
+        let torrents_dir_path = temp_dir.path().to_owned();
+
+        // Create the torrent in memory
+        let torrent = TestTorrentWithCustomInfoField::with_contents(id, file_contents, custom);
+
+        // Bencode the torrent
+        let torrent_data = TestTorrentWithCustomInfoField::encode(&torrent).unwrap();
+
+        // Torrent temporary file path
+        let contents_filename = contents_file_name(&id);
+        let torrent_filename = format!("{contents_filename}.torrent");
+        let torrent_path = torrents_dir_path.join(torrent_filename.clone());
+
+        // Write the torrent file to the temporary file
+        let mut file = File::create(torrent_path.clone()).unwrap();
+        file.write_all(&torrent_data).unwrap();
+
+        Self::build_from_torrent_file(&id, &torrent_path)
+    }
+
+    pub fn file_info_hash(&self) -> InfoHash {
+        self.file_info.info_hash.clone()
+    }
+
+    /// It builds a `TestTorrent` from a torrent file.
+    fn build_from_torrent_file(id: &Uuid, torrent_path: &Path) -> TestTorrent {
+        // Load torrent binary file
+        let torrent_file = BinaryFile::from_file_at_path(torrent_path);
+
+        // Load torrent file metadata
+        let torrent_info = parse_torrent(torrent_path);
+
+        let torrent_to_index = TorrentIndexInfo {
+            title: format!("title-{id}"),
+            description: format!("description-{id}"),
+            category: software_category_name(),
+            // todo: include one tag in test torrents. Implementation is not
+            // trivial because the tag must exist in the database and there are
+            // no predefined tags in the database like there are for categories.
+            tags: None,
+            torrent_file,
+            name: contents_file_name(id),
+        };
+
+        TestTorrent {
+            file_info: torrent_info,
+            index_info: torrent_to_index,
+        }
+    }
+}
+
+pub fn random_torrent() -> TestTorrent {
+    TestTorrent::random()
+}
+
+pub fn random_torrent_file(dir: &Path, id: &Uuid) -> PathBuf {
+    // Create random text file
+    let file_name = random_txt_file(dir, id);
+
+    // Create torrent file for the text file
+    create_torrent(dir, &file_name)
+}
+
+pub fn random_txt_file(dir: &Path, id: &Uuid) -> String {
+    // Sample file name
+    let file_name = contents_file_name(id);
+
+    // Sample file path
+    let file_path = dir.join(file_name.clone());
+
+    // Write sample text to the temporary file
+    let mut file = File::create(file_path).unwrap();
+    file.write_all(id.as_bytes()).unwrap();
+
+    file_name
+}
+
+fn contents_file_name(id: &Uuid) -> String {
+    format!("file-{id}.txt")
+}
+
+pub fn temp_dir() -> TempDir {
+    tempdir().unwrap()
+}
+
+/// A minimal torrent file with a custom field in the info dict.
+///
+/// ```json
+/// {
+///    "info": {
+///       "length": 602515,
+///       "name": "mandelbrot_set_01",
+///       "piece length": 32768,
+///       "pieces": "8A 88 32 BE ED 05 5F AA C4 AF 4A 90 4B 9A BF 0D EC 83 42 1C 73 39 05 B8 D6 20 2C 1B D1 8A 53 28 1F B5 D4 23 0A 23 C8 DB AC C4 E6 6B 16 12 08 C7 A4 AD 64 45 70 ED 91 0D F1 38 E7 DF 0C 1A D0 C9 23 27 7C D1 F9 D4 E5 A1 5F F5 E5 A0 E4 9E FB B1 43 F5 4B AD 0E D4 9D CB 49 F7 E6 7B BA 30 5F AF F9 88 56 FB 45 9A B4 95 92 3E 2C 7F DA A6 D3 82 E7 63 A3 BB 4B 28 F3 57 C7 CB 7D 8C 06 E3 46 AB D7 E8 8E 8A 8C 9F C7 E6 C5 C5 64 82 ED 47 BB 2A F1 B7 3F A5 3C 5B 9C AF 43 EC 2A E1 08 68 9A 49 C8 BF 1B 07 AD BE E9 2D 7E BE 9C 18 7F 4C A1 97 0E 54 3A 18 94 0E 60 8D 5C 69 0E 41 46 0D 3C 9A 37 F6 81 62 4F 95 C0 73 92 CA 9A D5 A9 89 AC 8B 85 12 53 0B FB E2 96 26 3E 26 A6 5B 70 53 48 65 F3 6C 27 0F 6B BD 1C EE EB 1A 9D 5F 77 A8 D8 AF D8 14 82 4A E0 B4 62 BC F1 A5 F5 F2 C7 60 F8 38 C8 5B 0B A9 07 DD 86 FA C0 7B F0 26 D7 D1 9A 42 C3 1F 9F B9 59 83 10 62 41 E9 06 3C 6D A1 19 75 01 57 25 9E B7 FE DF 91 04 D4 51 4B 6D 44 02 8D 31 8E 84 26 95 0F 30 31 F0 2C 16 39 BD 53 1D CF D3 5E 3E 41 A9 1E 14 3F 73 24 AC 5E 9E FC 4D C5 70 45 0F 45 8B 9B 52 E6 D0 26 47 8F 43 08 9E 2A 7C C5 92 D5 86 36 FE 48 E9 B8 86 84 92 23 49 5B EE C4 31 B2 1D 10 75 8E 4C 07 84 8F",
+///       "custom": "custom03"
+///    }
+/// }
+/// ```
+///
+/// Changing the value of the `custom` field will change the info-hash of the torrent.
+#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
+pub struct TestTorrentWithCustomInfoField {
+    pub info: InfoDictWithCustomField,
+}
+
+/// A minimal torrent info dict with a custom field.
+#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]
+pub struct InfoDictWithCustomField {
+    #[serde(default)]
+    pub length: i64,
+    #[serde(default)]
+    pub name: String,
+    #[serde(rename = "piece length")]
+    pub piece_length: i64,
+    #[serde(default)]
+    pub pieces: ByteBuf,
+    #[serde(default)]
+    pub custom: String,
+}
+
+impl TestTorrentWithCustomInfoField {
+    pub fn with_contents(id: Uuid, file_contents: &str, custom: &str) -> Self {
+        let sha1_of_file_contents = sha1(file_contents);
+        let pieces = into_bytes(&sha1_of_file_contents).expect("sha1 of test torrent contents cannot be converted to bytes");
+
+        Self {
+            info: InfoDictWithCustomField {
+                length: i64::try_from(file_contents.len()).expect("file contents size in bytes cannot exceed i64::MAX"),
+                name: format!("file-{id}.txt"),
+                piece_length: 16384,
+                pieces: ByteBuf::from(pieces),
+                custom: custom.to_owned(),
+            },
+        }
+    }
+
+    pub fn encode(torrent: &Self) -> Result<Vec<u8>, serde_bencode::Error> {
+        match serde_bencode::to_bytes(torrent) {
+            Ok(bencode_bytes) => Ok(bencode_bytes),
+            Err(e) => {
+                eprintln!("{e:?}");
+                Err(e)
+            }
+        }
+    }
+}
diff --git a/tests/common/contexts/torrent/forms.rs b/tests/common/contexts/torrent/forms.rs
new file mode 100644
index 00000000..c6212611
--- /dev/null
+++ b/tests/common/contexts/torrent/forms.rs
@@ -0,0 +1,52 @@
+use std::fs;
+use std::path::Path;
+
+use serde::{Deserialize, Serialize};
+
+#[derive(Deserialize, Serialize)]
+pub struct UpdateTorrentFrom {
+    pub title: Option<String>,
+    pub description: Option<String>,
+    pub category: Option<i64>,
+    pub tags: Option<Vec<i64>>,
+}
+
+use reqwest::multipart::Form;
+
+pub struct UploadTorrentMultipartForm {
+    pub title: String,
+    pub description: String,
+    pub category: String,
+    pub torrent_file: BinaryFile,
+}
+
+#[derive(Clone)]
+pub struct BinaryFile {
+    pub name: String,
+    pub contents: Vec<u8>,
+}
+
+impl BinaryFile {
+    pub fn from_file_at_path(path: &Path) -> Self {
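+        // Read the whole torrent file into memory; test fixtures are small,
+        // so buffering the full contents is fine.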
+        BinaryFile {
+            name: path.file_name().unwrap().to_owned().into_string().unwrap(),
+            contents: fs::read(path).unwrap(),
+        }
+    }
+}
+
+impl From<UploadTorrentMultipartForm> for Form {
+    fn from(form: UploadTorrentMultipartForm) -> Self {
+        Form::new()
+            .text("title", form.title)
+            .text("description", form.description)
+            .text("category", form.category)
+            .part(
+                "torrent",
+                reqwest::multipart::Part::bytes(form.torrent_file.contents)
+                    .file_name(form.torrent_file.name)
+                    .mime_str("application/x-bittorrent")
+                    .unwrap(),
+            )
+    }
+}
diff --git a/tests/common/contexts/torrent/mod.rs b/tests/common/contexts/torrent/mod.rs
new file mode 100644
index 00000000..efe732e6
--- /dev/null
+++ b/tests/common/contexts/torrent/mod.rs
@@ -0,0 +1,6 @@
+pub mod asserts;
+pub mod file;
+pub mod fixtures;
+pub mod forms;
+pub mod requests;
+pub mod responses;
diff --git a/tests/common/contexts/torrent/requests.rs b/tests/common/contexts/torrent/requests.rs
new file mode 100644
index 00000000..1d4ac583
--- /dev/null
+++ b/tests/common/contexts/torrent/requests.rs
@@ -0,0 +1 @@
+pub type InfoHash = String;
diff --git a/tests/common/contexts/torrent/responses.rs b/tests/common/contexts/torrent/responses.rs
new file mode 100644
index 00000000..f95d67ce
--- /dev/null
+++ b/tests/common/contexts/torrent/responses.rs
@@ -0,0 +1,117 @@
+use serde::Deserialize;
+
+pub type Id = i64;
+pub type CategoryId = i64;
+pub type TagId = i64;
+pub type UtcDateTime = String; // %Y-%m-%d %H:%M:%S
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct ErrorResponse {
+    pub error: String,
+}
+
+#[derive(Deserialize)]
+pub struct TorrentListResponse {
+    pub data: TorrentList,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct TorrentList {
+    pub total: u32,
+    pub results: Vec<ListItem>,
+}
+
+impl TorrentList {
+    pub fn _contains(&self, torrent_id: Id) -> bool {
+        self.results.iter().any(|item| item.torrent_id == torrent_id)
+    }
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct ListItem {
+    pub torrent_id: i64,
+    pub uploader: String,
+    pub info_hash: String,
+    pub title: String,
+    pub description: Option<String>,
+    pub category_id: i64,
+    pub date_uploaded: String,
+    pub file_size: i64,
+    pub seeders: i64,
+    pub leechers: i64,
+    pub name: String,
+    pub comment: Option<String>,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct TorrentDetailsResponse {
+    pub data: TorrentDetails,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct TorrentDetails {
+    pub torrent_id: Id,
+    pub uploader: String,
+    pub info_hash: String,
+    pub title: String,
+    pub description: String,
+    pub category: Category,
+    pub upload_date: UtcDateTime,
+    pub file_size: u64,
+    pub seeders: u64,
+    pub leechers: u64,
+    pub files: Vec<File>,
+    pub trackers: Vec<String>,
+    pub magnet_link: String,
+    pub tags: Vec<Tag>,
+    pub name: String,
+    pub comment: Option<String>,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct Category {
+    pub category_id: CategoryId,
+    pub name: String,
+    pub num_torrents: u64,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct Tag {
+    pub tag_id: TagId,
+    pub name: String,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct File {
+    pub path: Vec<String>,
+    pub length: u64,
+    pub md5sum: Option<String>,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct UploadedTorrentResponse {
+    pub data: UploadedTorrent,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct UploadedTorrent {
+    pub torrent_id: Id,
+    pub info_hash: String,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct DeletedTorrentResponse {
+    pub data: DeletedTorrent,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
PartialEq, Debug)] +pub struct DeletedTorrent { + pub torrent_id: Id, +} + +#[derive(Deserialize, PartialEq, Debug)] +pub struct UpdatedTorrentResponse { + pub data: UpdatedTorrent, +} + +pub type UpdatedTorrent = TorrentDetails; diff --git a/tests/common/contexts/user/asserts.rs b/tests/common/contexts/user/asserts.rs new file mode 100644 index 00000000..dfa5352b --- /dev/null +++ b/tests/common/contexts/user/asserts.rs @@ -0,0 +1,61 @@ +use super::forms::RegistrationForm; +use super::responses::LoggedInUserData; +use crate::common::asserts::assert_json_ok_response; +use crate::common::contexts::user::responses::{ + AddedUserResponse, BannedUserResponse, SuccessfulLoginResponse, TokenRenewalData, TokenRenewalResponse, TokenVerifiedResponse, +}; +use crate::common::responses::TextResponse; + +pub fn assert_added_user_response(response: &TextResponse) { + let _added_user_response: AddedUserResponse = serde_json::from_str(&response.body) + .unwrap_or_else(|_| panic!("response {:#?} should be a AddedUserResponse", response.body)); + assert_json_ok_response(response); +} + +pub fn assert_successful_login_response(response: &TextResponse, registered_user: &RegistrationForm) { + let successful_login_response: SuccessfulLoginResponse = serde_json::from_str(&response.body) + .unwrap_or_else(|_| panic!("response {:#?} should be a SuccessfulLoginResponse", response.body)); + + let logged_in_user = successful_login_response.data; + + assert_eq!(logged_in_user.username, registered_user.username); + + assert_json_ok_response(response); +} + +pub fn assert_token_verified_response(response: &TextResponse) { + let token_verified_response: TokenVerifiedResponse = serde_json::from_str(&response.body) + .unwrap_or_else(|_| panic!("response {:#?} should be a TokenVerifiedResponse", response.body)); + + assert_eq!(token_verified_response.data, "Token is valid."); + + assert_json_ok_response(response); +} + +pub fn assert_token_renewal_response(response: &TextResponse, logged_in_user: &LoggedInUserData) { + let token_renewal_response: TokenRenewalResponse = serde_json::from_str(&response.body) + .unwrap_or_else(|_| panic!("response {:#?} should be a TokenRenewalResponse", response.body)); + + assert_eq!( + token_renewal_response.data, + TokenRenewalData { + token: logged_in_user.token.clone(), + username: logged_in_user.username.clone(), + admin: logged_in_user.admin, + } + ); + + assert_json_ok_response(response); +} + +pub fn assert_banned_user_response(response: &TextResponse, registered_user: &RegistrationForm) { + let banned_user_response: BannedUserResponse = serde_json::from_str(&response.body) + .unwrap_or_else(|_| panic!("response {:#?} should be a BannedUserResponse", response.body)); + + assert_eq!( + banned_user_response.data, + format!("Banned user: {}", registered_user.username) + ); + + assert_json_ok_response(response); +} diff --git a/tests/common/contexts/user/fixtures.rs b/tests/common/contexts/user/fixtures.rs new file mode 100644 index 00000000..fea39e7f --- /dev/null +++ b/tests/common/contexts/user/fixtures.rs @@ -0,0 +1,18 @@ +use rand::Rng; + +use crate::common::contexts::user::forms::RegistrationForm; + +pub fn random_user_registration_form() -> RegistrationForm { + let user_id = random_user_id(); + RegistrationForm { + username: format!("username_{user_id}"), + email: Some(format!("email_{user_id}@email.com")), + password: "password".to_string(), + confirm_password: "password".to_string(), + } +} + +fn random_user_id() -> u64 { + let mut rng = rand::thread_rng(); + 
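+    // An id below one million keeps the generated usernames short; collisions
+    // across test runs are possible but unlikely.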
+    rng.gen_range(0..1_000_000)
+}
diff --git a/tests/common/contexts/user/forms.rs b/tests/common/contexts/user/forms.rs
new file mode 100644
index 00000000..359252a8
--- /dev/null
+++ b/tests/common/contexts/user/forms.rs
@@ -0,0 +1,37 @@
+use serde::Serialize;
+
+#[derive(Clone, Serialize)]
+pub struct RegistrationForm {
+    pub username: String,
+    pub email: Option<String>,
+    pub password: String,
+    pub confirm_password: String,
+}
+
+pub type RegisteredUser = RegistrationForm;
+
+#[derive(Serialize)]
+pub struct LoginForm {
+    pub login: String,
+    pub password: String,
+}
+
+#[derive(Serialize)]
+pub struct TokenVerificationForm {
+    pub token: String,
+}
+
+#[derive(Serialize)]
+pub struct TokenRenewalForm {
+    pub token: String,
+}
+
+pub struct Username {
+    pub value: String,
+}
+
+impl Username {
+    pub fn new(value: String) -> Self {
+        Self { value }
+    }
+}
diff --git a/tests/common/contexts/user/mod.rs b/tests/common/contexts/user/mod.rs
new file mode 100644
index 00000000..cfe5dd24
--- /dev/null
+++ b/tests/common/contexts/user/mod.rs
@@ -0,0 +1,4 @@
+pub mod asserts;
+pub mod fixtures;
+pub mod forms;
+pub mod responses;
diff --git a/tests/common/contexts/user/responses.rs b/tests/common/contexts/user/responses.rs
new file mode 100644
index 00000000..1a9a3837
--- /dev/null
+++ b/tests/common/contexts/user/responses.rs
@@ -0,0 +1,45 @@
+use serde::Deserialize;
+
+#[derive(Deserialize, Debug)]
+pub struct AddedUserResponse {
+    pub data: NewUserData,
+}
+
+#[derive(Deserialize, Debug)]
+pub struct NewUserData {
+    pub user_id: i64,
+}
+
+#[derive(Deserialize, Debug)]
+pub struct SuccessfulLoginResponse {
+    pub data: LoggedInUserData,
+}
+
+#[derive(Deserialize, Debug)]
+pub struct LoggedInUserData {
+    pub token: String,
+    pub username: String,
+    pub admin: bool,
+}
+
+#[derive(Deserialize)]
+pub struct TokenVerifiedResponse {
+    pub data: String,
+}
+
+#[derive(Deserialize)]
+pub struct BannedUserResponse {
+    pub data: String,
+}
+
+#[derive(Deserialize)]
+pub struct TokenRenewalResponse {
+    pub data: TokenRenewalData,
+}
+
+#[derive(Deserialize, PartialEq, Debug)]
+pub struct TokenRenewalData {
+    pub token: String,
+    pub username: String,
+    pub admin: bool,
+}
diff --git a/tests/common/http.rs b/tests/common/http.rs
new file mode 100644
index 00000000..7bfb64ef
--- /dev/null
+++ b/tests/common/http.rs
@@ -0,0 +1,54 @@
+pub type ReqwestQuery = Vec<ReqwestQueryParam>;
+pub type ReqwestQueryParam = (String, String);
+
+/// URL Query component
+#[derive(Default, Debug)]
+pub struct Query {
+    params: Vec<QueryParam>,
+}
+
+impl Query {
+    pub fn empty() -> Self {
+        Self { params: vec![] }
+    }
+
+    pub fn with_params(params: Vec<QueryParam>) -> Self {
+        Self { params }
+    }
+
+    pub fn add_param(&mut self, param: QueryParam) {
+        self.params.push(param);
+    }
+}
+
+impl From<Query> for ReqwestQuery {
+    fn from(url_search_params: Query) -> Self {
+        url_search_params
+            .params
+            .iter()
+            .map(|param| ReqwestQueryParam::from((*param).clone()))
+            .collect()
+    }
+}
+
+/// URL query param
+#[derive(Clone, Debug)]
+pub struct QueryParam {
+    name: String,
+    value: String,
+}
+
+impl QueryParam {
+    pub fn new(name: &str, value: &str) -> Self {
+        Self {
+            name: name.to_string(),
+            value: value.to_string(),
+        }
+    }
+}
+
+impl From<QueryParam> for ReqwestQueryParam {
+    fn from(param: QueryParam) -> Self {
+        (param.name, param.value)
+    }
+}
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
new file mode 100644
index 00000000..cbf6de10
--- /dev/null
+++ b/tests/common/mod.rs
@@ -0,0 +1,7 @@
+pub mod asserts;
+pub mod client;
+pub mod connection_info;
+pub mod contexts;
+pub mod http;
+pub mod random;
+pub mod responses;
diff --git a/tests/common/random.rs b/tests/common/random.rs
new file mode 100644
index 00000000..2133dcd2
--- /dev/null
+++ b/tests/common/random.rs
@@ -0,0 +1,10 @@
+//! Random data generators for testing.
+use rand::distributions::Alphanumeric;
+use rand::{thread_rng, Rng};
+
+/// Returns a random alphanumeric string of a certain size.
+///
+/// It is useful for generating random names, IDs, etc. for testing.
+pub fn string(size: usize) -> String {
+    thread_rng().sample_iter(&Alphanumeric).take(size).map(char::from).collect()
+}
diff --git a/tests/common/responses.rs b/tests/common/responses.rs
new file mode 100644
index 00000000..2545a9e8
--- /dev/null
+++ b/tests/common/responses.rs
@@ -0,0 +1,70 @@
+use reqwest::Response as ReqwestResponse;
+
+#[derive(Debug)]
+pub struct TextResponse {
+    pub status: u16,
+    pub content_type: Option<String>,
+    pub body: String,
+}
+
+impl TextResponse {
+    pub async fn from(response: ReqwestResponse) -> Self {
+        Self {
+            status: response.status().as_u16(),
+            content_type: response
+                .headers()
+                .get("content-type")
+                .map(|content_type| content_type.to_str().unwrap().to_owned()),
+            body: response.text().await.unwrap(),
+        }
+    }
+
+    pub fn is_json_and_ok(&self) -> bool {
+        self.is_ok() && self.is_json()
+    }
+
+    pub fn is_json(&self) -> bool {
+        if let Some(content_type) = &self.content_type {
+            return content_type == "application/json";
+        }
+        false
+    }
+
+    pub fn is_ok(&self) -> bool {
+        self.status == 200
+    }
+}
+
+#[derive(Debug)]
+pub struct BinaryResponse {
+    pub status: u16,
+    pub content_type: Option<String>,
+    pub bytes: Vec<u8>,
+}
+
+impl BinaryResponse {
+    pub async fn from(response: ReqwestResponse) -> Self {
+        Self {
+            status: response.status().as_u16(),
+            content_type: response
+                .headers()
+                .get("content-type")
+                .map(|content_type| content_type.to_str().unwrap().to_owned()),
+            bytes: response.bytes().await.unwrap().to_vec(),
+        }
+    }
+    pub fn is_a_bit_torrent_file(&self) -> bool {
+        self.is_ok() && self.is_bittorrent_content_type()
+    }
+
+    pub fn is_bittorrent_content_type(&self) -> bool {
+        if let Some(content_type) = &self.content_type {
+            return content_type == "application/x-bittorrent";
+        }
+        false
+    }
+
+    pub fn is_ok(&self) -> bool {
+        self.status == 200
+    }
+}
diff --git a/tests/databases/mod.rs b/tests/databases/mod.rs
deleted file mode 100644
index 66f90e92..00000000
--- a/tests/databases/mod.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-use std::future::Future;
-use torrust_index_backend::databases::database::{connect_database, Database, DatabaseDriver};
-
-mod mysql;
-mod tests;
-mod sqlite;
-
-// used to run tests with a clean database
-async fn run_test<'a, T, F>(db_fn: T, db: &'a Box<dyn Database>)
-    where
-        T: FnOnce(&'a Box<dyn Database>) -> F + 'a,
-        F: Future<Output = ()>
-{
-    // cleanup database before testing
-    assert!(db.delete_all_database_rows().await.is_ok());
-
-    // run test using clean database
-    db_fn(db).await;
-}
-
-// runs all tests
-pub async fn run_tests(db_driver: DatabaseDriver, db_path: &str) {
-    let db = connect_database(&db_driver, db_path).await;
-
-    run_test(tests::it_can_add_a_user, &db).await;
-    run_test(tests::it_can_add_a_torrent_category, &db).await;
-    run_test(tests::it_can_add_a_torrent_and_tracker_stats_to_that_torrent, &db).await;
-}
-
diff --git a/tests/databases/mysql.rs b/tests/databases/mysql.rs
deleted file mode 100644
index d64ac1b3..00000000
--- a/tests/databases/mysql.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-use torrust_index_backend::databases::database::{DatabaseDriver};
-use crate::databases::{run_tests};
-
-const DATABASE_URL: &str = "mysql://root:password@localhost:3306/torrust-index_test";
-
-#[tokio::test]
-async fn run_mysql_tests() {
-    run_tests(DatabaseDriver::Mysql, DATABASE_URL).await;
-}
-
-
diff --git a/tests/databases/sqlite.rs b/tests/databases/sqlite.rs
deleted file mode 100644
index 7aab5b1d..00000000
--- a/tests/databases/sqlite.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-use torrust_index_backend::databases::database::{DatabaseDriver};
-use crate::databases::{run_tests};
-
-const DATABASE_URL: &str = "sqlite::memory:";
-
-#[tokio::test]
-async fn run_sqlite_tests() {
-    run_tests(DatabaseDriver::Sqlite3, DATABASE_URL).await;
-}
-
-
diff --git a/tests/databases/tests.rs b/tests/databases/tests.rs
deleted file mode 100644
index 0c33cba1..00000000
--- a/tests/databases/tests.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-use serde_bytes::ByteBuf;
-use torrust_index_backend::databases::database::{Database, DatabaseError};
-use torrust_index_backend::models::torrent::TorrentListing;
-use torrust_index_backend::models::torrent_file::{TorrentInfo, Torrent};
-use torrust_index_backend::models::user::UserProfile;
-
-// test user options
-const TEST_USER_USERNAME: &str = "luckythelab";
-const TEST_USER_EMAIL: &str = "lucky@labradormail.com";
-const TEST_USER_PASSWORD: &str = "imagoodboy";
-
-// test category options
-const TEST_CATEGORY_NAME: &str = "Labrador Retrievers";
-
-// test torrent options
-const TEST_TORRENT_TITLE: &str = "Picture of dog treat";
-const TEST_TORRENT_DESCRIPTION: &str = "This is a picture of a dog treat.";
-const TEST_TORRENT_FILE_SIZE: i64 = 128_000;
-const TEST_TORRENT_SEEDERS: i64 = 437;
-const TEST_TORRENT_LEECHERS: i64 = 1289;
-
-async fn add_test_user(db: &Box<dyn Database>) -> Result<i64, DatabaseError> {
-    db.insert_user_and_get_id(TEST_USER_USERNAME, TEST_USER_EMAIL, TEST_USER_PASSWORD).await
-}
-
-async fn add_test_torrent_category(db: &Box<dyn Database>) -> Result<i64, DatabaseError> {
-    db.insert_category_and_get_id(TEST_CATEGORY_NAME).await
-}
-
-pub async fn it_can_add_a_user(db: &Box<dyn Database>) {
-    let add_test_user_result = add_test_user(&db).await;
-
-    assert!(add_test_user_result.is_ok());
-
-    let inserted_user_id = add_test_user_result.unwrap();
-
-    let get_user_profile_from_username_result = db.get_user_profile_from_username(TEST_USER_USERNAME).await;
-
-    // verify that we can grab the newly inserted user's profile data
-    assert!(get_user_profile_from_username_result.is_ok());
-
-    let returned_user_profile = get_user_profile_from_username_result.unwrap();
-
-    // verify that the profile data is as we expect it to be
-    assert_eq!(returned_user_profile, UserProfile {
-        user_id: inserted_user_id,
-        username: TEST_USER_USERNAME.to_string(),
-        email: TEST_USER_EMAIL.to_string(),
-        email_verified: returned_user_profile.email_verified.clone(),
-        bio: returned_user_profile.bio.clone(),
-        avatar: returned_user_profile.avatar.clone()
-    });
-}
-
-pub async fn it_can_add_a_torrent_category(db: &Box<dyn Database>) {
-    let add_test_torrent_category_result = add_test_torrent_category(&db).await;
-
-    assert!(add_test_torrent_category_result.is_ok());
-
-    let get_category_from_name_result = db.get_category_from_name(TEST_CATEGORY_NAME).await;
-
-    assert!(get_category_from_name_result.is_ok());
-
-    let category = get_category_from_name_result.unwrap();
-
-    assert_eq!(category.name, TEST_CATEGORY_NAME.to_string());
-}
-
-pub async fn it_can_add_a_torrent_and_tracker_stats_to_that_torrent(db: &Box<dyn Database>) {
-    // set pre-conditions
-    let user_id = add_test_user(&db).await.expect("add_test_user failed.");
-    let torrent_category_id = add_test_torrent_category(&db).await.expect("add_test_torrent_category failed.");
-
-    let torrent = Torrent {
-        info: TorrentInfo {
-            name: TEST_TORRENT_TITLE.to_string(),
-            pieces: Some(ByteBuf::from("1234567890123456789012345678901234567890".as_bytes())),
-            piece_length: 256000,
-            md5sum: None,
-            length: Some(TEST_TORRENT_FILE_SIZE),
-            files: None,
-            private: Some(1),
-            path: None,
-            root_hash: None
-        },
-        announce: Some("https://tracker.dutchbits.nl/announce".to_string()),
-        nodes: None,
-        encoding: None,
-        httpseeds: None,
-        announce_list: None,
-        creation_date: None,
-        comment: None,
-        created_by: None
-    };
-
-    let insert_torrent_and_get_id_result = db.insert_torrent_and_get_id(
-        &torrent,
-        user_id,
-        torrent_category_id,
-        TEST_TORRENT_TITLE,
-        TEST_TORRENT_DESCRIPTION
-    ).await;
-
-    assert!(insert_torrent_and_get_id_result.is_ok());
-
-    let torrent_id = insert_torrent_and_get_id_result.unwrap();
-
-    // add tracker stats to the torrent
-    let insert_torrent_tracker_stats_result = db.update_tracker_info(
-        torrent_id,
-        "https://tracker.torrust.com",
-        TEST_TORRENT_SEEDERS,
-        TEST_TORRENT_LEECHERS).await;
-
-    assert!(insert_torrent_tracker_stats_result.is_ok());
-
-    let get_torrent_listing_from_id_result = db.get_torrent_listing_from_id(torrent_id).await;
-
-    assert!(get_torrent_listing_from_id_result.is_ok());
-
-    let returned_torrent_listing = get_torrent_listing_from_id_result.unwrap();
-
-    assert_eq!(returned_torrent_listing, TorrentListing {
-        torrent_id,
-        uploader: TEST_USER_USERNAME.to_string(),
-        info_hash: returned_torrent_listing.info_hash.to_string(),
-        title: TEST_TORRENT_TITLE.to_string(),
-        description: Some(TEST_TORRENT_DESCRIPTION.to_string()),
-        category_id: torrent_category_id,
-        date_uploaded: returned_torrent_listing.date_uploaded.to_string(),
-        file_size: TEST_TORRENT_FILE_SIZE,
-        seeders: TEST_TORRENT_SEEDERS,
-        leechers: TEST_TORRENT_LEECHERS
-    });
-
-    // check if we get the same info hash on the retrieved torrent from database
-    let get_torrent_from_id_result = db.get_torrent_from_id(torrent_id).await;
-
-    assert!(get_torrent_from_id_result.is_ok());
-
-    let returned_torrent = get_torrent_from_id_result.unwrap();
-
-    assert_eq!(returned_torrent.info_hash(), torrent.info_hash());
-}
diff --git a/tests/docker-compose.yml b/tests/docker-compose.yml
deleted file mode 100644
index 4a5501bd..00000000
--- a/tests/docker-compose.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-version: "3.9"
-
-services:
-
-  mysql_8:
-    image: mysql:8.0.30
-    ports:
-      - "3306:3306"
-    environment:
-      MYSQL_ROOT_HOST: '%'
-      MYSQL_ROOT_PASSWORD: password
-      MYSQL_DATABASE: torrust-index_test
diff --git a/tests/e2e/config.rs b/tests/e2e/config.rs
new file mode 100644
index 00000000..747e0a05
--- /dev/null
+++ b/tests/e2e/config.rs
@@ -0,0 +1,62 @@
+//! Initialize configuration for the shared E2E test environment from a
+//! config file `config.toml` or env var.
+//!
+//! All environment variables are prefixed with `TORRUST_INDEX_E2E_`.
+
+// Environment variables
+
+use torrust_index::config::{Configuration, Info};
+
+/// The whole `index.toml` file content. It has priority over the config file,
+/// even if the file is not at the default path.
+const ENV_VAR_CONFIG: &str = "TORRUST_INDEX_E2E_CONFIG";
+
+/// Token needed to communicate with the Torrust Tracker
+const ENV_VAR_API_ADMIN_TOKEN: &str = "TORRUST_INDEX_E2E_TRACKER_API_TOKEN";
+
+/// The `index.toml` file location.
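+/// For example, a hypothetical override pointing the suite at a local file:
+///
+/// ```text
+/// TORRUST_INDEX_E2E_PATH_CONFIG=./index.local.toml cargo test
+/// ```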
+pub const ENV_VAR_PATH_CONFIG: &str = "TORRUST_INDEX_E2E_PATH_CONFIG";
+
+// Default values
+pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/index.development.sqlite3.toml";
+
+/// If present, E2E tests will run against a shared instance of the server
+pub const ENV_VAR_INDEX_SHARED: &str = "TORRUST_INDEX_E2E_SHARED";
+
+/// It loads the application configuration from the environment.
+///
+/// There are two methods to inject the configuration:
+///
+/// 1. By using a config file: `index.toml`.
+/// 2. Environment variable: `TORRUST_INDEX_E2E_CONFIG`. The variable contains the same contents as the `index.toml` file.
+///
+/// Environment variable has priority over the config file.
+///
+/// Refer to the [configuration documentation](https://docs.rs/torrust-index-configuration) for the configuration options.
+///
+/// # Panics
+///
+/// Will panic if it can't load the configuration from either
+/// the `./index.toml` file or the env var `TORRUST_INDEX_E2E_CONFIG`.
+#[must_use]
+pub fn initialize_configuration() -> Configuration {
+    let info = Info::new(
+        ENV_VAR_CONFIG.to_string(),
+        ENV_VAR_PATH_CONFIG.to_string(),
+        DEFAULT_PATH_CONFIG.to_string(),
+        ENV_VAR_API_ADMIN_TOKEN.to_string(),
+    )
+    .unwrap();
+
+    Configuration::load(&info).unwrap()
+}
+
+#[cfg(test)]
+mod tests {
+    use torrust_index::bootstrap::config::initialize_configuration;
+
+    #[test]
+    fn it_should_load_with_default_config() {
+        drop(initialize_configuration());
+    }
+}
diff --git a/tests/e2e/environment.rs b/tests/e2e/environment.rs
new file mode 100644
index 00000000..73652725
--- /dev/null
+++ b/tests/e2e/environment.rs
@@ -0,0 +1,171 @@
+use std::env;
+
+use torrust_index::databases::database;
+use torrust_index::web::api::Version;
+
+use super::config::{initialize_configuration, ENV_VAR_INDEX_SHARED};
+use crate::common::contexts::settings::Settings;
+use crate::environments::{isolated, shared};
+
+enum State {
+    Stopped,
+    RunningShared,
+    RunningIsolated,
+}
+
+/// Test environment for E2E tests. It's a wrapper around the shared or isolated
+/// test environment.
+///
+/// Shared test environment:
+///
+/// - It's an out-of-process test environment.
+/// - It has to be started before running the tests.
+/// - All tests run against the same instance of the server.
+///
+/// Isolated test environment:
+///
+/// - It's an in-process test environment.
+/// - It's started automatically when the test starts.
+/// - Each test runs against a different instance of the server.
+#[derive(Default)]
+pub struct TestEnv {
+    /// Copy of the settings when the test environment was started.
+    starting_settings: Option<Settings>,
+    /// Shared independent test environment, if we started one.
+    shared: Option<shared::TestEnv>,
+    /// Isolated test environment, if we started one.
+    isolated: Option<isolated::TestEnv>,
+}
+
+impl TestEnv {
+    // code-review: consider extracting a trait for test environments. The state
+    // could be only `Running` or `Stopped`, and we could have a single
+    // attribute with the current started test environment (`Option`).
+
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn is_shared(&self) -> bool {
+        self.shared.is_some()
+    }
+
+    pub fn is_isolated(&self) -> bool {
+        self.isolated.is_some()
+    }
+
+    /// It starts the test environment. It can be a shared or isolated test
+    /// environment depending on the value of the `ENV_VAR_INDEX_SHARED` env var.
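+    ///
+    /// A minimal usage sketch (this mirrors how the contract tests call it):
+    ///
+    /// ```text
+    /// let mut env = TestEnv::new();
+    /// env.start(api::Version::V1).await;
+    /// let addr = env.server_socket_addr().expect("the server should be running");
+    /// ```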
+    pub async fn start(&mut self, api_version: Version) {
+        let e2e_shared = ENV_VAR_INDEX_SHARED; // bool
+
+        if let Ok(_e2e_test_env_is_shared) = env::var(e2e_shared) {
+            // Using the shared test env.
+            let shared_env = shared::TestEnv::running().await;
+
+            self.shared = Some(shared_env);
+            self.starting_settings = self.server_settings_for_shared_env().await;
+        } else {
+            // Using an isolated test env.
+            let isolated_env = isolated::TestEnv::running(api_version).await;
+
+            self.isolated = Some(isolated_env);
+            self.starting_settings = self.server_settings_for_isolated_env();
+        }
+    }
+
+    /// Some tests require the real tracker to be running, so they can only
+    /// be run in shared mode.
+    pub fn provides_a_tracker(&self) -> bool {
+        self.is_shared()
+    }
+
+    /// Returns the server starting settings if the server was already started.
+    /// We do not know the settings until we start the server.
+    pub fn server_settings(&self) -> Option<Settings> {
+        self.starting_settings.as_ref().cloned()
+    }
+
+    /// Provides the API server socket address.
+    /// For example: `localhost:3001`.
+    pub fn server_socket_addr(&self) -> Option<String> {
+        match self.state() {
+            State::RunningShared => self.shared.as_ref().unwrap().server_socket_addr(),
+            State::RunningIsolated => self.isolated.as_ref().unwrap().server_socket_addr(),
+            State::Stopped => None,
+        }
+    }
+
+    /// Provides a database connect URL to connect to the database. For example:
+    ///
+    /// `sqlite://storage/database/torrust_index_e2e_testing.db?mode=rwc`.
+    ///
+    /// It's used to run SQL queries against the database needed for some tests.
+    pub fn database_connect_url(&self) -> Option<String> {
+        let internal_connect_url = self
+            .starting_settings
+            .as_ref()
+            .map(|settings| settings.database.connect_url.clone());
+
+        match self.state() {
+            State::RunningShared => {
+                if let Some(db_path) = internal_connect_url {
+                    let maybe_db_driver = database::get_driver(&db_path);
+
+                    return match maybe_db_driver {
+                        Ok(db_driver) => match db_driver {
+                            database::Driver::Sqlite3 => Some(db_path),
+                            database::Driver::Mysql => Some(Self::overwrite_mysql_host(&db_path, "localhost")),
+                        },
+                        Err(_) => None,
+                    };
+                }
+                None
+            }
+            State::RunningIsolated => internal_connect_url,
+            State::Stopped => None,
+        }
+    }
+
+    /// It overrides the "Host" in a `SQLx` database connection URL. For example:
+    ///
+    /// For:
+    ///
+    /// `mysql://root:root_secret_password@mysql:3306/torrust_index_e2e_testing`.
+    ///
+    /// It changes the `mysql` host name to `localhost`:
+    ///
+    /// `mysql://root:root_secret_password@localhost:3306/torrust_index_e2e_testing`.
+    ///
+    /// For E2E tests, we use docker compose; internally the index connects to
+    /// the database using the "mysql" host, which is the docker compose service
+    /// name, but the tests connect directly to `localhost` since `MySQL`
+    /// is exposed to the host.
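+    ///
+    /// A sketch of the expected rewrite, with hypothetical credentials:
+    ///
+    /// ```text
+    /// overwrite_mysql_host("mysql://root:pwd@mysql:3306/db", "localhost")
+    ///     // -> "mysql://root:pwd@localhost:3306/db"
+    /// ```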
+    fn overwrite_mysql_host(db_path: &str, new_host: &str) -> String {
+        db_path.replace("@mysql:", &format!("@{new_host}:"))
+    }
+
+    fn state(&self) -> State {
+        if self.is_shared() {
+            return State::RunningShared;
+        }
+
+        if self.is_isolated() {
+            return State::RunningIsolated;
+        }
+
+        State::Stopped
+    }
+
+    fn server_settings_for_isolated_env(&self) -> Option<Settings> {
+        self.isolated
+            .as_ref()
+            .map(|env| Settings::from(env.app_starter.server_configuration()))
+    }
+
+    async fn server_settings_for_shared_env(&self) -> Option<Settings> {
+        let configuration = initialize_configuration();
+        let settings = configuration.settings.read().await;
+        Some(Settings::from(settings.clone()))
+    }
+}
diff --git a/tests/e2e/mod.rs b/tests/e2e/mod.rs
new file mode 100644
index 00000000..07c53151
--- /dev/null
+++ b/tests/e2e/mod.rs
@@ -0,0 +1,41 @@
+//! End-to-end tests
+//!
+//! These tests can be executed against an out-of-process server (shared) or
+//! against an in-process server (isolated).
+//!
+//! If you want to run the tests against an out-of-process server, you need to
+//! set the environment variable `TORRUST_INDEX_E2E_SHARED` to `true`.
+//!
+//! > **NOTICE**: The server must be running before running the tests. The
+//! server url is hardcoded to `http://localhost:3001` for now. We are planning
+//! to make it configurable in the future via an environment variable.
+//!
+//! ```text
+//! TORRUST_INDEX_E2E_SHARED=true cargo test
+//! ```
+//!
+//! If you want to run the tests against an isolated server, you need to execute
+//! the following command:
+//!
+//! ```text
+//! cargo test
+//! ```
+//!
+//! > **NOTICE**: Some tests require the real tracker to be running, so they
+//! can only be run in shared mode until we implement a mock for the
+//! `torrust_index::tracker::TrackerService`.
+//!
+//! You may have errors like `Too many open files (os error 24)`. If so, you
+//! need to increase the limit of open files for the current user. You can do
+//! it by executing the following command (on Ubuntu):
+//!
+//! ```text
+//! ulimit -n 4096
+//! ```
+//!
+//! You can also make that change permanent; please refer to your OS
+//! documentation for more information.
+pub mod config;
+pub mod environment;
+pub mod web;
diff --git a/tests/e2e/web/api/mod.rs b/tests/e2e/web/api/mod.rs
new file mode 100644
index 00000000..a3a6d96c
--- /dev/null
+++ b/tests/e2e/web/api/mod.rs
@@ -0,0 +1 @@
+pub mod v1;
diff --git a/tests/e2e/web/api/v1/contexts/about/contract.rs b/tests/e2e/web/api/v1/contexts/about/contract.rs
new file mode 100644
index 00000000..41906f26
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/about/contract.rs
@@ -0,0 +1,31 @@
+//! API contract for `about` context.
+
+use torrust_index::web::api;
+
+use crate::common::asserts::{assert_response_title, assert_text_ok};
+use crate::common::client::Client;
+use crate::e2e::environment::TestEnv;
+
+#[tokio::test]
+async fn it_should_load_the_about_page_with_information_about_the_api() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let response = client.about().await;
+
+    assert_text_ok(&response);
+    assert_response_title(&response, "About");
+}
+
+#[tokio::test]
+async fn it_should_load_the_license_page_at_the_api_entrypoint() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let response = client.license().await;
+
+    assert_text_ok(&response);
+    assert_response_title(&response, "Licensing");
+}
diff --git a/tests/e2e/web/api/v1/contexts/about/mod.rs b/tests/e2e/web/api/v1/contexts/about/mod.rs
new file mode 100644
index 00000000..2943dbb5
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/about/mod.rs
@@ -0,0 +1 @@
+pub mod contract;
diff --git a/tests/e2e/web/api/v1/contexts/category/contract.rs b/tests/e2e/web/api/v1/contexts/category/contract.rs
new file mode 100644
index 00000000..b4775bd2
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/category/contract.rs
@@ -0,0 +1,198 @@
+//! API contract for `category` context.
+
+use torrust_index::web::api;
+
+use crate::common::asserts::assert_json_ok_response;
+use crate::common::client::Client;
+use crate::common::contexts::category::asserts::{assert_added_category_response, assert_deleted_category_response};
+use crate::common::contexts::category::fixtures::random_category_name;
+use crate::common::contexts::category::forms::{AddCategoryForm, DeleteCategoryForm};
+use crate::common::contexts::category::responses::ListResponse;
+use crate::e2e::environment::TestEnv;
+use crate::e2e::web::api::v1::contexts::category::steps::{add_category, add_random_category};
+use crate::e2e::web::api::v1::contexts::user::steps::{new_logged_in_admin, new_logged_in_user};
+
+#[tokio::test]
+async fn it_should_return_an_empty_category_list_when_there_are_no_categories() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let response = client.get_categories().await;
+
+    assert_json_ok_response(&response);
+}
+
+#[tokio::test]
+async fn it_should_return_a_category_list() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    add_random_category(&env).await;
+
+    let response = client.get_categories().await;
+
+    let res: ListResponse = serde_json::from_str(&response.body).unwrap();
+
+    // There should be at least the category we added.
+    // Since this is an E2E test and it could be run in a shared test env,
+    // there might be more categories.
+    assert!(!res.data.is_empty());
+    if let Some(content_type) = &response.content_type {
+        assert_eq!(content_type, "application/json");
+    }
+    assert_eq!(response.status, 200);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_adding_a_new_category_to_unauthenticated_users() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let response = client
+        .add_category(AddCategoryForm {
+            name: "CATEGORY NAME".to_string(),
+            icon: None,
+        })
+        .await;
+
+    assert_eq!(response.status, 401);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_adding_a_new_category_to_non_admins() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let logged_non_admin = new_logged_in_user(&env).await;
+
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_non_admin.token);
+
+    let response = client
+        .add_category(AddCategoryForm {
+            name: "CATEGORY NAME".to_string(),
+            icon: None,
+        })
+        .await;
+
+    assert_eq!(response.status, 403);
+}
+
+#[tokio::test]
+async fn it_should_allow_admins_to_add_new_categories() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let logged_in_admin = new_logged_in_admin(&env).await;
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token);
+
+    let category_name = random_category_name();
+
+    let response = client
+        .add_category(AddCategoryForm {
+            name: category_name.to_string(),
+            icon: None,
+        })
+        .await;
+
+    assert_added_category_response(&response, &category_name);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_adding_empty_categories() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let logged_in_admin = new_logged_in_admin(&env).await;
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token);
+
+    let invalid_category_names = vec![String::new(), " ".to_string()];
+
+    for invalid_name in invalid_category_names {
+        let response = client
+            .add_category(AddCategoryForm {
+                name: invalid_name,
+                icon: None,
+            })
+            .await;
+
+        assert_eq!(response.status, 400);
+    }
+}
+
+#[tokio::test]
+async fn it_should_not_allow_adding_duplicated_categories() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let added_category_name = add_random_category(&env).await;
+
+    // Try to add the same category again
+    let response = add_category(&added_category_name, &env).await;
+
+    assert_eq!(response.status, 400);
+}
+
+#[tokio::test]
+async fn it_should_allow_admins_to_delete_categories() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let logged_in_admin = new_logged_in_admin(&env).await;
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token);
+
+    let added_category_name = add_random_category(&env).await;
+
+    let response = client
+        .delete_category(DeleteCategoryForm {
+            name: added_category_name.to_string(),
+            icon: None,
+        })
+        .await;
+
+    assert_deleted_category_response(&response, &added_category_name);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_non_admins_to_delete_categories() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let added_category_name = add_random_category(&env).await;
+
+    let logged_in_non_admin = new_logged_in_user(&env).await;
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_non_admin.token);
+
+    let response = client
+        .delete_category(DeleteCategoryForm {
+            name: added_category_name.to_string(),
+            icon: None,
+        })
+        .await;
+
+    assert_eq!(response.status, 403);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_guests_to_delete_categories() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let added_category_name = add_random_category(&env).await;
+
+    let response = client
+        .delete_category(DeleteCategoryForm {
+            name: added_category_name.to_string(),
+            icon: None,
+        })
+        .await;
+
+    assert_eq!(response.status, 401);
+}
diff --git a/tests/e2e/web/api/v1/contexts/category/mod.rs b/tests/e2e/web/api/v1/contexts/category/mod.rs
new file mode 100644
index 00000000..2001efb8
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/category/mod.rs
@@ -0,0 +1,2 @@
+pub mod contract;
+pub mod steps;
diff --git a/tests/e2e/web/api/v1/contexts/category/steps.rs b/tests/e2e/web/api/v1/contexts/category/steps.rs
new file mode 100644
index 00000000..cca5b8ae
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/category/steps.rs
@@ -0,0 +1,32 @@
+use crate::common::client::Client;
+use crate::common::contexts::category::fixtures::random_category_name;
+use crate::common::contexts::category::forms::AddCategoryForm;
+use crate::common::contexts::category::responses::AddedCategoryResponse;
+use crate::common::responses::TextResponse;
+use crate::e2e::environment::TestEnv;
+use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_admin;
+
+/// Add a random category and return its name.
+pub async fn add_random_category(env: &TestEnv) -> String {
+    let category_name = random_category_name();
+
+    let response = add_category(&category_name, env).await;
+
+    let res: AddedCategoryResponse = serde_json::from_str(&response.body)
+        .unwrap_or_else(|_| panic!("response {:#?} should be an AddedCategoryResponse", response.body));
+
+    res.data
+}
+
+pub async fn add_category(category_name: &str, env: &TestEnv) -> TextResponse {
+    let logged_in_admin = new_logged_in_admin(env).await;
+
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token);
+
+    client
+        .add_category(AddCategoryForm {
+            name: category_name.to_string(),
+            icon: None,
+        })
+        .await
+}
diff --git a/tests/e2e/web/api/v1/contexts/mod.rs b/tests/e2e/web/api/v1/contexts/mod.rs
new file mode 100644
index 00000000..797781be
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/mod.rs
@@ -0,0 +1,8 @@
+pub mod about;
+pub mod category;
+pub mod proxy;
+pub mod root;
+pub mod settings;
+pub mod tag;
+pub mod torrent;
+pub mod user;
diff --git a/tests/e2e/web/api/v1/contexts/proxy/contract.rs b/tests/e2e/web/api/v1/contexts/proxy/contract.rs
new file mode 100644
index 00000000..0b63dfc4
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/proxy/contract.rs
@@ -0,0 +1,3 @@
+//! API contract for `proxy` context.
+
+// todo
diff --git a/tests/e2e/web/api/v1/contexts/proxy/mod.rs b/tests/e2e/web/api/v1/contexts/proxy/mod.rs
new file mode 100644
index 00000000..2943dbb5
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/proxy/mod.rs
@@ -0,0 +1 @@
+pub mod contract;
diff --git a/tests/e2e/web/api/v1/contexts/root/contract.rs b/tests/e2e/web/api/v1/contexts/root/contract.rs
new file mode 100644
index 00000000..a8c5c04b
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/root/contract.rs
@@ -0,0 +1,20 @@
+//! API contract for `root` context.
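+//!
+//! The API root (`/`) serves the same About page as the `about` context, which
+//! is what the test below asserts.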
+
+use torrust_index::web::api;
+
+use crate::common::asserts::{assert_response_title, assert_text_ok};
+use crate::common::client::Client;
+use crate::e2e::environment::TestEnv;
+
+#[tokio::test]
+async fn it_should_load_the_about_page_at_the_api_entrypoint() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let response = client.root().await;
+
+    assert_text_ok(&response);
+    assert_response_title(&response, "About");
+}
diff --git a/tests/e2e/web/api/v1/contexts/root/mod.rs b/tests/e2e/web/api/v1/contexts/root/mod.rs
new file mode 100644
index 00000000..2943dbb5
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/root/mod.rs
@@ -0,0 +1 @@
+pub mod contract;
diff --git a/tests/e2e/web/api/v1/contexts/settings/contract.rs b/tests/e2e/web/api/v1/contexts/settings/contract.rs
new file mode 100644
index 00000000..fbef5659
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/settings/contract.rs
@@ -0,0 +1,67 @@
+//! API contract for `settings` context.
+
+use torrust_index::web::api;
+
+use crate::common::asserts::assert_json_ok_response;
+use crate::common::client::Client;
+use crate::common::contexts::settings::responses::{AllSettingsResponse, Public, PublicSettingsResponse, SiteNameResponse};
+use crate::e2e::environment::TestEnv;
+use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_admin;
+
+#[tokio::test]
+async fn it_should_allow_guests_to_get_the_public_settings() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let response = client.get_public_settings().await;
+
+    let res: PublicSettingsResponse = serde_json::from_str(&response.body)
+        .unwrap_or_else(|_| panic!("response {:#?} should be a PublicSettingsResponse", response.body));
+
+    assert_eq!(
+        res.data,
+        Public {
+            website_name: env.server_settings().unwrap().website.name,
+            tracker_url: env.server_settings().unwrap().tracker.url,
+            tracker_mode: env.server_settings().unwrap().tracker.mode,
+            email_on_signup: env.server_settings().unwrap().auth.email_on_signup,
+        }
+    );
+
+    assert_json_ok_response(&response);
+}
+
+#[tokio::test]
+async fn it_should_allow_guests_to_get_the_site_name() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let response = client.get_site_name().await;
+
+    let res: SiteNameResponse = serde_json::from_str(&response.body).unwrap();
+
+    assert_eq!(res.data, "Torrust");
+
+    assert_json_ok_response(&response);
+}
+
+#[tokio::test]
+async fn it_should_allow_admins_to_get_all_the_settings() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let logged_in_admin = new_logged_in_admin(&env).await;
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token);
+
+    let response = client.get_settings().await;
+
+    let res: AllSettingsResponse = serde_json::from_str(&response.body).unwrap();
+
+    assert_eq!(res.data, env.server_settings().unwrap());
+
+    assert_json_ok_response(&response);
+}
diff --git a/tests/e2e/web/api/v1/contexts/settings/mod.rs b/tests/e2e/web/api/v1/contexts/settings/mod.rs
new file mode 100644
index 00000000..2943dbb5
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/settings/mod.rs
@@ -0,0 +1 @@
+pub mod contract;
diff --git a/tests/e2e/web/api/v1/contexts/tag/contract.rs b/tests/e2e/web/api/v1/contexts/tag/contract.rs
new file mode 100644
index 00000000..77771d49
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/tag/contract.rs
@@ -0,0 +1,178 @@
+//! API contract for `tag` context.
+
+use torrust_index::web::api;
+
+use crate::common::asserts::assert_json_ok_response;
+use crate::common::client::Client;
+use crate::common::contexts::tag::asserts::{assert_added_tag_response, assert_deleted_tag_response};
+use crate::common::contexts::tag::fixtures::random_tag_name;
+use crate::common::contexts::tag::forms::{AddTagForm, DeleteTagForm};
+use crate::common::contexts::tag::responses::ListResponse;
+use crate::e2e::environment::TestEnv;
+use crate::e2e::web::api::v1::contexts::tag::steps::{add_random_tag, add_tag};
+use crate::e2e::web::api::v1::contexts::user::steps::{new_logged_in_admin, new_logged_in_user};
+
+#[tokio::test]
+async fn it_should_return_an_empty_tag_list_when_there_are_no_tags() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let response = client.get_tags().await;
+
+    assert_json_ok_response(&response);
+}
+
+#[tokio::test]
+async fn it_should_return_a_tag_list() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    // Add a tag
+    let tag_name = random_tag_name();
+    let response = add_tag(&tag_name, &env).await;
+    assert_eq!(response.status, 200);
+
+    let response = client.get_tags().await;
+
+    let res: ListResponse = serde_json::from_str(&response.body).unwrap();
+
+    // There should be at least the tag we added.
+    // Since this is an E2E test that could be executed in a shared env,
+    // there might be more tags.
+    assert!(!res.data.is_empty());
+    if let Some(content_type) = &response.content_type {
+        assert_eq!(content_type, "application/json");
+    }
+    assert_eq!(response.status, 200);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_adding_a_new_tag_to_unauthenticated_users() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let response = client
+        .add_tag(AddTagForm {
+            name: "TAG NAME".to_string(),
+        })
+        .await;
+
+    assert_eq!(response.status, 401);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_adding_a_new_tag_to_non_admins() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let logged_non_admin = new_logged_in_user(&env).await;
+
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_non_admin.token);
+
+    let response = client
+        .add_tag(AddTagForm {
+            name: "TAG NAME".to_string(),
+        })
+        .await;
+
+    assert_eq!(response.status, 403);
+}
+
+#[tokio::test]
+async fn it_should_allow_admins_to_add_new_tags() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let logged_in_admin = new_logged_in_admin(&env).await;
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token);
+
+    let tag_name = random_tag_name();
+
+    let response = client
+        .add_tag(AddTagForm {
+            name: tag_name.to_string(),
+        })
+        .await;
+
+    assert_added_tag_response(&response, &tag_name);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_adding_duplicated_tags() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    // Add a tag
+    let random_tag_name = random_tag_name();
+    let response = add_tag(&random_tag_name, &env).await;
+    assert_eq!(response.status, 200);
+
+    // Try to add the same tag again
+    let response = add_tag(&random_tag_name, &env).await;
+
+    assert_eq!(response.status, 400);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_adding_a_tag_with_an_empty_name() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let invalid_tag_names = vec![String::new(), " ".to_string()];
+
+    for invalid_name in invalid_tag_names {
+        let response = add_tag(&invalid_name, &env).await;
+
+        assert_eq!(response.status, 400);
+    }
+}
+
+#[tokio::test]
+async fn it_should_allow_admins_to_delete_tags() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let logged_in_admin = new_logged_in_admin(&env).await;
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token);
+
+    let (tag_id, _tag_name) = add_random_tag(&env).await;
+
+    let response = client.delete_tag(DeleteTagForm { tag_id }).await;
+
+    assert_deleted_tag_response(&response, tag_id);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_non_admins_to_delete_tags() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let logged_in_non_admin = new_logged_in_user(&env).await;
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_non_admin.token);
+
+    let (tag_id, _tag_name) = add_random_tag(&env).await;
+
+    let response = client.delete_tag(DeleteTagForm { tag_id }).await;
+
+    assert_eq!(response.status, 403);
+}
+
+#[tokio::test]
+async fn it_should_not_allow_guests_to_delete_tags() {
+    let mut env = TestEnv::new();
+    env.start(api::Version::V1).await;
+
+    let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+    let (tag_id, _tag_name) = add_random_tag(&env).await;
+
+    let response = client.delete_tag(DeleteTagForm { tag_id }).await;
+
+    assert_eq!(response.status, 401);
+}
diff --git a/tests/e2e/web/api/v1/contexts/tag/mod.rs b/tests/e2e/web/api/v1/contexts/tag/mod.rs
new file mode 100644
index 00000000..2001efb8
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/tag/mod.rs
@@ -0,0 +1,2 @@
+pub mod contract;
+pub mod steps;
diff --git a/tests/e2e/web/api/v1/contexts/tag/steps.rs b/tests/e2e/web/api/v1/contexts/tag/steps.rs
new file mode 100644
index 00000000..0e59d0ec
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/tag/steps.rs
@@ -0,0 +1,39 @@
+use crate::common::client::Client;
+use crate::common::contexts::tag::fixtures::random_tag_name;
+use crate::common::contexts::tag::forms::AddTagForm;
+use crate::common::contexts::tag::responses::ListResponse;
+use crate::common::responses::TextResponse;
+use crate::e2e::environment::TestEnv;
+use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_admin;
+
+pub async fn add_random_tag(env: &TestEnv) -> (i64, String) {
+    let tag_name = random_tag_name();
+
+    add_tag(&tag_name, env).await;
+
+    let tag_id = get_tag_id(&tag_name, env).await;
+
+    (tag_id, tag_name)
+}
+
+pub async fn add_tag(tag_name: &str, env: &TestEnv) -> TextResponse {
+    let logged_in_admin = new_logged_in_admin(env).await;
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token);
+
+    client
+        .add_tag(AddTagForm {
+            name: tag_name.to_string(),
+        })
+        .await
+}
+
+pub async fn get_tag_id(tag_name: &str, env: &TestEnv) -> i64 {
+    let logged_in_admin = new_logged_in_admin(env).await;
+    let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token);
+
+    let response = client.get_tags().await;
+
+    let res: ListResponse = serde_json::from_str(&response.body).unwrap();
+
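+    // Look up the id of the tag we just added by scanning the whole tag list
+    // for a matching name (`find_tag_id` is a helper on `ListResponse`).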
+    res.find_tag_id(tag_name)
+}
diff --git a/tests/e2e/web/api/v1/contexts/torrent/asserts.rs b/tests/e2e/web/api/v1/contexts/torrent/asserts.rs
new file mode 100644
index 00000000..376f9c82
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/torrent/asserts.rs
@@ -0,0 +1,81 @@
+use std::sync::Arc;
+
+use torrust_index::databases::database;
+use torrust_index::models::torrent_file::Torrent;
+use torrust_index::models::tracker_key::TrackerKey;
+
+use crate::common::contexts::user::responses::LoggedInUserData;
+use crate::e2e::environment::TestEnv;
+
+/// The index does not generate exactly the same torrent that was uploaded.
+///
+/// The index stores the canonical version of the uploaded torrent. So we need
+/// to update the expected torrent to match the one generated by the index.
+pub async fn canonical_torrent_for(
+    mut uploaded_torrent: Torrent,
+    env: &TestEnv,
+    downloader: &Option<LoggedInUserData>,
+) -> Torrent {
+    let tracker_url = env.server_settings().unwrap().tracker.url.to_string();
+
+    let tracker_key = match downloader {
+        Some(logged_in_user) => get_user_tracker_key(logged_in_user, env).await,
+        None => None,
+    };
+
+    uploaded_torrent.announce = Some(build_announce_url(&tracker_url, &tracker_key));
+    uploaded_torrent.announce_list = Some(build_announce_list(&tracker_url, &tracker_key));
+
+    // These fields are not persisted in the database yet.
+    // See https://github.com/torrust/torrust-index/issues/284
+    // They are ignored when the user uploads the torrent, so the stored
+    // canonical torrent does not contain them.
+    uploaded_torrent.encoding = None;
+    uploaded_torrent.creation_date = None;
+    uploaded_torrent.created_by = None;
+
+    uploaded_torrent
+}
+
+pub async fn get_user_tracker_key(logged_in_user: &LoggedInUserData, env: &TestEnv) -> Option<TrackerKey> {
+    // code-review: could we add a new endpoint to get the user's tracker key?
+    // `/user/keys/recent` or `/user/keys/latest`.
+    // We could use that endpoint to get the user's tracker key instead of
+    // querying the database.
+
+    let database = Arc::new(
+        database::connect(&env.database_connect_url().unwrap())
+            .await
+            .expect("database connection to be established."),
+    );
+
+    // Get the logged-in user id
+    let user_profile = database
+        .get_user_profile_from_username(&logged_in_user.username)
+        .await
+        .unwrap();
+
+    // Get the user's tracker key
+    let tracker_key = database
+        .get_user_tracker_key(user_profile.user_id)
+        .await
+        .expect("user to have a tracker key");
+
+    Some(tracker_key)
+}
+
+pub fn build_announce_url(tracker_url: &str, tracker_key: &Option<TrackerKey>) -> String {
+    if let Some(key) = &tracker_key {
+        format!("{tracker_url}/{}", key.key)
+    } else {
+        tracker_url.to_string()
+    }
+}
+
+fn build_announce_list(tracker_url: &str, tracker_key: &Option<TrackerKey>) -> Vec<Vec<String>> {
+    if let Some(key) = &tracker_key {
+        vec![vec![format!("{tracker_url}/{}", key.key)], vec![format!("{tracker_url}")]]
+    } else {
+        vec![vec![format!("{tracker_url}")]]
+    }
+}
diff --git a/tests/e2e/web/api/v1/contexts/torrent/contract.rs b/tests/e2e/web/api/v1/contexts/torrent/contract.rs
new file mode 100644
index 00000000..1ebf8f70
--- /dev/null
+++ b/tests/e2e/web/api/v1/contexts/torrent/contract.rs
@@ -0,0 +1,1008 @@
+//! API contract for `torrent` context.
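+//!
+//! Note: the index stores a canonical version of each uploaded torrent, so
+//! several uploads whose `info` dicts differ only in non-standard fields can
+//! resolve to the same canonical info-hash (the "Canonical Infohash Group"
+//! exercised by some tests below).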
+
+/*
+todo:
+
+Delete torrent:
+
+- After deleting a torrent, it should be removed from the tracker whitelist
+
+Get torrent info:
+
+- The torrent info:
+    - should contain the magnet link with the trackers from the torrent file
+    - should contain realtime seeders and leechers from the tracker
+*/
+
+mod for_guests {
+
+    use torrust_index::utils::parse_torrent::decode_torrent;
+    use torrust_index::web::api;
+    use uuid::Uuid;
+
+    use crate::common::client::Client;
+    use crate::common::contexts::category::fixtures::software_predefined_category_id;
+    use crate::common::contexts::torrent::asserts::assert_expected_torrent_details;
+    use crate::common::contexts::torrent::fixtures::{random_torrent, TestTorrent};
+    use crate::common::contexts::torrent::forms::UploadTorrentMultipartForm;
+    use crate::common::contexts::torrent::requests::InfoHash;
+    use crate::common::contexts::torrent::responses::{
+        Category, File, TorrentDetails, TorrentDetailsResponse, TorrentListResponse,
+    };
+    use crate::common::http::{Query, QueryParam};
+    use crate::e2e::environment::TestEnv;
+    use crate::e2e::web::api::v1::contexts::torrent::steps::{upload_random_torrent_to_index, upload_test_torrent};
+    use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_user;
+
+    #[tokio::test]
+    async fn it_should_allow_guests_to_get_torrents() {
+        let mut env = TestEnv::new();
+        env.start(api::Version::V1).await;
+
+        if !env.provides_a_tracker() {
+            println!("test skipped. It requires a tracker to be running.");
+            return;
+        }
+
+        let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+        let uploader = new_logged_in_user(&env).await;
+        let (_test_torrent, _indexed_torrent) = upload_random_torrent_to_index(&uploader, &env).await;
+
+        let response = client.get_torrents(Query::empty()).await;
+
+        let torrent_list_response: TorrentListResponse = serde_json::from_str(&response.body).unwrap();
+
+        assert!(torrent_list_response.data.total > 0);
+        assert!(response.is_json_and_ok());
+    }
+
+    #[tokio::test]
+    async fn it_should_allow_to_get_torrents_with_pagination() {
+        let mut env = TestEnv::new();
+        env.start(api::Version::V1).await;
+
+        if !env.provides_a_tracker() {
+            println!("test skipped. It requires a tracker to be running.");
+            return;
+        }
+
+        let uploader = new_logged_in_user(&env).await;
+
+        // Given we insert two torrents
+        let (_test_torrent, _indexed_torrent) = upload_random_torrent_to_index(&uploader, &env).await;
+        let (_test_torrent, _indexed_torrent) = upload_random_torrent_to_index(&uploader, &env).await;
+
+        let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+        // When we request only one torrent per page
+        let response = client
+            .get_torrents(Query::with_params([QueryParam::new("page_size", "1")].to_vec()))
+            .await;
+
+        let torrent_list_response: TorrentListResponse = serde_json::from_str(&response.body).unwrap();
+
+        // Then we should have only one torrent per page
+        assert_eq!(torrent_list_response.data.results.len(), 1);
+        assert!(response.is_json_and_ok());
+    }
+
+    #[tokio::test]
+    async fn it_should_allow_to_limit_the_number_of_torrents_per_request() {
+        let mut env = TestEnv::new();
+        env.start(api::Version::V1).await;
+
+        if !env.provides_a_tracker() {
+            println!("test skipped. It requires a tracker to be running.");
+            return;
+        }
+
+        let uploader = new_logged_in_user(&env).await;
+
+        let max_torrent_page_size = 30;
+
+        // Given we insert one torrent more than the page size limit
+        for _ in 0..=max_torrent_page_size {
+            let (_test_torrent, _indexed_torrent) = upload_random_torrent_to_index(&uploader, &env).await;
+        }
+
+        let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+        // When we request more torrents than the page size limit
+        let response = client
+            .get_torrents(Query::with_params(
+                [QueryParam::new("page_size", &format!("{}", (max_torrent_page_size + 1)))].to_vec(),
+            ))
+            .await;
+
+        let torrent_list_response: TorrentListResponse = serde_json::from_str(&response.body).unwrap();
+
+        // Then we should get only the page size limit
+        assert_eq!(torrent_list_response.data.results.len(), max_torrent_page_size);
+        assert!(response.is_json_and_ok());
+    }
+
+    #[tokio::test]
+    async fn it_should_return_a_default_amount_of_torrents_per_request_if_no_page_size_is_provided() {
+        let mut env = TestEnv::new();
+        env.start(api::Version::V1).await;
+
+        if !env.provides_a_tracker() {
+            println!("test skipped. It requires a tracker to be running.");
+            return;
+        }
+
+        let uploader = new_logged_in_user(&env).await;
+
+        let default_torrent_page_size = 10;
+
+        // Given we insert one torrent more than the default page size
+        for _ in 0..=default_torrent_page_size {
+            let (_test_torrent, _indexed_torrent) = upload_random_torrent_to_index(&uploader, &env).await;
+        }
+
+        let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+        // When we request more torrents than the default page size limit
+        let response = client.get_torrents(Query::empty()).await;
+
+        let torrent_list_response: TorrentListResponse = serde_json::from_str(&response.body).unwrap();
+
+        // Then we should get only the default number of torrents per page
+        assert_eq!(torrent_list_response.data.results.len(), default_torrent_page_size);
+        assert!(response.is_json_and_ok());
+    }
+
+    #[tokio::test]
+    async fn it_should_allow_guests_to_get_torrent_details_searching_by_info_hash() {
+        let mut env = TestEnv::new();
+        env.start(api::Version::V1).await;
+
+        if !env.provides_a_tracker() {
+            println!("test skipped. It requires a tracker to be running.");
+            return;
+        }
+
+        let client = Client::unauthenticated(&env.server_socket_addr().unwrap());
+
+        let uploader = new_logged_in_user(&env).await;
+        let (test_torrent, uploaded_torrent) = upload_random_torrent_to_index(&uploader, &env).await;
+
+        let response = client.get_torrent(&test_torrent.file_info_hash()).await;
+
+        let torrent_details_response: TorrentDetailsResponse = serde_json::from_str(&response.body).unwrap();
+
+        let tracker_url = env.server_settings().unwrap().tracker.url;
+        let encoded_tracker_url = urlencoding::encode(&tracker_url);
+
+        let expected_torrent = TorrentDetails {
+            torrent_id: uploaded_torrent.torrent_id,
+            uploader: uploader.username,
+            info_hash: test_torrent.file_info.info_hash.to_lowercase(),
+            title: test_torrent.index_info.title.clone(),
+            description: test_torrent.index_info.description,
+            category: Category {
+                category_id: software_predefined_category_id(),
+                name: test_torrent.index_info.category,
+                num_torrents: 19, // Ignored in assertion
+            },
+            upload_date: "2023-04-27 07:56:08".to_string(), // Ignored in assertion
+            file_size: test_torrent.file_info.content_size,
+            seeders: 0,
+            leechers: 0,
+            files: vec![File {
+                path: vec![test_torrent.file_info.files[0].clone()],
+                // Using a one-file torrent for testing: content_size = first file size
+                length: test_torrent.file_info.content_size,
+                md5sum: None,
+            }],
+            // code-review: why is this duplicated? It seems that it is adding the
+            // same tracker twice because first it adds all trackers and then
+            // it adds the tracker with the personal announce url, if the user
+            // is logged in. If the user is not logged in, it adds the default
+            // tracker again, and it ends up with two trackers.
+            trackers: vec![tracker_url.clone(), tracker_url.clone()],
+            magnet_link: format!(
+                // cspell:disable-next-line
+                "magnet:?xt=urn:btih:{}&dn={}&tr={}&tr={}",
+                test_torrent.file_info.info_hash.to_lowercase(),
+                urlencoding::encode(&test_torrent.index_info.title),
+                encoded_tracker_url,
+                encoded_tracker_url
+            ),
+            tags: vec![],
+            name: test_torrent.index_info.name.clone(),
+            comment: test_torrent.file_info.comment.clone(),
+        };
+
+        assert_expected_torrent_details(&torrent_details_response.data, &expected_torrent);
+        assert!(response.is_json_and_ok());
+    }
+
+    #[tokio::test]
+    async fn it_should_allow_guests_to_find_torrent_details_using_a_non_canonical_info_hash() {
+        let mut env = TestEnv::new();
+        env.start(api::Version::V1).await;
+
+        if !env.provides_a_tracker() {
+            println!("test skipped. It requires a tracker to be running.");
+            return;
+        }
+
+        let uploader = new_logged_in_user(&env).await;
+        let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token);
+
+        // Sample data needed to build two torrents with the same canonical info-hash.
+        // Those torrents belong to the same Canonical Infohash Group.
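+        // (The two torrents below share the same `info` dict except for the
+        // custom field, so the index maps both original info-hashes to a
+        // single canonical torrent.)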
+ let id = Uuid::new_v4(); + let title = format!("title-{id}"); + let file_contents = "data".to_string(); + + // Upload the first torrent + let mut first_torrent = TestTorrent::with_custom_info_dict_field(id, &file_contents, "custom 01"); + first_torrent.index_info.title = title.clone(); + + let first_torrent_canonical_info_hash = upload_test_torrent(&client, &first_torrent) + .await + .expect("first torrent should be uploaded"); + + // Upload the second torrent with the same canonical info-hash + let mut second_torrent = TestTorrent::with_custom_info_dict_field(id, &file_contents, "custom 02"); + second_torrent.index_info.title = format!("{title}-clone"); + + let _result = upload_test_torrent(&client, &second_torrent).await; + + // Get torrent details using the non-canonical info-hash (second torrent info-hash) + let response = client.get_torrent(&second_torrent.file_info_hash()).await; + let torrent_details_response: TorrentDetailsResponse = serde_json::from_str(&response.body).unwrap(); + + // The returned torrent info should be the same as the first torrent + assert_eq!(response.status, 200); + assert_eq!( + torrent_details_response.data.info_hash, + first_torrent_canonical_info_hash.to_hex_string() + ); + } + + mod it_should_allow_guests_to_download_a_torrent_file_searching_by_info_hash { + + use torrust_index::utils::parse_torrent::{calculate_info_hash, decode_torrent}; + use torrust_index::web::api; + + use crate::common::client::Client; + use crate::e2e::environment::TestEnv; + use crate::e2e::web::api::v1::contexts::torrent::asserts::canonical_torrent_for; + use crate::e2e::web::api::v1::contexts::torrent::steps::upload_random_torrent_to_index; + use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_user; + + #[tokio::test] + async fn returning_a_bittorrent_binary_ok_response() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. It requires a tracker to be running."); + return; + } + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + let uploader = new_logged_in_user(&env).await; + + // Upload + let (test_torrent, _torrent_listed_in_index) = upload_random_torrent_to_index(&uploader, &env).await; + + // Download + let response = client.download_torrent(&test_torrent.file_info_hash()).await; + + assert!(response.is_a_bit_torrent_file()); + } + + #[tokio::test] + async fn the_downloaded_torrent_should_keep_the_same_info_hash_if_the_torrent_does_not_have_non_standard_fields_in_the_info_dict( + ) { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. 
It requires a tracker to be running."); + return; + } + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + let uploader = new_logged_in_user(&env).await; + + // Upload + let (test_torrent, _torrent_listed_in_index) = upload_random_torrent_to_index(&uploader, &env).await; + + // Download + let response = client.download_torrent(&test_torrent.file_info_hash()).await; + + let downloaded_torrent_info_hash = + calculate_info_hash(&response.bytes).expect("failed to calculate info-hash of the downloaded torrent"); + + assert_eq!( + downloaded_torrent_info_hash.to_hex_string(), + test_torrent.file_info_hash(), + "downloaded torrent info-hash does not match uploaded torrent info-hash" + ); + } + + #[tokio::test] + async fn the_downloaded_torrent_should_be_the_canonical_version_of_the_uploaded_one() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. It requires a tracker to be running."); + return; + } + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + let uploader = new_logged_in_user(&env).await; + + // Upload + let (test_torrent, _torrent_listed_in_index) = upload_random_torrent_to_index(&uploader, &env).await; + + let uploaded_torrent = + decode_torrent(&test_torrent.index_info.torrent_file.contents).expect("could not decode uploaded torrent"); + + // Download + let response = client.download_torrent(&test_torrent.file_info_hash()).await; + + let downloaded_torrent = decode_torrent(&response.bytes).expect("could not decode downloaded torrent"); + + let expected_downloaded_torrent = canonical_torrent_for(uploaded_torrent, &env, &None).await; + + assert_eq!(downloaded_torrent, expected_downloaded_torrent); + } + } + + #[tokio::test] + async fn it_should_allow_guests_to_download_a_torrent_using_a_non_canonical_info_hash() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. It requires a tracker to be running."); + return; + } + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + // Sample data needed to build two torrents with the same canonical info-hash. + // Those torrents belong to the same Canonical Infohash Group. 
+ let id = Uuid::new_v4(); + let title = format!("title-{id}"); + let file_contents = "data".to_string(); + + // Upload the first torrent + let mut first_torrent = TestTorrent::with_custom_info_dict_field(id, &file_contents, "custom 01"); + first_torrent.index_info.title = title.clone(); + + let first_torrent_canonical_info_hash = upload_test_torrent(&client, &first_torrent) + .await + .expect("first torrent should be uploaded"); + + // Upload the second torrent with the same canonical info-hash + let mut second_torrent = TestTorrent::with_custom_info_dict_field(id, &file_contents, "custom 02"); + second_torrent.index_info.title = format!("{title}-clone"); + + let _result = upload_test_torrent(&client, &second_torrent).await; + + // Download the torrent using the non-canonical info-hash (second torrent info-hash) + let response = client.download_torrent(&second_torrent.file_info_hash()).await; + + let torrent = decode_torrent(&response.bytes).expect("could not decode downloaded torrent"); + + // The returned torrent info-hash should be the same as the first torrent + assert_eq!(response.status, 200); + assert_eq!( + torrent.canonical_info_hash_hex(), + first_torrent_canonical_info_hash.to_hex_string() + ); + } + + #[tokio::test] + async fn it_should_return_a_not_found_response_trying_to_get_the_torrent_info_for_a_non_existing_torrent() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + + let non_existing_info_hash: InfoHash = "443c7602b4fde83d1154d6d9da48808418b181b6".to_string(); + + let response = client.get_torrent(&non_existing_info_hash).await; + + assert_eq!(response.status, 404); + } + + #[tokio::test] + async fn it_should_return_a_not_found_trying_to_download_a_non_existing_torrent() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + + let non_existing_info_hash: InfoHash = "443c7602b4fde83d1154d6d9da48808418b181b6".to_string(); + + let response = client.download_torrent(&non_existing_info_hash).await; + + assert_eq!(response.status, 404); + } + + #[tokio::test] + async fn it_should_not_allow_guests_to_upload_torrents() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + + let test_torrent = random_torrent(); + + let form: UploadTorrentMultipartForm = test_torrent.index_info.into(); + + let response = client.upload_torrent(form.into()).await; + + assert_eq!(response.status, 401); + } + + #[tokio::test] + async fn it_should_not_allow_guests_to_delete_torrents() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. 
It requires a tracker to be running."); + return; + } + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + + let uploader = new_logged_in_user(&env).await; + let (test_torrent, _uploaded_torrent) = upload_random_torrent_to_index(&uploader, &env).await; + + let response = client.delete_torrent(&test_torrent.file_info_hash()).await; + + assert_eq!(response.status, 401); + } +} + +mod for_authenticated_users { + + use torrust_index::utils::parse_torrent::decode_torrent; + use torrust_index::web::api; + + use crate::common::client::Client; + use crate::e2e::environment::TestEnv; + use crate::e2e::web::api::v1::contexts::torrent::asserts::{build_announce_url, get_user_tracker_key}; + use crate::e2e::web::api::v1::contexts::torrent::steps::upload_random_torrent_to_index; + use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_user; + + mod uploading_a_torrent { + + use torrust_index::web::api; + use uuid::Uuid; + + use crate::common::asserts::assert_json_error_response; + use crate::common::client::Client; + use crate::common::contexts::torrent::fixtures::{random_torrent, TestTorrent}; + use crate::common::contexts::torrent::forms::UploadTorrentMultipartForm; + use crate::common::contexts::torrent::responses::UploadedTorrentResponse; + use crate::e2e::environment::TestEnv; + use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_user; + + #[tokio::test] + async fn it_should_allow_authenticated_users_to_upload_new_torrents() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. It requires a tracker to be running."); + return; + } + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let test_torrent = random_torrent(); + let info_hash = test_torrent.file_info_hash().clone(); + + let form: UploadTorrentMultipartForm = test_torrent.index_info.into(); + + let response = client.upload_torrent(form.into()).await; + + let uploaded_torrent_response: UploadedTorrentResponse = serde_json::from_str(&response.body).unwrap(); + + assert_eq!( + uploaded_torrent_response.data.info_hash.to_lowercase(), + info_hash.to_lowercase() + ); + assert!(response.is_json_and_ok()); + } + + mod it_should_guard_that_torrent_metadata { + use torrust_index::web::api; + + use crate::common::client::Client; + use crate::common::contexts::torrent::fixtures::random_torrent; + use crate::common::contexts::torrent::forms::UploadTorrentMultipartForm; + use crate::e2e::environment::TestEnv; + use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_user; + + #[tokio::test] + async fn contains_a_non_empty_category_name() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let mut test_torrent = random_torrent(); + + test_torrent.index_info.category = String::new(); + + let form: UploadTorrentMultipartForm = test_torrent.index_info.into(); + + let response = client.upload_torrent(form.into()).await; + + assert_eq!(response.status, 400); + } + + #[tokio::test] + async fn contains_a_non_empty_title() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let mut test_torrent = 
random_torrent(); + + test_torrent.index_info.title = String::new(); + + let form: UploadTorrentMultipartForm = test_torrent.index_info.into(); + + let response = client.upload_torrent(form.into()).await; + + assert_eq!(response.status, 400); + } + + #[tokio::test] + async fn title_has_at_least_3_chars() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let mut test_torrent = random_torrent(); + + test_torrent.index_info.title = "12".to_string(); + + let form: UploadTorrentMultipartForm = test_torrent.index_info.into(); + + let response = client.upload_torrent(form.into()).await; + + assert_eq!(response.status, 400); + } + } + + mod it_should_guard_that_the_torrent_file { + + use torrust_index::web::api; + + use crate::common::client::Client; + use crate::common::contexts::torrent::fixtures::random_torrent; + use crate::common::contexts::torrent::forms::UploadTorrentMultipartForm; + use crate::e2e::environment::TestEnv; + use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_user; + + #[tokio::test] + async fn contains_a_bencoded_dictionary_with_the_info_key_in_order_to_calculate_the_original_info_hash() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let mut test_torrent = random_torrent(); + + // Make the random torrent invalid by changing the bytes of the torrent file + let minimal_bencoded_value = b"de".to_vec(); + test_torrent.index_info.torrent_file.contents = minimal_bencoded_value; + + let form: UploadTorrentMultipartForm = test_torrent.index_info.into(); + + let response = client.upload_torrent(form.into()).await; + + assert_eq!(response.status, 400); + } + + #[tokio::test] + async fn contains_a_valid_metainfo_file() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let mut test_torrent = random_torrent(); + + // Make the random torrent invalid by changing the bytes of the torrent file. + // It's valid bencode but an invalid torrent. It contains an + // `info` key; otherwise the test that validates the `info` key would fail.
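+                // A bencode breakdown of the value used below (bencode is a
+                // standard format, so this is just notation, not project
+                // behavior): `d...e` delimits a dictionary and `N:` prefixes a
+                // string of N bytes, so `d4:infod6:custom6:customee` decodes
+                // to `{ "info": { "custom": "custom" } }` -- parseable bencode
+                // with an `info` key, but not a valid metainfo file (it lacks
+                // `name`, `piece length`, `pieces`, ...).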
+ // cspell:disable-next-line + let minimal_bencoded_value_with_info_key = b"d4:infod6:custom6:customee".to_vec(); + test_torrent.index_info.torrent_file.contents = minimal_bencoded_value_with_info_key; + + let form: UploadTorrentMultipartForm = test_torrent.index_info.into(); + + let response = client.upload_torrent(form.into()).await; + + assert_eq!(response.status, 400); + } + + #[tokio::test] + async fn pieces_key_has_a_length_that_is_a_multiple_of_20() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let mut test_torrent = random_torrent(); + + // cspell:disable-next-line + let torrent_with_19_pieces = b"d4:infod6:lengthi2e4:name42:torrent-with-invalid-pieces-key-length.txt12:piece lengthi16384e6:pieces19:\x3F\x78\x68\x50\xE3\x87\x55\x0F\xDA\xB8\x36\xED\x7E\x6D\xC8\x81\xDE\x23\x00ee"; + test_torrent.index_info.torrent_file.contents = torrent_with_19_pieces.to_vec(); + + let form: UploadTorrentMultipartForm = test_torrent.index_info.into(); + + let response = client.upload_torrent(form.into()).await; + + assert_eq!(response.status, 400); + } + } + + #[tokio::test] + async fn it_should_not_allow_uploading_a_torrent_with_a_non_existing_category() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let mut test_torrent = random_torrent(); + + test_torrent.index_info.category = "non-existing-category".to_string(); + + let form: UploadTorrentMultipartForm = test_torrent.index_info.into(); + + let response = client.upload_torrent(form.into()).await; + + assert_eq!(response.status, 400); + } + + #[tokio::test] + async fn it_should_not_allow_uploading_a_torrent_with_a_title_that_already_exists() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. It requires a tracker to be running."); + return; + } + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + // Upload the first torrent + let first_torrent = random_torrent(); + let first_torrent_title = first_torrent.index_info.title.clone(); + let form: UploadTorrentMultipartForm = first_torrent.index_info.into(); + let _response = client.upload_torrent(form.into()).await; + + // Upload the second torrent with the same title as the first one + let mut second_torrent = random_torrent(); + second_torrent.index_info.title = first_torrent_title; + let form: UploadTorrentMultipartForm = second_torrent.index_info.into(); + let response = client.upload_torrent(form.into()).await; + + assert_json_error_response(&response, "This torrent title has already been used."); + } + + #[tokio::test] + async fn it_should_not_allow_uploading_a_torrent_with_a_info_hash_that_already_exists() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. 
It requires a tracker to be running."); + return; + } + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + // Upload the first torrent + let first_torrent = random_torrent(); + let mut first_torrent_clone = first_torrent.clone(); + let first_torrent_title = first_torrent.index_info.title.clone(); + let form: UploadTorrentMultipartForm = first_torrent.index_info.into(); + let _response = client.upload_torrent(form.into()).await; + + // Upload the second torrent with the same info-hash as the first one. + // We need to change the title otherwise the torrent will be rejected + // because of the duplicate title. + first_torrent_clone.index_info.title = format!("{first_torrent_title}-clone"); + let form: UploadTorrentMultipartForm = first_torrent_clone.index_info.into(); + let response = client.upload_torrent(form.into()).await; + + assert_eq!(response.status, 400); + } + + #[tokio::test] + async fn it_should_not_allow_uploading_a_torrent_whose_canonical_info_hash_already_exists() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. It requires a tracker to be running."); + return; + } + + let uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let id1 = Uuid::new_v4(); + + // Upload the first torrent + let first_torrent = TestTorrent::with_custom_info_dict_field(id1, "data", "custom 01"); + let first_torrent_title = first_torrent.index_info.title.clone(); + let form: UploadTorrentMultipartForm = first_torrent.index_info.into(); + let _response = client.upload_torrent(form.into()).await; + + // Upload the second torrent with the same canonical info-hash as the first one. + // We need to change the title otherwise the torrent will be rejected + // because of the duplicate title. + let mut torrent_with_the_same_canonical_info_hash = + TestTorrent::with_custom_info_dict_field(id1, "data", "custom 02"); + torrent_with_the_same_canonical_info_hash.index_info.title = format!("{first_torrent_title}-clone"); + let form: UploadTorrentMultipartForm = torrent_with_the_same_canonical_info_hash.index_info.into(); + let response = client.upload_torrent(form.into()).await; + + assert_eq!(response.status, 400); + } + } + + #[tokio::test] + async fn it_should_allow_authenticated_users_to_download_a_torrent_with_a_personal_announce_url() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. 
It requires a tracker to be running."); + return; + } + + // Given a previously uploaded torrent + let uploader = new_logged_in_user(&env).await; + let (test_torrent, _torrent_listed_in_index) = upload_random_torrent_to_index(&uploader, &env).await; + + // And a logged in user who is going to download the torrent + let downloader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &downloader.token); + + // When the user downloads the torrent + let response = client.download_torrent(&test_torrent.file_info_hash()).await; + + let torrent = decode_torrent(&response.bytes).expect("could not decode downloaded torrent"); + + // Then the torrent should have the personal announce URL + let tracker_key = get_user_tracker_key(&downloader, &env) + .await + .expect("downloader should have a valid tracker key"); + + let tracker_url = env.server_settings().unwrap().tracker.url; + + assert_eq!( + torrent.announce.unwrap(), + build_announce_url(&tracker_url, &Some(tracker_key)) + ); + } + + mod and_non_admins { + + use torrust_index::web::api; + + use crate::common::client::Client; + use crate::common::contexts::torrent::forms::UpdateTorrentFrom; + use crate::e2e::environment::TestEnv; + use crate::e2e::web::api::v1::contexts::torrent::steps::upload_random_torrent_to_index; + use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_user; + + #[tokio::test] + async fn it_should_not_allow_non_admins_to_delete_torrents() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. It requires a tracker to be running."); + return; + } + + let uploader = new_logged_in_user(&env).await; + let (test_torrent, _uploaded_torrent) = upload_random_torrent_to_index(&uploader, &env).await; + + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let response = client.delete_torrent(&test_torrent.file_info_hash()).await; + + assert_eq!(response.status, 403); + } + + #[tokio::test] + async fn it_should_not_allow_non_admin_users_to_update_someone_elses_torrents() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped.
It requires a tracker to be running."); + return; + } + + // Given a user uploads a torrent + let uploader = new_logged_in_user(&env).await; + let (test_torrent, _uploaded_torrent) = upload_random_torrent_to_index(&uploader, &env).await; + + // Then another non-admin user should not be able to update the torrent + let not_the_uploader = new_logged_in_user(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &not_the_uploader.token); + + let new_title = format!("{}-new-title", test_torrent.index_info.title); + let new_description = format!("{}-new-description", test_torrent.index_info.description); + + let response = client + .update_torrent( + &test_torrent.file_info_hash(), + UpdateTorrentFrom { + title: Some(new_title.clone()), + description: Some(new_description.clone()), + category: None, + tags: None, + }, + ) + .await; + + assert_eq!(response.status, 403); + } + } + + mod and_torrent_owners { + + use torrust_index::web::api; + + use crate::common::client::Client; + use crate::common::contexts::torrent::forms::UpdateTorrentFrom; + use crate::common::contexts::torrent::responses::UpdatedTorrentResponse; + use crate::e2e::environment::TestEnv; + use crate::e2e::web::api::v1::contexts::torrent::steps::upload_random_torrent_to_index; + use crate::e2e::web::api::v1::contexts::user::steps::new_logged_in_user; + + #[tokio::test] + async fn it_should_allow_torrent_owners_to_update_their_torrents() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. It requires a tracker to be running."); + return; + } + + let uploader = new_logged_in_user(&env).await; + let (test_torrent, _uploaded_torrent) = upload_random_torrent_to_index(&uploader, &env).await; + + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let new_title = format!("{}-new-title", test_torrent.index_info.title); + let new_description = format!("{}-new-description", test_torrent.index_info.description); + + let response = client + .update_torrent( + &test_torrent.file_info_hash(), + UpdateTorrentFrom { + title: Some(new_title.clone()), + description: Some(new_description.clone()), + category: None, + tags: None, + }, + ) + .await; + + let updated_torrent_response: UpdatedTorrentResponse = serde_json::from_str(&response.body).unwrap(); + + let torrent = updated_torrent_response.data; + + assert_eq!(torrent.title, new_title); + assert_eq!(torrent.description, new_description); + assert!(response.is_json_and_ok()); + } + } + + mod and_admins { + + use torrust_index::web::api; + + use crate::common::client::Client; + use crate::common::contexts::torrent::forms::UpdateTorrentFrom; + use crate::common::contexts::torrent::responses::{DeletedTorrentResponse, UpdatedTorrentResponse}; + use crate::e2e::environment::TestEnv; + use crate::e2e::web::api::v1::contexts::torrent::steps::upload_random_torrent_to_index; + use crate::e2e::web::api::v1::contexts::user::steps::{new_logged_in_admin, new_logged_in_user}; + + #[tokio::test] + async fn it_should_allow_admins_to_delete_torrents_searching_by_info_hash() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped.
It requires a tracker to be running."); + return; + } + + let uploader = new_logged_in_user(&env).await; + let (test_torrent, uploaded_torrent) = upload_random_torrent_to_index(&uploader, &env).await; + + let admin = new_logged_in_admin(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &admin.token); + + let response = client.delete_torrent(&test_torrent.file_info_hash()).await; + + let deleted_torrent_response: DeletedTorrentResponse = serde_json::from_str(&response.body).unwrap(); + + assert_eq!(deleted_torrent_response.data.torrent_id, uploaded_torrent.torrent_id); + assert!(response.is_json_and_ok()); + } + + #[tokio::test] + async fn it_should_allow_admins_to_update_someone_elses_torrents() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + if !env.provides_a_tracker() { + println!("test skipped. It requires a tracker to be running."); + return; + } + + let uploader = new_logged_in_user(&env).await; + let (test_torrent, _uploaded_torrent) = upload_random_torrent_to_index(&uploader, &env).await; + + let logged_in_admin = new_logged_in_admin(&env).await; + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token); + + let new_title = format!("{}-new-title", test_torrent.index_info.title); + let new_description = format!("{}-new-description", test_torrent.index_info.description); + + let response = client + .update_torrent( + &test_torrent.file_info_hash(), + UpdateTorrentFrom { + title: Some(new_title.clone()), + description: Some(new_description.clone()), + category: None, + tags: None, + }, + ) + .await; + + let updated_torrent_response: UpdatedTorrentResponse = serde_json::from_str(&response.body).unwrap(); + + let torrent = updated_torrent_response.data; + + assert_eq!(torrent.title, new_title); + assert_eq!(torrent.description, new_description); + assert!(response.is_json_and_ok()); + } + } +} diff --git a/tests/e2e/web/api/v1/contexts/torrent/mod.rs b/tests/e2e/web/api/v1/contexts/torrent/mod.rs new file mode 100644 index 00000000..c0126d77 --- /dev/null +++ b/tests/e2e/web/api/v1/contexts/torrent/mod.rs @@ -0,0 +1,3 @@ +pub mod asserts; +pub mod contract; +pub mod steps; diff --git a/tests/e2e/web/api/v1/contexts/torrent/steps.rs b/tests/e2e/web/api/v1/contexts/torrent/steps.rs new file mode 100644 index 00000000..56c7a648 --- /dev/null +++ b/tests/e2e/web/api/v1/contexts/torrent/steps.rs @@ -0,0 +1,59 @@ +use std::str::FromStr; + +use torrust_index::models::info_hash::InfoHash; +use torrust_index::web::api::v1::responses::ErrorResponseData; + +use crate::common::client::Client; +use crate::common::contexts::torrent::fixtures::{random_torrent, TestTorrent, TorrentIndexInfo, TorrentListedInIndex}; +use crate::common::contexts::torrent::forms::UploadTorrentMultipartForm; +use crate::common::contexts::torrent::responses::UploadedTorrentResponse; +use crate::common::contexts::user::responses::LoggedInUserData; +use crate::e2e::environment::TestEnv; + +/// Add a new random torrent to the index +pub async fn upload_random_torrent_to_index(uploader: &LoggedInUserData, env: &TestEnv) -> (TestTorrent, TorrentListedInIndex) { + let random_torrent = random_torrent(); + let indexed_torrent = upload_torrent(uploader, &random_torrent.index_info, env).await; + (random_torrent, indexed_torrent) +} + +/// Upload a torrent to the index +pub async fn upload_torrent(uploader: &LoggedInUserData, torrent: &TorrentIndexInfo, env: &TestEnv) -> TorrentListedInIndex { + let client = 
Client::authenticated(&env.server_socket_addr().unwrap(), &uploader.token); + + let form: UploadTorrentMultipartForm = torrent.clone().into(); + + let response = client.upload_torrent(form.into()).await; + + let res = serde_json::from_str::<UploadedTorrentResponse>(&response.body); + + if res.is_err() { + println!("Error deserializing response: {res:?}"); + } + + TorrentListedInIndex::from(torrent.clone(), res.unwrap().data.torrent_id) +} + +/// Upload a torrent to the index. +/// +/// # Errors +/// +/// Returns an `ErrorResponseData` if the response is not a 200. +pub async fn upload_test_torrent(client: &Client, test_torrent: &TestTorrent) -> Result<InfoHash, ErrorResponseData> { + let form: UploadTorrentMultipartForm = test_torrent.clone().index_info.into(); + let response = client.upload_torrent(form.into()).await; + + if response.status != 200 { + let error: ErrorResponseData = serde_json::from_str(&response.body) + .unwrap_or_else(|_| panic!("response {:#?} should be an `ErrorResponseData`", response.body)); + return Err(error); + } + + let uploaded_torrent_response: UploadedTorrentResponse = serde_json::from_str(&response.body).unwrap(); + let canonical_info_hash_hex = uploaded_torrent_response.data.info_hash.to_lowercase(); + + let canonical_info_hash = InfoHash::from_str(&canonical_info_hash_hex) + .unwrap_or_else(|_| panic!("Invalid info-hash in database: {canonical_info_hash_hex}")); + + Ok(canonical_info_hash) +} diff --git a/tests/e2e/web/api/v1/contexts/user/contract.rs b/tests/e2e/web/api/v1/contexts/user/contract.rs new file mode 100644 index 00000000..809a2cb9 --- /dev/null +++ b/tests/e2e/web/api/v1/contexts/user/contract.rs @@ -0,0 +1,176 @@ +//! API contract for `user` context. + +/* + +This test suite is not complete. It's just a starting point to show how to +write E2E tests. Anyway, the goal is not to fully cover all the app features +with E2E tests. The goal is to cover the most important features and to +demonstrate how to write E2E tests. Some important pending tests could be: + +todo: + +- It should allow renewing a token one week before it expires. +- It should allow verifying user registration via email. + +The first one requires mocking the time. Consider extracting the mod +into an independent crate. + +The second one requires: +- To call the mailcatcher API to get the verification URL. +- To enable email verification in the configuration. +- To fix current tests to verify the email for newly created users. +- To find out which email is the one that contains the verification URL for a +given test. That may be done using the email recipient, if that's possible with +the mailcatcher API.
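+
+A minimal sketch of what the time-mocking seam could look like (hypothetical;
+no such trait exists in this codebase yet):
+
+    pub trait Clock: Send + Sync {
+        fn now_utc(&self) -> chrono::DateTime<chrono::Utc>;
+    }
+
+A production implementation would return the system time, while tests could
+inject a fixed or steppable instant so the "renew one week before expiration"
+branch can be exercised deterministically.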
+ +*/ + +mod registration { + + use torrust_index::web::api; + + use crate::common::client::Client; + use crate::common::contexts::user::asserts::assert_added_user_response; + use crate::common::contexts::user::fixtures::random_user_registration_form; + use crate::e2e::environment::TestEnv; + + #[tokio::test] + async fn it_should_allow_a_guest_user_to_register() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + + let form = random_user_registration_form(); + + let response = client.register_user(form).await; + + assert_added_user_response(&response); + } +} + +mod authentication { + + use torrust_index::web::api; + + use crate::common::client::Client; + use crate::common::contexts::user::asserts::{ + assert_successful_login_response, assert_token_renewal_response, assert_token_verified_response, + }; + use crate::common::contexts::user::forms::{LoginForm, TokenRenewalForm, TokenVerificationForm}; + use crate::e2e::environment::TestEnv; + use crate::e2e::web::api::v1::contexts::user::steps::{new_logged_in_user, new_registered_user}; + + #[tokio::test] + async fn it_should_allow_a_registered_user_to_login() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + + let registered_user = new_registered_user(&env).await; + + let response = client + .login_user(LoginForm { + login: registered_user.username.clone(), + password: registered_user.password.clone(), + }) + .await; + + assert_successful_login_response(&response, &registered_user); + } + + #[tokio::test] + async fn it_should_allow_a_logged_in_user_to_verify_an_authentication_token() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + + let logged_in_user = new_logged_in_user(&env).await; + + let response = client + .verify_token(TokenVerificationForm { + token: logged_in_user.token.clone(), + }) + .await; + + assert_token_verified_response(&response); + } + + #[tokio::test] + async fn it_should_not_allow_a_logged_in_user_to_renew_an_authentication_token_which_is_still_valid_for_more_than_one_week() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let logged_in_user = new_logged_in_user(&env).await; + + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_user.token); + + let response = client + .renew_token(TokenRenewalForm { + token: logged_in_user.token.clone(), + }) + .await; + + assert_token_renewal_response(&response, &logged_in_user); + } +} + +mod banned_user_list { + + use torrust_index::web::api; + + use crate::common::client::Client; + use crate::common::contexts::user::asserts::assert_banned_user_response; + use crate::common::contexts::user::forms::Username; + use crate::e2e::environment::TestEnv; + use crate::e2e::web::api::v1::contexts::user::steps::{new_logged_in_admin, new_logged_in_user, new_registered_user}; + + #[tokio::test] + async fn it_should_allow_an_admin_to_ban_a_user() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let logged_in_admin = new_logged_in_admin(&env).await; + + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_in_admin.token); + + let registered_user = new_registered_user(&env).await; + + let response = client.ban_user(Username::new(registered_user.username.clone())).await; + + 
assert_banned_user_response(&response, &registered_user); + } + + #[tokio::test] + async fn it_should_not_allow_a_non_admin_to_ban_a_user() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let logged_non_admin = new_logged_in_user(&env).await; + + let client = Client::authenticated(&env.server_socket_addr().unwrap(), &logged_non_admin.token); + + let registered_user = new_registered_user(&env).await; + + let response = client.ban_user(Username::new(registered_user.username.clone())).await; + + assert_eq!(response.status, 403); + } + + #[tokio::test] + async fn it_should_not_allow_a_guest_to_ban_a_user() { + let mut env = TestEnv::new(); + env.start(api::Version::V1).await; + + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + + let registered_user = new_registered_user(&env).await; + + let response = client.ban_user(Username::new(registered_user.username.clone())).await; + + assert_eq!(response.status, 401); + } +} diff --git a/tests/e2e/web/api/v1/contexts/user/mod.rs b/tests/e2e/web/api/v1/contexts/user/mod.rs new file mode 100644 index 00000000..2001efb8 --- /dev/null +++ b/tests/e2e/web/api/v1/contexts/user/mod.rs @@ -0,0 +1,2 @@ +pub mod contract; +pub mod steps; diff --git a/tests/e2e/web/api/v1/contexts/user/steps.rs b/tests/e2e/web/api/v1/contexts/user/steps.rs new file mode 100644 index 00000000..ec23ff14 --- /dev/null +++ b/tests/e2e/web/api/v1/contexts/user/steps.rs @@ -0,0 +1,77 @@ +use std::sync::Arc; + +use torrust_index::databases::database; + +use crate::common::client::Client; +use crate::common::contexts::user::fixtures::random_user_registration_form; +use crate::common::contexts::user::forms::{LoginForm, RegisteredUser}; +use crate::common::contexts::user::responses::{LoggedInUserData, SuccessfulLoginResponse}; +use crate::e2e::environment::TestEnv; + +pub async fn new_logged_in_admin(env: &TestEnv) -> LoggedInUserData { + let user = new_logged_in_user(env).await; + + let database = Arc::new( + database::connect(&env.database_connect_url().unwrap()) + .await + .expect("Database error."), + ); + + let user_profile = database + .get_user_profile_from_username(&user.username) + .await + .unwrap_or_else(|_| panic!("no user profile for the user: {user:#?}.")); + + database.grant_admin_role(user_profile.user_id).await.unwrap(); + + user +} + +pub async fn new_logged_in_user(env: &TestEnv) -> LoggedInUserData { + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + + let registered_user = new_registered_user(env).await; + + let response = client + .login_user(LoginForm { + login: registered_user.username.clone(), + password: registered_user.password.clone(), + }) + .await; + + let res: SuccessfulLoginResponse = serde_json::from_str(&response.body).unwrap(); + + let user = res.data; + + if !user.admin { + return user; + } + + // The first registered user is always an admin, so we need to register + // a second user to get a non-admin user.
+ + let second_registered_user = new_registered_user(env).await; + + let response = client + .login_user(LoginForm { + login: second_registered_user.username.clone(), + password: second_registered_user.password.clone(), + }) + .await; + + let res: SuccessfulLoginResponse = serde_json::from_str(&response.body).unwrap(); + + res.data +} + +pub async fn new_registered_user(env: &TestEnv) -> RegisteredUser { + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + + let form = random_user_registration_form(); + + let registered_user = form.clone(); + + let _response = client.register_user(form).await; + + registered_user +} diff --git a/tests/e2e/web/api/v1/mod.rs b/tests/e2e/web/api/v1/mod.rs new file mode 100644 index 00000000..0f9779b8 --- /dev/null +++ b/tests/e2e/web/api/v1/mod.rs @@ -0,0 +1 @@ +pub mod contexts; diff --git a/tests/e2e/web/mod.rs b/tests/e2e/web/mod.rs new file mode 100644 index 00000000..e5fdf85e --- /dev/null +++ b/tests/e2e/web/mod.rs @@ -0,0 +1 @@ +pub mod api; diff --git a/tests/environments/app_starter.rs b/tests/environments/app_starter.rs new file mode 100644 index 00000000..b21f80d5 --- /dev/null +++ b/tests/environments/app_starter.rs @@ -0,0 +1,135 @@ +use std::net::SocketAddr; + +use log::info; +use tokio::sync::{oneshot, RwLock}; +use tokio::task::JoinHandle; +use torrust_index::config::Configuration; +use torrust_index::web::api::Version; +use torrust_index::{app, config}; + +/// It launches the app and provides a way to stop it. +pub struct AppStarter { + configuration: config::TorrustIndex, + config_path: Option<String>, + /// The application binary state (started or not): + /// - `None`: if the app is not started, + /// - `Some(RunningState)`: if the app was started. + running_state: Option<RunningState>, +} + +impl AppStarter { + #[must_use] + pub fn with_custom_configuration(configuration: config::TorrustIndex, config_path: Option<String>) -> Self { + Self { + configuration, + config_path, + running_state: None, + } + } + + /// Starts the whole app with all its services. + /// + /// # Panics + /// + /// Will panic if the app was dropped after spawning it. + pub async fn start(&mut self, api_version: Version) { + let configuration = Configuration { + settings: RwLock::new(self.configuration.clone()), + config_path: self.config_path.clone(), + }; + + // Open a channel to communicate back with this function + let (tx, rx) = oneshot::channel::<AppStartedMessage>(); + + // Launch the app in a separate task + let app_handle = tokio::spawn(async move { + let app = app::run(configuration, &api_version).await; + + info!("Application started.
API server listening on {}", app.api_socket_addr); + + // Send the socket address back to the main thread + tx.send(AppStartedMessage { + api_socket_addr: app.api_socket_addr, + }) + .expect("the app starter should not be dropped"); + + match api_version { + Version::V1 => app.api_server.unwrap().await, + } + }); + + // Wait until the app is started + let socket_addr = match rx.await { + Ok(msg) => msg.api_socket_addr, + Err(e) => panic!("the app was dropped: {e}"), + }; + + let running_state = RunningState { app_handle, socket_addr }; + + // Update the app state + self.running_state = Some(running_state); + } + + pub fn stop(&mut self) { + match &self.running_state { + Some(running_state) => { + running_state.app_handle.abort(); + self.running_state = None; + } + None => {} + } + } + + #[must_use] + pub fn server_configuration(&self) -> config::TorrustIndex { + self.configuration.clone() + } + + #[must_use] + pub fn server_socket_addr(&self) -> Option<SocketAddr> { + self.running_state.as_ref().map(|running_state| running_state.socket_addr) + } + + #[must_use] + pub fn database_connect_url(&self) -> String { + self.configuration.database.connect_url.clone() + } +} + +#[derive(Debug)] +pub struct AppStartedMessage { + pub api_socket_addr: SocketAddr, +} + +/// Stores the app state when it is running. +pub struct RunningState { + app_handle: JoinHandle<Result<Result<(), std::io::Error>, tokio::task::JoinError>>, + pub socket_addr: SocketAddr, +} + +impl Drop for AppStarter { + /// Child threads spawned with `tokio::spawn()` and tasks spawned with + /// `async { }` blocks will not be automatically killed when the owner of + /// the struct that spawns them goes out of scope. + /// + /// The `tokio::spawn()` function and `async { }` blocks create an + /// independent task that runs on a separate thread or the same thread, + /// respectively. The task will continue to run until it completes, even if + /// the owner of the struct that spawned it goes out of scope. + /// + /// However, it's important to note that dropping the owner of the struct + /// may cause the task to be orphaned, which means that the task is no + /// longer associated with any parent task or thread. Orphaned tasks can + /// continue running in the background, consuming system resources, and may + /// eventually cause issues if left unchecked. + /// + /// To avoid orphaned tasks, we ensure that the app is stopped when the + /// owner of the struct goes out of scope. + /// + /// This avoids having to call `TestEnv::stop()` explicitly at the end of + /// each test. + fn drop(&mut self) { + // Stop the app when the owner of the struct goes out of scope + self.stop(); + } +} diff --git a/tests/environments/isolated.rs b/tests/environments/isolated.rs new file mode 100644 index 00000000..e30c8907 --- /dev/null +++ b/tests/environments/isolated.rs @@ -0,0 +1,95 @@ +use tempfile::TempDir; +use torrust_index::config; +use torrust_index::config::FREE_PORT; +use torrust_index::web::api::Version; + +use super::app_starter::AppStarter; +use crate::common::random; + +/// Provides an isolated test environment for testing. The environment is +/// launched with a temporary directory and a default ephemeral configuration +/// before running the test. +pub struct TestEnv { + pub app_starter: AppStarter, + pub temp_dir: TempDir, +} + +impl TestEnv { + /// Provides a running app instance for integration tests.
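+    ///
+    /// A minimal usage sketch (assuming the `V1` API version):
+    ///
+    /// ```rust,ignore
+    /// let env = TestEnv::running(api::Version::V1).await;
+    /// let addr = env.server_socket_addr().expect("the app should be running");
+    /// ```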
+ pub async fn running(api_version: Version) -> Self { + let mut env = Self::default(); + env.start(api_version).await; + env + } + + /// Provides a test environment with a default configuration for testing the + /// application. + /// + /// # Panics + /// + /// Panics if the temporary directory cannot be created. + #[must_use] + pub fn with_test_configuration() -> Self { + let temp_dir = TempDir::new().expect("failed to create a temporary directory"); + + let configuration = ephemeral(&temp_dir); + // Even if we load the configuration from the environment variable, we + // still need to provide a path to save the configuration when the + // configuration is updated via the `POST /settings` endpoints. + let config_path = format!("{}/config.toml", temp_dir.path().to_string_lossy()); + + let app_starter = AppStarter::with_custom_configuration(configuration, Some(config_path)); + + Self { app_starter, temp_dir } + } + + /// Starts the app. + pub async fn start(&mut self, api_version: Version) { + self.app_starter.start(api_version).await; + } + + /// Provides the whole server configuration. + #[must_use] + pub fn server_configuration(&self) -> config::TorrustIndex { + self.app_starter.server_configuration() + } + + /// Provides the API server socket address. + #[must_use] + pub fn server_socket_addr(&self) -> Option<String> { + self.app_starter.server_socket_addr().map(|addr| addr.to_string()) + } + + #[must_use] + pub fn database_connect_url(&self) -> String { + self.app_starter.database_connect_url() + } +} + +impl Default for TestEnv { + fn default() -> Self { + Self::with_test_configuration() + } +} + +/// Provides a configuration with ephemeral data for testing. +fn ephemeral(temp_dir: &TempDir) -> config::TorrustIndex { + let mut configuration = config::TorrustIndex { + log_level: Some("off".to_owned()), // Change to `debug` for test debugging + ..config::TorrustIndex::default() + }; + + // Ephemeral API port + configuration.net.port = FREE_PORT; + + // Ephemeral SQLite database + configuration.database.connect_url = format!("sqlite://{}?mode=rwc", random_database_file_path_in(temp_dir)); + + configuration +} + +fn random_database_file_path_in(temp_dir: &TempDir) -> String { + let random_db_id = random::string(16); + let db_file_name = format!("data_{random_db_id}.db"); + temp_dir.path().join(db_file_name).to_string_lossy().to_string() +} diff --git a/tests/environments/mod.rs b/tests/environments/mod.rs new file mode 100644 index 00000000..abbdbd41 --- /dev/null +++ b/tests/environments/mod.rs @@ -0,0 +1,3 @@ +pub mod app_starter; +pub mod isolated; +pub mod shared; diff --git a/tests/environments/shared.rs b/tests/environments/shared.rs new file mode 100644 index 00000000..d9db57be --- /dev/null +++ b/tests/environments/shared.rs @@ -0,0 +1,40 @@ +use crate::common::client::Client; + +/// Provides a shared test environment for testing. All tests share the same +/// application instance. +pub struct TestEnv { + pub authority: String, +} + +impl TestEnv { + /// Provides a wrapper for an external running app instance. + /// + /// # Panics + /// + /// Will panic if the app is not running. This function requires the app to + /// be running to provide a valid environment. + pub async fn running() -> Self { + let env = Self::default(); + let client = Client::unauthenticated(&env.server_socket_addr().unwrap()); + let is_running = client.server_is_running().await; + assert!(is_running, "Test server is not running on {}", env.authority); + env + } + + /// Provides the API server socket address.
+ #[must_use] + pub fn server_socket_addr(&self) -> Option<String> { + // If the E2E configuration uses port 0 in the future instead of a + // predefined port (right now we are using port 3001) we will + // need to pass an env var with the port used by the server. + Some(self.authority.clone()) + } +} + +impl Default for TestEnv { + fn default() -> Self { + Self { + authority: "localhost:3001".to_string(), + } + } +} diff --git a/tests/fixtures/torrents/6c690018c5786dbbb00161f62b0712d69296df97_with_custom_info_dict_key.torrent b/tests/fixtures/torrents/6c690018c5786dbbb00161f62b0712d69296df97_with_custom_info_dict_key.torrent new file mode 100644 index 00000000..8be3da33 Binary files /dev/null and b/tests/fixtures/torrents/6c690018c5786dbbb00161f62b0712d69296df97_with_custom_info_dict_key.torrent differ diff --git a/tests/fixtures/torrents/6c690018c5786dbbb00161f62b0712d69296df97_with_custom_info_dict_key.torrent.json b/tests/fixtures/torrents/6c690018c5786dbbb00161f62b0712d69296df97_with_custom_info_dict_key.torrent.json new file mode 100644 index 00000000..fbd639c8 --- /dev/null +++ b/tests/fixtures/torrents/6c690018c5786dbbb00161f62b0712d69296df97_with_custom_info_dict_key.torrent.json @@ -0,0 +1,73 @@ +{ + "announce": "https://academictorrents.com/announce.php", + "announce-list": [ + [ + "https://academictorrents.com/announce.php" + ], + [ + "https://ipv6.academictorrents.com/announce.php" + ], + [ + "udp://tracker.opentrackr.org:1337/announce" + ], + [ + "udp://tracker.openbittorrent.com:80/announce" + ], + [ + "http://bt1.archive.org:6969/announce" + ], + [ + "http://bt2.archive.org:6969/announce" + ] + ], + "comment": "This content hosted at the Internet Archive at https://archive.org/details/rapppid-weights.tar\nFiles may have changed, which prevents torrents from downloading correctly or completely; please check for an updated torrent at https://archive.org/download/rapppid-weights.tar/rapppid-weights.tar_archive.torrent\nNote: retrieval usually requires a client that supports webseeding (GetRight style).\nNote: many Internet Archive torrents contain a 'pad file' directory.
This directory and the files within it may be erased once retrieval completes.\nNote: the file rapppid-weights.tar_meta.xml contains metadata about this torrent's contents.", + "created by": "ia_make_torrent", + "creation date": 1689273787, + "info": { + "collections": [ + "org.archive.rapppid-weights.tar" + ], + "files": [ + { + "crc32": "57d33fcc", + "length": 11528324, + "md5": "e91bb4ba82695161be68f8b33ae76142", + "mtime": "1689273730", + "path": [ + "RAPPPID Weights.tar.gz" + ], + "sha1": "45970ef33cb3049a7a8629e40c8f5e5268d1dc53" + }, + { + "crc32": "c658fd4f", + "length": 20480, + "md5": "a782b2a53ba49f0d45f3dd6e35e0d593", + "mtime": "1689273783", + "path": [ + "rapppid-weights.tar_meta.sqlite" + ], + "sha1": "bcb06b3164f1d2aba22ef6046eb80f65264e9fba" + }, + { + "crc32": "8140a5c7", + "length": 1044, + "md5": "1bab21e50e06ab42d3a77d872bf252e5", + "mtime": "1689273763", + "path": [ + "rapppid-weights.tar_meta.xml" + ], + "sha1": "b2f0f2bbec34aa9140fb9ac3fcb190588a496aa3" + } + ], + "name": "rapppid-weights.tar", + "piece length": 524288, + "pieces": "AB EC 55 6E 0F 7B E7 D3 30 0C F6 68 8C 90 6D 99 0C 3E 32 B5 2C F2 B6 7C 0C 32 52 BC 72 6F 07 1E 73 AB 76 F1 BC 32 2B FC 21 D4 7F 1A E9 72 35 40 7E C3 B4 89 09 2B ED 4B D8 B0 6C 65 8C 27 58 AE FB 72 75 73 44 37 88 28 20 D2 94 BD A4 6A F8 D2 A6 FD 02 65 1C 1C DF B8 56 6D 3A D2 7E A7 3D CA E2 49 F7 36 8D 17 77 6E 32 AD EF A5 44 C2 8F B6 9C 24 56 AD E8 FB 7B A6 71 C0 81 E5 43 03 91 D4 4F B0 A6 64 CA 29 1B 0D 1D 40 7D 39 4E 76 96 EB 01 18 F3 F5 50 8E 2F FA 54 FC 49 66 85 D8 38 87 78 9B 0A 8F 7A A3 2C 8F 72 36 AD 6D 74 0B FC C5 57 71 86 FB F3 CF CA C9 DA EC 61 62 A2 2A 1B A7 85 89 91 8F AA C0 C0 CB 0D 57 D8 B7 E7 64 4D F2 84 73 76 98 FB 3A 17 48 D7 9C 01 FE CA 6D 1F C5 97 34 05 54 39 DA C2 6E 17 41 11 69 F3 46 D1 7D 16 D3 C0 87 3B C3 B2 0C 1D E0 E2 49 C3 05 D2 4C 00 5A 5B 78 01 12 3E BF 52 43 49 6D 1A EE 23 79 D2 0E 28 B6 84 7E C5 ED 79 DE 64 02 ED 47 71 3D 93 16 C4 A2 76 18 77 54 C5 31 48 96 3A 51 C1 4A 92 90 91 F3 CF 48 5B 24 86 55 A8 EB 0C C6 2D 86 E2 29 56 09 2C 38 0B CD C1 CA 45 E6 64 6A 47 FE BB 2E 47 9A 77 45 29 E9 72 19 20 6F 42 79 2B 37 B9 53 25 ED 0F 29 04 D5 E2 96 F1 DE CF 99 BE 32 AA B8 0A 1D 0B 9F B9 D6 AB 5C 50 43 78 85 41 09 01 24 CF E0 89 76 5B 4D A9 CA 72 C0 DF 92 47 0F 0D CE CA 96 C6 7E A5 41 5F 2B A7 BB 04 CC F7 44 7F 94 1E 24 D2 1B 17 CA 18 79 90 A3 D6 20 75 A2 96 68 06 58 5A DE F5 2C 1A 90 22 72 33 8E D5 B2 A8 FA E5 E9 E7 69 62 02 7C 09 B3 4C" + }, + "locale": "en", + "title": "rapppid-weights.tar", + "url-list": [ + "https://archive.org/download/", + "http://ia902702.us.archive.org/22/items/", + "http://ia802702.us.archive.org/22/items/" + ] +} \ No newline at end of file diff --git a/tests/fixtures/torrents/MC_GRID.zip-3cd18ff2d3eec881207dcc5ca5a2c3a2a3afe462.torrent b/tests/fixtures/torrents/MC_GRID.zip-3cd18ff2d3eec881207dcc5ca5a2c3a2a3afe462.torrent new file mode 100644 index 00000000..38e24e4b Binary files /dev/null and b/tests/fixtures/torrents/MC_GRID.zip-3cd18ff2d3eec881207dcc5ca5a2c3a2a3afe462.torrent differ diff --git a/tests/fixtures/torrents/not-working-with-two-nodes.torrent b/tests/fixtures/torrents/not-working-with-two-nodes.torrent new file mode 100644 index 00000000..9378c375 --- /dev/null +++ b/tests/fixtures/torrents/not-working-with-two-nodes.torrent @@ -0,0 +1 @@ +d4:infod6:lengthi8e4:name11:minimal.txt12:piece lengthi16384e6:pieces20:0š„QJñ ÐŒ4B&g„³#Mxe5:nodesll15:188.163.121.224i56711eel14:162.250.131.26i13386eeee \ No newline at end of file diff --git 
a/tests/fixtures/torrents/working-with-one-node.torrent b/tests/fixtures/torrents/working-with-one-node.torrent new file mode 100644 index 00000000..fdfcfc04 --- /dev/null +++ b/tests/fixtures/torrents/working-with-one-node.torrent @@ -0,0 +1 @@ +d4:infod6:lengthi8e4:name11:minimal.txt12:piece lengthi16384e6:pieces20:0š„QJñ ÐŒ4B&g„³#Mxe5:nodesll15:188.163.121.224i56711eeee \ No newline at end of file diff --git a/tests/mod.rs b/tests/mod.rs index 22adeb6d..4d330909 100644 --- a/tests/mod.rs +++ b/tests/mod.rs @@ -1 +1,4 @@ -mod databases; +mod common; +mod e2e; +pub mod environments; +mod upgrades; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210831113004_torrust_users.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210831113004_torrust_users.sql new file mode 100644 index 00000000..c535dfb9 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210831113004_torrust_users.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS torrust_users ( + user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + username VARCHAR(32) NOT NULL UNIQUE, + email VARCHAR(100) NOT NULL UNIQUE, + email_verified BOOLEAN NOT NULL DEFAULT FALSE, + password TEXT NOT NULL +) \ No newline at end of file diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210904135524_torrust_tracker_keys.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210904135524_torrust_tracker_keys.sql new file mode 100644 index 00000000..ef6f6865 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210904135524_torrust_tracker_keys.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS torrust_tracker_keys ( + key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + user_id INTEGER, + key VARCHAR(32) NOT NULL, + valid_until INT(10) NOT NULL, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) +) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210905160623_torrust_categories.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210905160623_torrust_categories.sql new file mode 100644 index 00000000..c88abfe2 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210905160623_torrust_categories.sql @@ -0,0 +1,7 @@ +CREATE TABLE torrust_categories ( + category_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + name VARCHAR(64) NOT NULL UNIQUE +); + +INSERT INTO torrust_categories (name) VALUES +('movies'), ('tv shows'), ('games'), ('music'), ('software'); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210907083424_torrust_torrent_files.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210907083424_torrust_torrent_files.sql new file mode 100644 index 00000000..aeb3135a --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210907083424_torrust_torrent_files.sql @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS torrust_torrent_files ( + file_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + torrent_id INTEGER NOT NULL, + number INTEGER NOT NULL, + path VARCHAR(255) NOT NULL, + length INTEGER NOT NULL, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) +) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20211208143338_torrust_users.sql 
b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20211208143338_torrust_users.sql new file mode 100644 index 00000000..0b574c69 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20211208143338_torrust_users.sql @@ -0,0 +1,2 @@ +ALTER TABLE torrust_users +ADD COLUMN administrator BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308083424_torrust_torrents.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308083424_torrust_torrents.sql new file mode 100644 index 00000000..413539a4 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308083424_torrust_torrents.sql @@ -0,0 +1,14 @@ +CREATE TABLE IF NOT EXISTS torrust_torrents ( + torrent_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + uploader VARCHAR(32) NOT NULL, + info_hash VARCHAR(20) UNIQUE NOT NULL, + title VARCHAR(256) UNIQUE NOT NULL, + category_id INTEGER NOT NULL, + description TEXT, + upload_date INT(10) NOT NULL, + file_size BIGINT NOT NULL, + seeders INTEGER NOT NULL, + leechers INTEGER NOT NULL, + FOREIGN KEY(uploader) REFERENCES torrust_users(username) ON DELETE CASCADE, + FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE +) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308170028_torrust_categories.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308170028_torrust_categories.sql new file mode 100644 index 00000000..b786dcd2 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308170028_torrust_categories.sql @@ -0,0 +1,2 @@ +ALTER TABLE torrust_categories +ADD COLUMN icon VARCHAR(32); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/1.torrent b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/1.torrent new file mode 100644 index 00000000..faa30f4c Binary files /dev/null and b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/1.torrent differ diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/2.torrent b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/2.torrent new file mode 100644 index 00000000..a62afbff Binary files /dev/null and b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/2.torrent differ diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs new file mode 100644 index 00000000..29897ff7 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs @@ -0,0 +1,4 @@ +pub mod sqlite_v1_0_0; +pub mod sqlite_v2_0_0; +pub mod transferrer_testers; +pub mod upgrader; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/output/.gitignore b/tests/upgrades/from_v1_0_0_to_v2_0_0/output/.gitignore new file mode 100644 index 00000000..3997bead --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/output/.gitignore @@ -0,0 +1 @@ +*.db \ No newline at end of file diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs new file mode 100644 index 00000000..7c2ccfb4 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs @@ -0,0 +1,127 @@ +#![allow(clippy::missing_errors_doc)] + +use std::fs; + +use sqlx::sqlite::SqlitePoolOptions; +use sqlx::{query, SqlitePool}; +use torrust_index::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::{ + CategoryRecordV1, 
TorrentRecordV1, TrackerKeyRecordV1, UserRecordV1, +}; + +pub struct SqliteDatabaseV1_0_0 { + pub pool: SqlitePool, +} + +impl SqliteDatabaseV1_0_0 { + pub async fn db_connection(database_file: &str) -> Self { + let connect_url = format!("sqlite://{database_file}?mode=rwc"); + Self::new(&connect_url).await + } + + pub async fn new(database_url: &str) -> Self { + let db = SqlitePoolOptions::new() + .connect(database_url) + .await + .expect("Unable to create database pool."); + Self { pool: db } + } + + /// Execute migrations for database in version v1.0.0 + pub async fn migrate(&self, fixtures_dir: &str) { + let migrations_dir = format!("{fixtures_dir}database/v1.0.0/migrations/"); + + let migrations = vec![ + "20210831113004_torrust_users.sql", + "20210904135524_torrust_tracker_keys.sql", + "20210905160623_torrust_categories.sql", + "20210907083424_torrust_torrent_files.sql", + "20211208143338_torrust_users.sql", + "20220308083424_torrust_torrents.sql", + "20220308170028_torrust_categories.sql", + ]; + + for migration_file_name in &migrations { + let migration_file_path = format!("{}{}", &migrations_dir, &migration_file_name); + self.run_migration_from_file(&migration_file_path).await; + } + } + + async fn run_migration_from_file(&self, migration_file_path: &str) { + println!("Executing migration: {migration_file_path:?}"); + + let sql = fs::read_to_string(migration_file_path).expect("Should have been able to read the file"); + + let res = sqlx::query(&sql).execute(&self.pool).await; + + println!("Migration result {res:?}"); + } + + pub async fn insert_category(&self, category: &CategoryRecordV1) -> Result<i64, sqlx::Error> { + query("INSERT INTO torrust_categories (category_id, name) VALUES (?, ?)") + .bind(category.category_id) + .bind(category.name.clone()) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + #[allow(clippy::missing_panics_doc)] + pub async fn delete_all_categories(&self) -> Result<(), sqlx::Error> { + query("DELETE FROM torrust_categories").execute(&self.pool).await.unwrap(); + Ok(()) + } + + pub async fn insert_user(&self, user: &UserRecordV1) -> Result<i64, sqlx::Error> { + query("INSERT INTO torrust_users (user_id, username, email, email_verified, password, administrator) VALUES (?, ?, ?, ?, ?, ?)") + .bind(user.user_id) + .bind(user.username.clone()) + .bind(user.email.clone()) + .bind(user.email_verified) + .bind(user.password.clone()) + .bind(user.administrator) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_tracker_key(&self, tracker_key: &TrackerKeyRecordV1) -> Result<i64, sqlx::Error> { + query("INSERT INTO torrust_tracker_keys (key_id, user_id, key, valid_until) VALUES (?, ?, ?, ?)") + .bind(tracker_key.key_id) + .bind(tracker_key.user_id) + .bind(tracker_key.key.clone()) + .bind(tracker_key.valid_until) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_torrent(&self, torrent: &TorrentRecordV1) -> Result<i64, sqlx::Error> { + query( + "INSERT INTO torrust_torrents ( + torrent_id, + uploader, + info_hash, + title, + category_id, + description, + upload_date, + file_size, + seeders, + leechers + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + ) + .bind(torrent.torrent_id) + .bind(torrent.uploader.clone()) + .bind(torrent.info_hash.clone()) + .bind(torrent.title.clone()) + .bind(torrent.category_id) + .bind(torrent.description.clone()) + .bind(torrent.upload_date) + .bind(torrent.file_size) + .bind(torrent.seeders) + .bind(torrent.leechers) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } +} diff --git
a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs new file mode 100644 index 00000000..9b1ade49 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs @@ -0,0 +1,149 @@ +#![allow(clippy::missing_errors_doc)] + +use serde::{Deserialize, Serialize}; +use sqlx::sqlite::SqlitePoolOptions; +use sqlx::{query_as, SqlitePool}; +use torrust_index::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::TorrentRecordV2; + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct CategoryRecordV2 { + pub category_id: i64, + pub name: String, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct UserRecordV2 { + pub user_id: i64, + pub date_registered: Option<String>, + pub date_imported: Option<String>, + pub administrator: bool, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct UserProfileRecordV2 { + pub user_id: i64, + pub username: String, + pub email: String, + pub email_verified: bool, + pub bio: Option<String>, + pub avatar: Option<String>, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct UserAuthenticationRecordV2 { + pub user_id: i64, + pub password_hash: String, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TrackerKeyRecordV2 { + pub tracker_key_id: i64, + pub user_id: i64, + pub tracker_key: String, + pub date_expiry: i64, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TorrentInfoRecordV2 { + pub torrent_id: i64, + pub title: String, + pub description: Option<String>, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow, PartialEq)] +pub struct TorrentAnnounceUrlV2 { + pub announce_url_id: i64, + pub torrent_id: i64, + pub tracker_url: String, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow, PartialEq)] +pub struct TorrentFileV2 { + pub file_id: i64, + pub torrent_id: i64, + pub md5sum: Option<String>, + pub length: i64, + pub path: Option<String>, +} + +pub struct SqliteDatabaseV2_0_0 { + pub pool: SqlitePool, +} + +impl SqliteDatabaseV2_0_0 { + pub async fn db_connection(database_file: &str) -> Self { + let connect_url = format!("sqlite://{database_file}?mode=rwc"); + Self::new(&connect_url).await + } + + pub async fn new(database_url: &str) -> Self { + let db = SqlitePoolOptions::new() + .connect(database_url) + .await + .expect("Unable to create database pool."); + Self { pool: db } + } + + pub async fn get_category(&self, category_id: i64) -> Result<CategoryRecordV2, sqlx::Error> { + query_as::<_, CategoryRecordV2>("SELECT * FROM torrust_categories WHERE category_id = ?") + .bind(category_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_user(&self, user_id: i64) -> Result<UserRecordV2, sqlx::Error> { + query_as::<_, UserRecordV2>("SELECT * FROM torrust_users WHERE user_id = ?") + .bind(user_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_user_profile(&self, user_id: i64) -> Result<UserProfileRecordV2, sqlx::Error> { + query_as::<_, UserProfileRecordV2>("SELECT * FROM torrust_user_profiles WHERE user_id = ?") + .bind(user_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_user_authentication(&self, user_id: i64) -> Result<UserAuthenticationRecordV2, sqlx::Error> { + query_as::<_, UserAuthenticationRecordV2>("SELECT * FROM torrust_user_authentication WHERE user_id = ?") + .bind(user_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_tracker_key(&self, tracker_key_id: i64) -> Result<TrackerKeyRecordV2, sqlx::Error> { + query_as::<_, TrackerKeyRecordV2>("SELECT * FROM torrust_tracker_keys WHERE tracker_key_id = ?") + .bind(tracker_key_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_torrent(&self, torrent_id: i64)
-> Result<TorrentRecordV2, sqlx::Error> { + query_as::<_, TorrentRecordV2>("SELECT * FROM torrust_torrents WHERE torrent_id = ?") + .bind(torrent_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_torrent_info(&self, torrent_id: i64) -> Result<TorrentInfoRecordV2, sqlx::Error> { + query_as::<_, TorrentInfoRecordV2>("SELECT * FROM torrust_torrent_info WHERE torrent_id = ?") + .bind(torrent_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_torrent_announce_urls(&self, torrent_id: i64) -> Result<Vec<TorrentAnnounceUrlV2>, sqlx::Error> { + query_as::<_, TorrentAnnounceUrlV2>("SELECT * FROM torrust_torrent_announce_urls WHERE torrent_id = ?") + .bind(torrent_id) + .fetch_all(&self.pool) + .await + } + + pub async fn get_torrent_files(&self, torrent_id: i64) -> Result<Vec<TorrentFileV2>, sqlx::Error> { + query_as::<_, TorrentFileV2>("SELECT * FROM torrust_torrent_files WHERE torrent_id = ?") + .bind(torrent_id) + .fetch_all(&self.pool) + .await + } +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/category_transferrer_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/category_transferrer_tester.rs new file mode 100644 index 00000000..4d12b2b4 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/category_transferrer_tester.rs @@ -0,0 +1,64 @@ +use std::sync::Arc; + +use torrust_index::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::CategoryRecordV1; + +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; + +pub struct CategoryTester { + source_database: Arc<SqliteDatabaseV1_0_0>, + target_database: Arc<SqliteDatabaseV2_0_0>, + test_data: TestData, +} + +pub struct TestData { + pub categories: Vec<CategoryRecordV1>, +} + +impl CategoryTester { + pub fn new(source_database: Arc<SqliteDatabaseV1_0_0>, target_database: Arc<SqliteDatabaseV2_0_0>) -> Self { + let category_01 = CategoryRecordV1 { + category_id: 10, + name: "category name 10".to_string(), + }; + let category_02 = CategoryRecordV1 { + category_id: 11, + name: "category name 11".to_string(), + }; + + Self { + source_database, + target_database, + test_data: TestData { + categories: vec![category_01, category_02], + }, + } + } + + pub fn get_valid_category_id(&self) -> i64 { + self.test_data.categories[0].category_id + } + + #[allow(clippy::missing_panics_doc)] + /// Table `torrust_categories` + pub async fn load_data_into_source_db(&self) { + // Delete categories added by migrations + self.source_database.delete_all_categories().await.unwrap(); + + // Add test categories + for category in &self.test_data.categories { + self.source_database.insert_category(category).await.unwrap(); + } + } + + #[allow(clippy::missing_panics_doc)] + /// Table `torrust_categories` + pub async fn assert_data_in_target_db(&self) { + for category in &self.test_data.categories { + let imported_category = self.target_database.get_category(category.category_id).await.unwrap(); + + assert_eq!(imported_category.category_id, category.category_id); + assert_eq!(imported_category.name, category.name); + } + } +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs new file mode 100644 index 00000000..459bcac8 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs @@ -0,0 +1,4 @@ +pub mod category_transferrer_tester; +pub mod torrent_transferrer_tester; +pub mod tracker_key_transferrer_tester; +pub mod user_transferrer_tester; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/torrent_transferrer_tester.rs
b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/torrent_transferrer_tester.rs new file mode 100644 index 00000000..6677b04b --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/torrent_transferrer_tester.rs @@ -0,0 +1,183 @@ +use std::sync::Arc; + +use torrust_index::models::torrent_file::Torrent; +use torrust_index::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::{TorrentRecordV1, UserRecordV1}; +use torrust_index::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::convert_timestamp_to_datetime; +use torrust_index::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::read_torrent_from_file; + +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; + +pub struct TorrentTester { + source_database: Arc<SqliteDatabaseV1_0_0>, + target_database: Arc<SqliteDatabaseV2_0_0>, + test_data: TestData, +} + +pub struct TestData { + pub torrents: Vec<TorrentRecordV1>, + pub user: UserRecordV1, +} + +impl TorrentTester { + pub fn new( + source_database: Arc<SqliteDatabaseV1_0_0>, + target_database: Arc<SqliteDatabaseV2_0_0>, + user: &UserRecordV1, + category_id: i64, + ) -> Self { + let torrent_01 = TorrentRecordV1 { + torrent_id: 1, + uploader: user.username.clone(), + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + title: "A Mandelbrot Set 2048x2048px picture".to_string(), + category_id, + description: Some( + "A beautiful Mandelbrot Set picture in black and white. \n \ + - Hybrid torrent V1 and V2. \n \ + - Single-file torrent. \n \ + - Public. \n \ + - More than one tracker URL. \n \ + " + .to_string(), + ), + upload_date: 1_667_546_358, // 2022-11-04 07:19:18 + file_size: 9_219_566, + seeders: 0, + leechers: 0, + }; + let torrent_02 = TorrentRecordV1 { + torrent_id: 2, + uploader: user.username.clone(), + info_hash: "0902d375f18ec020f0cc68ed4810023032ba81cb".to_string(), + title: "Two Mandelbrot Set 2048x2048px pictures".to_string(), + category_id, + description: Some( + "Two beautiful Mandelbrot Set pictures in black and white. \n \ + - Hybrid torrent V1 and V2. \n \ + - Multiple-files torrent. \n \ + - Private. \n \ + - Only one tracker URL.
\n \ + " + .to_string(), + ), + upload_date: 1_667_546_358, // 2022-11-04 07:19:18 + file_size: 9_219_566, + seeders: 0, + leechers: 0, + }; + + Self { + source_database, + target_database, + test_data: TestData { + torrents: vec![torrent_01, torrent_02], + user: user.clone(), + }, + } + } + + #[allow(clippy::missing_panics_doc)] + pub async fn load_data_into_source_db(&self) { + for torrent in &self.test_data.torrents { + self.source_database.insert_torrent(torrent).await.unwrap(); + } + } + + #[allow(clippy::missing_panics_doc)] + pub async fn assert_data_in_target_db(&self, upload_path: &str) { + for torrent in &self.test_data.torrents { + let filepath = Self::torrent_file_path(upload_path, torrent.torrent_id); + + let torrent_file = read_torrent_from_file(&filepath).unwrap(); + + self.assert_torrent(torrent, &torrent_file).await; + self.assert_torrent_info(torrent).await; + self.assert_torrent_announce_urls(torrent, &torrent_file).await; + self.assert_torrent_files(torrent, &torrent_file).await; + } + } + + pub fn torrent_file_path(upload_path: &str, torrent_id: i64) -> String { + format!("{}/{}.torrent", &upload_path, &torrent_id) + } + + /// Table `torrust_torrents` + async fn assert_torrent(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { + let imported_torrent = self.target_database.get_torrent(torrent.torrent_id).await.unwrap(); + + assert_eq!(imported_torrent.torrent_id, torrent.torrent_id); + assert_eq!(imported_torrent.uploader_id, self.test_data.user.user_id); + assert_eq!(imported_torrent.category_id, torrent.category_id); + assert_eq!(imported_torrent.info_hash, torrent.info_hash); + assert_eq!(imported_torrent.size, torrent.file_size); + assert_eq!(imported_torrent.name, torrent_file.info.name); + assert_eq!(imported_torrent.pieces, torrent_file.info.get_pieces_as_string()); + assert_eq!(imported_torrent.piece_length, torrent_file.info.piece_length); + if torrent_file.info.private.is_none() { + assert_eq!(imported_torrent.private, Some(0)); + } else { + assert_eq!(imported_torrent.private, torrent_file.info.private); + } + assert_eq!(imported_torrent.root_hash, torrent_file.info.get_root_hash_as_i64()); + assert_eq!( + imported_torrent.date_uploaded, + convert_timestamp_to_datetime(torrent.upload_date) + ); + } + + /// Table `torrust_torrent_info` + async fn assert_torrent_info(&self, torrent: &TorrentRecordV1) { + let torrent_info = self.target_database.get_torrent_info(torrent.torrent_id).await.unwrap(); + + assert_eq!(torrent_info.torrent_id, torrent.torrent_id); + assert_eq!(torrent_info.title, torrent.title); + assert_eq!(torrent_info.description, torrent.description); + } + + /// Table `torrust_torrent_announce_urls` + async fn assert_torrent_announce_urls(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { + let torrent_announce_urls = self + .target_database + .get_torrent_announce_urls(torrent.torrent_id) + .await + .unwrap(); + + let urls: Vec<String> = torrent_announce_urls + .iter() + .map(|torrent_announce_url| torrent_announce_url.tracker_url.to_string()) + .collect(); + + let expected_urls = torrent_file.announce_urls(); + + assert_eq!(urls, expected_urls); + } + + /// Table `torrust_torrent_files` + async fn assert_torrent_files(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { + let db_torrent_files = self.target_database.get_torrent_files(torrent.torrent_id).await.unwrap(); + + if torrent_file.is_a_single_file_torrent() { + let db_torrent_file = &db_torrent_files[0]; + assert_eq!(db_torrent_file.torrent_id, torrent.torrent_id); +
assert!(db_torrent_file.md5sum.is_none()); + assert_eq!(db_torrent_file.length, torrent_file.info.length.unwrap()); + assert!(db_torrent_file.path.is_none()); + } else { + let files = torrent_file.info.files.as_ref().unwrap(); + + // Files in torrent file + for file in files { + let file_path = file.path.join("/"); + + // Find file in database + let db_torrent_file = db_torrent_files.iter().find(|&f| f.path == Some(file_path.clone())).unwrap(); + + assert_eq!(db_torrent_file.torrent_id, torrent.torrent_id); + assert!(db_torrent_file.md5sum.is_none()); + assert_eq!(db_torrent_file.length, file.length); + assert_eq!(db_torrent_file.path, Some(file_path)); + } + } + } +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/tracker_key_transferrer_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/tracker_key_transferrer_tester.rs new file mode 100644 index 00000000..31fa8002 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/tracker_key_transferrer_tester.rs @@ -0,0 +1,56 @@ +use std::sync::Arc; + +use torrust_index::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::TrackerKeyRecordV1; + +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; + +pub struct TrackerKeyTester { + source_database: Arc<SqliteDatabaseV1_0_0>, + target_database: Arc<SqliteDatabaseV2_0_0>, + test_data: TestData, +} + +pub struct TestData { + pub tracker_key: TrackerKeyRecordV1, +} + +impl TrackerKeyTester { + pub fn new(source_database: Arc<SqliteDatabaseV1_0_0>, target_database: Arc<SqliteDatabaseV2_0_0>, user_id: i64) -> Self { + let tracker_key = TrackerKeyRecordV1 { + key_id: 1, + user_id, + key: "rRstSTM5rx0sgxjLkRSJf3rXODcRBI5T".to_string(), + valid_until: 2_456_956_800, // 11-10-2047 00:00:00 UTC + }; + + Self { + source_database, + target_database, + test_data: TestData { tracker_key }, + } + } + + #[allow(clippy::missing_panics_doc)] + pub async fn load_data_into_source_db(&self) { + self.source_database + .insert_tracker_key(&self.test_data.tracker_key) + .await + .unwrap(); + } + + #[allow(clippy::missing_panics_doc)] + /// Table `torrust_tracker_keys` + pub async fn assert_data_in_target_db(&self) { + let imported_key = self + .target_database + .get_tracker_key(self.test_data.tracker_key.key_id) + .await + .unwrap(); + + assert_eq!(imported_key.tracker_key_id, self.test_data.tracker_key.key_id); + assert_eq!(imported_key.user_id, self.test_data.tracker_key.user_id); + assert_eq!(imported_key.tracker_key, self.test_data.tracker_key.key); + assert_eq!(imported_key.date_expiry, self.test_data.tracker_key.valid_until); + } +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/user_transferrer_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/user_transferrer_tester.rs new file mode 100644 index 00000000..731fbf16 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/user_transferrer_tester.rs @@ -0,0 +1,111 @@ +use std::sync::Arc; + +use argon2::password_hash::SaltString; +use argon2::{Argon2, PasswordHasher}; +use rand_core::OsRng; +use torrust_index::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::UserRecordV1; + +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; + +pub struct UserTester { + source_database: Arc<SqliteDatabaseV1_0_0>, + target_database: Arc<SqliteDatabaseV2_0_0>, + execution_time: String, + pub test_data: TestData, +} + +pub struct TestData { + pub user:
UserRecordV1, +} + +impl UserTester { + pub fn new( + source_database: Arc<SqliteDatabaseV1_0_0>, + target_database: Arc<SqliteDatabaseV2_0_0>, + execution_time: &str, + ) -> Self { + let user = UserRecordV1 { + user_id: 1, + username: "user01".to_string(), + email: "user01@torrust.com".to_string(), + email_verified: true, + password: hashed_valid_password(), + administrator: true, + }; + + Self { + source_database, + target_database, + execution_time: execution_time.to_owned(), + test_data: TestData { user }, + } + } + + #[allow(clippy::missing_panics_doc)] + pub async fn load_data_into_source_db(&self) { + self.source_database.insert_user(&self.test_data.user).await.unwrap(); + } + + pub async fn assert_data_in_target_db(&self) { + self.assert_user().await; + self.assert_user_profile().await; + self.assert_user_authentication().await; + } + + /// Table `torrust_users` + async fn assert_user(&self) { + let imported_user = self.target_database.get_user(self.test_data.user.user_id).await.unwrap(); + + assert_eq!(imported_user.user_id, self.test_data.user.user_id); + assert!(imported_user.date_registered.is_none()); + assert_eq!(imported_user.date_imported.unwrap(), self.execution_time); + assert_eq!(imported_user.administrator, self.test_data.user.administrator); + } + + /// Table `torrust_user_profiles` + async fn assert_user_profile(&self) { + let imported_user_profile = self + .target_database + .get_user_profile(self.test_data.user.user_id) + .await + .unwrap(); + + assert_eq!(imported_user_profile.user_id, self.test_data.user.user_id); + assert_eq!(imported_user_profile.username, self.test_data.user.username); + assert_eq!(imported_user_profile.email, self.test_data.user.email); + assert_eq!(imported_user_profile.email_verified, self.test_data.user.email_verified); + assert!(imported_user_profile.bio.is_none()); + assert!(imported_user_profile.avatar.is_none()); + } + + /// Table `torrust_user_authentication` + async fn assert_user_authentication(&self) { + let imported_user_authentication = self + .target_database + .get_user_authentication(self.test_data.user.user_id) + .await + .unwrap(); + + assert_eq!(imported_user_authentication.user_id, self.test_data.user.user_id); + assert_eq!(imported_user_authentication.password_hash, self.test_data.user.password); + } +} + +fn hashed_valid_password() -> String { + hash_password(&valid_password()) +} + +fn valid_password() -> String { + "123456".to_string() +} + +fn hash_password(plain_password: &str) -> String { + let salt = SaltString::generate(&mut OsRng); + + // Argon2 with default params (Argon2id v19) + let argon2 = Argon2::default(); + + // Hash password to PHC string ($argon2id$v=19$...) + argon2.hash_password(plain_password.as_bytes(), &salt).unwrap().to_string() +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs new file mode 100644 index 00000000..5a8abbe9 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -0,0 +1,124 @@ +//! You can run this test with: +//! +//! ```text +//! cargo test upgrades_data_from_version_v1_0_0_to_v2_0_0 +//! ``` +//! +//! or: +//! +//! ```text +//! cargo test upgrades_data_from_version_v1_0_0_to_v2_0_0 -- --nocapture +//! ``` +//! +//! to see the "upgrader" command output.
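+//! +//! The test below drives the library's `upgrade` function directly. The README for this upgrade describes a standalone upgrader binary (presumably a thin wrapper around the same function) that takes the source database file, the target database file, and the uploads directory as arguments: +//! +//! ```text +//! cargo run --bin upgrade ./data.db ./data_v2.db ./uploads +//! ```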
+use std::fs; +use std::path::Path; +use std::sync::Arc; + +use torrust_index::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{datetime_iso_8601, upgrade, Arguments}; + +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::category_transferrer_tester::CategoryTester; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::torrent_transferrer_tester::TorrentTester; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::tracker_key_transferrer_tester::TrackerKeyTester; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::user_transferrer_tester::UserTester; + +struct TestConfig { + // Directories + pub fixtures_dir: String, + pub upload_path: String, + // Files + pub source_database_file: String, + pub target_database_file: String, +} + +impl Default for TestConfig { + fn default() -> Self { + let fixtures_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/".to_string(); + let upload_path = format!("{}uploads/", &fixtures_dir); + let output_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/output/".to_string(); + let source_database_file = format!("{output_dir}source.db"); + let target_database_file = format!("{output_dir}target.db"); + Self { + fixtures_dir, + upload_path, + source_database_file, + target_database_file, + } + } +} + +#[tokio::test] +async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { + let config = TestConfig::default(); + + let (source_db, target_db) = setup_databases(&config).await; + + // The datetime when the upgrader is executed + let execution_time = datetime_iso_8601(); + + let category_tester = CategoryTester::new(source_db.clone(), target_db.clone()); + let user_tester = UserTester::new(source_db.clone(), target_db.clone(), &execution_time); + let tracker_key_tester = TrackerKeyTester::new(source_db.clone(), target_db.clone(), user_tester.test_data.user.user_id); + let torrent_tester = TorrentTester::new( + source_db.clone(), + target_db.clone(), + &user_tester.test_data.user, + category_tester.get_valid_category_id(), + ); + + // Load data into source database in version v1.0.0 + category_tester.load_data_into_source_db().await; + user_tester.load_data_into_source_db().await; + tracker_key_tester.load_data_into_source_db().await; + torrent_tester.load_data_into_source_db().await; + + // Run the upgrader + upgrade( + &Arguments { + source_database_file: config.source_database_file.clone(), + target_database_file: config.target_database_file.clone(), + upload_path: config.upload_path.clone(), + }, + &execution_time, + ) + .await; + + // Assertions for data transferred to the new database in version v2.0.0 + category_tester.assert_data_in_target_db().await; + user_tester.assert_data_in_target_db().await; + tracker_key_tester.assert_data_in_target_db().await; + torrent_tester.assert_data_in_target_db(&config.upload_path).await; +} + +async fn setup_databases(config: &TestConfig) -> (Arc<SqliteDatabaseV1_0_0>, Arc<SqliteDatabaseV2_0_0>) { + // Set up clean source database + reset_databases(&config.source_database_file, &config.target_database_file); + let source_database = source_db_connection(&config.source_database_file).await; + source_database.migrate(&config.fixtures_dir).await; + + // Set up connection for the target database + let target_database = target_db_connection(&config.target_database_file).await; + + (source_database, target_database) +} + +async fn
source_db_connection(source_database_file: &str) -> Arc<SqliteDatabaseV1_0_0> { + Arc::new(SqliteDatabaseV1_0_0::db_connection(source_database_file).await) +} + +async fn target_db_connection(target_database_file: &str) -> Arc<SqliteDatabaseV2_0_0> { + Arc::new(SqliteDatabaseV2_0_0::db_connection(target_database_file).await) +} + +/// Reset databases from previous executions +fn reset_databases(source_database_file: &str, target_database_file: &str) { + if Path::new(source_database_file).exists() { + fs::remove_file(source_database_file).expect("Can't remove the source DB file."); + } + + if Path::new(target_database_file).exists() { + fs::remove_file(target_database_file).expect("Can't remove the target DB file."); + } +} diff --git a/tests/upgrades/mod.rs b/tests/upgrades/mod.rs new file mode 100644 index 00000000..e22b19a7 --- /dev/null +++ b/tests/upgrades/mod.rs @@ -0,0 +1 @@ +pub mod from_v1_0_0_to_v2_0_0; diff --git a/upgrades/from_v1_0_0_to_v2_0_0/README.md b/upgrades/from_v1_0_0_to_v2_0_0/README.md new file mode 100644 index 00000000..37609149 --- /dev/null +++ b/upgrades/from_v1_0_0_to_v2_0_0/README.md @@ -0,0 +1,34 @@ +# Upgrade from v1.0.0 to v2.0.0 + +## How-to + +To upgrade from version `v1.0.0` to `v2.0.0`, follow these steps: + +- Back up your current database and the `uploads` folder. You can find out which database and uploads folder you are using in the `Config.toml` file in the root folder of your installation. +- Set up a local environment exactly like your production one, with your production data (database and torrents folder). +- Run the application locally with `cargo run`. +- Execute the upgrader command: `cargo run --bin upgrade ./data.db ./data_v2.db ./uploads`. +- A new SQLite file should have been created in the root folder: `data_v2.db`. +- Stop the running application and change the database configuration to point to the newly generated database: + +```toml +[database] +connect_url = "sqlite://data_v2.db?mode=rwc" +``` + +- Run the application again. +- Perform some tests. +- If all tests pass, stop the production service, replace the database, and start it again. + +## Tests + +Before replacing the production database, you can run some tests such as: + +- Try to log in with a preexisting user. If you do not know any, you can create a new "test" user in production before starting the upgrade process. Passwords were hashed with a different algorithm in v1. +- Try to create a new user. +- Try to upload and download a new torrent containing a single file (with and without md5sum). +- Try to upload and download a new torrent containing a folder. + +## Notes + +The `db_schemas` folder contains snapshots of the source and target database schemas for this upgrade.
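+ +Putting the how-to steps together, a full local dry run might look like this (a sketch reusing the example file names above; the `cp` backup targets are illustrative, so adjust all paths to your installation): + +```text +# back up the current database and the uploads folder +cp ./data.db ./data.db.bak +cp -r ./uploads ./uploads.bak + +# generate the v2 database from the v1 data +cargo run --bin upgrade ./data.db ./data_v2.db ./uploads + +# then point [database].connect_url at data_v2.db and restart +```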
diff --git a/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/mysql/db_migrations_v2.sql b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/mysql/db_migrations_v2.sql new file mode 100644 index 00000000..08349bb5 --- /dev/null +++ b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/mysql/db_migrations_v2.sql @@ -0,0 +1,152 @@ +# 20220721205537_torrust_users.sql + +CREATE TABLE IF NOT EXISTS torrust_users ( + user_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + date_registered DATETIME NOT NULL, + administrator BOOLEAN NOT NULL DEFAULT FALSE +); + +# 20220721210530_torrust_user_authentication.sql + +CREATE TABLE IF NOT EXISTS torrust_user_authentication ( + user_id INTEGER NOT NULL PRIMARY KEY, + password_hash TEXT NOT NULL, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +# 20220727213942_torrust_user_profiles.sql + +CREATE TABLE IF NOT EXISTS torrust_user_profiles ( + user_id INTEGER NOT NULL PRIMARY KEY, + username VARCHAR(24) NOT NULL UNIQUE, + email VARCHAR(320) UNIQUE, + email_verified BOOL NOT NULL DEFAULT FALSE, + bio TEXT, + avatar TEXT, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +# 20220727222313_torrust_tracker_keys.sql + +CREATE TABLE IF NOT EXISTS torrust_tracker_keys ( + tracker_key_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + user_id INTEGER NOT NULL, + tracker_key CHAR(32) NOT NULL, + date_expiry BIGINT NOT NULL, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +# 20220730102607_torrust_user_public_keys.sql + +CREATE TABLE IF NOT EXISTS torrust_user_public_keys ( + public_key_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + user_id INTEGER NOT NULL, + public_key CHAR(32) UNIQUE NOT NULL, + date_registered DATETIME NOT NULL, + date_expiry DATETIME NOT NULL, + revoked BOOLEAN NOT NULL DEFAULT FALSE, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +# 20220730104552_torrust_user_invitations.sql + +CREATE TABLE IF NOT EXISTS torrust_user_invitations ( + invitation_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + user_id INTEGER NOT NULL, + public_key CHAR(32) NOT NULL, + signed_digest CHAR(32) NOT NULL, + date_begin DATETIME NOT NULL, + date_expiry DATETIME NOT NULL, + max_uses INTEGER NOT NULL, + personal_message VARCHAR(512), + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE, + FOREIGN KEY(public_key) REFERENCES torrust_user_public_keys(public_key) ON DELETE CASCADE +); + +# 20220730105501_torrust_user_invitation_uses.sql + +CREATE TABLE IF NOT EXISTS torrust_user_invitation_uses ( + invitation_use_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + invitation_id INTEGER NOT NULL, + registered_user_id INTEGER NOT NULL, + date_used DATETIME NOT NULL, + FOREIGN KEY(invitation_id) REFERENCES torrust_user_invitations(invitation_id) ON DELETE CASCADE, + FOREIGN KEY(registered_user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +# 20220801201435_torrust_user_bans.sql + +CREATE TABLE IF NOT EXISTS torrust_user_bans ( + ban_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + user_id INTEGER NOT NULL, + reason TEXT NOT NULL, + date_expiry DATETIME NOT NULL, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +# 20220802161524_torrust_categories.sql + +CREATE TABLE torrust_categories ( + category_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + name VARCHAR(64) NOT NULL UNIQUE +); + +INSERT INTO torrust_categories (name) VALUES ('movies'), ('tv shows'), ('games'), ('music'), ('software'); + +#
20220810192613_torrust_torrents.sql + +CREATE TABLE IF NOT EXISTS torrust_torrents ( + torrent_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + uploader_id INTEGER NOT NULL, + category_id INTEGER NOT NULL, + info_hash CHAR(40) UNIQUE NOT NULL, + size BIGINT NOT NULL, + name TEXT NOT NULL, + pieces LONGTEXT NOT NULL, + piece_length BIGINT NOT NULL, + private BOOLEAN NULL DEFAULT NULL, + root_hash BOOLEAN NOT NULL DEFAULT FALSE, + date_uploaded DATETIME NOT NULL, + FOREIGN KEY(uploader_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE, + FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE +); + +# 20220810201538_torrust_torrent_files.sql + +CREATE TABLE IF NOT EXISTS torrust_torrent_files ( + file_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + torrent_id INTEGER NOT NULL, + md5sum TEXT NULL DEFAULT NULL, + length BIGINT NOT NULL, + path TEXT DEFAULT NULL, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE +); + +# 20220810201609_torrust_torrent_announce_urls.sql + +CREATE TABLE IF NOT EXISTS torrust_torrent_announce_urls ( + announce_url_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, + torrent_id INTEGER NOT NULL, + tracker_url VARCHAR(256) NOT NULL, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE +); + +# 20220812181520_torrust_torrent_info.sql + +CREATE TABLE IF NOT EXISTS torrust_torrent_info ( + torrent_id INTEGER NOT NULL PRIMARY KEY, + title VARCHAR(256) UNIQUE NOT NULL, + description TEXT DEFAULT NULL, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE +); + +# 20220812184806_torrust_torrent_tracker_stats.sql + +CREATE TABLE IF NOT EXISTS torrust_torrent_tracker_stats ( + torrent_id INTEGER NOT NULL PRIMARY KEY, + tracker_url VARCHAR(256) NOT NULL, + seeders INTEGER NOT NULL DEFAULT 0, + leechers INTEGER NOT NULL DEFAULT 0, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE, + UNIQUE(torrent_id, tracker_url) +); diff --git a/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v1.sql b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v1.sql new file mode 100644 index 00000000..214c4921 --- /dev/null +++ b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v1.sql @@ -0,0 +1,68 @@ +# 20210831113004_torrust_users.sql + +CREATE TABLE IF NOT EXISTS torrust_users ( + user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + username VARCHAR(32) NOT NULL UNIQUE, + email VARCHAR(100) NOT NULL UNIQUE, + email_verified BOOLEAN NOT NULL DEFAULT FALSE, + password TEXT NOT NULL +); + +# 20210904135524_torrust_tracker_keys.sql + +CREATE TABLE IF NOT EXISTS torrust_tracker_keys ( + key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + user_id INTEGER, + key VARCHAR(32) NOT NULL, + valid_until INT(10) NOT NULL, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) +); + +# 20210905160623_torrust_categories.sql + +CREATE TABLE torrust_categories ( + category_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + name VARCHAR(64) NOT NULL UNIQUE +); + +INSERT INTO torrust_categories (name) VALUES +('movies'), ('tv shows'), ('games'), ('music'), ('software'); + +# 20210907083424_torrust_torrent_files.sql + +CREATE TABLE IF NOT EXISTS torrust_torrent_files ( + file_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + torrent_id INTEGER NOT NULL, + number INTEGER NOT NULL, + path VARCHAR(255) NOT NULL, + length INTEGER NOT NULL, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) +); + +#
20211208143338_torrust_users.sql + +ALTER TABLE torrust_users +ADD COLUMN administrator BOOLEAN NOT NULL DEFAULT FALSE; + +# 20220308083424_torrust_torrents.sql + +CREATE TABLE IF NOT EXISTS torrust_torrents ( + torrent_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + uploader VARCHAR(32) NOT NULL, + info_hash VARCHAR(20) UNIQUE NOT NULL, + title VARCHAR(256) UNIQUE NOT NULL, + category_id INTEGER NOT NULL, + description TEXT, + upload_date INT(10) NOT NULL, + file_size BIGINT NOT NULL, + seeders INTEGER NOT NULL, + leechers INTEGER NOT NULL, + FOREIGN KEY(uploader) REFERENCES torrust_users(username) ON DELETE CASCADE, + FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE +); + +# 20220308170028_torrust_categories.sql + +ALTER TABLE torrust_categories +ADD COLUMN icon VARCHAR(32); + diff --git a/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v2.sql b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v2.sql new file mode 100644 index 00000000..b31aea68 --- /dev/null +++ b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v2.sql @@ -0,0 +1,152 @@ +#20220721205537_torrust_users.sql + +CREATE TABLE IF NOT EXISTS torrust_users ( + user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + date_registered TEXT NOT NULL, + administrator BOOL NOT NULL DEFAULT FALSE +); + +#20220721210530_torrust_user_authentication.sql + +CREATE TABLE IF NOT EXISTS torrust_user_authentication ( + user_id INTEGER NOT NULL PRIMARY KEY, + password_hash TEXT NOT NULL, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +#20220727213942_torrust_user_profiles.sql + +CREATE TABLE IF NOT EXISTS torrust_user_profiles ( + user_id INTEGER NOT NULL PRIMARY KEY, + username TEXT NOT NULL UNIQUE, + email TEXT UNIQUE, + email_verified BOOL NOT NULL DEFAULT FALSE, + bio TEXT, + avatar TEXT, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +#20220727222313_torrust_tracker_keys.sql + +CREATE TABLE IF NOT EXISTS torrust_tracker_keys ( + tracker_key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + tracker_key TEXT NOT NULL, + date_expiry INTEGER NOT NULL, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +#20220730102607_torrust_user_public_keys.sql + +CREATE TABLE IF NOT EXISTS torrust_user_public_keys ( + public_key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + public_key TEXT UNIQUE NOT NULL, + date_registered TEXT NOT NULL, + date_expiry TEXT NOT NULL, + revoked INTEGER NOT NULL DEFAULT 0, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +#20220730104552_torrust_user_invitations.sql + +CREATE TABLE IF NOT EXISTS torrust_user_invitations ( + invitation_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + public_key TEXT NOT NULL, + signed_digest TEXT NOT NULL, + date_begin TEXT NOT NULL, + date_expiry TEXT NOT NULL, + max_uses INTEGER NOT NULL, + personal_message TEXT, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE, + FOREIGN KEY(public_key) REFERENCES torrust_user_public_keys(public_key) ON DELETE CASCADE +); + +#20220730105501_torrust_user_invitation_uses.sql + +CREATE TABLE IF NOT EXISTS torrust_user_invitation_uses ( + invitation_use_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + invitation_id INTEGER NOT NULL, + registered_user_id INTEGER NOT NULL, + date_used TEXT NOT NULL, + FOREIGN KEY(invitation_id) REFERENCES
torrust_user_invitations(invitation_id) ON DELETE CASCADE, + FOREIGN KEY(registered_user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +#20220801201435_torrust_user_bans.sql + +CREATE TABLE IF NOT EXISTS torrust_user_bans ( + ban_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + user_id INTEGER NOT NULL, + reason TEXT NOT NULL, + date_expiry TEXT NOT NULL, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE +); + +#20220802161524_torrust_categories.sql + +CREATE TABLE torrust_categories ( + category_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL UNIQUE +); + +INSERT INTO torrust_categories (name) VALUES ('movies'), ('tv shows'), ('games'), ('music'), ('software'); + +#20220810192613_torrust_torrents.sql + +CREATE TABLE IF NOT EXISTS torrust_torrents ( + torrent_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + uploader_id INTEGER NOT NULL, + category_id INTEGER NOT NULL, + info_hash TEXT UNIQUE NOT NULL, + size INTEGER NOT NULL, + name TEXT NOT NULL, + pieces TEXT NOT NULL, + piece_length INTEGER NOT NULL, + private BOOLEAN NULL DEFAULT NULL, + root_hash INT NOT NULL DEFAULT 0, + date_uploaded TEXT NOT NULL, + FOREIGN KEY(uploader_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE, + FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE +); + +#20220810201538_torrust_torrent_files.sql + +CREATE TABLE IF NOT EXISTS torrust_torrent_files ( + file_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + torrent_id INTEGER NOT NULL, + md5sum TEXT NULL DEFAULT NULL, + length BIGINT NOT NULL, + path TEXT DEFAULT NULL, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE +); + +#20220810201609_torrust_torrent_announce_urls.sql + +CREATE TABLE IF NOT EXISTS torrust_torrent_announce_urls ( + announce_url_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + torrent_id INTEGER NOT NULL, + tracker_url TEXT NOT NULL, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE +); + +#20220812181520_torrust_torrent_info.sql + +CREATE TABLE IF NOT EXISTS torrust_torrent_info ( + torrent_id INTEGER NOT NULL PRIMARY KEY, + title VARCHAR(256) UNIQUE NOT NULL, + description TEXT DEFAULT NULL, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE +); + +#20220812184806_torrust_torrent_tracker_stats.sql + +CREATE TABLE IF NOT EXISTS torrust_torrent_tracker_stats ( + torrent_id INTEGER NOT NULL PRIMARY KEY, + tracker_url VARCHAR(256) NOT NULL, + seeders INTEGER NOT NULL DEFAULT 0, + leechers INTEGER NOT NULL DEFAULT 0, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE, + UNIQUE(torrent_id, tracker_url) +);
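+ +-- Illustrative spot check, not part of the original migration set: after running +-- the upgrader, each transferred user should have matching rows in the three v2 +-- user tables above. In this join, a fully transferred user appears exactly once, +-- while a user missing its profile or authentication row drops out of the result: +SELECT u.user_id, p.username, p.email, a.password_hash +FROM torrust_users u +JOIN torrust_user_profiles p ON p.user_id = u.user_id +JOIN torrust_user_authentication a ON a.user_id = u.user_id;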