diff --git a/.github/workflows/clippy-lint.yaml b/.github/workflows/clippy-lint.yaml index 6480b59d5..fa2c45fac 100644 --- a/.github/workflows/clippy-lint.yaml +++ b/.github/workflows/clippy-lint.yaml @@ -25,7 +25,7 @@ jobs: target: x86_64-unknown-linux-musl steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: profile: minimal diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index b35d1974e..5c20325c1 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -1,5 +1,4 @@ -# Performs test coverage of project's libraries using cargo-tarpaulin and the message-generator, -# and generates results using codecov.io. +# Performs test coverage of project's libraries using cargo-tarpaulin and generates results using codecov.io. # The following flags are set inside `tarpaulin.toml`: # `features = "..."`: Includes the code with the listed features. The following features result in a # tarpaulin error and are NOT included: derive, alloc, arbitrary-derive, attributes, and @@ -37,7 +36,7 @@ jobs: - name: Generate code coverage run: | - ./tarpaulin.sh + ./scripts/tarpaulin.sh - name: Archive Tarpaulin code coverage results uses: actions/upload-artifact@v4 @@ -47,116 +46,3 @@ jobs: protocols/cobertura.xml roles/cobertura.xml utils/cobertura.xml - - message-generator-test: - needs: tarpaulin-test - - name: MG Test - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - uses: actions-rs/toolchain@v1 - with: - toolchain: 1.75.0 - override: true - components: llvm-tools-preview - - - name: Log data from rustc - run: rustc -Vv - - - name: Install cargo-llvm-cov - uses: taiki-e/install-action@cargo-llvm-cov - - - name: Run bad-pool-config-test - run: sh ./test/message-generator/test/bad-pool-config-test/bad-pool-config-test.sh - - - name: Run interop-jd-translator - run: sh ./test/message-generator/test/interop-jd-translator/interop-jd-translator.sh - - #- name: Run interop-jdc-change-upstream - # run: sh ./test/message-generator/test/interop-jdc-change-upstream/interop-jdc-change-upstream.sh - - - name: Run interop-proxy-with-multi-ups - run: sh ./test/message-generator/test/interop-proxy-with-multi-ups/interop-proxy-with-multi-ups.sh - - - name: Run interop-proxy-with-multi-ups-extended - run: sh ./test/message-generator/test/interop-proxy-with-multi-ups-extended/interop-proxy-with-multi-ups-extended.sh - - - name: Run jds-do-not-fail-on-wrong-tsdatasucc - run: sh ./test/message-generator/test/jds-do-not-fail-on-wrong-tsdatasucc/jds-do-not-fail-on-wrong-tsdatasucc.sh - - - name: Run jds-do-not-panic-if-jdc-close-connection - run: sh ./test/message-generator/test/jds-do-not-panic-if-jdc-close-connection/jds-do-not-panic-if-jdc-close-connection.sh - - - name: Run jds-do-not-stackoverflow-when-no-token - run: sh ./test/message-generator/test/jds-do-not-stackoverflow-when-no-token/jds-do-not-stackoverflow-when-no-token.sh - - - name: Run pool-sri-test-1-standard - run: sh ./test/message-generator/test/pool-sri-test-1-standard/pool-sri-test-1-standard.sh - - - name: Run pool-sri-test-close-channel - run: sh ./test/message-generator/test/pool-sri-test-close-channel/pool-sri-test-close-channel.sh - - - name: Run pool-sri-test-extended_0 - run: sh ./test/message-generator/test/pool-sri-test-extended_0/pool-sri-test-extended_0.sh - - - name: Run pool-sri-test-extended_1 - run: sh ./test/message-generator/test/pool-sri-test-extended_1/pool-sri-test-extended_1.sh - - - 
name: Run pool-sri-test-reject-auth - run: sh ./test/message-generator/test/pool-sri-test-reject-auth/pool-sri-test-reject-auth.sh - - - name: Run standard-coverage - run: sh ./test/message-generator/test/standard-coverage-test/standard-coverage-test.sh - - - name: Run sv1-test - run: sh ./test/message-generator/test/sv1-test/sv1-test.sh - - - name: Run translation-proxy-broke-pool - run: sh ./test/message-generator/test/translation-proxy-broke-pool/translation-proxy-broke-pool.sh - - - name: Run translation-proxy - run: sh ./test/message-generator/test/translation-proxy/translation-proxy.sh - - - name: Coverage report - run: sh ./code-coverage-report.sh - - - name: Archive MG code coverage results - uses: actions/upload-artifact@v4 - with: - name: coverage-report - path: 'target/*.xml' - - - name: Archive log files - if: always() - uses: actions/upload-artifact@v4 - with: - name: logs - path: './utils/message-generator/*.log' - - # codecov: - # needs: message-generator-test - - # name: Codecov Upload - # runs-on: ubuntu-latest - - # steps: - - # - name: Checkout repository - # uses: actions/checkout@v4 - - # - name: Download all workflow run artifacts - # uses: actions/download-artifact@v4 - - # - name: Display structure of downloaded files - # run: ls -R - - # - name: Upload to codecov.io - # uses: codecov/codecov-action@v3 - # with: - # files: coverage-report/*.xml, tarpaulin-report/*.xml - # fail_ci_if_error: true - # token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/fmt.yaml b/.github/workflows/fmt.yaml index 6a4d900ad..b0b3ecc18 100644 --- a/.github/workflows/fmt.yaml +++ b/.github/workflows/fmt.yaml @@ -25,7 +25,7 @@ jobs: target: x86_64-unknown-linux-musl steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: profile: minimal diff --git a/.github/workflows/mg.yaml b/.github/workflows/mg.yaml new file mode 100644 index 000000000..8ee27a9c3 --- /dev/null +++ b/.github/workflows/mg.yaml @@ -0,0 +1,221 @@ +# Runs all Message Generator tests in separate jobs + +name: MG Test + +on: + push: + branches: [ main, dev ] + pull_request: + branches: [ main, dev ] + +jobs: + bad-pool-config-test: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install cargo-llvm-cov + run: cargo install cargo-llvm-cov + - name: Run bad-pool-config-test + run: sh ./test/message-generator/test/bad-pool-config-test/bad-pool-config-test.sh + + interop-jd-translator: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run interop-jd-translator + run: sh ./test/message-generator/test/interop-jd-translator/interop-jd-translator.sh + + interop-proxy-with-multi-ups: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run interop-proxy-with-multi-ups + run: sh ./test/message-generator/test/interop-proxy-with-multi-ups/interop-proxy-with-multi-ups.sh + + interop-proxy-with-multi-ups-extended: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run interop-proxy-with-multi-ups-extended + run: sh ./test/message-generator/test/interop-proxy-with-multi-ups-extended/interop-proxy-with-multi-ups-extended.sh + + jds-do-not-fail-on-wrong-tsdatasucc: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run jds-do-not-fail-on-wrong-tsdatasucc + run: sh 
./test/message-generator/test/jds-do-not-fail-on-wrong-tsdatasucc/jds-do-not-fail-on-wrong-tsdatasucc.sh + + jds-do-not-panic-if-jdc-close-connection: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run jds-do-not-panic-if-jdc-close-connection + run: sh ./test/message-generator/test/jds-do-not-panic-if-jdc-close-connection/jds-do-not-panic-if-jdc-close-connection.sh + + jds-do-not-stackoverflow-when-no-token: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run jds-do-not-stackoverflow-when-no-token + run: sh ./test/message-generator/test/jds-do-not-stackoverflow-when-no-token/jds-do-not-stackoverflow-when-no-token.sh + + jds-receive-solution-while-processing-declared-job: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install cargo-llvm-cov + run: cargo install cargo-llvm-cov + - name: Run jds-receive-solution-while-processing-declared-job + run: sh ./test/message-generator/test/jds-receive-solution-while-processing-declared-job/jds-receive-solution-while-processing-declared-job.sh + + pool-sri-test-1-standard: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install cargo-llvm-cov + run: cargo install cargo-llvm-cov + - name: Run pool-sri-test-1-standard + run: sh ./test/message-generator/test/pool-sri-test-1-standard/pool-sri-test-1-standard.sh + + pool-sri-test-close-channel: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run pool-sri-test-close-channel + run: sh ./test/message-generator/test/pool-sri-test-close-channel/pool-sri-test-close-channel.sh + + pool-sri-test-extended_0: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install cargo-llvm-cov + run: cargo install cargo-llvm-cov + - name: Run pool-sri-test-extended_0 + run: sh ./test/message-generator/test/pool-sri-test-extended_0/pool-sri-test-extended_0.sh + + pool-sri-test-extended_1: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install cargo-llvm-cov + run: cargo install cargo-llvm-cov + - name: Run pool-sri-test-extended_1 + run: sh ./test/message-generator/test/pool-sri-test-extended_1/pool-sri-test-extended_1.sh + + pool-sri-test-reject-auth: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install cargo-llvm-cov + run: cargo install cargo-llvm-cov + - name: Run pool-sri-test-reject-auth + run: sh ./test/message-generator/test/pool-sri-test-reject-auth/pool-sri-test-reject-auth.sh + + standard-coverage: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install cargo-llvm-cov + run: cargo install cargo-llvm-cov + - name: Run standard-coverage + run: sh ./test/message-generator/test/standard-coverage-test/standard-coverage-test.sh + + sv1-test: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run sv1-test + run: sh ./test/message-generator/test/sv1-test/sv1-test.sh + + translation-proxy-broke-pool: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run translation-proxy-broke-pool + run: sh ./test/message-generator/test/translation-proxy-broke-pool/translation-proxy-broke-pool.sh + + translation-proxy: + runs-on: ubuntu-latest + steps: + - name: 
Checkout repository + uses: actions/checkout@v4 + - name: Install cargo-llvm-cov + run: cargo install cargo-llvm-cov + - name: Run translation-proxy + run: sh ./test/message-generator/test/translation-proxy/translation-proxy.sh + + translation-proxy-old-share: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run translation-proxy-old-share + run: sh ./test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.sh + + mg-aggregate-results: + name: "Aggregate MG Test Results" + runs-on: ubuntu-latest + if: always() + needs: [ + bad-pool-config-test, + interop-jd-translator, + interop-proxy-with-multi-ups, + interop-proxy-with-multi-ups-extended, + jds-do-not-fail-on-wrong-tsdatasucc, + jds-do-not-panic-if-jdc-close-connection, + jds-do-not-stackoverflow-when-no-token, + jds-receive-solution-while-processing-declared-job, + pool-sri-test-1-standard, + pool-sri-test-close-channel, + pool-sri-test-extended_0, + pool-sri-test-extended_1, + pool-sri-test-reject-auth, + standard-coverage, + sv1-test, + translation-proxy-broke-pool, + translation-proxy, + translation-proxy-old-share + ] + steps: + - name: Aggregate Results + run: | + if [ "${{ needs.bad-pool-config-test.result }}" != "success" ] || + [ "${{ needs.interop-jd-translator.result }}" != "success" ] || + [ "${{ needs.interop-proxy-with-multi-ups.result }}" != "success" ] || + [ "${{ needs.interop-proxy-with-multi-ups-extended.result }}" != "success" ] || + [ "${{ needs.jds-do-not-fail-on-wrong-tsdatasucc.result }}" != "success" ] || + [ "${{ needs.jds-do-not-panic-if-jdc-close-connection.result }}" != "success" ] || + [ "${{ needs.jds-do-not-stackoverflow-when-no-token.result }}" != "success" ] || + [ "${{ needs.jds-receive-solution-while-processing-declared-job.result }}" != "success" ] || + [ "${{ needs.pool-sri-test-1-standard.result }}" != "success" ] || + [ "${{ needs.pool-sri-test-close-channel.result }}" != "success" ] || + [ "${{ needs.pool-sri-test-extended_0.result }}" != "success" ] || + [ "${{ needs.pool-sri-test-extended_1.result }}" != "success" ] || + [ "${{ needs.pool-sri-test-reject-auth.result }}" != "success" ] || + [ "${{ needs.standard-coverage.result }}" != "success" ] || + [ "${{ needs.sv1-test.result }}" != "success" ] || + [ "${{ needs.translation-proxy-broke-pool.result }}" != "success" ] || + [ "${{ needs.translation-proxy.result }}" != "success" ] || + [ "${{ needs.translation-proxy-old-share.result }}" != "success" ]; then + echo "One or more jobs failed." + exit 1 + else + echo "All MG tests completed successfully" + fi \ No newline at end of file diff --git a/.github/workflows/release-bin.yaml b/.github/workflows/release-bin.yaml deleted file mode 100644 index 87f2eaa3e..000000000 --- a/.github/workflows/release-bin.yaml +++ /dev/null @@ -1,419 +0,0 @@ -# This workflow is used to create a new release with a binary distribution or SRI roles -# If the binary releases fails due to not having tags, force run the `autoversion` workflow -# on the main branch and merge the resulting PR to create the tags and move them to the main branch. 
- -name: Release Binaries - -on: - # Manually run by going to "Actions/Release" in Github and running the workflow - workflow_dispatch: - -jobs: - release_pool: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-20.04, macos-latest] - steps: - - uses: actions/checkout@v4 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - - name: Set env - run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Compile Native - run: cargo build --release --locked --manifest-path=roles/pool/Cargo.toml - - - name: Install cross - run: cargo install cross - - - name: Compile Binaries for aarch64-unknown-linux-gnu - if: matrix.os == 'ubuntu-20.04' - run: cross build --release --locked --manifest-path=roles/pool/Cargo.toml --target aarch64-unknown-linux-gnu - - - name: Compile Binaries for arm-unknown-linux-gnueabi - if: matrix.os == 'ubuntu-20.04' - run: cross build --release --locked --manifest-path=roles/pool/Cargo.toml --target arm-unknown-linux-gnueabi - - - name: Install aarch64-apple-darwin target - if: matrix.os == 'macos-latest' - run: rustup target add aarch64-apple-darwin - - - name: Compile MacOS ARM64 - if: matrix.os == 'macos-latest' - run: cargo build --release --locked --manifest-path=roles/pool/Cargo.toml --target=aarch64-apple-darwin - - - name: Upload Linux x86-64 binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/release/pool_sv2 - asset_name: pool-sv2-${{ env.RELEASE_VERSION }}-x86_64-linux-gnu - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload MacOS x86-64 binaries to release - if: matrix.os == 'macos-latest' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/release/pool_sv2 - asset_name: pool-sv2-${{ env.RELEASE_VERSION }}-x86_64-apple-darwin - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload Linux aarch64 binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/aarch64-unknown-linux-gnu/release/pool_sv2 - asset_name: pool-sv2-${{ env.RELEASE_VERSION }}-aarch64-linux-gnu - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload Linux ARM binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/arm-unknown-linux-gnueabi/release/pool_sv2 - asset_name: pool-sv2-${{ env.RELEASE_VERSION }}-arm-linux-gnueabi - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload MacOS ARM64 binaries to release - if: matrix.os == 'macos-latest' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/aarch64-apple-darwin/release/pool_sv2 - asset_name: pool-sv2-${{ env.RELEASE_VERSION }}-aarch64-apple-darwin - tag: ${{ env.RELEASE_VERSION }} - - release_jdc: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ ubuntu-20.04, macos-latest ] - steps: - - uses: actions/checkout@v4 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - name: Compile Native - run: cargo build --release --locked --manifest-path=roles/jd-client/Cargo.toml - - - name: Set env - run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Install cross - run: cargo install cross - - - name: Compile Binaries for aarch64-unknown-linux-gnu - if: matrix.os == 'ubuntu-20.04' - run: 
cross build --release --locked --manifest-path=roles/jd-client/Cargo.toml --target aarch64-unknown-linux-gnu - - - name: Compile Binaries for arm-unknown-linux-gnueabi - if: matrix.os == 'ubuntu-20.04' - run: cross build --release --locked --manifest-path=roles/jd-client/Cargo.toml --target arm-unknown-linux-gnueabi - - - name: Install aarch64-apple-darwin target - if: matrix.os == 'macos-latest' - run: rustup target add aarch64-apple-darwin - - - name: Compile MacOS ARM64 - if: matrix.os == 'macos-latest' - run: cargo build --release --locked --manifest-path=roles/jd-client/Cargo.toml --target=aarch64-apple-darwin - - - name: Upload Linux x86-64 binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/release/jd_client - asset_name: jd-client-sv2-${{ env.RELEASE_VERSION }}-x86_64-linux-gnu - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload Linux aarch64 binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/aarch64-unknown-linux-gnu/release/jd_client - asset_name: jd-client-sv2-${{ env.RELEASE_VERSION }}-aarch64-linux-gnu - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload Linux ARM binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/arm-unknown-linux-gnueabi/release/jd_client - asset_name: jd-client-sv2-${{ env.RELEASE_VERSION }}-arm-linux-gnueabi - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload MacOS x86-64 binaries to release - if: matrix.os == 'macos-latest' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/release/jd_client - asset_name: jd-client-sv2-${{ env.RELEASE_VERSION }}-x86_64-apple-darwin - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload MacOS ARM64 binaries to release - if: matrix.os == 'macos-latest' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/aarch64-apple-darwin/release/jd_client - asset_name: jd-client-sv2-${{ env.RELEASE_VERSION }}-aarch64-apple-darwin - tag: ${{ env.RELEASE_VERSION }} - - release_jds: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ ubuntu-20.04, macos-latest ] - steps: - - uses: actions/checkout@v4 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - - name: Set env - run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Compile Native - run: cargo build --release --locked --manifest-path=roles/jd-server/Cargo.toml - - - name: Install cross - run: cargo install cross - - - name: Compile Binaries for aarch64-unknown-linux-gnu - if: matrix.os == 'ubuntu-20.04' - run: cross build --release --locked --manifest-path=roles/jd-server/Cargo.toml --target aarch64-unknown-linux-gnu - - - name: Compile Binaries for arm-unknown-linux-gnueabi - if: matrix.os == 'ubuntu-20.04' - run: cross build --release --locked --manifest-path=roles/jd-server/Cargo.toml --target arm-unknown-linux-gnueabi - - - name: Install aarch64-apple-darwin target - if: matrix.os == 'macos-latest' - run: rustup target add aarch64-apple-darwin - - - name: Compile MacOS ARM64 - if: matrix.os == 'macos-latest' - run: cargo build --release --locked --manifest-path=roles/jd-server/Cargo.toml --target=aarch64-apple-darwin - - - name: Upload Linux x86-64 
binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/release/jd_server - asset_name: jd-server-sv2-${{ env.RELEASE_VERSION }}-x86_64-linux-gnu - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload Linux aarch64 binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/aarch64-unknown-linux-gnu/release/jd_server - asset_name: jd-server-sv2-${{ env.RELEASE_VERSION }}-aarch64-linux-gnu - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload Linux ARM binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/arm-unknown-linux-gnueabi/release/jd_server - asset_name: jd-server-sv2-${{ env.RELEASE_VERSION }}-arm-linux-gnueabi - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload MacOS x86-64 binaries to release - if: matrix.os == 'macos-latest' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/release/jd_server - asset_name: jd-server-sv2-${{ env.RELEASE_VERSION }}-x86_64-apple-darwin - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload MacOS ARM64 binaries to release - if: matrix.os == 'macos-latest' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/aarch64-apple-darwin/release/jd_server - asset_name: jd-server-sv2-${{ env.RELEASE_VERSION }}-aarch64-apple-darwin - tag: ${{ env.RELEASE_VERSION }} - - release_proxy: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ ubuntu-20.04, macos-latest ] - steps: - - uses: actions/checkout@v4 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - - name: Set env - run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Compile Native - run: cargo build --release --locked --manifest-path=roles/mining-proxy/Cargo.toml - - - name: Install cross - run: cargo install cross - - - name: Compile Binaries for aarch64-unknown-linux-gnu - if: matrix.os == 'ubuntu-20.04' - run: cross build --release --locked --manifest-path=roles/mining-proxy/Cargo.toml --target aarch64-unknown-linux-gnu - - - name: Compile Binaries for arm-unknown-linux-gnueabi - if: matrix.os == 'ubuntu-20.04' - run: cross build --release --locked --manifest-path=roles/mining-proxy/Cargo.toml --target arm-unknown-linux-gnueabi - - - name: Install aarch64-apple-darwin target - if: matrix.os == 'macos-latest' - run: rustup target add aarch64-apple-darwin - - - name: Compile MacOS ARM64 - if: matrix.os == 'macos-latest' - run: cargo build --release --locked --manifest-path=roles/mining-proxy/Cargo.toml --target=aarch64-apple-darwin - - - name: Upload Linux x86-64 binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/release/mining_proxy_sv2 - asset_name: mining-proxy-sv2-${{ env.RELEASE_VERSION }}-x86_64-linux-gnu - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload Linux aarch64 binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/aarch64-unknown-linux-gnu/release/mining_proxy_sv2 - asset_name: mining-proxy-sv2-${{ env.RELEASE_VERSION }}-aarch64-linux-gnu - tag: ${{ 
env.RELEASE_VERSION }} - - - name: Upload Linux ARM binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/arm-unknown-linux-gnueabi/release/mining_proxy_sv2 - asset_name: mining-proxy-sv2-${{ env.RELEASE_VERSION }}-arm-linux-gnueabi - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload MacOS x86-64 binaries to release - if: matrix.os == 'macos-latest' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/release/mining_proxy_sv2 - asset_name: mining-proxy-sv2-${{ env.RELEASE_VERSION }}-x86_64-apple-darwin - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload MacOS ARM64 binaries to release - if: matrix.os == 'macos-latest' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/aarch64-apple-darwin/release/mining_proxy_sv2 - asset_name: mining-proxy-sv2-${{ env.RELEASE_VERSION }}-aarch64-apple-darwin - tag: ${{ env.RELEASE_VERSION }} - - release_translator: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ ubuntu-20.04, macos-latest ] - steps: - - uses: actions/checkout@v4 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - - name: Set env - run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - - name: Compile Native - run: cargo build --release --locked --manifest-path=roles/translator/Cargo.toml - - - name: Install cross - run: cargo install cross - - - name: Compile Binaries for aarch64-unknown-linux-gnu - if: matrix.os == 'ubuntu-20.04' - run: cross build --release --locked --manifest-path=roles/translator/Cargo.toml --target aarch64-unknown-linux-gnu - - - name: Compile Binaries for arm-unknown-linux-gnueabi - if: matrix.os == 'ubuntu-20.04' - run: cross build --release --locked --manifest-path=roles/translator/Cargo.toml --target arm-unknown-linux-gnueabi - - - name: Install aarch64-apple-darwin target - if: matrix.os == 'macos-latest' - run: rustup target add aarch64-apple-darwin - - - name: Compile MacOS ARM64 - if: matrix.os == 'macos-latest' - run: cargo build --release --locked --manifest-path=roles/translator/Cargo.toml --target=aarch64-apple-darwin - - - name: Upload Linux x86-64 binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/release/translator_sv2 - asset_name: translator-sv2-${{ env.RELEASE_VERSION }}-x86_64-linux-gnu - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload Linux aarch64 binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/aarch64-unknown-linux-gnu/release/translator_sv2 - asset_name: translator-sv2-${{ env.RELEASE_VERSION }}-aarch64-linux-gnu - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload Linux ARM binaries to release - if: matrix.os == 'ubuntu-20.04' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/arm-unknown-linux-gnueabi/release/translator_sv2 - asset_name: translator-sv2-${{ env.RELEASE_VERSION }}-arm-linux-gnueabi - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload MacOS x86-64 binaries to release - if: matrix.os == 'macos-latest' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/release/translator_sv2 - 
asset_name: translator-sv2-${{ env.RELEASE_VERSION }}-x86_64-apple-darwin - tag: ${{ env.RELEASE_VERSION }} - - - name: Upload MacOS ARM64 binaries to release - if: matrix.os == 'macos-latest' - uses: svenstaro/upload-release-action@v2 - with: - repo_token: ${{ secrets.GITHUB_TOKEN }} - file: roles/target/aarch64-apple-darwin/release/translator_sv2 - asset_name: translator-sv2-${{ env.RELEASE_VERSION }}-aarch64-apple-darwin - tag: ${{ env.RELEASE_VERSION }} diff --git a/.github/workflows/release-libs.yaml b/.github/workflows/release-libs.yaml index ac7883dba..24e925196 100644 --- a/.github/workflows/release-libs.yaml +++ b/.github/workflows/release-libs.yaml @@ -11,7 +11,7 @@ name: Release Libs on: - pull_request: + push: branches: - main @@ -22,14 +22,6 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - - name: Run check-versioning-lib-release.sh - run: | - ./check-versioning-lib-release.sh - if [ $? -eq 1 ]; then - echo "Script returned exit code 1, halting the workflow" - exit 1 - fi - - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: diff --git a/.github/workflows/run-and-track-benchmarks-on-main.yaml b/.github/workflows/run-and-track-benchmarks-on-main.yaml index 54a21b9e1..33be08bc5 100644 --- a/.github/workflows/run-and-track-benchmarks-on-main.yaml +++ b/.github/workflows/run-and-track-benchmarks-on-main.yaml @@ -76,7 +76,7 @@ jobs: toolchain: 1.75.0 override: true - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Install Valgrind run: | sudo apt-get update diff --git a/.github/workflows/rust-msrv.yaml b/.github/workflows/rust-msrv.yaml new file mode 100644 index 000000000..3384e4a2f --- /dev/null +++ b/.github/workflows/rust-msrv.yaml @@ -0,0 +1,37 @@ +on: + push: + branches: + - main + - dev + pull_request: + branches: + - main + - dev + +name: MSRV 1.75 Check + +jobs: + + build: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + rust: + - 1.75.0 # MSRV + + steps: + - uses: actions/checkout@v2 + - uses: Swatinem/rust-cache@v1.2.0 + - uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ matrix.rust }} + override: true + - name: Build Benches + run: cargo build --manifest-path=benches/Cargo.toml + - name: Build Protocols + run: cargo build --manifest-path=protocols/Cargo.toml + - name: Build Roles + run: cargo build --manifest-path=roles/Cargo.toml + - name: Build Utils + run: cargo build --manifest-path=utils/Cargo.toml diff --git a/.github/workflows/semver-check.yaml b/.github/workflows/semver-check.yaml new file mode 100644 index 000000000..9a2a45ad3 --- /dev/null +++ b/.github/workflows/semver-check.yaml @@ -0,0 +1,131 @@ +name: Semver Check + +on: + push: + branches: + - "main" + - "dev" + pull_request: + branches: + - "main" + - "dev" + +jobs: + semver-check: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + override: true + + - name: Cache Cargo registry + uses: actions/cache@v2 + with: + path: ~/.cargo/registry + key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo-registry- + + - name: Cache Cargo index + uses: actions/cache@v2 + with: + path: ~/.cargo/git + key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo-index- + + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y cmake + + - name: Install 
cargo-semver-checks + run: cargo install cargo-semver-checks --locked + + - name: Run semver checks for common + working-directory: common + run: cargo semver-checks + + - name: Run semver checks for utils/buffer + working-directory: utils/buffer + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/binary-sv2/no-serde-sv2/codec + working-directory: protocols/v2/binary-sv2/no-serde-sv2/codec + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/binary-sv2/serde-sv2 + working-directory: protocols/v2/binary-sv2/serde-sv2 + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/binary-sv2/binary-sv2 + working-directory: protocols/v2/binary-sv2/binary-sv2 + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/const-sv2 + working-directory: protocols/v2/const-sv2 + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/framing-sv2 + working-directory: protocols/v2/framing-sv2 + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/noise-sv2 + working-directory: protocols/v2/noise-sv2 + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/codec-sv2 + working-directory: protocols/v2/codec-sv2 + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/subprotocols/common-messages + working-directory: protocols/v2/subprotocols/common-messages + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/subprotocols/job-declaration + working-directory: protocols/v2/subprotocols/job-declaration + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/subprotocols/mining + working-directory: protocols/v2/subprotocols/mining + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/subprotocols/template-distribution + working-directory: protocols/v2/subprotocols/template-distribution + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/sv2-ffi + working-directory: protocols/v2/sv2-ffi + run: cargo semver-checks + + - name: Run semver checks for protocols/v2/roles-logic-sv2 + working-directory: protocols/v2/roles-logic-sv2 + run: cargo semver-checks --default-features + + - name: Run semver checks for protocols/v1 + working-directory: protocols/v1 + run: cargo semver-checks + + - name: Run semver checks for utils/bip32-key-derivation + working-directory: utils/bip32-key-derivation + run: cargo semver-checks + + - name: Run semver checks for utils/error-handling + working-directory: utils/error-handling + run: cargo semver-checks + + - name: Run semver checks for utils/key-utils + working-directory: utils/key-utils + run: cargo semver-checks + + - name: Run semver checks for roles/roles-utils/network-helpers + working-directory: roles/roles-utils/network-helpers + run: cargo semver-checks + + - name: Run semver checks for roles/roles-utils/rpc + working-directory: roles/roles-utils/rpc + run: cargo semver-checks \ No newline at end of file diff --git a/.github/workflows/sv2-header-check.yaml b/.github/workflows/sv2-header-check.yaml index 800256ef7..732bd316b 100644 --- a/.github/workflows/sv2-header-check.yaml +++ b/.github/workflows/sv2-header-check.yaml @@ -19,7 +19,7 @@ jobs: target: x86_64-unknown-linux-musl steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - uses: actions-rs/toolchain@v1 with: @@ -29,4 +29,4 @@ jobs: - name: Check sv2 header file is up to date with commit run: | echo Check sv2 header file is up to date with commit - sh ./sv2-header-check.sh + sh 
./scripts/sv2-header-check.sh
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 78c5cf37f..5b669ebc0 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -26,7 +26,7 @@ jobs:
     steps:
       - name: Install stable toolchain & components
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           profile: minimal
           toolchain: nightly
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3a70a6a3e..baf13845a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -83,6 +83,8 @@ The SRI project follows an open contributor model, where anyone is welcome to co
 2. **Create a Branch**
 
 3. **Commit Your Changes**
+
+   **Note:** Commit messages should describe both the issue being fixed and the rationale behind the solution. Keep these [guidelines](https://chris.beams.io/posts/git-commit/) in mind.
 
 4. **Run Tests, Clippy, and Formatter:**
diff --git a/INTEROPERABILITY-TESTS.md b/INTEROPERABILITY-TESTS.md
index 8791eff81..a78d08564 100644
--- a/INTEROPERABILITY-TESTS.md
+++ b/INTEROPERABILITY-TESTS.md
@@ -2,6 +2,10 @@
 
 How to test Sv2 compliant software against the SRI implementation.
 
+## Requirements
+
+- [Cargo LLVM Cov](https://github.com/taiki-e/cargo-llvm-cov#installation)
+
 ## With Message Generator (MG)
 
 First, you need to write a test that can be executed by the message generator. In order to do
diff --git a/README.md b/README.md
index 71df3afc7..b1b2cd119 100644
--- a/README.md
+++ b/README.md
@@ -69,6 +69,12 @@ Our roadmap is publicly available, outlining current and future plans. Decisions
 
 [View the SRI Roadmap](https://github.com/orgs/stratum-mining/projects/5)
 
+### 🏅 Project Maturity
+
+Low-level crates (`protocols` directory) are considered **beta** software. The Rust API docs are a [work in progress](https://github.com/stratum-mining/stratum/issues/845), and the community should still expect small breaking API changes and patches.
+
+Application-level crates (`roles` directory) are considered **alpha** software, and bugs are expected. They should be used as a guide on how to consume the low-level crates as dependencies.
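+
+As a rough, hypothetical sketch of what consuming the low-level crates looks like (the crate names match the ones published from this repository; pin versions as needed):
+
+```sh
+# Hypothetical downstream project pulling two of the protocols crates from crates.io.
+cargo new sv2-consumer && cd sv2-consumer
+cargo add roles_logic_sv2
+cargo add codec_sv2
+```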
+ ### 🎯 Goals The goals of this project are to provide: diff --git a/benches/Cargo.lock b/benches/Cargo.lock index 946b7c02b..396474838 100644 --- a/benches/Cargo.lock +++ b/benches/Cargo.lock @@ -373,7 +373,7 @@ dependencies = [ [[package]] name = "buffer_sv2" -version = "1.0.0" +version = "1.1.0" dependencies = [ "aes-gcm", ] @@ -497,7 +497,7 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "codec_sv2" -version = "1.1.0" +version = "1.2.0" dependencies = [ "binary_sv2", "buffer_sv2", @@ -1564,7 +1564,7 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "sv1_api" -version = "1.0.0" +version = "1.0.1" dependencies = [ "binary_sv2", "bitcoin_hashes 0.3.2", diff --git a/benches/Cargo.toml b/benches/Cargo.toml index ecba50ab8..525c526e5 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -12,7 +12,7 @@ serde_json = { version = "1.0.64", default-features = false, features = ["alloc" iai="0.1" mining_sv2 = { path = "../protocols/v2/subprotocols/mining", version = "^1.0.0" } roles_logic_sv2 = { path = "../protocols/v2/roles-logic-sv2", version = "^1.0.0" } -framing_sv2 = { version = "1.1.0", path = "../protocols/v2/framing-sv2" } +framing_sv2 = { version = "2.0.0", path = "../protocols/v2/framing-sv2" } serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] } num-bigint = "0.4.3" num-traits = "0.2.15" diff --git a/benches/benches/src/sv2/criterion_sv2_benchmark.rs b/benches/benches/src/sv2/criterion_sv2_benchmark.rs index 7aa35f158..18fab853d 100644 --- a/benches/benches/src/sv2/criterion_sv2_benchmark.rs +++ b/benches/benches/src/sv2/criterion_sv2_benchmark.rs @@ -1,4 +1,4 @@ -use codec_sv2::{Frame, StandardEitherFrame, StandardSv2Frame}; +use codec_sv2::{StandardEitherFrame, StandardSv2Frame}; use criterion::{black_box, Criterion}; use roles_logic_sv2::{ handlers::{common::ParseUpstreamCommonMessages, mining::ParseUpstreamMiningMessages}, diff --git a/benches/benches/src/sv2/iai_sv2_benchmark.rs b/benches/benches/src/sv2/iai_sv2_benchmark.rs index 2cab39cc4..b049b9dc4 100644 --- a/benches/benches/src/sv2/iai_sv2_benchmark.rs +++ b/benches/benches/src/sv2/iai_sv2_benchmark.rs @@ -1,4 +1,4 @@ -use codec_sv2::{Frame, StandardEitherFrame, StandardSv2Frame}; +use codec_sv2::{StandardEitherFrame, StandardSv2Frame}; use iai::{black_box, main}; use roles_logic_sv2::{ handlers::{common::ParseUpstreamCommonMessages, mining::ParseUpstreamMiningMessages, SendTo_}, diff --git a/build_header.sh b/build_header.sh deleted file mode 100755 index 3d798731b..000000000 --- a/build_header.sh +++ /dev/null @@ -1,17 +0,0 @@ -#! /bin/sh - -cargo install --version 0.20.0 cbindgen - -rm -f ./sv2.h -touch ./sv2.h - -dir=${1:-protocols} - -cd "$dir" - cbindgen --crate const_sv2 >> ../sv2.h - cbindgen --crate binary_codec_sv2 >> ../sv2.h - cbindgen --crate common_messages_sv2 >> ../sv2.h - cbindgen --crate template_distribution_sv2 >> ../sv2.h - cbindgen --crate codec_sv2 >> ../sv2.h - cbindgen --crate sv2_ffi >> ../sv2.h -cd .. 
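The helper deleted above was relocated rather than removed: references elsewhere in this diff now point at `scripts/build_header.sh` (see `examples/interop-cpp/run.sh` and `examples/interop-cpp/README.md`). Below is a minimal sketch of the flow the relocated script is expected to implement, reconstructed from the deleted copy; the exact paths inside `scripts/build_header.sh` are an assumption.

```sh
# Sketch reconstructed from the deleted build_header.sh above; the relocated
# copy at scripts/build_header.sh is assumed to behave the same way.
cargo install --version 0.20.0 cbindgen

rm -f ./sv2.h
touch ./sv2.h

dir=${1:-protocols}

cd "$dir"
  # Append the C header for every FFI-exposed crate into a single sv2.h.
  for crate in const_sv2 binary_codec_sv2 common_messages_sv2 \
               template_distribution_sv2 codec_sv2 sv2_ffi; do
    cbindgen --crate "$crate" >> ../sv2.h
  done
cd ..
```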
diff --git a/check-versioning-lib-release.sh b/check-versioning-lib-release.sh
deleted file mode 100755
index 5fdf6eb91..000000000
--- a/check-versioning-lib-release.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash
-
-git fetch --all
-
-crates=(
-"utils/buffer"
-"protocols/v2/binary-sv2/no-serde-sv2/derive_codec"
-"protocols/v2/binary-sv2/no-serde-sv2/codec"
-"protocols/v2/binary-sv2/serde-sv2"
-"protocols/v2/binary-sv2/binary-sv2"
-"protocols/v2/const-sv2"
-"protocols/v2/framing-sv2"
-"protocols/v2/noise-sv2"
-"protocols/v2/codec-sv2"
-"protocols/v2/subprotocols/common-messages"
-"protocols/v2/subprotocols/job-declaration"
-"protocols/v2/subprotocols/mining"
-"protocols/v2/subprotocols/template-distribution"
-"protocols/v2/sv2-ffi"
-"protocols/v2/roles-logic-sv2"
-"protocols/v1"
-"utils/bip32-key-derivation"
-"utils/error-handling"
-"utils/key-utils"
-"roles/roles-utils/network-helpers"
-"roles/roles-utils/rpc"
-"roles/jd-client"
-"roles/jd-server"
-"roles/mining-proxy"
-"roles/pool"
-"roles/translator"
-)
-
-# Loop through each crate
-for crate in "${crates[@]}"; do
-    cd "$crate"
-
-    # Check if the branches exist locally, if not, create them
-    git show-ref --verify --quiet refs/remotes/origin/main || { echo "Branch 'main' not found."; exit 1; }
-    git show-ref --verify --quiet refs/remotes/origin/dev || { echo "Branch 'dev' not found."; exit 1; }
-
-    # Check if there were any changes between dev and main
-    git diff --quiet "origin/dev" "origin/main" -- .
-    if [ $? -ne 0 ]; then
-
-        # Check if crate versions on dev and main are identical
-        version_dev=$(git show origin/dev:./Cargo.toml | awk -F' = ' '$1 == "version" {gsub(/[ "]+/, "", $2); print $2}')
-        version_main=$(git show origin/main:./Cargo.toml | awk -F' = ' '$1 == "version" {gsub(/[ "]+/, "", $2); print $2}')
-        if [ "$version_dev" = "$version_main" ]; then
-            # this prevents the release PR from being merged, since we do `exit 1`, effectively stopping the Github CI
-            echo "Changes detected in crate $crate between dev and main branches! Versions on dev and main branches are identical ($version_dev), so you should bump the crate version on dev before merging into main."
-            exit 1
-        else
-            # this creates a log of version changes, useful for release logs
-            echo "Changes detected in crate $crate between dev and main branches! Version in dev is: ($version_dev), while version in main is ($version_main)."
-        fi
-    fi
-
-    cd - >/dev/null
-done
\ No newline at end of file
diff --git a/examples/interop-cpp/README.md b/examples/interop-cpp/README.md
index fff3df9f2..779b2fb37 100644
--- a/examples/interop-cpp/README.md
+++ b/examples/interop-cpp/README.md
@@ -207,7 +207,7 @@ installation phase of `sv2_ffi` is replaced and `sv2.h` and the newly built `lib
 in the container (they are installed in `/gnu/store/[hash]-Rust-sv2_ffi-[version]/`).
 
 The manifest expects to find `sv2.h` in the `sv2_ffi` package. Since the `sv2.h` is created manually with
-`/build_header.sh`, it is very easy to commit code with an out of date header file. To ensure all commits include
+`scripts/build_header.sh`, it is very easy to commit code with an out of date header file. To ensure all commits include
 the most updated header file, a GitHub Actions check is planned to be added.
 
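+
+A quick local freshness check (a sketch only; CI covers the same ground with `scripts/sv2-header-check.sh`, and the location of the regenerated header is an assumption):
+
+```sh
+# Regenerate the header, then fail if it no longer matches the committed copy.
+sh ./scripts/build_header.sh protocols
+git diff --exit-code -- sv2.h
+```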
## Install cbindgen diff --git a/examples/interop-cpp/run.sh b/examples/interop-cpp/run.sh index fe0dbdd18..31f914e5d 100755 --- a/examples/interop-cpp/run.sh +++ b/examples/interop-cpp/run.sh @@ -14,7 +14,7 @@ cargo build \ -p sv2_ffi && \ cp ../../protocols/target/release/libsv2_ffi.a ./ -../../build_header.sh ../../protocols && mv ../../sv2.h . +../../scripts/build_header.sh ../../protocols && mv ../../scripts/sv2.h . g++ -I ./ ./template-provider/template-provider.cpp libsv2_ffi.a -lpthread -ldl diff --git a/examples/interop-cpp/src/main.rs b/examples/interop-cpp/src/main.rs index 34f6bef09..09950e94b 100644 --- a/examples/interop-cpp/src/main.rs +++ b/examples/interop-cpp/src/main.rs @@ -12,7 +12,7 @@ mod main_ { #[cfg(not(feature = "with_serde"))] mod main_ { - use codec_sv2::{Encoder, Frame, StandardDecoder, StandardSv2Frame}; + use codec_sv2::{Encoder, StandardDecoder, StandardSv2Frame}; use common_messages_sv2::{Protocol, SetupConnection, SetupConnectionError}; use const_sv2::{ CHANNEL_BIT_SETUP_CONNECTION, MESSAGE_TYPE_SETUP_CONNECTION, diff --git a/examples/ping-pong-with-noise/src/node.rs b/examples/ping-pong-with-noise/src/node.rs index 912d6e835..1ae042aa8 100644 --- a/examples/ping-pong-with-noise/src/node.rs +++ b/examples/ping-pong-with-noise/src/node.rs @@ -11,7 +11,7 @@ use async_std::{ }; use core::convert::TryInto; -use codec_sv2::{Frame, HandshakeRole, StandardEitherFrame, StandardSv2Frame}; +use codec_sv2::{HandshakeRole, StandardEitherFrame, StandardSv2Frame}; use std::time; diff --git a/examples/ping-pong-without-noise/src/node.rs b/examples/ping-pong-without-noise/src/node.rs index 97295d591..21edf617e 100644 --- a/examples/ping-pong-without-noise/src/node.rs +++ b/examples/ping-pong-without-noise/src/node.rs @@ -10,7 +10,7 @@ use async_std::{ task, }; -use codec_sv2::{Frame, StandardDecoder, StandardSv2Frame}; +use codec_sv2::{StandardDecoder, StandardSv2Frame}; #[derive(Debug)] enum Expected { diff --git a/examples/template-provider-test/src/main.rs b/examples/template-provider-test/src/main.rs index 78878227b..5a83aa39a 100644 --- a/examples/template-provider-test/src/main.rs +++ b/examples/template-provider-test/src/main.rs @@ -1,6 +1,6 @@ use async_channel::{Receiver, Sender}; use async_std::net::TcpStream; -use codec_sv2::{Frame, StandardEitherFrame, StandardSv2Frame, Sv2Frame}; +use codec_sv2::{StandardEitherFrame, StandardSv2Frame, Sv2Frame}; use network_helpers::PlainConnection; use roles_logic_sv2::{ parsers::{IsSv2Message, TemplateDistribution}, diff --git a/protocols/Cargo.lock b/protocols/Cargo.lock index 9fdf24f21..03934402e 100644 --- a/protocols/Cargo.lock +++ b/protocols/Cargo.lock @@ -77,7 +77,7 @@ checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" [[package]] name = "binary_codec_sv2" -version = "1.0.0" +version = "1.2.0" dependencies = [ "buffer_sv2", "quickcheck", @@ -85,7 +85,7 @@ dependencies = [ [[package]] name = "binary_sv2" -version = "1.0.0" +version = "1.2.0" dependencies = [ "binary_codec_sv2", "derive_codec_sv2", @@ -138,7 +138,7 @@ dependencies = [ [[package]] name = "buffer_sv2" -version = "1.0.0" +version = "1.1.0" dependencies = [ "aes-gcm", "serde", @@ -152,9 +152,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "cc" -version = "1.0.97" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" +checksum = 
"41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" [[package]] name = "cfg-if" @@ -199,7 +199,7 @@ dependencies = [ [[package]] name = "codec_sv2" -version = "1.1.0" +version = "1.2.1" dependencies = [ "binary_sv2", "buffer_sv2", @@ -212,7 +212,7 @@ dependencies = [ [[package]] name = "common_messages_sv2" -version = "1.0.0" +version = "2.0.0" dependencies = [ "binary_sv2", "const_sv2", @@ -224,7 +224,7 @@ dependencies = [ [[package]] name = "const_sv2" -version = "1.0.0" +version = "2.0.0" dependencies = [ "secp256k1 0.28.2", ] @@ -260,7 +260,7 @@ dependencies = [ [[package]] name = "derive_codec_sv2" -version = "1.0.0" +version = "1.1.0" dependencies = [ "binary_codec_sv2", ] @@ -290,7 +290,7 @@ dependencies = [ [[package]] name = "framing_sv2" -version = "1.1.0" +version = "2.0.0" dependencies = [ "binary_sv2", "buffer_sv2", @@ -356,9 +356,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-conservative" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" +checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" [[package]] name = "humantime" @@ -395,9 +395,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.154" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "log" @@ -501,9 +501,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" dependencies = [ "unicode-ident", ] @@ -606,7 +606,7 @@ checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "roles_logic_sv2" -version = "1.1.0" +version = "1.2.1" dependencies = [ "binary_sv2", "chacha20poly1305", @@ -674,22 +674,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.201" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.201" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.62", + "syn 2.0.66", ] [[package]] @@ -711,12 +711,12 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.62", + "syn 2.0.66", ] [[package]] name = "serde_sv2" -version = "1.0.0" +version = "1.0.1" dependencies = [ "buffer_sv2", "serde", @@ -744,7 +744,7 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "sv1_api" -version = "1.0.0" +version = "1.0.1" dependencies = [ "binary_sv2", "bitcoin_hashes 0.3.2", @@ -785,9 +785,9 @@ dependencies = [ [[package]] name = "syn" 
-version = "2.0.62" +version = "2.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f660c3bfcefb88c538776b6685a0c472e3128b51e74d48793dc2a488196e8eb" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" dependencies = [ "proc-macro2", "quote", @@ -796,7 +796,7 @@ dependencies = [ [[package]] name = "template_distribution_sv2" -version = "1.0.0" +version = "1.0.2" dependencies = [ "binary_sv2", "const_sv2", @@ -842,7 +842,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.62", + "syn 2.0.66", ] [[package]] @@ -994,6 +994,6 @@ checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" diff --git a/protocols/Cargo.toml b/protocols/Cargo.toml index 90d917fdd..d1f21ca36 100644 --- a/protocols/Cargo.toml +++ b/protocols/Cargo.toml @@ -2,7 +2,6 @@ name = "stratum_v2_protocols" version = "1.0.0" authors = ["The Stratum v2 Developers"] edition = "2021" -rust-version = "1.75.0" description = "The Stratum protocol defines how miners, proxies, and pools communicate to contribute hashrate to the Bitcoin network. Stratum v2 is a robust set of primitives which anyone can use to expand the protocol or implement a role." documentation = "https://github.com/stratum-mining/stratum" readme = "README.md" diff --git a/protocols/fuzz-tests/src/main.rs b/protocols/fuzz-tests/src/main.rs index c9623ecb5..bb2364c0f 100644 --- a/protocols/fuzz-tests/src/main.rs +++ b/protocols/fuzz-tests/src/main.rs @@ -2,7 +2,7 @@ use libfuzzer_sys::fuzz_target; use binary_codec_sv2::{Seq064K,U256,B0255,Seq0255}; use binary_codec_sv2::from_bytes; -use codec_sv2::{StandardDecoder,Sv2Frame,Frame}; +use codec_sv2::{StandardDecoder,Sv2Frame}; use roles_logic_sv2::parsers::PoolMessages; type F = Sv2Frame,Vec>; diff --git a/protocols/v1/Cargo.toml b/protocols/v1/Cargo.toml index 1a665b3c1..0e7bd1826 100644 --- a/protocols/v1/Cargo.toml +++ b/protocols/v1/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sv1_api" -version = "1.0.0" +version = "1.0.1" authors = ["user"] edition = "2018" description = "API for bridging SV1 miners to SV2 pools" diff --git a/protocols/v1/src/methods/client_to_server.rs b/protocols/v1/src/methods/client_to_server.rs index 0e837db48..669598eec 100644 --- a/protocols/v1/src/methods/client_to_server.rs +++ b/protocols/v1/src/methods/client_to_server.rs @@ -319,6 +319,8 @@ impl<'a> TryFrom for Subscribe<'a> { let (agent_signature, extranonce1) = match ¶ms[..] 
{ // bosminer subscribe message [JString(a), Null, JString(_), Null] => (a.into(), None), + // bosminer subscribe message + [JString(a), Null] => (a.into(), None), [JString(a), JString(b)] => { (a.into(), Some(Extranonce::try_from(hex::decode(b)?)?)) } diff --git a/protocols/v2/binary-sv2/binary-sv2/Cargo.toml b/protocols/v2/binary-sv2/binary-sv2/Cargo.toml index ae7849aa9..922d491a2 100644 --- a/protocols/v2/binary-sv2/binary-sv2/Cargo.toml +++ b/protocols/v2/binary-sv2/binary-sv2/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "binary_sv2" -version = "1.0.0" +version = "1.2.0" authors = ["fi3 "] edition = "2018" description = "Sv2 data format" diff --git a/protocols/v2/binary-sv2/no-serde-sv2/codec/Cargo.toml b/protocols/v2/binary-sv2/no-serde-sv2/codec/Cargo.toml index 5f1f83e12..5394f336c 100644 --- a/protocols/v2/binary-sv2/no-serde-sv2/codec/Cargo.toml +++ b/protocols/v2/binary-sv2/no-serde-sv2/codec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "binary_codec_sv2" -version = "1.0.0" +version = "1.2.0" authors = ["fi3 "] edition = "2018" description = "Sv2 data format" diff --git a/protocols/v2/binary-sv2/no-serde-sv2/codec/src/lib.rs b/protocols/v2/binary-sv2/no-serde-sv2/codec/src/lib.rs index 8f7d96e62..929fb07a0 100644 --- a/protocols/v2/binary-sv2/no-serde-sv2/codec/src/lib.rs +++ b/protocols/v2/binary-sv2/no-serde-sv2/codec/src/lib.rs @@ -25,14 +25,14 @@ use std::io::{Error as E, ErrorKind}; mod codec; mod datatypes; pub use datatypes::{ - PubKey, Seq0255, Seq064K, ShortTxId, Signature, Str0255, Sv2Option, U32AsRef, B016M, B0255, - B032, B064K, U24, U256, + PubKey, Seq0255, Seq064K, ShortTxId, Signature, Str0255, Sv2DataType, Sv2Option, U32AsRef, + B016M, B0255, B032, B064K, U24, U256, }; pub use crate::codec::{ - decodable::Decodable, + decodable::{Decodable, GetMarker}, encodable::{Encodable, EncodableField}, - GetSize, SizeHint, + Fixed, GetSize, SizeHint, }; #[allow(clippy::wrong_self_convention)] @@ -58,7 +58,7 @@ pub mod decodable { } pub mod encodable { - pub use crate::codec::encodable::{Encodable, EncodableField}; + pub use crate::codec::encodable::{Encodable, EncodableField, EncodablePrimitive}; } #[macro_use] diff --git a/protocols/v2/binary-sv2/no-serde-sv2/derive_codec/Cargo.toml b/protocols/v2/binary-sv2/no-serde-sv2/derive_codec/Cargo.toml index 62e62bd9c..93f202d8b 100644 --- a/protocols/v2/binary-sv2/no-serde-sv2/derive_codec/Cargo.toml +++ b/protocols/v2/binary-sv2/no-serde-sv2/derive_codec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "derive_codec_sv2" -version = "1.0.0" +version = "1.1.0" authors = ["fi3 "] edition = "2018" description = "Derive macro for Sv2 binary format serializer and deserializer" diff --git a/protocols/v2/binary-sv2/no-serde-sv2/derive_codec/src/lib.rs b/protocols/v2/binary-sv2/no-serde-sv2/derive_codec/src/lib.rs index 6821abb6a..aef883ba1 100644 --- a/protocols/v2/binary-sv2/no-serde-sv2/derive_codec/src/lib.rs +++ b/protocols/v2/binary-sv2/no-serde-sv2/derive_codec/src/lib.rs @@ -2,6 +2,24 @@ extern crate proc_macro; use core::iter::FromIterator; use proc_macro::{Group, TokenStream, TokenTree}; +fn is_already_sized(item: TokenStream) -> bool { + let stream = item.into_iter(); + + for next in stream { + if let TokenTree::Group(g) = next.clone() { + if g.delimiter() == proc_macro::Delimiter::Bracket { + for t in g.stream().into_iter() { + if let TokenTree::Ident(i) = t { + if i.to_string() == "already_sized" { + return true; + } + } + } + } + } + } + false +} fn remove_attributes(item: TokenStream) -> TokenStream { let stream = 
item.into_iter(); let mut is_attribute = false; @@ -356,8 +374,9 @@ fn get_static_generics(gen: &str) -> &str { } } -#[proc_macro_derive(Encodable)] +#[proc_macro_derive(Encodable, attributes(already_sized))] pub fn encodable(item: TokenStream) -> TokenStream { + let is_already_sized = is_already_sized(item.clone()); let parsed_struct = get_struct_properties(item); let fields = parsed_struct.fields.clone(); @@ -392,6 +411,23 @@ pub fn encodable(item: TokenStream) -> TokenStream { "<'decoder>".to_string() }; + let get_size = if is_already_sized { + String::new() + } else { + format!( + " + impl{} GetSize for {}{} {{ + fn get_size(&self) -> usize {{ + let mut size = 0; + {} + size + }} + }} + ", + impl_generics, parsed_struct.name, parsed_struct.generics, sizes + ) + }; + let result = format!( "mod impl_parse_encodable_{} {{ @@ -408,14 +444,7 @@ pub fn encodable(item: TokenStream) -> TokenStream { }} }} - - impl{} GetSize for {}{} {{ - fn get_size(&self) -> usize {{ - let mut size = 0; - {} - size - }} - }} + {} }}", // imports @@ -428,16 +457,8 @@ pub fn encodable(item: TokenStream) -> TokenStream { parsed_struct.name, parsed_struct.generics, field_into_decoded_field, - // impl Encodable for Struct - //impl{} Encodable<'decoder> for {}{} {{}} - //impl_generics, - //parsed_struct.name, - //parsed_struct.generics, - // impl GetSize for Struct - impl_generics, - parsed_struct.name, - parsed_struct.generics, - sizes, + // impl get_size + get_size, ); //println!("{}", result); diff --git a/protocols/v2/binary-sv2/serde-sv2/Cargo.toml b/protocols/v2/binary-sv2/serde-sv2/Cargo.toml index 5e01eb2d3..2dcc744c8 100644 --- a/protocols/v2/binary-sv2/serde-sv2/Cargo.toml +++ b/protocols/v2/binary-sv2/serde-sv2/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "serde_sv2" -version = "1.0.0" +version = "1.0.1" authors = ["fi3 "] edition = "2018" description = "Serlializer and Deserializer for Stratum V2 data format" @@ -13,3 +13,6 @@ repository = "https://github.com/stratum-mining/stratum" [dependencies] serde = { version = "1.0.89", features = ["derive", "alloc"], default-features = false } buffer_sv2 = {version = "^1.0.0", path = "../../../../utils/buffer"} + +[features] +no_std = [] diff --git a/protocols/v2/binary-sv2/serde-sv2/src/lib.rs b/protocols/v2/binary-sv2/serde-sv2/src/lib.rs index a573bf3bb..004a4dfce 100644 --- a/protocols/v2/binary-sv2/serde-sv2/src/lib.rs +++ b/protocols/v2/binary-sv2/serde-sv2/src/lib.rs @@ -72,7 +72,7 @@ //! [rkyv1]: https://docs.rs/rkyv/0.4.3/rkyv //! 
[rkyv2]: https://davidkoloski.me/blog/rkyv-is-faster-than/ -#![no_std] +#![cfg_attr(feature = "no_std", no_std)] #[macro_use] extern crate alloc; diff --git a/protocols/v2/binary-sv2/serde-sv2/src/primitives/sequences/seq0255.rs b/protocols/v2/binary-sv2/serde-sv2/src/primitives/sequences/seq0255.rs index 2f01bce3c..331edb34e 100644 --- a/protocols/v2/binary-sv2/serde-sv2/src/primitives/sequences/seq0255.rs +++ b/protocols/v2/binary-sv2/serde-sv2/src/primitives/sequences/seq0255.rs @@ -259,6 +259,15 @@ impl<'a> From> for Seq0255<'a, u32> { } } +impl<'a> From>> for Seq0255<'a, U256<'a>> { + fn from(v: Vec>) -> Self { + Seq0255 { + seq: None, + data: Some(v), + } + } +} + impl<'a> From> for Vec { fn from(v: Seq0255) -> Self { if let Some(inner) = v.data { diff --git a/protocols/v2/codec-sv2/Cargo.toml b/protocols/v2/codec-sv2/Cargo.toml index 75645d56b..20b0288af 100644 --- a/protocols/v2/codec-sv2/Cargo.toml +++ b/protocols/v2/codec-sv2/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "codec_sv2" -version = "1.1.0" +version = "1.2.1" authors = ["fi3 "] edition = "2018" description = "Sv2 data format" @@ -9,10 +9,10 @@ repository = "https://github.com/stratum-mining/stratum" [dependencies] serde = { version = "1.0.89", default-features = false, optional = true } -framing_sv2 = { version = "1.1.0", path = "../../../protocols/v2/framing-sv2" } +framing_sv2 = { version = "^2.0.0", path = "../../../protocols/v2/framing-sv2" } noise_sv2 = { version = "1.0", path = "../../../protocols/v2/noise-sv2", optional=true} binary_sv2 = { version = "1.0.0", path = "../../../protocols/v2/binary-sv2/binary-sv2" } -const_sv2 = { version = "1.0.0", path = "../../../protocols/v2/const-sv2"} +const_sv2 = { version = "2.0.0", path = "../../../protocols/v2/const-sv2"} buffer_sv2 = { version = "1.0.0", path = "../../../utils/buffer"} tracing = { version = "0.1"} @@ -21,3 +21,4 @@ tracing = { version = "0.1"} [features] with_serde = ["binary_sv2/with_serde", "serde", "framing_sv2/with_serde", "buffer_sv2/with_serde"] with_buffer_pool = ["framing_sv2/with_buffer_pool"] +no_std = [] \ No newline at end of file diff --git a/protocols/v2/codec-sv2/src/decoder.rs b/protocols/v2/codec-sv2/src/decoder.rs index 760861cfc..4d1440018 100644 --- a/protocols/v2/codec-sv2/src/decoder.rs +++ b/protocols/v2/codec-sv2/src/decoder.rs @@ -8,11 +8,11 @@ pub use buffer_sv2::AeadBuffer; pub use const_sv2::{SV2_FRAME_CHUNK_SIZE, SV2_FRAME_HEADER_SIZE}; use core::marker::PhantomData; #[cfg(feature = "noise_sv2")] -use framing_sv2::framing2::HandShakeFrame; +use framing_sv2::framing::HandShakeFrame; #[cfg(feature = "noise_sv2")] -use framing_sv2::header::NoiseHeader; +use framing_sv2::header::{NOISE_HEADER_ENCRYPTED_SIZE, NOISE_HEADER_SIZE}; use framing_sv2::{ - framing2::{EitherFrame, Frame as F_, Sv2Frame}, + framing::{Frame, Sv2Frame}, header::Header, }; #[cfg(feature = "noise_sv2")] @@ -36,7 +36,7 @@ use crate::State; #[cfg(feature = "noise_sv2")] pub type StandardNoiseDecoder = WithNoise; -pub type StandardEitherFrame = EitherFrame::Slice>; +pub type StandardEitherFrame = Frame::Slice>; pub type StandardSv2Frame = Sv2Frame::Slice>; pub type StandardDecoder = WithoutNoise; @@ -51,14 +51,14 @@ pub struct WithNoise { #[cfg(feature = "noise_sv2")] impl<'a, T: Serialize + GetSize + Deserialize<'a>, B: IsBuffer + AeadBuffer> WithNoise { #[inline] - pub fn next_frame(&mut self, state: &mut State) -> Result> { + pub fn next_frame(&mut self, state: &mut State) -> Result> { match state { State::HandShake(_) => unreachable!(), 
State::NotInitialized(msg_len) => { let hint = *msg_len - self.noise_buffer.as_ref().len(); match hint { 0 => { - self.missing_noise_b = NoiseHeader::HEADER_SIZE; + self.missing_noise_b = NOISE_HEADER_SIZE; Ok(self.while_handshaking()) } _ => { @@ -71,20 +71,20 @@ impl<'a, T: Serialize + GetSize + Deserialize<'a>, B: IsBuffer + AeadBuffer> Wit let hint = if IsBuffer::len(&self.sv2_buffer) < SV2_FRAME_HEADER_SIZE { let len = IsBuffer::len(&self.noise_buffer); let src = self.noise_buffer.get_data_by_ref(len); - if src.len() < NoiseHeader::SIZE { - NoiseHeader::SIZE - src.len() + if src.len() < NOISE_HEADER_ENCRYPTED_SIZE { + NOISE_HEADER_ENCRYPTED_SIZE - src.len() } else { 0 } } else { - let src = self.sv2_buffer.get_data_by_ref_(SV2_FRAME_HEADER_SIZE); + let src = self.sv2_buffer.get_data_by_ref(SV2_FRAME_HEADER_SIZE); let header = Header::from_bytes(src)?; header.encrypted_len() - IsBuffer::len(&self.noise_buffer) }; match hint { 0 => { - self.missing_noise_b = NoiseHeader::SIZE; + self.missing_noise_b = NOISE_HEADER_ENCRYPTED_SIZE; self.decode_noise_frame(noise_codec) } _ => { @@ -97,23 +97,20 @@ impl<'a, T: Serialize + GetSize + Deserialize<'a>, B: IsBuffer + AeadBuffer> Wit } #[inline] - fn decode_noise_frame( - &mut self, - noise_codec: &mut NoiseCodec, - ) -> Result> { + fn decode_noise_frame(&mut self, noise_codec: &mut NoiseCodec) -> Result> { match ( IsBuffer::len(&self.noise_buffer), IsBuffer::len(&self.sv2_buffer), ) { // HERE THE SV2 HEADER IS READY TO BE DECRYPTED - (NoiseHeader::SIZE, 0) => { + (NOISE_HEADER_ENCRYPTED_SIZE, 0) => { let src = self.noise_buffer.get_data_owned(); - let decrypted_header = self.sv2_buffer.get_writable(NoiseHeader::SIZE); + let decrypted_header = self.sv2_buffer.get_writable(NOISE_HEADER_ENCRYPTED_SIZE); decrypted_header.copy_from_slice(src.as_ref()); self.sv2_buffer.as_ref(); noise_codec.decrypt(&mut self.sv2_buffer)?; let header = - Header::from_bytes(self.sv2_buffer.get_data_by_ref_(SV2_FRAME_HEADER_SIZE))?; + Header::from_bytes(self.sv2_buffer.get_data_by_ref(SV2_FRAME_HEADER_SIZE))?; self.missing_noise_b = header.encrypted_len(); Err(Error::MissingBytes(header.encrypted_len())) } @@ -135,7 +132,7 @@ impl<'a, T: Serialize + GetSize + Deserialize<'a>, B: IsBuffer + AeadBuffer> Wit let decrypted_payload = self.sv2_buffer.get_writable(end - start); decrypted_payload.copy_from_slice(&encrypted_payload.as_ref()[start..end]); self.sv2_buffer.danger_set_start(decrypted_len); - noise_codec.decrypt(&mut self.sv2_buffer).unwrap(); + noise_codec.decrypt(&mut self.sv2_buffer)?; start = end; end = (start + SV2_FRAME_CHUNK_SIZE).min(encrypted_payload_len); decrypted_len += self.sv2_buffer.as_ref().len(); @@ -148,7 +145,7 @@ impl<'a, T: Serialize + GetSize + Deserialize<'a>, B: IsBuffer + AeadBuffer> Wit } } - fn while_handshaking(&mut self) -> EitherFrame { + fn while_handshaking(&mut self) -> Frame { let src = self.noise_buffer.get_data_owned().as_mut().to_vec(); // below is inffalible as noise frame length has been already checked @@ -161,6 +158,9 @@ impl<'a, T: Serialize + GetSize + Deserialize<'a>, B: IsBuffer + AeadBuffer> Wit pub fn writable(&mut self) -> &mut [u8] { self.noise_buffer.get_writable(self.missing_noise_b) } + pub fn droppable(&self) -> bool { + self.noise_buffer.is_droppable() && self.sv2_buffer.is_droppable() + } } #[cfg(feature = "noise_sv2")] diff --git a/protocols/v2/codec-sv2/src/encoder.rs b/protocols/v2/codec-sv2/src/encoder.rs index d2855742f..21618fda5 100644 --- a/protocols/v2/codec-sv2/src/encoder.rs +++ 
b/protocols/v2/codec-sv2/src/encoder.rs @@ -5,11 +5,11 @@ pub use const_sv2::{AEAD_MAC_LEN, SV2_FRAME_CHUNK_SIZE, SV2_FRAME_HEADER_SIZE}; #[cfg(feature = "noise_sv2")] use core::convert::TryInto; use core::marker::PhantomData; +use framing_sv2::framing::Sv2Frame; #[cfg(feature = "noise_sv2")] -use framing_sv2::framing2::{EitherFrame, HandShakeFrame}; -use framing_sv2::framing2::{Frame as F_, Sv2Frame}; +use framing_sv2::framing::{Frame, HandShakeFrame}; #[allow(unused_imports)] -pub use framing_sv2::header::NoiseHeader; +pub use framing_sv2::header::NOISE_HEADER_ENCRYPTED_SIZE; #[cfg(feature = "noise_sv2")] use tracing::error; @@ -43,7 +43,7 @@ pub struct NoiseEncoder { } #[cfg(feature = "noise_sv2")] -type Item = EitherFrame; +type Item = Frame; #[cfg(feature = "noise_sv2")] impl NoiseEncoder { @@ -76,7 +76,7 @@ impl NoiseEncoder { } else { SV2_FRAME_CHUNK_SIZE + start - AEAD_MAC_LEN }; - let mut encrypted_len = NoiseHeader::SIZE; + let mut encrypted_len = NOISE_HEADER_ENCRYPTED_SIZE; while start < sv2.len() { let to_encrypt = self.noise_buffer.get_writable(end - start); @@ -113,6 +113,10 @@ impl NoiseEncoder { } Ok(()) } + + pub fn droppable(&self) -> bool { + self.noise_buffer.is_droppable() && self.sv2_buffer.is_droppable() + } } #[cfg(feature = "noise_sv2")] diff --git a/protocols/v2/codec-sv2/src/lib.rs b/protocols/v2/codec-sv2/src/lib.rs index f5cbc013d..a4eec1efd 100644 --- a/protocols/v2/codec-sv2/src/lib.rs +++ b/protocols/v2/codec-sv2/src/lib.rs @@ -1,4 +1,6 @@ -#![no_std] +#![cfg_attr(feature = "no_std", no_std)] + +pub use framing_sv2::framing::Frame; extern crate alloc; @@ -22,15 +24,15 @@ pub use encoder::Encoder; pub use encoder::NoiseEncoder; #[cfg(feature = "noise_sv2")] -pub use framing_sv2::framing2::HandShakeFrame; -pub use framing_sv2::framing2::{Frame, Sv2Frame}; +pub use framing_sv2::framing::HandShakeFrame; +pub use framing_sv2::framing::Sv2Frame; #[cfg(feature = "noise_sv2")] pub use noise_sv2::{self, Initiator, NoiseCodec, Responder}; pub use buffer_sv2; -pub use framing_sv2::{self, framing2::handshake_message_to_frame as h2f}; +pub use framing_sv2::{self, framing::handshake_message_to_frame as h2f}; #[cfg(feature = "noise_sv2")] #[derive(Debug)] diff --git a/protocols/v2/const-sv2/Cargo.toml b/protocols/v2/const-sv2/Cargo.toml index af3067193..1a74cfa0b 100644 --- a/protocols/v2/const-sv2/Cargo.toml +++ b/protocols/v2/const-sv2/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "const_sv2" -version = "1.0.0" +version = "2.0.0" authors = ["fi3 "] edition = "2018" description = "Sv2 constatnts" @@ -14,3 +14,6 @@ secp256k1 = { version = "0.28.2", default-features = false, features =["hashes", #[dev-dependencies] #cbindgen = "0.16.0" + +[features] +no_std = [] diff --git a/protocols/v2/const-sv2/src/lib.rs b/protocols/v2/const-sv2/src/lib.rs index 1a32c1b8f..c9cda8277 100644 --- a/protocols/v2/const-sv2/src/lib.rs +++ b/protocols/v2/const-sv2/src/lib.rs @@ -1,5 +1,5 @@ //! 
Central repository for all the sv2 constants -#![no_std] +#![cfg_attr(feature = "no_std", no_std)] pub const EXTENSION_TYPE_NO_EXTENSION: u16 = 0; @@ -39,9 +39,8 @@ pub const NOISE_HASHED_PROTOCOL_NAME_CHACHA: [u8; 32] = [ pub const NOISE_SUPPORTED_CIPHERS_MESSAGE: [u8; 5] = [1, 0x47, 0x53, 0x45, 0x41]; pub const SV2_MINING_PROTOCOL_DISCRIMINANT: u8 = 0; -pub const SV2_JOB_NEG_PROTOCOL_DISCRIMINANT: u8 = 1; +pub const SV2_JOB_DECLARATION_PROTOCOL_DISCRIMINANT: u8 = 1; pub const SV2_TEMPLATE_DISTR_PROTOCOL_DISCRIMINANT: u8 = 2; -pub const SV2_JOB_DISTR_PROTOCOL_DISCRIMINANT: u8 = 3; // COMMON MESSAGES TYPES pub const MESSAGE_TYPE_SETUP_CONNECTION: u8 = 0x0; diff --git a/protocols/v2/framing-sv2/Cargo.toml b/protocols/v2/framing-sv2/Cargo.toml index 67465688d..4450a4653 100644 --- a/protocols/v2/framing-sv2/Cargo.toml +++ b/protocols/v2/framing-sv2/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "framing_sv2" -version = "1.1.0" +version = "2.0.0" authors = ["fi3 "] edition = "2018" description = "Sv2 frames" @@ -12,10 +12,11 @@ repository = "https://github.com/stratum-mining/stratum" [dependencies] serde = { version = "1.0.89", default-features = false, optional = true } -const_sv2 = { version = "^1.0.0", path = "../../../protocols/v2/const-sv2"} +const_sv2 = { version = "^2.0.0", path = "../../../protocols/v2/const-sv2"} binary_sv2 = { version = "^1.0.0", path = "../../../protocols/v2/binary-sv2/binary-sv2" } buffer_sv2 = { version = "^1.0.0", path = "../../../utils/buffer", optional=true } [features] +no_std = [] with_serde = ["binary_sv2/with_serde", "serde", "buffer_sv2/with_serde"] with_buffer_pool = ["binary_sv2/with_buffer_pool", "buffer_sv2"] diff --git a/protocols/v2/framing-sv2/src/error.rs b/protocols/v2/framing-sv2/src/error.rs index 808b47d51..44b0c95ce 100644 --- a/protocols/v2/framing-sv2/src/error.rs +++ b/protocols/v2/framing-sv2/src/error.rs @@ -24,8 +24,13 @@ impl fmt::Display for Error { ExpectedSv2Frame => { write!(f, "Expected `Sv2Frame`, received `HandshakeFrame`") } - UnexpectedHeaderLength(i) => { - write!(f, "Unexpected `Header` length: `{}`", i) + UnexpectedHeaderLength(actual_size) => { + write!( + f, + "Unexpected `Header` length: `{}`, should be at least {}", + actual_size, + const_sv2::SV2_FRAME_HEADER_SIZE + ) + } } } diff --git a/protocols/v2/framing-sv2/src/framing2.rs b/protocols/v2/framing-sv2/src/framing.rs similarity index 57% rename from protocols/v2/framing-sv2/src/framing2.rs rename to protocols/v2/framing-sv2/src/framing.rs index 4ba896063..616d53354 100644 --- a/protocols/v2/framing-sv2/src/framing2.rs +++ b/protocols/v2/framing-sv2/src/framing.rs @@ -1,77 +1,41 @@ -use crate::{ - header::{Header, NoiseHeader}, - Error, -}; +use crate::{header::Header, Error}; use alloc::vec::Vec; use binary_sv2::{to_writer, GetSize, Serialize}; use core::convert::TryFrom; -const NOISE_MAX_LEN: usize = const_sv2::NOISE_FRAME_MAX_SIZE; - #[cfg(not(feature = "with_buffer_pool"))] type Slice = Vec<u8>; #[cfg(feature = "with_buffer_pool")] type Slice = buffer_sv2::Slice; -impl<A, B> Sv2Frame<A, B> { - /// Maps a `Sv2Frame<A, B>` to `Sv2Frame<C, B>` by applying `fun`, - /// which is assumed to be a closure that converts `A` to `C` - pub fn map<C>(self, fun: fn(A) -> C) -> Sv2Frame<C, B> { - let serialized = self.serialized; - let header = self.header; - let payload = self.payload.map(fun); - Sv2Frame { - header, - payload, - serialized, +/// A wrapper to be used in a context where we need a generic reference to a frame +/// but it doesn't matter which kind of frame it is (`Sv2Frame` or `HandShakeFrame`)
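The wrapper described by this doc comment is the renamed `Frame` enum (formerly `EitherFrame`), defined just below. A rough sketch of how consuming code is expected to branch on it, assuming a message type `M` bounded by `Serialize + GetSize` and the plain `Vec<u8>` buffer used when `with_buffer_pool` is off:

```rust
use binary_sv2::{GetSize, Serialize};
use framing_sv2::framing::Frame;

fn handle_frame<M: Serialize + GetSize>(frame: Frame<M, Vec<u8>>) {
    match frame {
        // Handshake frames only circulate while the Noise handshake runs.
        Frame::HandShake(f) => {
            let _payload: Vec<u8> = f.get_payload_when_handshaking();
        }
        // Every post-handshake message arrives as an `Sv2Frame`.
        Frame::Sv2(f) => {
            let _encoded = f.encoded_length();
        }
    }
}
```

The `From`/`TryFrom` impls below make the same conversions explicit, so `let sv2: Sv2Frame<M, Vec<u8>> = frame.try_into()?` fails with `Error::ExpectedSv2Frame` when a handshake frame slips through.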
+#[derive(Debug)] +pub enum Frame { + HandShake(HandShakeFrame), + Sv2(Sv2Frame), +} + +impl + AsRef<[u8]>> Frame { + pub fn encoded_length(&self) -> usize { + match &self { + Self::HandShake(frame) => frame.encoded_length(), + Self::Sv2(frame) => frame.encoded_length(), } } } -pub trait Frame<'a, T: Serialize + GetSize>: Sized { - type Buffer: AsMut<[u8]>; - type Deserialized; - - /// Write the serialized `Frame` into `dst`. - fn serialize(self, dst: &mut [u8]) -> Result<(), Error>; - - /// Get the payload - fn payload(&'a mut self) -> &'a mut [u8]; - - /// Returns `Some(self.header)` when the frame has a header (`Sv2Frame`), returns `None` where it doesn't (`HandShakeFrame`). - fn get_header(&self) -> Option; - - /// Try to build a `Frame` from raw bytes. - /// Checks if the payload has the correct size (as stated in the `Header`). - /// Returns `Self` on success, or the number of the bytes needed to complete the frame - /// as an error. Nothing is assumed or checked about the correctness of the payload. - fn from_bytes(bytes: Self::Buffer) -> Result; - - /// Builds a `Frame` from raw bytes. - /// Does not check if the payload has the correct size (as stated in the `Header`). - /// Nothing is assumed or checked about the correctness of the payload. - fn from_bytes_unchecked(bytes: Self::Buffer) -> Self; - - /// Helps to determine if the frame size encoded in a byte array correctly representing the size of the frame. - /// - Returns `0` if the byte slice is of the expected size according to the header. - /// - Returns a negative value if the byte slice is smaller than a Noise Frame header; this value - /// represents how many bytes are missing. - /// - Returns a positive value if the byte slice is longer than expected; this value - /// indicates the surplus of bytes beyond the expected size. - fn size_hint(bytes: &[u8]) -> isize; - - /// Returns the size of the `Frame` payload. - fn encoded_length(&self) -> usize; +impl From for Frame { + fn from(v: HandShakeFrame) -> Self { + Self::HandShake(v) + } +} - /// Try to build a `Frame` from a serializable payload. - /// Returns `Some(Self)` if the size of the payload fits in the frame, `None` otherwise. - fn from_message( - message: T, - message_type: u8, - extension_type: u16, - channel_msg: bool, - ) -> Option; +impl From> for Frame { + fn from(v: Sv2Frame) -> Self { + Self::Sv2(v) + } } /// Abstraction for a SV2 Frame. @@ -83,40 +47,12 @@ pub struct Sv2Frame { serialized: Option, } -impl Default for Sv2Frame { - fn default() -> Self { - Sv2Frame { - header: Header::default(), - payload: None, - serialized: None, - } - } -} - -/// Abstraction for a Noise Handshake Frame -/// Contains only a `Slice` payload with a fixed length -/// Only used during Noise Handshake process -#[derive(Debug)] -pub struct HandShakeFrame { - payload: Slice, -} - -impl HandShakeFrame { - /// Returns payload of `HandShakeFrame` as a `Vec` - pub fn get_payload_when_handshaking(&self) -> Vec { - self.payload[0..].to_vec() - } -} - -impl<'a, T: Serialize + GetSize, B: AsMut<[u8]> + AsRef<[u8]>> Frame<'a, T> for Sv2Frame { - type Buffer = B; - type Deserialized = B; - +impl + AsRef<[u8]>> Sv2Frame { /// Write the serialized `Sv2Frame` into `dst`. /// This operation when called on an already serialized frame is very cheap. /// When called on a non serialized frame, it is not so cheap (because it serializes it). 
#[inline] - fn serialize(self, dst: &mut [u8]) -> Result<(), Error> { + pub fn serialize(self, dst: &mut [u8]) -> Result<(), Error> { if let Some(mut serialized) = self.serialized { dst.swap_with_slice(serialized.as_mut()); Ok(()) @@ -142,7 +78,7 @@ impl<'a, T: Serialize + GetSize, B: AsMut<[u8]> + AsRef<[u8]>> Frame<'a, T> for /// This function is only intended as a fast way to get a reference to an /// already serialized payload. If the frame has not yet been /// serialized, this function should never be used (it will panic). - fn payload(&'a mut self) -> &'a mut [u8] { + pub fn payload(&mut self) -> &mut [u8] { if let Some(serialized) = self.serialized.as_mut() { &mut serialized.as_mut()[Header::SIZE..] } else { @@ -152,7 +88,7 @@ impl<'a, T: Serialize + GetSize, B: AsMut<[u8]> + AsRef<[u8]>> Frame<'a, T> for } /// `Sv2Frame` always returns `Some(self.header)`. - fn get_header(&self) -> Option { + pub fn get_header(&self) -> Option { Some(self.header) } @@ -160,7 +96,7 @@ impl<'a, T: Serialize + GetSize, B: AsMut<[u8]> + AsRef<[u8]>> Frame<'a, T> for /// Returns a `Sv2Frame` on success, or the number of the bytes needed to complete the frame /// as an error. `Self.serialized` is `Some`, but nothing is assumed or checked about the correctness of the payload. #[inline] - fn from_bytes(mut bytes: Self::Buffer) -> Result { + pub fn from_bytes(mut bytes: B) -> Result { let hint = Self::size_hint(bytes.as_mut()); if hint == 0 { @@ -171,7 +107,7 @@ impl<'a, T: Serialize + GetSize, B: AsMut<[u8]> + AsRef<[u8]>> Frame<'a, T> for } #[inline] - fn from_bytes_unchecked(mut bytes: Self::Buffer) -> Self { + pub fn from_bytes_unchecked(mut bytes: B) -> Self { // Unchecked function caller is supposed to already know that the passed bytes are valid let header = Header::from_bytes(bytes.as_mut()).expect("Invalid header"); Self { @@ -189,7 +125,7 @@ impl<'a, T: Serialize + GetSize, B: AsMut<[u8]> + AsRef<[u8]>> Frame<'a, T> for /// - Returns a positive value if the byte slice is longer than expected; this value /// indicates the surplus of bytes beyond the expected size. #[inline] - fn size_hint(bytes: &[u8]) -> isize { + pub fn size_hint(bytes: &[u8]) -> isize { match Header::from_bytes(bytes) { Err(_) => { // Returns how many bytes are missing from the expected frame size @@ -210,7 +146,7 @@ impl<'a, T: Serialize + GetSize, B: AsMut<[u8]> + AsRef<[u8]>> Frame<'a, T> for /// If `Sv2Frame` is serialized, returns the length of `self.serialized`, /// otherwise, returns the length of `self.payload`. #[inline] - fn encoded_length(&self) -> usize { + pub fn encoded_length(&self) -> usize { if let Some(serialized) = self.serialized.as_ref() { serialized.as_ref().len() } else if let Some(payload) = self.payload.as_ref() { @@ -223,7 +159,7 @@ impl<'a, T: Serialize + GetSize, B: AsMut<[u8]> + AsRef<[u8]>> Frame<'a, T> for /// Tries to build a `Sv2Frame` from a non-serialized payload. /// Returns a `Sv2Frame` if the size of the payload fits in the frame, `None` otherwise. 
- fn from_message( + pub fn from_message( message: T, message_type: u8, extension_type: u16, @@ -239,84 +175,70 @@ impl<'a, T: Serialize + GetSize, B: AsMut<[u8]> + AsRef<[u8]>> Frame<'a, T> for } } -impl<'a> Frame<'a, Slice> for HandShakeFrame { - type Buffer = Slice; - type Deserialized = &'a mut [u8]; - - /// Put the Noise Frame payload into `dst` - #[inline] - fn serialize(mut self, dst: &mut [u8]) -> Result<(), Error> { - dst.swap_with_slice(self.payload.as_mut()); - Ok(()) +impl Sv2Frame { + /// Maps a `Sv2Frame` to `Sv2Frame` by applying `fun`, + /// which is assumed to be a closure that converts `A` to `C` + pub fn map(self, fun: fn(A) -> C) -> Sv2Frame { + let serialized = self.serialized; + let header = self.header; + let payload = self.payload.map(fun); + Sv2Frame { + header, + payload, + serialized, + } } +} - /// Get the Noise Frame payload - #[inline] - fn payload(&'a mut self) -> &'a mut [u8] { - &mut self.payload[NoiseHeader::HEADER_SIZE..] +impl TryFrom> for Sv2Frame { + type Error = Error; + + fn try_from(v: Frame) -> Result { + match v { + Frame::Sv2(frame) => Ok(frame), + Frame::HandShake(_) => Err(Error::ExpectedSv2Frame), + } } +} + +/// Abstraction for a Noise Handshake Frame +/// Contains only a `Slice` payload with a fixed length +/// Only used during Noise Handshake process +#[derive(Debug)] +pub struct HandShakeFrame { + payload: Slice, +} - /// `HandShakeFrame` always returns `None`. - fn get_header(&self) -> Option { - None +impl HandShakeFrame { + /// Returns payload of `HandShakeFrame` as a `Vec` + pub fn get_payload_when_handshaking(&self) -> Vec { + self.payload[0..].to_vec() } /// Builds a `HandShakeFrame` from raw bytes. Nothing is assumed or checked about the correctness of the payload. - fn from_bytes(bytes: Self::Buffer) -> Result { + pub fn from_bytes(bytes: Slice) -> Result { Ok(Self::from_bytes_unchecked(bytes)) } #[inline] - fn from_bytes_unchecked(bytes: Self::Buffer) -> Self { + pub fn from_bytes_unchecked(bytes: Slice) -> Self { Self { payload: bytes } } - /// After parsing the expected `HandShakeFrame` size from `bytes`, this function helps to determine if this value - /// correctly representing the size of the frame. - /// - Returns `0` if the byte slice is of the expected size according to the header. - /// - Returns a negative value if the byte slice is smaller than a Noise Frame header; this value - /// represents how many bytes are missing. - /// - Returns a positive value if the byte slice is longer than expected; this value - /// indicates the surplus of bytes beyond the expected size. - #[inline] - fn size_hint(bytes: &[u8]) -> isize { - if bytes.len() < NoiseHeader::HEADER_SIZE { - return (NoiseHeader::HEADER_SIZE - bytes.len()) as isize; - }; - - let len_b = &bytes[NoiseHeader::LEN_OFFSET..NoiseHeader::HEADER_SIZE]; - let expected_len = u16::from_le_bytes([len_b[0], len_b[1]]) as usize; - - if bytes.len() - NoiseHeader::HEADER_SIZE == expected_len { - 0 - } else { - expected_len as isize - (bytes.len() - NoiseHeader::HEADER_SIZE) as isize - } - } - /// Returns the size of the `HandShakeFrame` payload. #[inline] fn encoded_length(&self) -> usize { self.payload.len() } +} - /// Tries to build a `HandShakeFrame` frame from a byte slice. - /// Returns a `HandShakeFrame` if the size of the payload fits in the frame, `None` otherwise. 
- /// This is quite inefficient, and should be used only to build `HandShakeFrames` - // TODO check if is used only to build `HandShakeFrames` - #[allow(clippy::useless_conversion)] - fn from_message( - message: Slice, - _message_type: u8, - _extension_type: u16, - _channel_msg: bool, - ) -> Option { - if message.len() <= NOISE_MAX_LEN { - Some(Self { - payload: message.into(), - }) - } else { - None +impl TryFrom> for HandShakeFrame { + type Error = Error; + + fn try_from(v: Frame) -> Result { + match v { + Frame::HandShake(frame) => Ok(frame), + Frame::Sv2(_) => Err(Error::ExpectedHandshakeFrame), } } } @@ -345,57 +267,6 @@ fn update_extension_type(extension_type: u16, channel_msg: bool) -> u16 { } } -/// A wrapper to be used in a context we need a generic reference to a frame -/// but it doesn't matter which kind of frame it is (`Sv2Frame` or `HandShakeFrame`) -#[derive(Debug)] -pub enum EitherFrame { - HandShake(HandShakeFrame), - Sv2(Sv2Frame), -} - -impl + AsRef<[u8]>> EitherFrame { - pub fn encoded_length(&self) -> usize { - match &self { - Self::HandShake(frame) => frame.encoded_length(), - Self::Sv2(frame) => frame.encoded_length(), - } - } -} - -impl TryFrom> for HandShakeFrame { - type Error = Error; - - fn try_from(v: EitherFrame) -> Result { - match v { - EitherFrame::HandShake(frame) => Ok(frame), - EitherFrame::Sv2(_) => Err(Error::ExpectedHandshakeFrame), - } - } -} - -impl TryFrom> for Sv2Frame { - type Error = Error; - - fn try_from(v: EitherFrame) -> Result { - match v { - EitherFrame::Sv2(frame) => Ok(frame), - EitherFrame::HandShake(_) => Err(Error::ExpectedSv2Frame), - } - } -} - -impl From for EitherFrame { - fn from(v: HandShakeFrame) -> Self { - Self::HandShake(v) - } -} - -impl From> for EitherFrame { - fn from(v: Sv2Frame) -> Self { - Self::Sv2(v) - } -} - #[cfg(test)] use binary_sv2::binary_codec_sv2; diff --git a/protocols/v2/framing-sv2/src/header.rs b/protocols/v2/framing-sv2/src/header.rs index 05272b52a..3b3226156 100644 --- a/protocols/v2/framing-sv2/src/header.rs +++ b/protocols/v2/framing-sv2/src/header.rs @@ -7,68 +7,65 @@ use binary_sv2::{Deserialize, Serialize, U24}; use const_sv2::{AEAD_MAC_LEN, SV2_FRAME_CHUNK_SIZE}; use core::convert::TryInto; +// Previously `NoiseHeader::SIZE` +pub const NOISE_HEADER_ENCRYPTED_SIZE: usize = const_sv2::ENCRYPTED_SV2_FRAME_HEADER_SIZE; +// Previously `NoiseHeader::LEN_OFFSET` +pub const NOISE_HEADER_LEN_OFFSET: usize = const_sv2::NOISE_FRAME_HEADER_LEN_OFFSET; +// Previously `NoiseHeader::HEADER_SIZE` +pub const NOISE_HEADER_SIZE: usize = const_sv2::NOISE_FRAME_HEADER_SIZE; + /// Abstraction for a SV2 Frame Header. #[derive(Debug, Serialize, Deserialize, Copy, Clone)] pub struct Header { - extension_type: u16, // TODO use specific type? - msg_type: u8, // TODO use specific type? + /// Unique identifier of the extension describing this protocol message. Most significant bit + /// (i.e.bit 15, 0-indexed, aka channel_msg) indicates a message which is specific to a channel, + /// whereas if the most significant bit is unset, the message is to be interpreted by the + /// immediate receiving device. Note that the channel_msg bit is ignored in the extension + /// lookup, i.e.an extension_type of 0x8ABC is for the same "extension" as 0x0ABC. If the + /// channel_msg bit is set, the first four bytes of the payload field is a U32 representing the + /// channel_id this message is destined for. Note that for the Job Declaration and Template + /// Distribution Protocols the channel_msg bit is always unset. 
+    extension_type: u16, // fix: use U16 type +    /// Unique identifier of the extension describing this protocol message +    msg_type: u8, // fix: use specific type? +    /// Length of the protocol message, not including this header msg_length: U24, } -impl Default for Header { -    fn default() -> Self { -        Header { -            extension_type: 0, -            msg_type: 0, -            // converting 0_32 into a U24 never panic -            msg_length: 0_u32.try_into().unwrap(), -        } -    } -} - impl Header { -    pub const LEN_OFFSET: usize = const_sv2::SV2_FRAME_HEADER_LEN_OFFSET; -    pub const LEN_SIZE: usize = const_sv2::SV2_FRAME_HEADER_LEN_END; -    pub const LEN_END: usize = Self::LEN_OFFSET + Self::LEN_SIZE; pub const SIZE: usize = const_sv2::SV2_FRAME_HEADER_SIZE; -    /// Construct a `Header` from ray bytes +    /// Construct a `Header` from raw bytes #[inline] pub fn from_bytes(bytes: &[u8]) -> Result<Self, Error> { if bytes.len() < Self::SIZE { -            return Err(Error::UnexpectedHeaderLength( -                (Self::SIZE - bytes.len()) as isize, -            )); +            return Err(Error::UnexpectedHeaderLength(bytes.len() as isize)); }; - let extension_type = u16::from_le_bytes([bytes[0], bytes[1]]); let msg_type = bytes[2]; -        let msg_length = u32::from_le_bytes([bytes[3], bytes[4], bytes[5], 0]); - +        let msg_length: U24 = u32::from_le_bytes([bytes[3], bytes[4], bytes[5], 0]).try_into()?; Ok(Self { extension_type, msg_type, -            // Converting and u32 with the most significant byte set to 0 to and U24 never panic -            msg_length: msg_length.try_into().unwrap(), +            msg_length, }) } /// Get the payload length #[allow(clippy::len_without_is_empty)] #[inline] -    pub fn len(&self) -> usize { +    pub(crate) fn len(&self) -> usize { let inner: u32 = self.msg_length.into(); inner as usize } /// Construct a `Header` from payload length, type and extension type. #[inline] -    pub fn from_len(len: u32, message_type: u8, extension_type: u16) -> Option<Self>
{ +    pub(crate) fn from_len(msg_length: u32, msg_type: u8, extension_type: u16) -> Option<Self>
{ Some(Self { extension_type, - msg_type: message_type, - msg_length: len.try_into().ok()?, + msg_type, + msg_length: msg_length.try_into().ok()?, }) } @@ -83,9 +80,11 @@ impl Header { } /// Check if `Header` represents a channel message + /// + /// A header can represent a channel message if the MSB(Most Significant Bit) is set. pub fn channel_msg(&self) -> bool { - let mask = 0b0000_0000_0000_0001; - self.extension_type & mask == self.extension_type + const CHANNEL_MSG_MASK: u16 = 0b0000_0000_0000_0001; + self.extension_type & CHANNEL_MSG_MASK == self.extension_type } /// Calculate the length of the encrypted `Header` @@ -100,10 +99,33 @@ impl Header { } } -pub struct NoiseHeader {} +#[cfg(test)] +mod tests { + use super::*; + use alloc::vec; + + #[test] + fn test_header_from_bytes() { + let bytes = vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06]; + let header = Header::from_bytes(&bytes).unwrap(); + assert_eq!(header.extension_type, 0x0201); + assert_eq!(header.msg_type, 0x03); + assert_eq!(header.msg_length, 0x060504_u32.try_into().unwrap()); + } + + #[test] + fn test_header_from_len() { + let header = Header::from_len(0x1234, 0x56, 0x789a).unwrap(); + assert_eq!(header.extension_type, 0x789a); + assert_eq!(header.msg_type, 0x56); + assert_eq!(header.msg_length, 0x1234_u32.try_into().unwrap()); -impl NoiseHeader { - pub const SIZE: usize = const_sv2::ENCRYPTED_SV2_FRAME_HEADER_SIZE; - pub const LEN_OFFSET: usize = const_sv2::NOISE_FRAME_HEADER_LEN_OFFSET; - pub const HEADER_SIZE: usize = const_sv2::NOISE_FRAME_HEADER_SIZE; + let extension_type = 0; + let msg_type = 0x1; + let msg_length = 0x1234_u32; + let header = Header::from_len(msg_length, msg_type, extension_type).unwrap(); + assert_eq!(header.extension_type, 0); + assert_eq!(header.msg_type, 0x1); + assert_eq!(header.msg_length, 0x1234_u32.try_into().unwrap()); + } } diff --git a/protocols/v2/framing-sv2/src/lib.rs b/protocols/v2/framing-sv2/src/lib.rs index 34fe8708b..cf792e65d 100644 --- a/protocols/v2/framing-sv2/src/lib.rs +++ b/protocols/v2/framing-sv2/src/lib.rs @@ -19,11 +19,11 @@ //! //! The `with_serde` feature flag is only used for the Message Generator, and deprecated for any other kind of usage. It will likely be fully deprecated in the future. 
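The reworked `Header::from_bytes` above validates the slice length up front, reports the actual size it saw in `UnexpectedHeaderLength` (rather than the missing-byte count), and funnels the length through a fallible `U24` conversion. A small sketch of both paths, mirroring the unit test added in this hunk and assuming the crate's existing public `msg_type()` accessor:

```rust
use framing_sv2::header::Header;

#[test]
fn header_parse_sketch() {
    // Six header bytes: extension_type (u16 LE), msg_type (u8), msg_length (U24 LE).
    let bytes = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06];
    let header = Header::from_bytes(&bytes).expect("six bytes is a complete header");
    assert_eq!(header.msg_type(), 0x03);

    // A short slice now errors with the length that was actually seen (2),
    // not with the number of bytes still missing.
    assert!(Header::from_bytes(&bytes[..2]).is_err());
}
```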
-#![no_std] +#![cfg_attr(feature = "no_std", no_std)] extern crate alloc; /// SV2 framing types -pub mod framing2; +pub mod framing; /// SV2 framing errors pub mod error; diff --git a/protocols/v2/noise-sv2/Cargo.toml b/protocols/v2/noise-sv2/Cargo.toml index 7316fddb8..c627bc1af 100644 --- a/protocols/v2/noise-sv2/Cargo.toml +++ b/protocols/v2/noise-sv2/Cargo.toml @@ -13,7 +13,7 @@ rand = {version = "0.8.5", default-features = false, features = ["std","std_rng" aes-gcm = "0.10.2" chacha20poly1305 = "0.10.1" rand_chacha = "0.3.1" -const_sv2 = { version = "^1.0.0", path = "../../../protocols/v2/const-sv2"} +const_sv2 = { version = "^2.0.0", path = "../../../protocols/v2/const-sv2"} [dev-dependencies] quickcheck = "1.0.3" diff --git a/protocols/v2/roles-logic-sv2/Cargo.toml b/protocols/v2/roles-logic-sv2/Cargo.toml index 4254d6a1f..2b7c34610 100644 --- a/protocols/v2/roles-logic-sv2/Cargo.toml +++ b/protocols/v2/roles-logic-sv2/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "roles_logic_sv2" -version = "1.1.0" +version = "1.2.1" edition = "2018" description = "Common handlers for use within SV2 roles" license = "MIT OR Apache-2.0" @@ -12,12 +12,12 @@ repository = "https://github.com/stratum-mining/stratum" stratum-common = { version="1.0.0", path = "../../../common", features=["bitcoin"]} serde = { version = "1.0.89", features = ["derive", "alloc"], default-features = false, optional = true} binary_sv2 = {version = "^1.0.0", path = "../../../protocols/v2/binary-sv2/binary-sv2", default-features = true } -common_messages_sv2 = { path = "../../../protocols/v2/subprotocols/common-messages", version = "^1.0.0" } +common_messages_sv2 = { path = "../../../protocols/v2/subprotocols/common-messages", version = "^2.0.0" } mining_sv2 = { path = "../../../protocols/v2/subprotocols/mining", version = "^1.0.0" } -template_distribution_sv2 = { path = "../../../protocols/v2/subprotocols/template-distribution", version = "^1.0.0" } +template_distribution_sv2 = { path = "../../../protocols/v2/subprotocols/template-distribution", version = "^1.0.1" } job_declaration_sv2 = { path = "../../../protocols/v2/subprotocols/job-declaration", version = "^1.0.0" } -const_sv2 = { version = "^1.0.0", path = "../../../protocols/v2/const-sv2"} -framing_sv2 = { version = "^1.1.0", path = "../../../protocols/v2/framing-sv2" } +const_sv2 = { version = "^2.0.0", path = "../../../protocols/v2/const-sv2"} +framing_sv2 = { version = "^2.0.0", path = "../../../protocols/v2/framing-sv2" } tracing = { version = "0.1"} chacha20poly1305 = { version = "0.10.1"} nohash-hasher = "0.2.0" diff --git a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs index 8bf351a04..7e3f66cec 100644 --- a/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs +++ b/protocols/v2/roles-logic-sv2/src/channel_logic/channel_factory.rs @@ -826,14 +826,17 @@ impl ChannelFactory { if tracing::level_enabled!(tracing::Level::DEBUG) || tracing::level_enabled!(tracing::Level::TRACE) { - debug!("Bitcoin target: {:?}", bitcoin_target); + let bitcoin_target_log: binary_sv2::U256 = bitcoin_target.clone().into(); + let mut bitcoin_target_log = bitcoin_target_log.to_vec(); + bitcoin_target_log.reverse(); + debug!("Bitcoin target : {:?}", bitcoin_target_log.to_hex()); let upstream_target: binary_sv2::U256 = upstream_target.clone().into(); let mut upstream_target = upstream_target.to_vec(); upstream_target.reverse(); debug!("Upstream target: {:?}", upstream_target.to_vec().to_hex()); 
let mut hash = hash; hash.reverse(); - debug!("Hash: {:?}", hash.to_vec().to_hex()); + debug!("Hash : {:?}", hash.to_vec().to_hex()); } let hash: Target = hash.into(); diff --git a/protocols/v2/roles-logic-sv2/src/parsers.rs b/protocols/v2/roles-logic-sv2/src/parsers.rs index b0f5f44a0..0274ce785 100644 --- a/protocols/v2/roles-logic-sv2/src/parsers.rs +++ b/protocols/v2/roles-logic-sv2/src/parsers.rs @@ -13,7 +13,7 @@ use binary_sv2::GetSize; use binary_sv2::{from_bytes, Deserialize}; -use framing_sv2::framing2::{Frame, Sv2Frame}; +use framing_sv2::framing::Sv2Frame; use const_sv2::{ CHANNEL_BIT_ALLOCATE_MINING_JOB_TOKEN, CHANNEL_BIT_ALLOCATE_MINING_JOB_TOKEN_SUCCESS, @@ -1106,11 +1106,18 @@ impl<'a> TryFrom<(u8, &'a mut [u8])> for PoolMessages<'a> { let is_common: Result = v.0.try_into(); let is_mining: Result = v.0.try_into(); let is_job_declaration: Result = v.0.try_into(); - match (is_common, is_mining, is_job_declaration) { - (Ok(_), Err(_), Err(_)) => Ok(Self::Common(v.try_into()?)), - (Err(_), Ok(_), Err(_)) => Ok(Self::Mining(v.try_into()?)), - (Err(_), Err(_), Ok(_)) => Ok(Self::JobDeclaration(v.try_into()?)), - (Err(e), Err(_), Err(_)) => Err(e), + let is_template_distribution: Result = v.0.try_into(); + match ( + is_common, + is_mining, + is_job_declaration, + is_template_distribution, + ) { + (Ok(_), Err(_), Err(_), Err(_)) => Ok(Self::Common(v.try_into()?)), + (Err(_), Ok(_), Err(_), Err(_)) => Ok(Self::Mining(v.try_into()?)), + (Err(_), Err(_), Ok(_), Err(_)) => Ok(Self::JobDeclaration(v.try_into()?)), + (Err(_), Err(_), Err(_), Ok(_)) => Ok(Self::TemplateDistribution(v.try_into()?)), + (Err(e), Err(_), Err(_), Err(_)) => Err(e), // This is an impossible state is safe to panic here _ => panic!(), } diff --git a/protocols/v2/subprotocols/common-messages/Cargo.toml b/protocols/v2/subprotocols/common-messages/Cargo.toml index 2f803c25e..783c208df 100644 --- a/protocols/v2/subprotocols/common-messages/Cargo.toml +++ b/protocols/v2/subprotocols/common-messages/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "common_messages_sv2" -version = "1.0.0" +version = "2.0.0" authors = ["fi3 "] edition = "2018" description = "Sv2 subprotocol common messages" @@ -12,11 +12,12 @@ repository = "https://github.com/stratum-mining/stratum" [dependencies] serde = { version = "1.0.89", default-features = false, optional= true } binary_sv2 = {version = "^1.0.0", path = "../../../../protocols/v2/binary-sv2/binary-sv2" } -const_sv2 = {version = "^1.0.0", path = "../../../../protocols/v2/const-sv2"} +const_sv2 = {version = "^2.0.0", path = "../../../../protocols/v2/const-sv2"} quickcheck = { version = "1.0.3", optional=true } quickcheck_macros = { version = "1", optional=true } serde_repr = {version= "0.1.10", optional=true} [features] +no_std = [] with_serde = ["binary_sv2/with_serde", "serde", "serde_repr"] prop_test = ["quickcheck"] diff --git a/protocols/v2/subprotocols/common-messages/src/lib.rs b/protocols/v2/subprotocols/common-messages/src/lib.rs index 33b2baf2b..cc0df87a2 100644 --- a/protocols/v2/subprotocols/common-messages/src/lib.rs +++ b/protocols/v2/subprotocols/common-messages/src/lib.rs @@ -1,4 +1,4 @@ -#![no_std] +#![cfg_attr(feature = "no_std", no_std)] //! Common messages for [stratum v2][Sv2] //! The following protocol messages are common across all of the sv2 (sub)protocols. 
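Earlier in this hunk, the `TryFrom<(u8, &'a mut [u8])>` impl for `PoolMessages` in `parsers.rs` gains a fourth probe so Template Distribution message types no longer fall into the error arm; the match stays sound only if every discriminant is claimed by exactly one subprotocol. A simplified, self-contained illustration of that dispatch shape; the ranges below are invented for the example and are not the crate's real message-type constants:

```rust
enum Family { Common, Mining, JobDeclaration, TemplateDistribution }

// Hypothetical discriminant tests standing in for the four `TryFrom<u8>` probes.
fn is_common(t: u8) -> bool { t < 0x10 }
fn is_mining(t: u8) -> bool { (0x10..0x30).contains(&t) }
fn is_job_declaration(t: u8) -> bool { (0x50..0x60).contains(&t) }
fn is_template_distribution(t: u8) -> bool { (0x70..0x80).contains(&t) }

fn dispatch(msg_type: u8) -> Option<Family> {
    match (
        is_common(msg_type),
        is_mining(msg_type),
        is_job_declaration(msg_type),
        is_template_distribution(msg_type),
    ) {
        (true, false, false, false) => Some(Family::Common),
        (false, true, false, false) => Some(Family::Mining),
        (false, false, true, false) => Some(Family::JobDeclaration),
        (false, false, false, true) => Some(Family::TemplateDistribution),
        // Unknown discriminant. Overlapping claims cannot happen because the
        // ranges are disjoint, which is what justifies the `panic!()` in the
        // real impl.
        _ => None,
    }
}
```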
@@ -33,7 +33,7 @@ pub extern "C" fn _c_export_setup_conn_succ(_a: SetupConnectionSuccess) {} impl ChannelEndpointChanged { pub fn from_gen(g: &mut Gen) -> Self { ChannelEndpointChanged { - channel_id: u32::arbitrary(g).try_into().unwrap(), + channel_id: u32::arbitrary(g), } } } @@ -70,11 +70,11 @@ impl SetupConnection<'static> { SetupConnection { protocol, - min_version: u16::arbitrary(g).try_into().unwrap(), - max_version: u16::arbitrary(g).try_into().unwrap(), - flags: u32::arbitrary(g).try_into().unwrap(), + min_version: u16::arbitrary(g), + max_version: u16::arbitrary(g), + flags: u32::arbitrary(g), endpoint_host, - endpoint_port: u16::arbitrary(g).try_into().unwrap(), + endpoint_port: u16::arbitrary(g), vendor, hardware_version, firmware, @@ -92,7 +92,7 @@ impl SetupConnectionError<'static> { let error_code: binary_sv2::Str0255 = error_code.try_into().unwrap(); SetupConnectionError { - flags: u32::arbitrary(g).try_into().unwrap(), + flags: u32::arbitrary(g), error_code, } } @@ -102,8 +102,8 @@ impl SetupConnectionError<'static> { impl SetupConnectionSuccess { pub fn from_gen(g: &mut Gen) -> Self { SetupConnectionSuccess { - used_version: u16::arbitrary(g).try_into().unwrap(), - flags: u32::arbitrary(g).try_into().unwrap(), + used_version: u16::arbitrary(g), + flags: u32::arbitrary(g), } } } diff --git a/protocols/v2/subprotocols/common-messages/src/setup_connection.rs b/protocols/v2/subprotocols/common-messages/src/setup_connection.rs index f0f730aae..f214802ea 100644 --- a/protocols/v2/subprotocols/common-messages/src/setup_connection.rs +++ b/protocols/v2/subprotocols/common-messages/src/setup_connection.rs @@ -7,8 +7,8 @@ use binary_sv2::{ }; use binary_sv2::{Deserialize, GetSize, Serialize, Str0255}; use const_sv2::{ - SV2_JOB_DISTR_PROTOCOL_DISCRIMINANT, SV2_JOB_NEG_PROTOCOL_DISCRIMINANT, - SV2_MINING_PROTOCOL_DISCRIMINANT, SV2_TEMPLATE_DISTR_PROTOCOL_DISCRIMINANT, + SV2_JOB_DECLARATION_PROTOCOL_DISCRIMINANT, SV2_MINING_PROTOCOL_DISCRIMINANT, + SV2_TEMPLATE_DISTR_PROTOCOL_DISCRIMINANT, }; use core::convert::TryFrom; #[cfg(not(feature = "with_serde"))] @@ -54,11 +54,11 @@ pub struct SetupConnection<'decoder> { impl<'decoder> SetupConnection<'decoder> { pub fn set_requires_standard_job(&mut self) { - self.flags |= 0b_0000_0000_0000_0000_0000_0000_0000_0001 + self.flags |= 0b_0000_0000_0000_0000_0000_0000_0000_0001; } pub fn set_async_job_nogotiation(&mut self) { - self.flags |= 0b_0000_0000_0000_0000_0000_0000_0000_0001 + self.flags |= 0b_0000_0000_0000_0000_0000_0000_0000_0001; } /// Check if passed flags support self flag @@ -69,13 +69,24 @@ impl<'decoder> SetupConnection<'decoder> { // [1] [1] -> true // [0] [1] -> false Protocol::MiningProtocol => { + // Evaluates protocol requirements based on flag bits. + // + // Checks if the current protocol meets the required flags for work selection and version rolling + // by reversing the bits of `available_flags` and `required_flags`. It extracts the 30th and 29th + // bits to determine if work selection and version rolling are needed. + // + // Returns `true` if: + // - The work selection requirement is satisfied or not needed. + // - The version rolling requirement is satisfied or not needed. + // + // Otherwise, returns `false`. 
let available = available_flags.reverse_bits(); let required_flags = required_flags.reverse_bits(); - let requires_work_selection_passed = (required_flags >> 30) > 0; - let requires_version_rolling_passed = (required_flags >> 29) > 0; + let requires_work_selection_passed = required_flags >> 30 > 0; + let requires_version_rolling_passed = required_flags >> 29 > 0; - let requires_work_selection_self = (available >> 30) > 0; - let requires_version_rolling_self = (available >> 29) > 0; + let requires_work_selection_self = available >> 30 > 0; + let requires_version_rolling_self = available >> 29 > 0; let work_selection = !requires_work_selection_self || requires_work_selection_passed; @@ -84,8 +95,34 @@ impl<'decoder> SetupConnection<'decoder> { work_selection && version_rolling } - // TODO - _ => todo!(), + Protocol::JobDeclarationProtocol => { + // Determines if asynchronous job mining is required based on flag bits. + // + // Reverses the bits of `available_flags` and `required_flags`, extracts the 31st bit from each, + // and evaluates if the condition is met using these bits. Returns `true` or `false` based on: + // - True if `requires_async_job_mining_self` is true, or both are true. + // - False if `requires_async_job_mining_self` is false and `requires_async_job_mining_passed` is true. + // - True otherwise. + let available = available_flags.reverse_bits(); + let required = required_flags.reverse_bits(); + + let requires_async_job_mining_passed = (required >> 31) & 1 > 0; + let requires_async_job_mining_self = (available >> 31) & 1 > 0; + + match ( + requires_async_job_mining_self, + requires_async_job_mining_passed, + ) { + (true, true) => true, + (true, false) => true, + (false, true) => false, + (false, false) => true, + } + } + Protocol::TemplateDistributionProtocol => { + // These protocols do not define flags for setting up a connection. 
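The four-case match in the Job Declaration arm above collapses to a single boolean: the check fails only when this side does not require asynchronous job mining while the peer does, i.e. `self || !passed`. A standalone check of that equivalence:

```rust
fn main() {
    // `s` = requires_async_job_mining_self, `p` = requires_async_job_mining_passed.
    for s in [false, true] {
        for p in [false, true] {
            let table = match (s, p) {
                (true, true) => true,
                (true, false) => true,
                (false, true) => false,
                (false, false) => true,
            };
            assert_eq!(table, s || !p);
        }
    }
}
```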
+ false + } } } @@ -283,18 +320,16 @@ impl<'a> From> for CSetupConnectionError { } /// MiningProtocol = [`SV2_MINING_PROTOCOL_DISCRIMINANT`], -/// JobDeclarationProtocol = [`SV2_JOB_NEG_PROTOCOL_DISCRIMINANT`], +/// JobDeclarationProtocol = [`SV2_JOB_DECLARATION_PROTOCOL_DISCRIMINANT`], /// TemplateDistributionProtocol = [`SV2_TEMPLATE_DISTR_PROTOCOL_DISCRIMINANT`], -/// JobDistributionProtocol = [`SV2_JOB_DISTR_PROTOCOL_DISCRIMINANT`], #[cfg_attr(feature = "with_serde", derive(Serialize_repr, Deserialize_repr))] #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[repr(u8)] #[allow(clippy::enum_variant_names)] pub enum Protocol { MiningProtocol = SV2_MINING_PROTOCOL_DISCRIMINANT, - JobDeclarationProtocol = SV2_JOB_NEG_PROTOCOL_DISCRIMINANT, + JobDeclarationProtocol = SV2_JOB_DECLARATION_PROTOCOL_DISCRIMINANT, TemplateDistributionProtocol = SV2_TEMPLATE_DISTR_PROTOCOL_DISCRIMINANT, - JobDistributionProtocol = SV2_JOB_DISTR_PROTOCOL_DISCRIMINANT, } #[cfg(not(feature = "with_serde"))] @@ -310,7 +345,7 @@ impl<'decoder> binary_sv2::Decodable<'decoder> for Protocol { fn get_structure( _: &[u8], ) -> core::result::Result, binary_sv2::Error> { - let field: FieldMarker = 0_u8.into(); + let field: FieldMarker = (0_u8).into(); Ok(alloc::vec![field]) } fn from_decoded_fields( @@ -329,9 +364,8 @@ impl TryFrom for Protocol { fn try_from(value: u8) -> Result { match value { SV2_MINING_PROTOCOL_DISCRIMINANT => Ok(Protocol::MiningProtocol), - SV2_JOB_NEG_PROTOCOL_DISCRIMINANT => Ok(Protocol::JobDeclarationProtocol), + SV2_JOB_DECLARATION_PROTOCOL_DISCRIMINANT => Ok(Protocol::JobDeclarationProtocol), SV2_TEMPLATE_DISTR_PROTOCOL_DISCRIMINANT => Ok(Protocol::TemplateDistributionProtocol), - SV2_JOB_DISTR_PROTOCOL_DISCRIMINANT => Ok(Protocol::JobDistributionProtocol), _ => Err(()), } } @@ -348,9 +382,8 @@ impl From for u8 { fn from(val: Protocol) -> Self { match val { Protocol::MiningProtocol => SV2_MINING_PROTOCOL_DISCRIMINANT, - Protocol::JobDeclarationProtocol => SV2_JOB_NEG_PROTOCOL_DISCRIMINANT, + Protocol::JobDeclarationProtocol => SV2_JOB_DECLARATION_PROTOCOL_DISCRIMINANT, Protocol::TemplateDistributionProtocol => SV2_TEMPLATE_DISTR_PROTOCOL_DISCRIMINANT, - Protocol::JobDistributionProtocol => SV2_JOB_DISTR_PROTOCOL_DISCRIMINANT, } } } @@ -391,13 +424,23 @@ mod test { #[test] fn test_check_flag() { let protocol = crate::Protocol::MiningProtocol; - let flag_avaiable = 0b_0000_0000_0000_0000_0000_0000_0000_0000; + let flag_available = 0b_0000_0000_0000_0000_0000_0000_0000_0000; let flag_required = 0b_0000_0000_0000_0000_0000_0000_0000_0001; assert!(SetupConnection::check_flags( protocol, - flag_avaiable, + flag_available, flag_required )); + + let protocol = crate::Protocol::JobDeclarationProtocol; + + let available_flags = 0b_1000_0000_0000_0000_0000_0000_0000_0000; + let required_flags = 0b_1000_0000_0000_0000_0000_0000_0000_0000; + assert!(SetupConnection::check_flags( + protocol, + available_flags, + required_flags + )); } #[test] diff --git a/protocols/v2/subprotocols/job-declaration/Cargo.toml b/protocols/v2/subprotocols/job-declaration/Cargo.toml index 9db52a792..2ee4313f4 100644 --- a/protocols/v2/subprotocols/job-declaration/Cargo.toml +++ b/protocols/v2/subprotocols/job-declaration/Cargo.toml @@ -11,7 +11,8 @@ repository = "https://github.com/stratum-mining/stratum" [dependencies] serde = { version = "1.0.89", default-features = false, optional= true } binary_sv2 = {version = "^1.0.0", path = "../../../../protocols/v2/binary-sv2/binary-sv2" } -const_sv2 = {version = "^1.0.0", path = 
"../../../../protocols/v2/const-sv2"} +const_sv2 = {version = "^2.0.0", path = "../../../../protocols/v2/const-sv2"} [features] +no_std = [] with_serde = ["binary_sv2/with_serde", "serde"] diff --git a/protocols/v2/subprotocols/job-declaration/src/lib.rs b/protocols/v2/subprotocols/job-declaration/src/lib.rs index 2f4bc42cc..fa03ca00a 100644 --- a/protocols/v2/subprotocols/job-declaration/src/lib.rs +++ b/protocols/v2/subprotocols/job-declaration/src/lib.rs @@ -1,4 +1,4 @@ -#![no_std] +#![cfg_attr(feature = "no_std", no_std)] //! # Job Declaration Protocol //! diff --git a/protocols/v2/subprotocols/job-declaration/src/submit_solution.rs b/protocols/v2/subprotocols/job-declaration/src/submit_solution.rs index 4b7c30e4c..9687313ce 100644 --- a/protocols/v2/subprotocols/job-declaration/src/submit_solution.rs +++ b/protocols/v2/subprotocols/job-declaration/src/submit_solution.rs @@ -30,5 +30,6 @@ impl<'d> GetSize for SubmitSolutionJd<'d> { + self.ntime.get_size() + self.nonce.get_size() + self.nbits.get_size() + + self.version.get_size() } } diff --git a/protocols/v2/subprotocols/mining/Cargo.toml b/protocols/v2/subprotocols/mining/Cargo.toml index 493b77d63..c98bb7ec9 100644 --- a/protocols/v2/subprotocols/mining/Cargo.toml +++ b/protocols/v2/subprotocols/mining/Cargo.toml @@ -13,11 +13,12 @@ repository = "https://github.com/stratum-mining/stratum" [dependencies] serde = { version = "1.0.89", default-features = false, optional= true } binary_sv2 = {version = "^1.0.0", path = "../../../../protocols/v2/binary-sv2/binary-sv2" } -const_sv2 = {version = "^1.0.0", path = "../../../../protocols/v2/const-sv2"} +const_sv2 = {version = "^2.0.0", path = "../../../../protocols/v2/const-sv2"} [dev-dependencies] quickcheck = "1.0.3" quickcheck_macros = "1" [features] +no_std = [] with_serde = ["binary_sv2/with_serde", "serde"] diff --git a/protocols/v2/subprotocols/mining/src/close_channel.rs b/protocols/v2/subprotocols/mining/src/close_channel.rs index 4957f8266..48de6d608 100644 --- a/protocols/v2/subprotocols/mining/src/close_channel.rs +++ b/protocols/v2/subprotocols/mining/src/close_channel.rs @@ -39,9 +39,9 @@ impl<'d> GetSize for CloseChannel<'d> { #[cfg(feature = "with_serde")] impl<'a> CloseChannel<'a> { pub fn into_static(self) -> CloseChannel<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> CloseChannel<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/mining/src/lib.rs b/protocols/v2/subprotocols/mining/src/lib.rs index bfd0e4114..e82809a79 100644 --- a/protocols/v2/subprotocols/mining/src/lib.rs +++ b/protocols/v2/subprotocols/mining/src/lib.rs @@ -1,4 +1,4 @@ -#![no_std] +#![cfg_attr(feature = "no_std", no_std)] //! # Mining Protocol //! 
## Channels diff --git a/protocols/v2/subprotocols/mining/src/new_mining_job.rs b/protocols/v2/subprotocols/mining/src/new_mining_job.rs index 5b720f5e1..1e876adab 100644 --- a/protocols/v2/subprotocols/mining/src/new_mining_job.rs +++ b/protocols/v2/subprotocols/mining/src/new_mining_job.rs @@ -231,18 +231,18 @@ mod tests { #[cfg(feature = "with_serde")] impl<'a> NewExtendedMiningJob<'a> { pub fn into_static(self) -> NewExtendedMiningJob<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> NewExtendedMiningJob<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } #[cfg(feature = "with_serde")] impl<'a> NewMiningJob<'a> { pub fn into_static(self) -> NewMiningJob<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> NewMiningJob<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/mining/src/open_channel.rs b/protocols/v2/subprotocols/mining/src/open_channel.rs index 4c86f9183..d64b1d0a7 100644 --- a/protocols/v2/subprotocols/mining/src/open_channel.rs +++ b/protocols/v2/subprotocols/mining/src/open_channel.rs @@ -375,45 +375,45 @@ mod tests { #[cfg(feature = "with_serde")] impl<'a> OpenExtendedMiningChannel<'a> { pub fn into_static(self) -> OpenExtendedMiningChannel<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> OpenExtendedMiningChannel<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } #[cfg(feature = "with_serde")] impl<'a> OpenExtendedMiningChannelSuccess<'a> { pub fn into_static(self) -> OpenExtendedMiningChannelSuccess<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> OpenExtendedMiningChannelSuccess<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } #[cfg(feature = "with_serde")] impl<'a> OpenMiningChannelError<'a> { pub fn into_static(self) -> OpenMiningChannelError<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> OpenMiningChannelError<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } #[cfg(feature = "with_serde")] impl<'a> OpenStandardMiningChannel<'a> { pub fn into_static(self) -> OpenStandardMiningChannel<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> OpenStandardMiningChannel<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the 
Message Generator"); } } #[cfg(feature = "with_serde")] impl<'a> OpenStandardMiningChannelSuccess<'a> { pub fn into_static(self) -> OpenStandardMiningChannelSuccess<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> OpenStandardMiningChannelSuccess<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/mining/src/reconnect.rs b/protocols/v2/subprotocols/mining/src/reconnect.rs index cb71c9da2..18201ab92 100644 --- a/protocols/v2/subprotocols/mining/src/reconnect.rs +++ b/protocols/v2/subprotocols/mining/src/reconnect.rs @@ -40,9 +40,9 @@ impl<'d> GetSize for Reconnect<'d> { #[cfg(feature = "with_serde")] impl<'a> Reconnect<'a> { pub fn into_static(self) -> Reconnect<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> Reconnect<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/mining/src/set_custom_mining_job.rs b/protocols/v2/subprotocols/mining/src/set_custom_mining_job.rs index fbc12b4ba..1d937d9ab 100644 --- a/protocols/v2/subprotocols/mining/src/set_custom_mining_job.rs +++ b/protocols/v2/subprotocols/mining/src/set_custom_mining_job.rs @@ -129,27 +129,27 @@ impl<'d> GetSize for SetCustomMiningJobError<'d> { #[cfg(feature = "with_serde")] impl<'a> SetCustomMiningJob<'a> { pub fn into_static(self) -> SetCustomMiningJob<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> SetCustomMiningJob<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } #[cfg(feature = "with_serde")] impl<'a> SetCustomMiningJobError<'a> { pub fn into_static(self) -> SetCustomMiningJobError<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> SetCustomMiningJobError<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } #[cfg(feature = "with_serde")] impl SetCustomMiningJobSuccess { pub fn into_static(self) -> SetCustomMiningJobSuccess { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> SetCustomMiningJobSuccess { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/mining/src/set_extranonce_prefix.rs b/protocols/v2/subprotocols/mining/src/set_extranonce_prefix.rs index b8fef3259..140fdf1d3 100644 --- a/protocols/v2/subprotocols/mining/src/set_extranonce_prefix.rs +++ b/protocols/v2/subprotocols/mining/src/set_extranonce_prefix.rs @@ -31,9 +31,9 @@ impl<'d> GetSize for SetExtranoncePrefix<'d> { #[cfg(feature = "with_serde")] impl<'a> 
SetExtranoncePrefix<'a> { pub fn into_static(self) -> SetExtranoncePrefix<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> SetExtranoncePrefix<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/mining/src/set_group_channel.rs b/protocols/v2/subprotocols/mining/src/set_group_channel.rs index 65338b17b..f00d8d5a3 100644 --- a/protocols/v2/subprotocols/mining/src/set_group_channel.rs +++ b/protocols/v2/subprotocols/mining/src/set_group_channel.rs @@ -42,9 +42,9 @@ impl<'d> GetSize for SetGroupChannel<'d> { #[cfg(feature = "with_serde")] impl<'a> SetGroupChannel<'a> { pub fn into_static(self) -> SetGroupChannel<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> SetGroupChannel<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/mining/src/set_new_prev_hash.rs b/protocols/v2/subprotocols/mining/src/set_new_prev_hash.rs index 94efdcb4c..7b76b1ef2 100644 --- a/protocols/v2/subprotocols/mining/src/set_new_prev_hash.rs +++ b/protocols/v2/subprotocols/mining/src/set_new_prev_hash.rs @@ -47,9 +47,9 @@ impl<'d> GetSize for SetNewPrevHash<'d> { #[cfg(feature = "with_serde")] impl<'a> SetNewPrevHash<'a> { pub fn into_static(self) -> SetNewPrevHash<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> SetNewPrevHash<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/mining/src/set_target.rs b/protocols/v2/subprotocols/mining/src/set_target.rs index a4804ba3b..fba42a02f 100644 --- a/protocols/v2/subprotocols/mining/src/set_target.rs +++ b/protocols/v2/subprotocols/mining/src/set_target.rs @@ -37,9 +37,9 @@ impl<'d> GetSize for SetTarget<'d> { #[cfg(feature = "with_serde")] impl<'a> SetTarget<'a> { pub fn into_static(self) -> SetTarget<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> SetTarget<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/mining/src/submit_shares.rs b/protocols/v2/subprotocols/mining/src/submit_shares.rs index 1a6f489d6..5c8bea64f 100644 --- a/protocols/v2/subprotocols/mining/src/submit_shares.rs +++ b/protocols/v2/subprotocols/mining/src/submit_shares.rs @@ -157,18 +157,18 @@ impl<'d> GetSize for SubmitSharesError<'d> { #[cfg(feature = "with_serde")] impl<'a> SubmitSharesError<'a> { pub fn into_static(self) -> SubmitSharesError<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> SubmitSharesError<'static> { - panic!("This function shouldn't be 
called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } #[cfg(feature = "with_serde")] impl<'a> SubmitSharesExtended<'a> { pub fn into_static(self) -> SubmitSharesExtended<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> SubmitSharesExtended<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/mining/src/update_channel.rs b/protocols/v2/subprotocols/mining/src/update_channel.rs index e165662c5..a8843213c 100644 --- a/protocols/v2/subprotocols/mining/src/update_channel.rs +++ b/protocols/v2/subprotocols/mining/src/update_channel.rs @@ -61,18 +61,18 @@ impl<'d> GetSize for UpdateChannelError<'d> { #[cfg(feature = "with_serde")] impl<'a> UpdateChannel<'a> { pub fn into_static(self) -> UpdateChannel<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> UpdateChannel<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } #[cfg(feature = "with_serde")] impl<'a> UpdateChannelError<'a> { pub fn into_static(self) -> UpdateChannelError<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> UpdateChannelError<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/subprotocols/template-distribution/Cargo.toml b/protocols/v2/subprotocols/template-distribution/Cargo.toml index 38cd6f56f..b0120836d 100644 --- a/protocols/v2/subprotocols/template-distribution/Cargo.toml +++ b/protocols/v2/subprotocols/template-distribution/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "template_distribution_sv2" -version = "1.0.0" +version = "1.0.2" authors = ["fi3 "] edition = "2018" description = "Sv2 template distribution subprotocol" @@ -11,11 +11,12 @@ repository = "https://github.com/stratum-mining/stratum" [dependencies] serde = { version = "1.0.89", default-features = false, optional= true } -binary_sv2 = { version = "^1.0.0", path = "../../../../protocols/v2/binary-sv2/binary-sv2" } -const_sv2 = { version = "^1.0.0", path = "../../../../protocols/v2/const-sv2"} +binary_sv2 = { version = "^1.0.1", path = "../../../../protocols/v2/binary-sv2/binary-sv2" } +const_sv2 = { version = "^2.0.0", path = "../../../../protocols/v2/const-sv2"} quickcheck = { version = "1.0.3", optional=true } quickcheck_macros = { version = "1", optional=true } [features] +no_std = [] with_serde = ["binary_sv2/with_serde", "serde"] prop_test = ["quickcheck"] diff --git a/protocols/v2/subprotocols/template-distribution/src/lib.rs b/protocols/v2/subprotocols/template-distribution/src/lib.rs index 2f1549f2d..11f33afe3 100644 --- a/protocols/v2/subprotocols/template-distribution/src/lib.rs +++ b/protocols/v2/subprotocols/template-distribution/src/lib.rs @@ -1,4 +1,4 @@ -#![no_std] +#![cfg_attr(feature = "no_std", no_std)] //! # Template Distribution Protocol //! 
The Template Distribution protocol is used to receive updates of the block template to use in diff --git a/protocols/v2/subprotocols/template-distribution/src/new_template.rs b/protocols/v2/subprotocols/template-distribution/src/new_template.rs index c2ee52d7a..6c1f3b435 100644 --- a/protocols/v2/subprotocols/template-distribution/src/new_template.rs +++ b/protocols/v2/subprotocols/template-distribution/src/new_template.rs @@ -7,6 +7,8 @@ use binary_sv2::Error; use binary_sv2::{Deserialize, Seq0255, Serialize, B0255, B064K, U256}; #[cfg(not(feature = "with_serde"))] use core::convert::TryInto; +#[cfg(all(feature = "with_serde", not(feature = "no_std")))] +use std::convert::TryInto; /// ## NewTemplate (Server -> Client) /// The primary template-providing function. Note that the coinbase_tx_outputs bytes will appear @@ -156,10 +158,10 @@ impl<'d> GetSize for NewTemplate<'d> { #[cfg(feature = "with_serde")] impl<'a> NewTemplate<'a> { pub fn into_static(self) -> NewTemplate<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } pub fn as_static(&self) -> NewTemplate<'static> { - panic!("This function shouldn't be called by the Messaege Generator"); + panic!("This function shouldn't be called by the Message Generator"); } } diff --git a/protocols/v2/sv2-ffi/Cargo.toml b/protocols/v2/sv2-ffi/Cargo.toml index 7930f5805..6498b1b5b 100644 --- a/protocols/v2/sv2-ffi/Cargo.toml +++ b/protocols/v2/sv2-ffi/Cargo.toml @@ -12,10 +12,10 @@ crate-type = ["staticlib"] [dependencies] codec_sv2 = { path = "../../../protocols/v2/codec-sv2", version = "^1.0.0" } -const_sv2 = { path = "../../../protocols/v2/const-sv2", version = "^1.0.0" } +const_sv2 = { path = "../../../protocols/v2/const-sv2", version = "^2.0.0" } binary_sv2 = { path = "../../../protocols/v2/binary-sv2/binary-sv2", version = "^1.0.0" } -common_messages_sv2 = { path = "../../../protocols/v2/subprotocols/common-messages", version = "^1.0.0" } -template_distribution_sv2 = { path = "../../../protocols/v2/subprotocols/template-distribution", version = "^1.0.0" } +common_messages_sv2 = { path = "../../../protocols/v2/subprotocols/common-messages", version = "^2.0.0" } +template_distribution_sv2 = { path = "../../../protocols/v2/subprotocols/template-distribution", version = "^1.0.1" } [dev-dependencies] quickcheck = "1.0.3" diff --git a/protocols/v2/sv2-ffi/src/lib.rs b/protocols/v2/sv2-ffi/src/lib.rs index 346d497b9..9befa0ca7 100644 --- a/protocols/v2/sv2-ffi/src/lib.rs +++ b/protocols/v2/sv2-ffi/src/lib.rs @@ -4,7 +4,7 @@ use std::{ fmt::{Display, Formatter}, }; -use codec_sv2::{Encoder, Frame, StandardDecoder, StandardSv2Frame}; +use codec_sv2::{Encoder, StandardDecoder, StandardSv2Frame}; use common_messages_sv2::{ CSetupConnection, CSetupConnectionError, ChannelEndpointChanged, SetupConnection, SetupConnectionError, SetupConnectionSuccess, diff --git a/protocols/v2/sv2-ffi/sv2.h b/protocols/v2/sv2-ffi/sv2.h index 8aa7fdae3..8c941fff9 100644 --- a/protocols/v2/sv2-ffi/sv2.h +++ b/protocols/v2/sv2-ffi/sv2.h @@ -38,12 +38,10 @@ static const uintptr_t INITIATOR_EXPECTED_HANDSHAKE_MESSAGE_SIZE = ((ELLSWIFT_EN static const uint8_t SV2_MINING_PROTOCOL_DISCRIMINANT = 0; -static const uint8_t SV2_JOB_NEG_PROTOCOL_DISCRIMINANT = 1; +static const uint8_t SV2_JOB_DECLARATION_PROTOCOL_DISCRIMINANT = 1; static const uint8_t SV2_TEMPLATE_DISTR_PROTOCOL_DISCRIMINANT = 2; -static const uint8_t SV2_JOB_DISTR_PROTOCOL_DISCRIMINANT = 3; - static const uint8_t 
MESSAGE_TYPE_SETUP_CONNECTION = 0; static const uint8_t MESSAGE_TYPE_SETUP_CONNECTION_SUCCESS = 1; @@ -271,14 +269,12 @@ void _c_export_cvec2(CVec2 _a); #include /// MiningProtocol = [`SV2_MINING_PROTOCOL_DISCRIMINANT`], -/// JobDeclarationProtocol = [`SV2_JOB_NEG_PROTOCOL_DISCRIMINANT`], +/// JobDeclarationProtocol = [`SV2_JOB_DECLARATION_PROTOCOL_DISCRIMINANT`], /// TemplateDistributionProtocol = [`SV2_TEMPLATE_DISTR_PROTOCOL_DISCRIMINANT`], -/// JobDistributionProtocol = [`SV2_JOB_DISTR_PROTOCOL_DISCRIMINANT`], enum class Protocol : uint8_t { MiningProtocol = SV2_MINING_PROTOCOL_DISCRIMINANT, - JobDeclarationProtocol = SV2_JOB_NEG_PROTOCOL_DISCRIMINANT, + JobDeclarationProtocol = SV2_JOB_DECLARATION_PROTOCOL_DISCRIMINANT, TemplateDistributionProtocol = SV2_TEMPLATE_DISTR_PROTOCOL_DISCRIMINANT, - JobDistributionProtocol = SV2_JOB_DISTR_PROTOCOL_DISCRIMINANT, }; /// ## ChannelEndpointChanged (Server -> Client) diff --git a/roles/Cargo.lock b/roles/Cargo.lock index 7b588075e..d46de28ea 100644 --- a/roles/Cargo.lock +++ b/roles/Cargo.lock @@ -52,12 +52,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ahash" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" - [[package]] name = "ahash" version = "0.7.8" @@ -316,6 +310,17 @@ version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" +[[package]] +name = "async-trait" +version = "0.1.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.61", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -368,14 +373,14 @@ checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" [[package]] name = "binary_codec_sv2" -version = "1.0.0" +version = "1.1.0" dependencies = [ "buffer_sv2", ] [[package]] name = "binary_sv2" -version = "1.0.0" +version = "1.1.0" dependencies = [ "binary_codec_sv2", "derive_codec_sv2", @@ -437,6 +442,9 @@ name = "bitflags" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +dependencies = [ + "serde", +] [[package]] name = "block-buffer" @@ -481,7 +489,7 @@ dependencies = [ [[package]] name = "buffer_sv2" -version = "1.0.0" +version = "1.1.0" dependencies = [ "aes-gcm", "serde", @@ -594,7 +602,7 @@ checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "codec_sv2" -version = "1.1.0" +version = "1.2.1" dependencies = [ "binary_sv2", "buffer_sv2", @@ -613,7 +621,7 @@ checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "common_messages_sv2" -version = "1.0.0" +version = "2.0.0" dependencies = [ "binary_sv2", "const_sv2", @@ -628,13 +636,62 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "config" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" +dependencies = [ + "async-trait", + "convert_case", + "json5", + "lazy_static", + "nom", + "pathdiff", + "ron", + "rust-ini", + "serde", + "serde_json", + "toml", + "yaml-rust", +] + +[[package]] +name = "const-random" +version = "0.1.18" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", +] + [[package]] name = "const_sv2" -version = "1.0.0" +version = "2.0.0" dependencies = [ "secp256k1 0.28.2", ] +[[package]] +name = "convert_case" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "cpufeatures" version = "0.2.12" @@ -650,6 +707,12 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + [[package]] name = "crypto-common" version = "0.1.6" @@ -696,6 +759,15 @@ dependencies = [ "crypto-common", ] +[[package]] +name = "dlv-list" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" +dependencies = [ + "const-random", +] + [[package]] name = "env_logger" version = "0.7.1" @@ -800,7 +872,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "framing_sv2" -version = "1.1.0" +version = "2.0.0" dependencies = [ "binary_sv2", "buffer_sv2", @@ -995,23 +1067,19 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.7.2" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96282e96bfcd3da0d3aa9938bedf1e50df3269b6db08b4876d2da0bb1a0841cf" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash 0.3.8", - "autocfg", + "ahash", + "serde", ] [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash 0.7.8", - "serde", -] +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" [[package]] name = "hashbrown" @@ -1208,6 +1276,7 @@ dependencies = [ "binary_sv2", "buffer_sv2", "codec_sv2", + "config", "error_handling", "framing_sv2", "futures", @@ -1218,7 +1287,6 @@ dependencies = [ "serde", "stratum-common", "tokio", - "toml", "tracing", "tracing-subscriber", ] @@ -1231,6 +1299,7 @@ dependencies = [ "binary_sv2", "buffer_sv2", "codec_sv2", + "config", "const_sv2", "error_handling", "hashbrown 0.11.2", @@ -1246,7 +1315,6 @@ dependencies = [ "serde_json", "stratum-common", "tokio", - "toml", "tracing", "tracing-subscriber", ] @@ -1268,6 +1336,17 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json5" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" +dependencies = [ + "pest", + "pest_derive", + "serde", +] + [[package]] name = "key-utils" version = "1.1.0" @@ -1298,6 +1377,12 @@ version = 
"0.2.154" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + [[package]] name = "linux-raw-sys" version = "0.3.8" @@ -1335,13 +1420,18 @@ version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "mining-device" version = "0.1.1" dependencies = [ "async-channel 1.9.0", "async-recursion 0.3.2", - "async-std", "binary_sv2", "buffer_sv2", "clap", @@ -1354,6 +1444,7 @@ dependencies = [ "roles_logic_sv2", "sha2 0.10.8", "stratum-common", + "tokio", "tracing", "tracing-subscriber", ] @@ -1367,6 +1458,7 @@ dependencies = [ "binary_sv2", "buffer_sv2", "codec_sv2", + "config", "const_sv2", "futures", "key-utils", @@ -1377,7 +1469,6 @@ dependencies = [ "serde", "stratum-common", "tokio", - "toml", "tracing", "tracing-subscriber", ] @@ -1443,6 +1534,16 @@ dependencies = [ "secp256k1 0.28.2", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1512,6 +1613,16 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "ordered-multimap" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e" +dependencies = [ + "dlv-list", + "hashbrown 0.13.2", +] + [[package]] name = "overload" version = "0.1.1" @@ -1547,6 +1658,57 @@ dependencies = [ "windows-targets 0.52.5", ] +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + +[[package]] +name = "pest" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.61", +] + +[[package]] +name = "pest_meta" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" +dependencies = [ + "once_cell", + "pest", + "sha2 0.10.8", +] + 
[[package]] name = "pin-project" version = "1.1.5" @@ -1653,6 +1815,7 @@ dependencies = [ "binary_sv2", "buffer_sv2", "codec_sv2", + "config", "const_sv2", "error_handling", "hex", @@ -1665,7 +1828,6 @@ dependencies = [ "serde", "stratum-common", "tokio", - "toml", "tracing", "tracing-subscriber", ] @@ -1780,7 +1942,7 @@ checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "roles_logic_sv2" -version = "1.1.0" +version = "1.2.1" dependencies = [ "binary_sv2", "chacha20poly1305", @@ -1796,6 +1958,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "ron" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" +dependencies = [ + "base64", + "bitflags 2.5.0", + "serde", + "serde_derive", +] + [[package]] name = "rpc_sv2" version = "1.0.0" @@ -1810,6 +1984,16 @@ dependencies = [ "stratum-common", ] +[[package]] +name = "rust-ini" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" +dependencies = [ + "cfg-if", + "ordered-multimap", +] + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -1925,9 +2109,18 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +dependencies = [ + "serde", +] + [[package]] name = "serde_sv2" -version = "1.0.0" +version = "1.0.1" dependencies = [ "buffer_sv2", "serde", @@ -2053,7 +2246,7 @@ dependencies = [ [[package]] name = "sv1_api" -version = "1.0.0" +version = "1.0.1" dependencies = [ "binary_sv2", "bitcoin_hashes 0.3.2", @@ -2090,7 +2283,7 @@ dependencies = [ [[package]] name = "template_distribution_sv2" -version = "1.0.0" +version = "1.0.2" dependencies = [ "binary_sv2", "const_sv2", @@ -2105,6 +2298,26 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "thiserror" +version = "1.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.61", +] + [[package]] name = "thread_local" version = "1.1.8" @@ -2115,11 +2328,20 @@ dependencies = [ "once_cell", ] +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tokio" -version = "1.37.0" +version = "1.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" dependencies = [ "backtrace", "bytes", @@ -2136,9 +2358,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = 
"5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", @@ -2160,11 +2382,36 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.6" -source = "git+https://github.com/diondokter/toml-rs?rev=c4161aa#c4161aa70202b3992dbec79b76e7a8659713b604" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ - "hashbrown 0.7.2", "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", ] [[package]] @@ -2264,6 +2511,7 @@ dependencies = [ "binary_sv2", "buffer_sv2", "codec_sv2", + "config", "error_handling", "framing_sv2", "futures", @@ -2279,7 +2527,6 @@ dependencies = [ "sv1_api", "tokio", "tokio-util", - "toml", "tracing", "tracing-subscriber", ] @@ -2296,12 +2543,24 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "unicode-segmentation" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" + [[package]] name = "universal-hash" version = "0.5.1" @@ -2603,6 +2862,24 @@ version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +[[package]] +name = "winnow" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +dependencies = [ + "memchr", +] + +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "zeroize" version = "1.7.0" diff --git a/roles/Cargo.toml b/roles/Cargo.toml index 2155409ba..109cfd0ee 100644 --- a/roles/Cargo.toml +++ b/roles/Cargo.toml @@ -2,7 +2,6 @@ name = "stratum_v2_roles" version = "0.1.0" authors = ["The Stratum v2 Developers"] edition = "2021" -rust-version = "1.75.0" description = "The Stratum protocol defines how miners, proxies, and pools communicate to contribute hashrate to the Bitcoin network. Stratum v2 is a robust set of primitives which anyone can use to expand the protocol or implement a role." 
documentation = "https://github.com/stratum-mining/stratum" readme = "README.md" diff --git a/roles/jd-client/Cargo.toml b/roles/jd-client/Cargo.toml index 4c79016d0..dc2d46079 100644 --- a/roles/jd-client/Cargo.toml +++ b/roles/jd-client/Cargo.toml @@ -17,13 +17,13 @@ async-recursion = "0.3.2" binary_sv2 = { version = "^1.0.0", path = "../../protocols/v2/binary-sv2/binary-sv2" } buffer_sv2 = { version = "^1.0.0", path = "../../utils/buffer" } codec_sv2 = { version = "^1.0.1", path = "../../protocols/v2/codec-sv2", features = ["noise_sv2", "with_buffer_pool"] } -framing_sv2 = { version = "^1.1.0", path = "../../protocols/v2/framing-sv2" } +framing_sv2 = { version = "^2.0.0", path = "../../protocols/v2/framing-sv2" } network_helpers_sv2 = { version = "2.0.0", path = "../roles-utils/network-helpers", features=["with_tokio", "with_buffer_pool"] } roles_logic_sv2 = { version = "^1.0.0", path = "../../protocols/v2/roles-logic-sv2" } serde = { version = "1.0.89", default-features = false, features = ["derive", "alloc"] } futures = "0.3.25" tokio = { version = "1", features = ["full"] } -toml = { version = "0.5.6", git = "https://github.com/diondokter/toml-rs", default-features = false, rev = "c4161aa" } +ext-config = { version = "0.14.0", features = ["toml"], package = "config" } tracing = { version = "0.1" } tracing-subscriber = { version = "0.3" } error_handling = { version = "1.0.0", path = "../../utils/error-handling" } diff --git a/roles/jd-client/src/lib/downstream.rs b/roles/jd-client/src/lib/downstream.rs index ddc6e5bd0..92e5a874a 100644 --- a/roles/jd-client/src/lib/downstream.rs +++ b/roles/jd-client/src/lib/downstream.rs @@ -21,7 +21,7 @@ use roles_logic_sv2::{ }; use tracing::{debug, error, info, warn}; -use codec_sv2::{Frame, HandshakeRole, Responder, StandardEitherFrame, StandardSv2Frame}; +use codec_sv2::{HandshakeRole, Responder, StandardEitherFrame, StandardSv2Frame}; use key_utils::{Secp256k1PublicKey, Secp256k1SecretKey}; use stratum_common::bitcoin::{consensus::Decodable, TxOut}; @@ -32,8 +32,8 @@ pub type EitherFrame = StandardEitherFrame; /// 1 to 1 connection with a downstream node that implement the mining (sub)protocol can be either /// a mining device or a downstream proxy. -/// A downstream can only be linked with an upstream at a time. Support multi upstrems for -/// downstream do no make much sense. +/// A downstream can only be linked with an upstream at a time. Support multi upstreams for +/// downstream do not make much sense. 
#[derive(Debug)] pub struct DownstreamMiningNode { receiver: Receiver, @@ -47,7 +47,7 @@ pub struct DownstreamMiningNode { miner_coinbase_output: Vec, // used to retrieve the job id of the share that we send upstream last_template_id: u64, - jd: Option>>, + pub jd: Option>>, } #[allow(clippy::large_enum_variant)] @@ -181,7 +181,7 @@ impl DownstreamMiningNode { } } - /// Send SetupConnectionSuccess to donwstream and start processing new messages coming from + /// Send SetupConnectionSuccess to downstream and start processing new messages coming from /// downstream pub async fn start( self_mutex: &Arc>, @@ -225,7 +225,7 @@ impl DownstreamMiningNode { // mining channel success fn set_channel_factory(self_mutex: Arc>) { if !self_mutex.safe_lock(|s| s.status.is_solo_miner()).unwrap() { - // Safe unwrap already checked if it contains an upstream withe `is_solo_miner` + // Safe unwrap already checked if it contains an upstream with `is_solo_miner` let upstream = self_mutex .safe_lock(|s| s.status.get_upstream().unwrap()) .unwrap(); @@ -376,12 +376,13 @@ impl DownstreamMiningNode { let to_send = to_send.into_values(); for message in to_send { let message = if let Mining::NewExtendedMiningJob(job) = message { - let jd = self_mutex.safe_lock(|s| s.jd.clone()).unwrap().unwrap(); - jd.safe_lock(|jd| jd.coinbase_tx_prefix = job.coinbase_tx_prefix.clone()) - .unwrap(); - jd.safe_lock(|jd| jd.coinbase_tx_suffix = job.coinbase_tx_suffix.clone()) + if let Some(jd) = self_mutex.safe_lock(|s| s.jd.clone()).unwrap() { + jd.safe_lock(|jd| { + jd.coinbase_tx_prefix = job.coinbase_tx_prefix.clone(); + jd.coinbase_tx_suffix = job.coinbase_tx_suffix.clone(); + }) .unwrap(); - + } Mining::NewExtendedMiningJob(job) } else { message @@ -514,7 +515,7 @@ impl fn handle_update_channel( &mut self, - _: UpdateChannel, + m: UpdateChannel, ) -> Result, Error> { if !self.status.is_solo_miner() { // Safe unwrap already checked if it contains upstream with is_solo_miner self.status.get_upstream().unwrap(), )) } else { - todo!() + let maximum_target = + roles_logic_sv2::utils::hash_rate_to_target(m.nominal_hash_rate.into(), 10.0)?; + self.status + .get_channel() + .update_target_for_channel(m.channel_id, maximum_target.clone().into()); + let set_target = SetTarget { + channel_id: m.channel_id, + maximum_target, + }; + Ok(SendTo::Respond(Mining::SetTarget(set_target))) } } diff --git a/roles/jd-client/src/lib/error.rs b/roles/jd-client/src/lib/error.rs index 685137457..c3e24bd4d 100644 --- a/roles/jd-client/src/lib/error.rs +++ b/roles/jd-client/src/lib/error.rs @@ -1,3 +1,4 @@ +use ext_config::ConfigError; use std::fmt; use roles_logic_sv2::mining_sv2::{ExtendedExtranonce, NewExtendedMiningJob, SetCustomMiningJob}; @@ -30,8 +31,8 @@ pub enum Error<'a> { VecToSlice32(Vec), /// Errors on bad CLI argument input. BadCliArgs, - /// Errors on bad `toml` deserialize. - BadTomlDeserialize(toml::de::Error), + /// Errors on bad `config` TOML deserialize. + BadConfigDeserialize(ConfigError), /// Errors from `binary_sv2` crate. BinarySv2(binary_sv2::Error), /// Errors on bad noise handshake.
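The solo-miner branch of `handle_update_channel` above derives a per-channel `maximum_target` from the downstream's nominal hashrate via `roles_logic_sv2::utils::hash_rate_to_target`, at 10.0 shares per minute. Conceptually the target is 2^256 divided by the expected number of hashes per share; here is a self-contained sketch of that arithmetic, computing only the top 64 bits of the 256-bit target so no big-integer type is needed (the helper name is hypothetical, not the crate's API):

/// Top 64 bits of floor(2^256 / hashes_per_share), where
/// hashes_per_share = hashrate * (60 / shares_per_minute).
/// Since floor(2^256 / h) >> 192 is approximately 2^64 / h, plain f64/u64 math suffices.
fn approx_target_high_bits(nominal_hash_rate: f32, shares_per_minute: f64) -> u64 {
    let hashes_per_share = nominal_hash_rate as f64 * (60.0 / shares_per_minute);
    if hashes_per_share <= 1.0 {
        // Slower than one expected hash per share: every hash meets the target.
        return u64::MAX;
    }
    (u64::MAX as f64 / hashes_per_share) as u64
}

For example, a 10 TH/s device at 10 shares per minute expects 6e13 hashes per share, giving a high word of roughly 3e5; the real function returns the full 256-bit `U256`, which is then echoed back to the downstream in `SetTarget`.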
@@ -63,7 +64,7 @@ impl<'a> fmt::Display for Error<'a> { use Error::*; match self { BadCliArgs => write!(f, "Bad CLI arg input"), - BadTomlDeserialize(ref e) => write!(f, "Bad `toml` deserialize: `{:?}`", e), + BadConfigDeserialize(ref e) => write!(f, "Bad `config` TOML deserialize: `{:?}`", e), BinarySv2(ref e) => write!(f, "Binary SV2 error: `{:?}`", e), CodecNoise(ref e) => write!(f, "Noise error: `{:?}", e), FramingSv2(ref e) => write!(f, "Framing SV2 error: `{:?}`", e), @@ -119,9 +120,9 @@ impl<'a> From for Error<'a> { } } -impl<'a> From for Error<'a> { - fn from(e: toml::de::Error) -> Self { - Error::BadTomlDeserialize(e) +impl<'a> From for Error<'a> { + fn from(e: ConfigError) -> Self { + Error::BadConfigDeserialize(e) } } @@ -209,12 +210,6 @@ impl<'a> } } -impl<'a> From> for Error<'a> { - fn from(e: Vec) -> Self { - Error::VecToSlice32(e) - } -} - impl<'a> From for Error<'a> { fn from(e: ParseLengthError) -> Self { Error::Uint256Conversion(e) diff --git a/roles/jd-client/src/lib/job_declarator/message_handler.rs b/roles/jd-client/src/lib/job_declarator/message_handler.rs index 7516f24f9..72d58a912 100644 --- a/roles/jd-client/src/lib/job_declarator/message_handler.rs +++ b/roles/jd-client/src/lib/job_declarator/message_handler.rs @@ -55,12 +55,20 @@ impl ParseServerJobDeclarationMessages for JobDeclarator { ) -> Result { let tx_list = self .last_declare_mining_jobs_sent - .get(&message.request_id) - .unwrap() - .clone() - .unwrap() - .tx_list - .into_inner(); + .iter() + .find_map(|entry| { + if let Some((id, last_declare_job)) = entry { + if *id == message.request_id { + Some(last_declare_job.clone().tx_list.into_inner()) + } else { + None + } + } else { + None + } + }) + .ok_or_else(|| Error::UnknownRequestId(message.request_id))?; + let unknown_tx_position_list: Vec = message.unknown_tx_position_list.into_inner(); let missing_transactions: Vec = unknown_tx_position_list .iter() diff --git a/roles/jd-client/src/lib/job_declarator/mod.rs b/roles/jd-client/src/lib/job_declarator/mod.rs index abaf852ca..29fb2e4f2 100644 --- a/roles/jd-client/src/lib/job_declarator/mod.rs +++ b/roles/jd-client/src/lib/job_declarator/mod.rs @@ -17,7 +17,6 @@ use tokio::task::AbortHandle; use tracing::{error, info}; use async_recursion::async_recursion; -use codec_sv2::Frame; use nohash_hasher::BuildNoHashHasher; use roles_logic_sv2::{ handlers::job_declaration::ParseServerJobDeclarationMessages, @@ -55,7 +54,7 @@ pub struct JobDeclarator { req_ids: Id, min_extranonce_size: u16, // (Sent DeclareMiningJob, is future, template id, merkle path) - last_declare_mining_jobs_sent: HashMap>, + last_declare_mining_jobs_sent: [Option<(u32, LastDeclareJob)>; 2], last_set_new_prev_hash: Option>, set_new_prev_hash_counter: u8, #[allow(clippy::type_complexity)] @@ -115,7 +114,7 @@ impl JobDeclarator { allocated_tokens: vec![], req_ids: Id::new(), min_extranonce_size, - last_declare_mining_jobs_sent: HashMap::with_capacity(10), + last_declare_mining_jobs_sent: [None, None], last_set_new_prev_hash: None, future_jobs: HashMap::with_hasher(BuildNoHashHasher::default()), up, @@ -130,18 +129,26 @@ impl JobDeclarator { Ok(self_) } - fn get_last_declare_job_sent(self_mutex: &Arc>, request_id: u32) -> LastDeclareJob { + fn get_last_declare_job_sent( + self_mutex: &Arc>, + request_id: u32, + ) -> Option { self_mutex .safe_lock(|s| { - s.last_declare_mining_jobs_sent - .get(&request_id) - .expect("LastDeclareJob not found") - .clone() - .expect("unreachable code") + for (id, job) in 
s.last_declare_mining_jobs_sent.iter().flatten() { + if *id == request_id { + return Some(job.to_owned()); + } + } + None }) .unwrap() } + /// We maintain a window of 2 jobs. If more than 2 blocks are found, + /// the ordering will depend on the request ID. Only the 2 most recent request + /// IDs will be kept in memory, while the rest will be discarded. + /// More information can be found here: https://github.com/stratum-mining/stratum/pull/904#discussion_r1609469048 fn update_last_declare_job_sent( self_mutex: &Arc>, request_id: u32, @@ -149,13 +156,20 @@ impl JobDeclarator { ) { self_mutex .safe_lock(|s| { - //check hashmap size in order to not let it grow indefinetely - if s.last_declare_mining_jobs_sent.len() < 10 { - s.last_declare_mining_jobs_sent.insert(request_id, Some(j)); - } else if let Some(min_key) = s.last_declare_mining_jobs_sent.keys().min().cloned() + if let Some(empty_index) = s + .last_declare_mining_jobs_sent + .iter() + .position(|entry| entry.is_none()) + { + s.last_declare_mining_jobs_sent[empty_index] = Some((request_id, j)); + } else if let Some((min_index, _)) = s + .last_declare_mining_jobs_sent + .iter() + .enumerate() + .filter_map(|(i, entry)| entry.as_ref().map(|(id, _)| (i, id))) + .min_by_key(|&(_, id)| id) { - s.last_declare_mining_jobs_sent.remove(&min_key); - s.last_declare_mining_jobs_sent.insert(request_id, Some(j)); + s.last_declare_mining_jobs_sent[min_index] = Some((request_id, j)); } }) .unwrap(); @@ -289,8 +303,7 @@ impl JobDeclarator { match next_message_to_send { Ok(SendTo::None(Some(JobDeclaration::DeclareMiningJobSuccess(m)))) => { let new_token = m.new_mining_job_token; - let last_declare = - Self::get_last_declare_job_sent(&self_mutex, m.request_id); + let last_declare = Self::get_last_declare_job_sent(&self_mutex, m.request_id).unwrap_or_else(|| panic!("Failed to get last declare job: job not found, Request Id: {:?}.", m.request_id)); let mut last_declare_mining_job_sent = last_declare.declare_job; let is_future = last_declare.template.future_template; let id = last_declare.template.template_id; diff --git a/roles/jd-client/src/lib/job_declarator/setup_connection.rs b/roles/jd-client/src/lib/job_declarator/setup_connection.rs index 063592c40..0e7b6fd8a 100644 --- a/roles/jd-client/src/lib/job_declarator/setup_connection.rs +++ b/roles/jd-client/src/lib/job_declarator/setup_connection.rs @@ -1,5 +1,5 @@ use async_channel::{Receiver, Sender}; -use codec_sv2::{Frame, StandardEitherFrame, StandardSv2Frame}; +use codec_sv2::{StandardEitherFrame, StandardSv2Frame}; use roles_logic_sv2::{ common_messages_sv2::{Protocol, SetupConnection}, handlers::common::{ParseUpstreamCommonMessages, SendTo}, diff --git a/roles/jd-client/src/lib/mod.rs b/roles/jd-client/src/lib/mod.rs index 489759420..db52a6c39 100644 --- a/roles/jd-client/src/lib/mod.rs +++ b/roles/jd-client/src/lib/mod.rs @@ -8,6 +8,22 @@ pub mod upstream_sv2; use std::{sync::atomic::AtomicBool, time::Duration}; +use job_declarator::JobDeclarator; +use proxy_config::ProxyConfig; +use template_receiver::TemplateRx; + +use async_channel::{bounded, unbounded}; +use futures::{select, FutureExt}; +use roles_logic_sv2::utils::Mutex; +use std::{ + net::{IpAddr, SocketAddr}, + str::FromStr, + sync::Arc, +}; +use tokio::task::AbortHandle; + +use tracing::{error, info}; + /// Is used by the template receiver and the downstream. 
When a NewTemplate is received the context /// that is running the template receiver set this value to false and then the message is sent to /// the context that is running the Downstream that do something and then set it back to true. @@ -15,7 +31,7 @@ use std::{sync::atomic::AtomicBool, time::Duration}; /// In the meantime if the context that is running the template receiver receives a SetNewPrevHash /// it wait until the value of this global is true before doing anything. /// -/// Acuire and Release memory ordering is used. +/// Acquire and Release memory ordering is used. /// /// Memory Ordering Explanation: /// We use Acquire-Release ordering instead of SeqCst or Relaxed for the following reasons: @@ -31,6 +47,307 @@ use std::{sync::atomic::AtomicBool, time::Duration}; /// between all the contexts is not necessary. pub static IS_NEW_TEMPLATE_HANDLED: AtomicBool = AtomicBool::new(true); +/// Job Declarator Client (or JDC) is the role which is Miner-side, in charge of creating new +/// mining jobs from the templates received by the Template Provider to which it is connected. It +/// declares custom jobs to the JDS, in order to start working on them. +/// JDC is also responsible for putting in action the Pool-fallback mechanism, automatically +/// switching to backup Pools in case of declared custom jobs refused by JDS (which is Pool side). +/// As a solution of last-resort, it is able to switch to Solo Mining until new safe Pools appear +/// in the market. +pub struct JobDeclaratorClient { + /// Configuration of the proxy server [`JobDeclaratorClient`] is connected to. + config: ProxyConfig, +} + +impl JobDeclaratorClient { + pub fn new(config: ProxyConfig) -> Self { + Self { config } + } + + pub async fn start(self) { + let mut upstream_index = 0; + let mut interrupt_signal_future = Box::pin(tokio::signal::ctrl_c().fuse()); + + // Channel used to manage failed tasks + let (tx_status, rx_status) = unbounded(); + + let task_collector = Arc::new(Mutex::new(vec![])); + + let proxy_config = &self.config; + + loop { + let task_collector = task_collector.clone(); + let tx_status = tx_status.clone(); + if let Some(upstream) = proxy_config.upstreams.get(upstream_index) { + self.initialize_jd(tx_status.clone(), task_collector.clone(), upstream.clone()) + .await; + } else { + self.initialize_jd_as_solo_miner(tx_status.clone(), task_collector.clone()) + .await; + } + // Check all tasks if is_finished() is true, if so exit + loop { + let task_status = select! 
{ + task_status = rx_status.recv().fuse() => task_status, + interrupt_signal = interrupt_signal_future => { + match interrupt_signal { + Ok(()) => { + info!("Interrupt received"); + }, + Err(err) => { + error!("Unable to listen for interrupt signal: {}", err); + // we also shut down in case of error + }, + } + std::process::exit(0); + } + }; + let task_status: status::Status = task_status.unwrap(); + + match task_status.state { + // Should only be sent by the downstream listener + status::State::DownstreamShutdown(err) => { + error!("SHUTDOWN from: {}", err); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + task_collector + .safe_lock(|s| { + for handle in s { + handle.abort(); + } + }) + .unwrap(); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + break; + } + status::State::UpstreamShutdown(err) => { + error!("SHUTDOWN from: {}", err); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + task_collector + .safe_lock(|s| { + for handle in s { + handle.abort(); + } + }) + .unwrap(); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + break; + } + status::State::UpstreamRogue => { + error!("Changing Pool"); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + task_collector + .safe_lock(|s| { + for handle in s { + handle.abort(); + } + }) + .unwrap(); + upstream_index += 1; + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + break; + } + status::State::Healthy(msg) => { + info!("HEALTHY message: {}", msg); + } + } + } + } + } + + async fn initialize_jd_as_solo_miner( + &self, + tx_status: async_channel::Sender>, + task_collector: Arc>>, + ) { + let proxy_config = &self.config; + let timeout = proxy_config.timeout; + let miner_tx_out = proxy_config::get_coinbase_output(proxy_config).unwrap(); + + // When Downstream receives a share that meets the bitcoin target it transforms it into a + // SubmitSolution and sends it to the TemplateReceiver + let (send_solution, recv_solution) = bounded(10); + + // Format `Downstream` connection address + let downstream_addr = SocketAddr::new( + IpAddr::from_str(&proxy_config.downstream_address).unwrap(), + proxy_config.downstream_port, + ); + + // Wait for downstream to connect + let downstream = downstream::listen_for_downstream_mining( + downstream_addr, + None, + send_solution, + proxy_config.withhold, + proxy_config.authority_public_key, + proxy_config.authority_secret_key, + proxy_config.cert_validity_sec, + task_collector.clone(), + status::Sender::Downstream(tx_status.clone()), + miner_tx_out.clone(), + None, + ) + .await + .unwrap(); + + // Initialize JD part + let mut parts = proxy_config.tp_address.split(':'); + let ip_tp = parts.next().unwrap().to_string(); + let port_tp = parts.next().unwrap().parse::().unwrap(); + + TemplateRx::connect( + SocketAddr::new(IpAddr::from_str(ip_tp.as_str()).unwrap(), port_tp), + recv_solution, + status::Sender::TemplateReceiver(tx_status.clone()), + None, + downstream, + task_collector, + Arc::new(Mutex::new(PoolChangerTrigger::new(timeout))), + miner_tx_out.clone(), + proxy_config.tp_authority_public_key, + false, + ) + .await; + } + + async fn initialize_jd( + &self, + tx_status: async_channel::Sender>, + task_collector: Arc>>, + upstream_config: proxy_config::Upstream, + ) { + let proxy_config = &self.config; + let timeout = proxy_config.timeout; + let test_only_do_not_send_solution_to_tp = proxy_config + .test_only_do_not_send_solution_to_tp + .unwrap_or(false); + + // Format `Upstream` connection address + let mut parts = 
upstream_config.pool_address.split(':'); + let address = parts + .next() + .unwrap_or_else(|| panic!("Invalid pool address {}", upstream_config.pool_address)); + let port = parts + .next() + .and_then(|p| p.parse::().ok()) + .unwrap_or_else(|| panic!("Invalid pool address {}", upstream_config.pool_address)); + let upstream_addr = SocketAddr::new( + IpAddr::from_str(address).unwrap_or_else(|_| { + panic!("Invalid pool address {}", upstream_config.pool_address) + }), + port, + ); + + // When Downstream receives a share that meets the bitcoin target it transforms it into a + // SubmitSolution and sends it to the TemplateReceiver + let (send_solution, recv_solution) = bounded(10); + + // Instantiate a new `Upstream` (SV2 Pool) + let upstream = match upstream_sv2::Upstream::new( + upstream_addr, + upstream_config.authority_pubkey, + 0, // TODO + upstream_config.pool_signature.clone(), + status::Sender::Upstream(tx_status.clone()), + task_collector.clone(), + Arc::new(Mutex::new(PoolChangerTrigger::new(timeout))), + ) + .await + { + Ok(upstream) => upstream, + Err(e) => { + error!("Failed to create upstream: {}", e); + panic!() + } + }; + + match upstream_sv2::Upstream::setup_connection( + upstream.clone(), + proxy_config.min_supported_version, + proxy_config.max_supported_version, + ) + .await + { + Ok(_) => info!("Connected to Upstream!"), + Err(e) => { + error!("Failed to connect to Upstream EXITING! : {}", e); + panic!() + } + } + + // Start receiving messages from the SV2 Upstream role + if let Err(e) = upstream_sv2::Upstream::parse_incoming(upstream.clone()) { + error!("failed to create sv2 parser: {}", e); + panic!() + } + + // Format `Downstream` connection address + let downstream_addr = SocketAddr::new( + IpAddr::from_str(&proxy_config.downstream_address).unwrap(), + proxy_config.downstream_port, + ); + + // Initialize JD part + let mut parts = proxy_config.tp_address.split(':'); + let ip_tp = parts.next().unwrap().to_string(); + let port_tp = parts.next().unwrap().parse::().unwrap(); + + let mut parts = upstream_config.jd_address.split(':'); + let ip_jd = parts.next().unwrap().to_string(); + let port_jd = parts.next().unwrap().parse::().unwrap(); + let jd = match JobDeclarator::new( + SocketAddr::new(IpAddr::from_str(ip_jd.as_str()).unwrap(), port_jd), + upstream_config.authority_pubkey.into_bytes(), + proxy_config.clone(), + upstream.clone(), + task_collector.clone(), + ) + .await + { + Ok(c) => c, + Err(e) => { + let _ = tx_status + .send(status::Status { + state: status::State::UpstreamShutdown(e), + }) + .await; + return; + } + }; + + // Wait for downstream to connect + let downstream = downstream::listen_for_downstream_mining( + downstream_addr, + Some(upstream), + send_solution, + proxy_config.withhold, + proxy_config.authority_public_key, + proxy_config.authority_secret_key, + proxy_config.cert_validity_sec, + task_collector.clone(), + status::Sender::Downstream(tx_status.clone()), + vec![], + Some(jd.clone()), + ) + .await + .unwrap(); + + TemplateRx::connect( + SocketAddr::new(IpAddr::from_str(ip_tp.as_str()).unwrap(), port_tp), + recv_solution, + status::Sender::TemplateReceiver(tx_status.clone()), + Some(jd.clone()), + downstream, + task_collector, + Arc::new(Mutex::new(PoolChangerTrigger::new(timeout))), + vec![], + proxy_config.tp_authority_public_key, + test_only_do_not_send_solution_to_tp, + ) + .await; + } +} + #[derive(Debug)] pub struct PoolChangerTrigger { timeout: Duration, diff --git a/roles/jd-client/src/lib/status.rs index 
44e6056d2..292a4037a 100644 --- a/roles/jd-client/src/lib/status.rs +++ b/roles/jd-client/src/lib/status.rs @@ -84,7 +84,7 @@ async fn send_status( outcome } -// this is called by `error_handling::handle_result!` +// This is called by `error_handling::handle_result!` pub async fn handle_error( sender: &Sender, e: error::Error<'static>, @@ -94,8 +94,8 @@ pub async fn handle_error( Error::VecToSlice32(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, // Errors on bad CLI argument input. Error::BadCliArgs => send_status(sender, e, error_handling::ErrorBranch::Break).await, - // Errors on bad `toml` deserialize. - Error::BadTomlDeserialize(_) => { + // Errors on bad `config` TOML deserialize. + Error::BadConfigDeserialize(_) => { send_status(sender, e, error_handling::ErrorBranch::Break).await } // Errors from `binary_sv2` crate. diff --git a/roles/jd-client/src/lib/template_receiver/mod.rs b/roles/jd-client/src/lib/template_receiver/mod.rs index 02d3d0497..cfc1eec0a 100644 --- a/roles/jd-client/src/lib/template_receiver/mod.rs +++ b/roles/jd-client/src/lib/template_receiver/mod.rs @@ -1,6 +1,6 @@ use super::{job_declarator::JobDeclarator, status, PoolChangerTrigger}; use async_channel::{Receiver, Sender}; -use codec_sv2::{Frame, HandshakeRole, Initiator, StandardEitherFrame, StandardSv2Frame}; +use codec_sv2::{HandshakeRole, Initiator, StandardEitherFrame, StandardSv2Frame}; use error_handling::handle_result; use key_utils::Secp256k1PublicKey; use network_helpers_sv2::noise_connection_tokio::Connection; @@ -57,9 +57,16 @@ impl TemplateRx { test_only_do_not_send_solution_to_tp: bool, ) { let mut encoded_outputs = vec![]; - miner_coinbase_outputs - .consensus_encode(&mut encoded_outputs) - .expect("Invalid coinbase output in config"); + // jd is set to None in initialize_jd_as_solo_miner (in this case we need to take the first output as done by JDS) + if jd.is_none() { + miner_coinbase_outputs[0] + .consensus_encode(&mut encoded_outputs) + .expect("Invalid coinbase output in config"); + } else { + miner_coinbase_outputs + .consensus_encode(&mut encoded_outputs) + .expect("Invalid coinbase output in config"); + } let stream = tokio::net::TcpStream::connect(address).await.unwrap(); let initiator = match authority_public_key { diff --git a/roles/jd-client/src/lib/template_receiver/setup_connection.rs b/roles/jd-client/src/lib/template_receiver/setup_connection.rs index 45f48a2f4..505b945c3 100644 --- a/roles/jd-client/src/lib/template_receiver/setup_connection.rs +++ b/roles/jd-client/src/lib/template_receiver/setup_connection.rs @@ -1,5 +1,5 @@ use async_channel::{Receiver, Sender}; -use codec_sv2::{Frame, StandardEitherFrame, StandardSv2Frame}; +use codec_sv2::{StandardEitherFrame, StandardSv2Frame}; use roles_logic_sv2::{ common_messages_sv2::{Protocol, SetupConnection}, handlers::common::{ParseUpstreamCommonMessages, SendTo}, diff --git a/roles/jd-client/src/lib/upstream_sv2/upstream.rs b/roles/jd-client/src/lib/upstream_sv2/upstream.rs index 857cbd308..b04efa335 100644 --- a/roles/jd-client/src/lib/upstream_sv2/upstream.rs +++ b/roles/jd-client/src/lib/upstream_sv2/upstream.rs @@ -11,7 +11,7 @@ use super::super::{ }; use async_channel::{Receiver, Sender}; use binary_sv2::{Seq0255, U256}; -use codec_sv2::{Frame, HandshakeRole, Initiator}; +use codec_sv2::{HandshakeRole, Initiator}; use error_handling::handle_result; use key_utils::Secp256k1PublicKey; use network_helpers_sv2::noise_connection_tokio::Connection; diff --git a/roles/jd-client/src/main.rs 
b/roles/jd-client/src/main.rs index ac246987e..763fb30be 100644 --- a/roles/jd-client/src/main.rs +++ b/roles/jd-client/src/main.rs @@ -1,43 +1,39 @@ #![allow(special_module_name)] - mod args; mod lib; use lib::{ error::{Error, ProxyResult}, - job_declarator::JobDeclarator, proxy_config::ProxyConfig, - status, - template_receiver::TemplateRx, - PoolChangerTrigger, + status, JobDeclaratorClient, }; use args::Args; -use async_channel::{bounded, unbounded}; -use futures::{select, FutureExt}; -use roles_logic_sv2::utils::Mutex; -use std::{ - net::{IpAddr, SocketAddr}, - str::FromStr, - sync::Arc, - time::Duration, -}; -use tokio::task::AbortHandle; - -use tracing::{error, info}; +use ext_config::{Config, File, FileFormat}; +use tracing::error; -/// Process CLI args, if any. +/// Process CLI args and load configuration. #[allow(clippy::result_large_err)] fn process_cli_args<'a>() -> ProxyResult<'a, ProxyConfig> { - let args = match Args::from_args() { - Ok(cfg) => cfg, - Err(help) => { - error!("{}", help); - return Err(Error::BadCliArgs); - } - }; - let config_file = std::fs::read_to_string(args.config_path)?; - Ok(toml::from_str::(&config_file)?) + // Parse CLI arguments + let args = Args::from_args().map_err(|help| { + error!("{}", help); + Error::BadCliArgs + })?; + + // Build configuration from the provided file path + let config_path = args.config_path.to_str().ok_or_else(|| { + error!("Invalid configuration path."); + Error::BadCliArgs + })?; + + let settings = Config::builder() + .add_source(File::new(config_path, FileFormat::Toml)) + .build()?; + + // Deserialize settings into ProxyConfig + let config = settings.try_deserialize::()?; + Ok(config) } /// TODO on the setup phase JDC must send a random nonce to bitcoind and JDS used for the tx /// @@ -46,12 +42,12 @@ fn process_cli_args<'a>() -> ProxyResult<'a, ProxyConfig> { /// TODO on setupconnection with bitcoind (TP) JDC must signal that it wants a tx short hash list with /// the templates /// -/// TODO JDC must handle TxShortHahhList message +/// TODO JDC must handle TxShortHashList message /// /// This will start: /// 1. An Upstream, this will connect with the mining Pool /// 2. A listener that will wait for a mining downstream with ExtendedChannel capabilities (tproxy, -/// minin-proxy) +/// mining-proxy) /// 3. A JobDeclarator, this will connect with the job-declarator-server /// 4. A TemplateRx, this will connect with bitcoind /// @@ -78,15 +74,15 @@ fn process_cli_args<'a>() -> ProxyResult<'a, ProxyConfig> { /// Then we receive CommitMiningJobSuccess and we use the new token to send SetCustomMiningJob to /// the pool. /// When we receive SetCustomMiningJobSuccess we set in Upstream job_id equal to the one received -/// in SetCustomMiningJobSuccess so that we sill send shares upstream with the right job_id. +/// in SetCustomMiningJobSuccess so that we still send shares upstream with the right job_id. /// /// The above procedure lets us send NewExtendedMiningJob downstream right after a NewTemplate has /// been received; this will reduce the time that passes between a NewTemplate and the mining-device /// starting to mine on the new job. /// /// In the case of a future NewTemplate, the SetCustomMiningJob is sent only if the candidate becomes -/// the actual NewTemplate so that we do not send a lot of usless future Job to the pool. That -/// means that SetCustomMiningJob is sent only when a NewTemplate becom "active" +/// the actual NewTemplate so that we do not send a lot of useless future Job to the pool. 
That +/// means that SetCustomMiningJob is sent only when a NewTemplate becomes "active" /// /// The JobDeclarator always has 2 available tokens, which means that whenever a token is used to /// commit a job with upstream we require a new one. Always having a token when needed means that @@ -96,296 +92,14 @@ fn process_cli_args<'a>() -> ProxyResult<'a, ProxyConfig> { #[tokio::main] async fn main() { tracing_subscriber::fmt::init(); - - let mut upstream_index = 0; - let mut interrupt_signal_future = Box::pin(tokio::signal::ctrl_c().fuse()); - - // Channel used to manage failed tasks - let (tx_status, rx_status) = unbounded(); - - let task_collector = Arc::new(Mutex::new(vec![])); - let proxy_config = match process_cli_args() { Ok(p) => p, - Err(_) => return, - }; - - loop { - { - let task_collector = task_collector.clone(); - let tx_status = tx_status.clone(); - - if let Some(upstream) = proxy_config.upstreams.get(upstream_index) { - let initialize = initialize_jd( - tx_status.clone(), - task_collector, - upstream.clone(), - proxy_config.timeout, - ); - tokio::task::spawn(initialize); - } else { - let initialize = initialize_jd_as_solo_miner( - tx_status.clone(), - task_collector, - proxy_config.timeout, - ); - tokio::task::spawn(initialize); - } - } - // Check all tasks if is_finished() is true, if so exit - loop { - let task_status = select! { - task_status = rx_status.recv().fuse() => task_status, - interrupt_signal = interrupt_signal_future => { - match interrupt_signal { - Ok(()) => { - info!("Interrupt received"); - }, - Err(err) => { - error!("Unable to listen for interrupt signal: {}", err); - // we also shut down in case of error - }, - } - std::process::exit(0); - } - }; - let task_status: status::Status = task_status.unwrap(); - - match task_status.state { - // Should only be sent by the downstream listener - status::State::DownstreamShutdown(err) => { - error!("SHUTDOWN from: {}", err); - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - task_collector - .safe_lock(|s| { - for handle in s { - handle.abort(); - } - }) - .unwrap(); - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - break; - } - status::State::UpstreamShutdown(err) => { - error!("SHUTDOWN from: {}", err); - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - task_collector - .safe_lock(|s| { - for handle in s { - handle.abort(); - } - }) - .unwrap(); - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - break; - } - status::State::UpstreamRogue => { - error!("Changin Pool"); - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - task_collector - .safe_lock(|s| { - for handle in s { - handle.abort(); - } - }) - .unwrap(); - upstream_index += 1; - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - break; - } - status::State::Healthy(msg) => { - info!("HEALTHY message: {}", msg); - } - } - } - } -} -async fn initialize_jd_as_solo_miner( - tx_status: async_channel::Sender>, - task_collector: Arc>>, - timeout: Duration, -) { - let proxy_config = process_cli_args().unwrap(); - let miner_tx_out = lib::proxy_config::get_coinbase_output(&proxy_config).unwrap(); - - // When Downstream receive a share that meets bitcoin target it transformit in a - // SubmitSolution and send it to the TemplateReceiver - let (send_solution, recv_solution) = bounded(10); - - // Format `Downstream` connection address - let downstream_addr = SocketAddr::new( - IpAddr::from_str(&proxy_config.downstream_address).unwrap(), - proxy_config.downstream_port, - ); - - // Wait for 
downstream to connect - let downstream = lib::downstream::listen_for_downstream_mining( - downstream_addr, - None, - send_solution, - proxy_config.withhold, - proxy_config.authority_public_key, - proxy_config.authority_secret_key, - proxy_config.cert_validity_sec, - task_collector.clone(), - status::Sender::Downstream(tx_status.clone()), - miner_tx_out.clone(), - None, - ) - .await - .unwrap(); - - // Initialize JD part - let mut parts = proxy_config.tp_address.split(':'); - let ip_tp = parts.next().unwrap().to_string(); - let port_tp = parts.next().unwrap().parse::().unwrap(); - - TemplateRx::connect( - SocketAddr::new(IpAddr::from_str(ip_tp.as_str()).unwrap(), port_tp), - recv_solution, - status::Sender::TemplateReceiver(tx_status.clone()), - None, - downstream, - task_collector, - Arc::new(Mutex::new(PoolChangerTrigger::new(timeout))), - miner_tx_out.clone(), - proxy_config.tp_authority_public_key, - false, - ) - .await; -} - -async fn initialize_jd( - tx_status: async_channel::Sender>, - task_collector: Arc>>, - upstream_config: lib::proxy_config::Upstream, - timeout: Duration, -) { - let proxy_config = process_cli_args().unwrap(); - let test_only_do_not_send_solution_to_tp = proxy_config - .test_only_do_not_send_solution_to_tp - .unwrap_or(false); - - // Format `Upstream` connection address - let mut parts = upstream_config.pool_address.split(':'); - let address = parts - .next() - .unwrap_or_else(|| panic!("Invalid pool address {}", upstream_config.pool_address)); - let port = parts - .next() - .and_then(|p| p.parse::().ok()) - .unwrap_or_else(|| panic!("Invalid pool address {}", upstream_config.pool_address)); - let upstream_addr = SocketAddr::new( - IpAddr::from_str(address) - .unwrap_or_else(|_| panic!("Invalid pool address {}", upstream_config.pool_address)), - port, - ); - - // When Downstream receive a share that meets bitcoin target it transformit in a - // SubmitSolution and send it to the TemplateReceiver - let (send_solution, recv_solution) = bounded(10); - - // Instantiate a new `Upstream` (SV2 Pool) - let upstream = match lib::upstream_sv2::Upstream::new( - upstream_addr, - upstream_config.authority_pubkey, - 0, // TODO - upstream_config.pool_signature.clone(), - status::Sender::Upstream(tx_status.clone()), - task_collector.clone(), - Arc::new(Mutex::new(PoolChangerTrigger::new(timeout))), - ) - .await - { - Ok(upstream) => upstream, - Err(e) => { - error!("Failed to create upstream: {}", e); - panic!() - } - }; - - // Start receiving messages from the SV2 Upstream role - if let Err(e) = lib::upstream_sv2::Upstream::parse_incoming(upstream.clone()) { - error!("failed to create sv2 parser: {}", e); - panic!() - } - - match lib::upstream_sv2::Upstream::setup_connection( - upstream.clone(), - proxy_config.min_supported_version, - proxy_config.max_supported_version, - ) - .await - { - Ok(_) => info!("Connected to Upstream!"), Err(e) => { - error!("Failed to connect to Upstream EXITING! 
: {}", e); - panic!() - } - } - - // Format `Downstream` connection address - let downstream_addr = SocketAddr::new( - IpAddr::from_str(&proxy_config.downstream_address).unwrap(), - proxy_config.downstream_port, - ); - - // Initialize JD part - let mut parts = proxy_config.tp_address.split(':'); - let ip_tp = parts.next().unwrap().to_string(); - let port_tp = parts.next().unwrap().parse::().unwrap(); - - let mut parts = upstream_config.jd_address.split(':'); - let ip_jd = parts.next().unwrap().to_string(); - let port_jd = parts.next().unwrap().parse::().unwrap(); - let jd = match JobDeclarator::new( - SocketAddr::new(IpAddr::from_str(ip_jd.as_str()).unwrap(), port_jd), - upstream_config.authority_pubkey.into_bytes(), - proxy_config.clone(), - upstream.clone(), - task_collector.clone(), - ) - .await - { - Ok(c) => c, - Err(e) => { - let _ = tx_status - .send(status::Status { - state: status::State::UpstreamShutdown(e), - }) - .await; + error!("Job Declarator Client Config error: {}", e); return; } }; - // Wait for downstream to connect - let downstream = lib::downstream::listen_for_downstream_mining( - downstream_addr, - Some(upstream), - send_solution, - proxy_config.withhold, - proxy_config.authority_public_key, - proxy_config.authority_secret_key, - proxy_config.cert_validity_sec, - task_collector.clone(), - status::Sender::Downstream(tx_status.clone()), - vec![], - Some(jd.clone()), - ) - .await - .unwrap(); - - TemplateRx::connect( - SocketAddr::new(IpAddr::from_str(ip_tp.as_str()).unwrap(), port_tp), - recv_solution, - status::Sender::TemplateReceiver(tx_status.clone()), - Some(jd.clone()), - downstream, - task_collector, - Arc::new(Mutex::new(PoolChangerTrigger::new(timeout))), - vec![], - proxy_config.tp_authority_public_key, - test_only_do_not_send_solution_to_tp, - ) - .await; + let jdc = JobDeclaratorClient::new(proxy_config); + jdc.start().await; } diff --git a/roles/jd-server/Cargo.toml b/roles/jd-server/Cargo.toml index 6f55ece06..e8d82438a 100644 --- a/roles/jd-server/Cargo.toml +++ b/roles/jd-server/Cargo.toml @@ -15,13 +15,13 @@ async-channel = "1.5.1" binary_sv2 = { version = "^1.0.0", path = "../../protocols/v2/binary-sv2/binary-sv2" } buffer_sv2 = { version = "^1.0.0", path = "../../utils/buffer" } codec_sv2 = { version = "^1.0.1", path = "../../protocols/v2/codec-sv2", features = ["noise_sv2"] } -const_sv2 = { version = "^1.0.0", path = "../../protocols/v2/const-sv2" } +const_sv2 = { version = "^2.0.0", path = "../../protocols/v2/const-sv2" } network_helpers_sv2 = { version = "2.0.0", path = "../roles-utils/network-helpers", features = ["with_tokio"] } noise_sv2 = { version = "1.1.0", path = "../../protocols/v2/noise-sv2" } rand = "0.8.4" roles_logic_sv2 = { version = "^1.0.0", path = "../../protocols/v2/roles-logic-sv2" } tokio = { version = "1", features = ["full"] } -toml = { version = "0.5.6", git = "https://github.com/diondokter/toml-rs", default-features = false, rev = "c4161aa" } +ext-config = { version = "0.14.0", features = ["toml"], package = "config" } tracing = { version = "0.1" } tracing-subscriber = "0.3" error_handling = { version = "1.0.0", path = "../../utils/error-handling" } @@ -31,4 +31,4 @@ serde = { version = "1.0.89", features = ["derive", "alloc"], default-features = hashbrown = { version = "0.11", default-features = false, features = ["ahash", "serde"] } key-utils = { version = "^1.0.0", path = "../../utils/key-utils" } rpc_sv2 = { version = "1.0.0", path = "../roles-utils/rpc" } -hex = "0.4.3" +hex = "0.4.3" \ No newline at end of file diff 
--git a/roles/jd-server/config-examples/jds-config-hosted-example.toml b/roles/jd-server/config-examples/jds-config-hosted-example.toml index c8a75ef0f..999a69ae3 100644 --- a/roles/jd-server/config-examples/jds-config-hosted-example.toml +++ b/roles/jd-server/config-examples/jds-config-hosted-example.toml @@ -1,3 +1,6 @@ +# Async Job Support +async_mining_allowed = true + # SRI Pool config authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" @@ -18,8 +21,8 @@ coinbase_outputs = [ # SRI Pool JD config listen_jd_address = "0.0.0.0:34264" # RPC config for mempool (it can be also the same TP if correctly configured) -core_rpc_url = "http://127.0.0.1" -core_rpc_port = 18332 +core_rpc_url = "http://75.119.150.111" +core_rpc_port = 48332 core_rpc_user = "username" core_rpc_pass = "password" # Time interval used for JDS mempool update diff --git a/roles/jd-server/config-examples/jds-config-local-example.toml b/roles/jd-server/config-examples/jds-config-local-example.toml index 07aabd514..dc8ce0555 100644 --- a/roles/jd-server/config-examples/jds-config-local-example.toml +++ b/roles/jd-server/config-examples/jds-config-local-example.toml @@ -1,3 +1,6 @@ +# Async Job Support +async_mining_allowed = true + # SRI Pool config authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" @@ -19,7 +22,7 @@ coinbase_outputs = [ listen_jd_address = "127.0.0.1:34264" # RPC config for mempool (it can be also the same TP if correctly configured) core_rpc_url = "http://127.0.0.1" -core_rpc_port = 18332 +core_rpc_port = 48332 core_rpc_user = "username" core_rpc_pass = "password" # Time interval used for JDS mempool update diff --git a/roles/jd-server/src/lib/job_declarator/message_handler.rs b/roles/jd-server/src/lib/job_declarator/message_handler.rs index 96887bdd0..b71eb96ca 100644 --- a/roles/jd-server/src/lib/job_declarator/message_handler.rs +++ b/roles/jd-server/src/lib/job_declarator/message_handler.rs @@ -49,7 +49,7 @@ impl ParseClientJobDeclarationMessages for JobDeclaratorDownstream { request_id: message.request_id, mining_job_token: token.to_le_bytes().to_vec().try_into().unwrap(), coinbase_output_max_additional_size: 100, - async_mining_allowed: true, + async_mining_allowed: self.async_mining_allowed, coinbase_output: self.coinbase_output.clone().try_into().unwrap(), }; let message_enum = JobDeclaration::AllocateMiningJobTokenSuccess(message_success); diff --git a/roles/jd-server/src/lib/job_declarator/mod.rs b/roles/jd-server/src/lib/job_declarator/mod.rs index 56d56223c..d1dfcdf05 100644 --- a/roles/jd-server/src/lib/job_declarator/mod.rs +++ b/roles/jd-server/src/lib/job_declarator/mod.rs @@ -2,13 +2,16 @@ pub mod message_handler; use super::{error::JdsError, mempool::JDsMempool, status, Configuration, EitherFrame, StdFrame}; use async_channel::{Receiver, Sender}; use binary_sv2::{B0255, U256}; -use codec_sv2::{Frame, HandshakeRole, Responder}; +use codec_sv2::{HandshakeRole, Responder}; +use core::panic; use error_handling::handle_result; use key_utils::{Secp256k1PublicKey, Secp256k1SecretKey, SignatureService}; use network_helpers_sv2::noise_connection_tokio::Connection; use nohash_hasher::BuildNoHashHasher; use roles_logic_sv2::{ - common_messages_sv2::SetupConnectionSuccess, + common_messages_sv2::{ + Protocol, SetupConnection, SetupConnectionError, SetupConnectionSuccess, + }, 
handlers::job_declaration::{ParseClientJobDeclarationMessages, SendTo}, job_declaration_sv2::{DeclareMiningJob, SubmitSolutionJd}, parsers::{JobDeclaration, PoolMessages as JdsMessages}, @@ -44,6 +47,7 @@ pub struct AddTrasactionsToMempool { #[derive(Debug)] pub struct JobDeclaratorDownstream { + async_mining_allowed: bool, sender: Sender<EitherFrame>, receiver: Receiver<EitherFrame>, // TODO this should be computed for each new template so that fees are included @@ -67,6 +71,7 @@ pub struct JobDeclaratorDownstream { impl JobDeclaratorDownstream { pub fn new( + async_mining_allowed: bool, receiver: Receiver<EitherFrame>, sender: Sender<EitherFrame>, config: &Configuration, @@ -86,6 +91,7 @@ impl JobDeclaratorDownstream { .expect("Invalid coinbase output in config"); Self { + async_mining_allowed, receiver, sender, coinbase_output, @@ -175,8 +181,10 @@ impl JobDeclaratorDownstream { for transaction in job_transactions { match transaction { TransactionState::PresentInMempool(txid) => known_transactions.push(txid), - TransactionState::Missing => continue, - }; + TransactionState::Missing => { + continue; + } + } } known_transactions } @@ -230,26 +238,26 @@ impl JobDeclaratorDownstream { Ok(SendTo::Respond(m)) => { match m { JobDeclaration::AllocateMiningJobToken(_) => { - error!("Send unexpected message: AMJT") + error!("Send unexpected message: AMJT"); } JobDeclaration::AllocateMiningJobTokenSuccess(_) => { - debug!("Send message: AMJTS") + debug!("Send message: AMJTS"); } JobDeclaration::DeclareMiningJob(_) => { error!("Send unexpected message: DMJ"); } JobDeclaration::DeclareMiningJobError(_) => { - debug!("Send nmessage: DMJE") + debug!("Send message: DMJE"); } JobDeclaration::DeclareMiningJobSuccess(_) => { debug!("Send message: DMJS. Updating the JDS mempool."); Self::send_txs_to_mempool(self_mutex.clone()).await; } JobDeclaration::IdentifyTransactions(_) => { - debug!("Send message: IT") + debug!("Send message: IT"); } JobDeclaration::IdentifyTransactionsSuccess(_) => { - error!("Send unexpected message: ITS") + error!("Send unexpected message: ITS"); } JobDeclaration::ProvideMissingTransactions(_) => { debug!("Send message: PMT. Updating the JDS mempool."); @@ -266,10 +274,17 @@ impl JobDeclaratorDownstream { error!("JD Server: unexpected relay new message {:?}", message); } Ok(SendTo::RelayNewMessageToRemote(remote, message)) => { - error!("JD Server: unexpected relay new message to remote. Remote: {:?}, Message: {:?}", remote, message); + error!( + "JD Server: unexpected relay new message to remote. Remote: {:?}, Message: {:?}", + remote, + message + ); } Ok(SendTo::RelaySameMessageToRemote(remote)) => { - error!("JD Server: unexpected relay same message to remote. Remote: {:?}", remote); + error!( + "JD Server: unexpected relay same message to remote. Remote: {:?}", + remote + ); } Ok(SendTo::Multiple(multiple)) => { error!("JD Server: unexpected multiple messages: {:?}", multiple); @@ -279,7 +294,9 @@ Some(JobDeclaration::SubmitSolution(message)) => { match Self::collect_txs_in_job(self_mutex.clone()) { Ok(_) => { - info!("All transactions in downstream job are recognized correctly by the JD Server"); + info!( + "All transactions in downstream job are recognized correctly by the JD Server" + ); let hexdata = match JobDeclaratorDownstream::get_block_hex( self_mutex.clone(), message.clone(), ) { Ok(inner) => inner, Err(e) => { error!( - "Received solution but encountered error: {:?}", - e - ); + "Received solution but encountered error: {:?}", + e + ); recv.close(); //TODO should we break it? break; } }; @@ -303,7 +320,7 @@ // TODO print here the ip of the downstream let known_transactions = JobDeclaratorDownstream::get_transactions_in_job( - self_mutex.clone(), + self_mutex.clone() ); let retrieve_transactions = AddTrasactionsToMempoolInner { @@ -316,25 +333,23 @@ .unwrap(); tokio::select! { _ = JDsMempool::add_tx_data_to_mempool(mempool, retrieve_transactions) => { - let hexdata = match JobDeclaratorDownstream::get_block_hex( + match JobDeclaratorDownstream::get_block_hex( self_mutex.clone(), message.clone(), ) { - Ok(inner) => inner, + Ok(hexdata) => { + let _ = new_block_sender.send(hexdata).await; + }, Err(e) => { - error!( - "Error retrieving transactions: {:?}", - e + handle_result!( + tx_status, + Err(*e) ); - recv.close(); - //TODO should we brake it? - break; } }; - let _ = new_block_sender.send(hexdata).await; } _ = tokio::time::sleep(Duration::from_secs(60)) => {} - }; + } } }; } @@ -437,54 +452,95 @@ impl JobDeclarator { new_block_sender: Sender, sender_add_txs_to_mempool: Sender, ) { - let listner = TcpListener::bind(&config.listen_jd_address).await.unwrap(); - while let Ok((stream, _)) = listner.accept().await { + let listener = TcpListener::bind(&config.listen_jd_address).await.unwrap(); + + while let Ok((stream, _)) = listener.accept().await { let responder = Responder::from_authority_kp( &config.authority_public_key.into_bytes(), &config.authority_secret_key.into_bytes(), std::time::Duration::from_secs(config.cert_validity_sec), ) .unwrap(); + let addr = stream.peer_addr(); if let Ok((receiver, sender, _, _)) = Connection::new(stream, HandshakeRole::Responder(responder)).await { - let setup_message_from_proxy_jd = receiver.recv().await.unwrap(); - info!( - "Setup connection message from proxy: {:?}", - setup_message_from_proxy_jd - ); + match receiver.recv().await { + Ok(EitherFrame::Sv2(mut sv2_message)) => { + debug!("Received SV2 message: {:?}", sv2_message); + let payload = sv2_message.payload(); - let setup_connection_success_to_proxy = SetupConnectionSuccess { - used_version: 2, - // Setup flags for async_mining_allowed - flags: 0b_0000_0000_0000_0000_0000_0000_0000_0001, - }; - let sv2_frame: StdFrame = - JdsMessages::Common(setup_connection_success_to_proxy.into()) - .try_into() - .unwrap(); - let sv2_frame = sv2_frame.into(); - info!("Sending success message for proxy"); - sender.send(sv2_frame).await.unwrap(); + if let Ok(setup_connection) = + binary_sv2::from_bytes::<SetupConnection>(payload) + { + let flag = setup_connection.flags; + let is_valid = SetupConnection::check_flags( + Protocol::JobDeclarationProtocol, + config.async_mining_allowed as u32, + flag, + ); - let jddownstream =
Arc::new(Mutex::new(JobDeclaratorDownstream::new( - receiver.clone(), - sender.clone(), - &config, - mempool.clone(), - // each downstream has its own sender (multi producer single consumer) - sender_add_txs_to_mempool.clone(), - ))); + if is_valid { + let success_message = SetupConnectionSuccess { + used_version: 2, + flags: (setup_connection.flags & 1u32), + }; + info!("Sending success message for proxy"); + let sv2_frame: StdFrame = JdsMessages::Common(success_message.into()) + .try_into() + .expect("Failed to convert setup connection response message to standard frame"); - JobDeclaratorDownstream::start( - jddownstream, - status_tx.clone(), - new_block_sender.clone(), - ); + sender.send(sv2_frame.into()).await.unwrap(); + + let jddownstream = + Arc::new(Mutex::new(JobDeclaratorDownstream::new( + (setup_connection.flags & 1u32) != 0u32, // this takes a bool instead of u32 + receiver.clone(), + sender.clone(), + &config, + mempool.clone(), + sender_add_txs_to_mempool.clone(), // each downstream has its own sender (multi producer single consumer) + ))); + + JobDeclaratorDownstream::start( + jddownstream, + status_tx.clone(), + new_block_sender.clone(), + ); + } else { + let error_message = SetupConnectionError { + flags: flag, + error_code: "unsupported-feature-flags" + .to_string() + .into_bytes() + .try_into() + .unwrap(), + }; + info!("Sending error message for proxy"); + let sv2_frame: StdFrame = JdsMessages::Common(error_message.into()) + .try_into() + .expect("Failed to convert setup connection response message to standard frame"); + + sender.send(sv2_frame.into()).await.unwrap(); + } + } else { + error!("Error parsing SetupConnection message"); + } + } + Ok(EitherFrame::HandShake(handshake_message)) => { + error!( + "Unexpected handshake message from upstream: {:?} at {:?}", + handshake_message, addr + ); + } + Err(e) => { + error!("Error receiving message: {:?}", e); + } + } } else { - error!("Can not connect {:?}", addr); + error!("Cannot connect to {:?}", addr); } } } diff --git a/roles/jd-server/src/lib/mod.rs b/roles/jd-server/src/lib/mod.rs index a76c80cf1..07f7c6603 100644 --- a/roles/jd-server/src/lib/mod.rs +++ b/roles/jd-server/src/lib/mod.rs @@ -57,6 +57,8 @@ pub struct CoinbaseOutput { #[derive(Debug, Deserialize, Clone)] pub struct Configuration { + #[serde(default = "default_true")] + pub async_mining_allowed: bool, pub listen_jd_address: String, pub authority_public_key: Secp256k1PublicKey, pub authority_secret_key: Secp256k1SecretKey, @@ -70,6 +72,10 @@ pub struct Configuration { pub mempool_update_interval: Duration, } +fn default_true() -> bool { + true +} + fn duration_from_toml<'de, D>(deserializer: D) -> Result where D: serde::Deserializer<'de>, @@ -98,3 +104,81 @@ where _ => Err(serde::de::Error::custom("Unsupported duration unit")), } } +#[cfg(test)] +mod tests { + use ext_config::{Config, File, FileFormat}; + use std::path::PathBuf; + + use super::*; + + fn load_config(path: &str) -> Configuration { + let config_path = PathBuf::from(path); + assert!( + config_path.exists(), + "No config file found at {:?}", + config_path + ); + + let config_path = config_path.to_str().unwrap(); + + let settings = Config::builder() + .add_source(File::new(&config_path, FileFormat::Toml)) + .build() + .expect("Failed to build config"); + + settings.try_deserialize().expect("Failed to parse config") + } + + #[test] + fn test_get_coinbase_output_non_empty() { + let config = load_config("config-examples/jds-config-hosted-example.toml"); + let outputs = 
get_coinbase_output(&config).expect("Failed to get coinbase output"); + + let expected_output = CoinbaseOutput_ { + output_script_type: "P2WPKH".to_string(), + output_script_value: + "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075".to_string(), + }; + let expected_script: Script = expected_output.try_into().unwrap(); + let expected_transaction_output = TxOut { + value: 0, + script_pubkey: expected_script, + }; + + assert_eq!(outputs[0], expected_transaction_output); + } + + #[test] + fn test_get_coinbase_output_empty() { + let mut config = load_config("config-examples/jds-config-hosted-example.toml"); + config.coinbase_outputs.clear(); + + let result = get_coinbase_output(&config); + assert!( + matches!(result, Err(Error::EmptyCoinbaseOutputs)), + "Expected an error for empty coinbase outputs" + ); + } + + #[test] + fn test_try_from_valid_input() { + let input = CoinbaseOutput { + output_script_type: "P2PKH".to_string(), + output_script_value: + "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075".to_string(), + }; + let result: Result = (&input).try_into(); + assert!(result.is_ok()); + } + + #[test] + fn test_try_from_invalid_input() { + let input = CoinbaseOutput { + output_script_type: "INVALID".to_string(), + output_script_value: + "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075".to_string(), + }; + let result: Result = (&input).try_into(); + assert!(matches!(result, Err(Error::UnknownOutputScriptType))); + } +} diff --git a/roles/jd-server/src/lib/status.rs b/roles/jd-server/src/lib/status.rs index 83a50026f..fe9981617 100644 --- a/roles/jd-server/src/lib/status.rs +++ b/roles/jd-server/src/lib/status.rs @@ -36,7 +36,7 @@ pub struct Status { pub state: State, } -/// this function is used to discern which componnent experienced the event. +/// this function is used to discern which component experienced the event. 
/// With this knowledge we can wrap the status message with information (`State` variants) so /// the main status loop can decide what should happen async fn send_status( @@ -129,3 +129,282 @@ pub async fn handle_error(sender: &Sender, e: JdsError) -> error_handling::Error } } } + +#[cfg(test)] +mod tests { + use std::{convert::TryInto, io::Error}; + + use super::*; + use async_channel::{bounded, RecvError}; + use roles_logic_sv2::mining_sv2::OpenMiningChannelError; + + #[tokio::test] + async fn test_send_status_downstream_listener_shutdown() { + let (tx, rx) = bounded(1); + let sender = Sender::DownstreamListener(tx); + let error = JdsError::ChannelRecv(async_channel::RecvError); + + send_status(&sender, error, error_handling::ErrorBranch::Continue).await; + match rx.recv().await { + Ok(status) => match status.state { + State::DownstreamShutdown(e) => { + assert_eq!(e.to_string(), "Channel recv failed: `RecvError`") + } + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_send_status_upstream_shutdown() { + let (tx, rx) = bounded(1); + let sender = Sender::Upstream(tx); + let error = JdsError::MempoolError(crate::mempool::error::JdsMempoolError::EmptyMempool); + let error_string = error.to_string(); + send_status(&sender, error, error_handling::ErrorBranch::Continue).await; + + match rx.recv().await { + Ok(status) => match status.state { + State::TemplateProviderShutdown(e) => assert_eq!(e.to_string(), error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_io_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::Io(Error::new(std::io::ErrorKind::Interrupted, "IO error")); + let error_string = error.to_string(); + + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_channel_send_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::ChannelSend(Box::new("error")); + let error_string = error.to_string(); + + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_channel_receive_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::ChannelRecv(RecvError); + let error_string = error.to_string(); + + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::DownstreamShutdown(e) => assert_eq!(e.to_string(), error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_binary_sv2_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::BinarySv2(binary_sv2::Error::IoError); + let error_string = error.to_string(); + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ 
=> panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_codec_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::Codec(codec_sv2::Error::InvalidStepForInitiator); + let error_string = error.to_string(); + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_noise_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::Noise(noise_sv2::Error::HandshakeNotFinalized); + let error_string = error.to_string(); + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_roles_logic_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::RolesLogic(roles_logic_sv2::Error::BadPayloadSize); + let error_string = error.to_string(); + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_custom_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::Custom("error".to_string()); + let error_string = error.to_string(); + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_framing_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::Framing(codec_sv2::framing_sv2::Error::ExpectedHandshakeFrame); + let error_string = error.to_string(); + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_poison_lock_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::PoisonLock("error".to_string()); + let error_string = error.to_string(); + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_impossible_to_reconstruct_block_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::ImpossibleToReconstructBlock("Impossible".to_string()); + let error_string = error.to_string(); + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ => 
panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_no_last_declared_job_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::NoLastDeclaredJob; + let error_string = error.to_string(); + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::Healthy(e) => assert_eq!(e, error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_last_mempool_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let error = JdsError::MempoolError(crate::mempool::error::JdsMempoolError::EmptyMempool); + let error_string = error.to_string(); + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::TemplateProviderShutdown(e) => assert_eq!(e.to_string(), error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } + + #[tokio::test] + async fn test_handle_error_sv2_protocol_error() { + let (tx, rx) = bounded(1); + let sender = Sender::Downstream(tx); + let inner: [u8; 32] = rand::random(); + let value = inner.to_vec().try_into().unwrap(); + let error = JdsError::Sv2ProtocolError(( + 12, + Mining::OpenMiningChannelError(OpenMiningChannelError { + request_id: 1, + error_code: value, + }), + )); + let error_string = "12"; + handle_error(&sender, error).await; + match rx.recv().await { + Ok(status) => match status.state { + State::DownstreamInstanceDropped(e) => assert_eq!(e.to_string(), error_string), + _ => panic!("Unexpected state received"), + }, + Err(_) => panic!("Failed to receive status"), + } + } +} diff --git a/roles/jd-server/src/main.rs b/roles/jd-server/src/main.rs index 9d56b491f..ac030445a 100644 --- a/roles/jd-server/src/main.rs +++ b/roles/jd-server/src/main.rs @@ -11,6 +11,7 @@ use tokio::{select, task}; use tracing::{error, info, warn}; mod lib; +use ext_config::{Config, File, FileFormat}; use lib::job_declarator::JobDeclarator; mod args { @@ -87,17 +88,22 @@ async fn main() { } }; + let config_path = args.config_path.to_str().expect("Invalid config path"); + // Load config - let config: Configuration = match std::fs::read_to_string(&args.config_path) { - Ok(c) => match toml::from_str(&c) { + let config: Configuration = match Config::builder() + .add_source(File::new(config_path, FileFormat::Toml)) + .build() + { + Ok(settings) => match settings.try_deserialize::() { Ok(c) => c, Err(e) => { - error!("Failed to parse config: {}", e); + error!("Failed to deserialize config: {}", e); return; } }, Err(e) => { - error!("Failed to read config: {}", e); + error!("Failed to build config: {}", e); return; } }; diff --git a/roles/mining-proxy/Cargo.toml b/roles/mining-proxy/Cargo.toml index 951645fde..8ffb5d095 100644 --- a/roles/mining-proxy/Cargo.toml +++ b/roles/mining-proxy/Cargo.toml @@ -18,14 +18,14 @@ async-recursion = "0.3.2" binary_sv2 = { version = "^1.0.0", path = "../../protocols/v2/binary-sv2/binary-sv2" } buffer_sv2 = { version = "^1.0.0", path = "../../utils/buffer" } codec_sv2 = { version = "^1.0.1", path = "../../protocols/v2/codec-sv2", features = ["noise_sv2", "with_buffer_pool"] } -const_sv2 = { version = "^1.0.0", path = "../../protocols/v2/const-sv2" } +const_sv2 = { version = "^2.0.0", path = "../../protocols/v2/const-sv2" } futures = "0.3.19" 
network_helpers_sv2 = {version = "2.0.0", path = "../roles-utils/network-helpers", features = ["with_tokio","with_buffer_pool"] } once_cell = "1.12.0" roles_logic_sv2 = { version = "^1.0.0", path = "../../protocols/v2/roles-logic-sv2" } serde = { version = "1.0.89", features = ["derive", "alloc"], default-features = false } tokio = { version = "1", features = ["full"] } -toml = { version = "0.5.6", git = "https://github.com/diondokter/toml-rs", default-features = false, rev = "c4161aa" } +ext-config = { version = "0.14.0", features = ["toml"], package = "config" } tracing = {version = "0.1"} tracing-subscriber = {version = "0.3"} nohash-hasher = "0.2.0" diff --git a/roles/mining-proxy/src/lib/downstream_mining.rs b/roles/mining-proxy/src/lib/downstream_mining.rs index d4e9bcb17..da8901c49 100644 --- a/roles/mining-proxy/src/lib/downstream_mining.rs +++ b/roles/mining-proxy/src/lib/downstream_mining.rs @@ -1,7 +1,11 @@ -#![allow(dead_code)] +use std::{convert::TryInto, sync::Arc}; -use super::upstream_mining::{StdFrame as UpstreamFrame, UpstreamMiningNode}; use async_channel::{Receiver, SendError, Sender}; +use tokio::{net::TcpListener, sync::oneshot::Receiver as TokioReceiver}; +use tracing::{info, warn}; + +use codec_sv2::{StandardEitherFrame, StandardSv2Frame}; +use network_helpers_sv2::plain_connection_tokio::PlainConnection; use roles_logic_sv2::{ common_messages_sv2::{SetupConnection, SetupConnectionSuccess}, common_properties::{CommonDownstreamData, IsDownstream, IsMiningDownstream}, @@ -15,9 +19,8 @@ use roles_logic_sv2::{ routing_logic::MiningProxyRoutingLogic, utils::Mutex, }; -use tracing::info; +use super::upstream_mining::{ProxyRemoteSelector, StdFrame as UpstreamFrame, UpstreamMiningNode}; pub type Message = MiningDeviceMessages<'static>; pub type StdFrame = StandardSv2Frame<Message>; @@ -25,15 +28,14 @@ pub type EitherFrame = StandardEitherFrame<Message>; /// 1 to 1 connection with a downstream node that implements the mining (sub)protocol; it can be /// either a mining device or a downstream proxy. -/// A downstream can only be linked with an upstream at a time. Support multi upstrems for -/// downstream do no make much sense. +/// A downstream can only be linked with one upstream at a time. Supporting multiple upstreams +/// per downstream does not make much sense.
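The pairing rule in that comment is enforced by the status type defined next: a downstream starts out initializing, becomes paired after a successful SetupConnection, and may then open exactly one header-only-mining channel. A minimal sketch of that state machine, using simplified hypothetical names rather than the crate's actual types:

```rust
// Illustrative sketch only; the real DownstreamMiningNodeStatus variants also
// carry CommonDownstreamData and channel/group ids.
#[derive(Debug)]
enum Status {
    Initializing,
    Paired,
    ChannelOpened,
}

impl Status {
    // Mirrors open_channel_for_down_hom_up_*: only a paired downstream may
    // open a channel, and it may do so at most once.
    fn open_channel(&mut self) {
        match self {
            Status::Initializing => panic!("downstream is not paired yet"),
            Status::Paired => *self = Status::ChannelOpened,
            Status::ChannelOpened => panic!("Channel already opened"),
        }
    }
}
```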
#[derive(Debug)] pub struct DownstreamMiningNode { id: u32, receiver: Receiver, sender: Sender, pub status: DownstreamMiningNodeStatus, - pub prev_job_id: Option, upstream: Option>>, } @@ -47,22 +49,14 @@ pub enum DownstreamMiningNodeStatus { #[derive(Debug, Clone)] #[allow(clippy::enum_variant_names)] pub enum Channel { - DowntreamHomUpstreamGroup { + DownstreamHomUpstreamGroup { data: CommonDownstreamData, channel_id: u32, group_id: u32, }, - DowntreamHomUpstreamExtended { + DownstreamHomUpstreamExtended { data: CommonDownstreamData, channel_id: u32, - group_id: u32, - }, - // Below variant is not supported cause do not have much sense - // DowntreamNonHomUpstreamGroup { data: CommonDownstreamData, group_ids: Vec, extended_ids: Vec}, - DowntreamNonHomUpstreamExtended { - data: CommonDownstreamData, - group_ids: Vec, - extended_ids: Vec, }, } @@ -101,7 +95,7 @@ impl DownstreamMiningNodeStatus { match self { DownstreamMiningNodeStatus::Initializing => panic!(), DownstreamMiningNodeStatus::Paired(data) => { - let channel = Channel::DowntreamHomUpstreamGroup { + let channel = Channel::DownstreamHomUpstreamGroup { data: *data, channel_id, group_id, @@ -113,14 +107,13 @@ impl DownstreamMiningNodeStatus { } } - fn open_channel_for_down_hom_up_extended(&mut self, channel_id: u32, group_id: u32) { + fn open_channel_for_down_hom_up_extended(&mut self, channel_id: u32, _group_id: u32) { match self { DownstreamMiningNodeStatus::Initializing => panic!(), DownstreamMiningNodeStatus::Paired(data) => { - let channel = Channel::DowntreamHomUpstreamExtended { + let channel = Channel::DownstreamHomUpstreamExtended { data: *data, channel_id, - group_id, }; let self_ = Self::ChannelOpened(channel); let _ = std::mem::replace(self, self_); @@ -128,35 +121,8 @@ impl DownstreamMiningNodeStatus { DownstreamMiningNodeStatus::ChannelOpened(..) => panic!("Channel already opened"), } } - - fn add_extended_from_non_hom_for_up_extended(&mut self, id: u32) { - match self { - DownstreamMiningNodeStatus::Initializing => panic!(), - DownstreamMiningNodeStatus::Paired(data) => { - let channel = Channel::DowntreamNonHomUpstreamExtended { - data: *data, - group_ids: vec![], - extended_ids: vec![id], - }; - let self_ = Self::ChannelOpened(channel); - let _ = std::mem::replace(self, self_); - } - DownstreamMiningNodeStatus::ChannelOpened( - Channel::DowntreamNonHomUpstreamExtended { extended_ids, .. 
}, - ) => { - if !extended_ids.contains(&id) { - extended_ids.push(id) - } - } - _ => panic!(), - } - } } -use core::convert::TryInto; -use std::sync::Arc; -use tokio::task; - impl PartialEq for DownstreamMiningNode { fn eq(&self, other: &Self) -> bool { self.id == other.id @@ -177,16 +143,12 @@ impl DownstreamMiningNode { self.status .open_channel_for_down_hom_up_extended(channel_id, group_id); } - pub fn add_extended_from_non_hom_for_up_extended(&mut self, id: u32) { - self.status.add_extended_from_non_hom_for_up_extended(id); - } pub fn new(receiver: Receiver<EitherFrame>, sender: Sender<EitherFrame>, id: u32) -> Self { Self { receiver, sender, status: DownstreamMiningNodeStatus::Initializing, - prev_job_id: None, upstream: None, id, } @@ -316,7 +278,7 @@ impl DownstreamMiningNode { pub fn exit(self_: Arc<Mutex<Self>>) { if let Some(up) = self_.safe_lock(|s| s.upstream.clone()).unwrap() { - super::upstream_mining::UpstreamMiningNode::remove_dowstream(up, &self_); + UpstreamMiningNode::remove_dowstream(up, &self_); }; self_ .safe_lock(|s| { @@ -326,8 +288,6 @@ impl DownstreamMiningNode { } } -use super::upstream_mining::ProxyRemoteSelector; - /// It impls UpstreamMining because the proxy acts as an upstream node for the DownstreamMiningNode impl ParseDownstreamMiningMessages< @@ -414,14 +374,14 @@ impl match &self.status { DownstreamMiningNodeStatus::Initializing => todo!(), DownstreamMiningNodeStatus::Paired(_) => todo!(), - DownstreamMiningNodeStatus::ChannelOpened(Channel::DowntreamHomUpstreamGroup { + DownstreamMiningNodeStatus::ChannelOpened(Channel::DownstreamHomUpstreamGroup { .. }) => { let remote = self.upstream.as_ref().unwrap(); let message = Mining::SubmitSharesStandard(m); Ok(SendTo::RelayNewMessageToRemote(remote.clone(), message)) } - DownstreamMiningNodeStatus::ChannelOpened(Channel::DowntreamHomUpstreamExtended { + DownstreamMiningNodeStatus::ChannelOpened(Channel::DownstreamHomUpstreamExtended { .. }) => { // Safe unwrap: if the channel has been opened it means that the downstream is paired @@ -430,12 +390,6 @@ impl let res = UpstreamMiningNode::handle_std_shr(remote.clone(), m).unwrap(); Ok(SendTo::Respond(res)) } - DownstreamMiningNodeStatus::ChannelOpened( - Channel::DowntreamNonHomUpstreamExtended { .. }, - ) => { - // unreachable cause the proxy do not support this kind of channel - unreachable!(); - } } } @@ -483,44 +437,48 @@ impl } } -use network_helpers_sv2::plain_connection_tokio::PlainConnection; -use std::net::SocketAddr; -use tokio::net::TcpListener; - -pub async fn listen_for_downstream_mining(address: SocketAddr) { - info!("Listening for downstream mining connections on {}", address); - let listner = TcpListener::bind(address).await.unwrap(); +pub async fn listen_for_downstream_mining( + listener: TcpListener, + mut shutdown_rx: TokioReceiver<()>, +) { let mut ids = roles_logic_sv2::utils::Id::new(); - - while let Ok((stream, _)) = listner.accept().await { - let (receiver, sender): (Receiver<EitherFrame>, Sender<EitherFrame>) = - PlainConnection::new(stream).await; - let node = DownstreamMiningNode::new(receiver, sender, ids.next()); - - task::spawn(async move { - let mut incoming: StdFrame = node.receiver.recv().await.unwrap().try_into().unwrap(); - let message_type = incoming.get_header().unwrap().msg_type(); - let payload = incoming.payload(); - let routing_logic = super::get_common_routing_logic(); - let node = Arc::new(Mutex::new(node)); - - // Call handle_setup_connection or fail - match DownstreamMiningNode::handle_message_common( - node.clone(), - message_type, - payload, - routing_logic, - ) { - Ok(SendToCommon::RelayNewMessageToRemote(_, message)) => { - let message = match message { - roles_logic_sv2::parsers::CommonMessages::SetupConnectionSuccess(m) => m, - _ => panic!(), - }; - DownstreamMiningNode::start(node, message).await + loop { + tokio::select! { + accept_result = listener.accept() => { + let (stream, _) = accept_result.expect("failed to accept downstream connection"); + let (receiver, sender): (Receiver<EitherFrame>, Sender<EitherFrame>) = + PlainConnection::new(stream).await; + let node = DownstreamMiningNode::new(receiver, sender, ids.next()); + + let mut incoming: StdFrame = + node.receiver.recv().await.unwrap().try_into().unwrap(); + let message_type = incoming.get_header().unwrap().msg_type(); + let payload = incoming.payload(); + let routing_logic = super::get_common_routing_logic(); + let node = Arc::new(Mutex::new(node)); + + // Call handle_setup_connection or fail + let common_msg = DownstreamMiningNode::handle_message_common( + node.clone(), + message_type, + payload, + routing_logic + ).expect("failed to process downstream message"); + + + if let SendToCommon::RelayNewMessageToRemote(_, relay_msg) = common_msg { + if let roles_logic_sv2::parsers::CommonMessages::SetupConnectionSuccess(setup_msg) = relay_msg { + DownstreamMiningNode::start(node, setup_msg).await; + } + } else { + warn!("Received unexpected message from downstream"); } - _ => panic!(), } - }); + _ = &mut shutdown_rx => { + info!("Closing listener"); + return; + } + } } } @@ -529,14 +487,11 @@ impl IsDownstream for DownstreamMiningNode { match self.status { DownstreamMiningNodeStatus::Initializing => panic!(), DownstreamMiningNodeStatus::Paired(data) => data, - DownstreamMiningNodeStatus::ChannelOpened(Channel::DowntreamHomUpstreamGroup { + DownstreamMiningNodeStatus::ChannelOpened(Channel::DownstreamHomUpstreamGroup { data, .. }) => data, - DownstreamMiningNodeStatus::ChannelOpened( - Channel::DowntreamNonHomUpstreamExtended { data, .. }, - ) => data, - DownstreamMiningNodeStatus::ChannelOpened(Channel::DowntreamHomUpstreamExtended { + DownstreamMiningNodeStatus::ChannelOpened(Channel::DownstreamHomUpstreamExtended { data, .. }) => data, diff --git a/roles/mining-proxy/src/lib/mod.rs b/roles/mining-proxy/src/lib/mod.rs index b9a9fd2f8..8c7a8563d 100644 --- a/roles/mining-proxy/src/lib/mod.rs +++ b/roles/mining-proxy/src/lib/mod.rs @@ -97,7 +97,7 @@ pub enum ChannelKind { } #[derive(Debug, Deserialize, Clone)] -pub struct Config { +pub struct Configuration { pub upstreams: Vec<UpstreamMiningValues>, pub listen_address: String, pub listen_mining_port: u16, @@ -110,7 +110,7 @@ pub async fn initialize_r_logic( upstreams: &[UpstreamMiningValues], group_id: Arc<Mutex<GroupId>>, - config: Config, + config: Configuration, ) -> RLogic { let channel_ids = Arc::new(Mutex::new(Id::new())); let mut upstream_mining_nodes = Vec::with_capacity(upstreams.len()); diff --git a/roles/mining-proxy/src/lib/upstream_mining.rs b/roles/mining-proxy/src/lib/upstream_mining.rs index 61a5d0f31..5ad012d10 100644 --- a/roles/mining-proxy/src/lib/upstream_mining.rs +++ b/roles/mining-proxy/src/lib/upstream_mining.rs @@ -1,14 +1,16 @@ #![allow(dead_code)] -use super::EXTRANONCE_RANGE_1_LENGTH; -use roles_logic_sv2::utils::Id; +use core::convert::TryInto; +use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration}; -use super::downstream_mining::{Channel, DownstreamMiningNode, StdFrame as DownstreamFrame}; use async_channel::{Receiver, SendError, Sender}; use async_recursion::async_recursion; -use codec_sv2::{Frame, HandshakeRole, Initiator, StandardEitherFrame, StandardSv2Frame}; -use network_helpers_sv2::noise_connection_tokio::Connection; use nohash_hasher::BuildNoHashHasher; +use tokio::{net::TcpStream, task}; +use tracing::{debug, error, info}; + +use codec_sv2::{HandshakeRole, Initiator, StandardEitherFrame, StandardSv2Frame}; +use network_helpers_sv2::noise_connection_tokio::Connection; use roles_logic_sv2::{ channel_logic::{ channel_factory::{ExtendedChannelKind, OnNewShare, ProxyExtendedChannelFactory, Share}, @@ -26,14 +28,15 @@ use roles_logic_sv2::{ routing_logic::MiningProxyRoutingLogic, selectors::{DownstreamMiningSelector, ProxyDownstreamMiningSelector as Prs}, template_distribution_sv2::SubmitSolution, - utils::{GroupId, Mutex}, + utils::{GroupId, Id, Mutex}, }; -use std::{collections::HashMap, sync::Arc}; -use tokio::{net::TcpStream, task}; -use tracing::error; - use stratum_common::bitcoin::TxOut; +use super::{ + downstream_mining::{Channel, DownstreamMiningNode, StdFrame as DownstreamFrame}, + EXTRANONCE_RANGE_1_LENGTH, +}; + pub type Message = PoolMessages<'static>; pub type StdFrame = StandardSv2Frame<Message>; pub type EitherFrame = StandardEitherFrame<Message>; @@ -188,10 +191,6 @@ pub struct UpstreamMiningNode { reconnect: bool, } -use core::convert::TryInto; -use std::{net::SocketAddr, time::Duration}; -use tracing::{debug, info}; - /// It assumes that the endpoint NEVER changes flags and version! /// It can open both extended and group channels with upstream. impl UpstreamMiningNode { @@ -471,11 +470,10 @@ impl UpstreamMiningNode { super::downstream_mining::DownstreamMiningNodeStatus::ChannelOpened( channel, ) => match channel { - Channel::DowntreamHomUpstreamGroup { channel_id, .. } => Some(*channel_id), - Channel::DowntreamHomUpstreamExtended { channel_id, .. } => { + Channel::DownstreamHomUpstreamGroup { channel_id, .. } => Some(*channel_id), + Channel::DownstreamHomUpstreamExtended { channel_id, .. } => { Some(*channel_id) } - Channel::DowntreamNonHomUpstreamExtended { ..
} => todo!(), }, }) .unwrap() @@ -1048,7 +1046,7 @@ impl .ok_or(Error::NoDownstreamsConnected)?; for downstream in downstreams { match downstream.safe_lock(|r| r.get_channel().clone()).unwrap() { - Channel::DowntreamHomUpstreamGroup { + Channel::DownstreamHomUpstreamGroup { channel_id, group_id, .. @@ -1257,9 +1255,10 @@ impl IsMiningUpstream for UpstreamMin #[cfg(test)] mod tests { - use super::*; use std::net::{IpAddr, Ipv4Addr}; + use super::*; + #[test] fn new_upstream_minining_node() { let id = 0; diff --git a/roles/mining-proxy/src/main.rs b/roles/mining-proxy/src/main.rs index 0725c189b..575098fe7 100644 --- a/roles/mining-proxy/src/main.rs +++ b/roles/mining-proxy/src/main.rs @@ -3,7 +3,7 @@ //! Downstream means another proxy or a mining device //! //! UpstreamMining is the trait that a proxy must implement in order to -//! understant Downstream mining messages. +//! understand Downstream mining messages. //! //! DownstreamMining is the trait that a proxy must implement in order to //! understand Upstream mining messages @@ -18,13 +18,17 @@ //! A Downstream that signal the incapacity to handle group channels can open only one channel. //! #![allow(special_module_name)] -mod lib; - -use lib::Config; -use roles_logic_sv2::utils::{GroupId, Mutex}; use std::{net::SocketAddr, sync::Arc}; + +use tokio::{net::TcpListener, sync::oneshot}; use tracing::{error, info}; +use ext_config::{Config, File, FileFormat}; +use lib::Configuration; +use roles_logic_sv2::utils::{GroupId, Mutex}; + +mod lib; + mod args { use std::path::PathBuf; @@ -89,12 +93,12 @@ mod args { } /// 1. the proxy scan all the upstreams and map them -/// 2. donwstream open a connetcion with proxy +/// 2. downstream open a connection with proxy /// 3. downstream send SetupConnection -/// 4. a mining_channle::Upstream is created +/// 4. a mining_channels::Upstream is created /// 5. upstream_mining::UpstreamMiningNodes is used to pair this downstream with the most suitable /// upstream -/// 6. mining_channle::Upstream create a new downstream_mining::DownstreamMiningNode embedding +/// 6. mining_channels::Upstream create a new downstream_mining::DownstreamMiningNode embedding /// itself in it /// 7. 
normal operation between the paired downstream_mining::DownstreamMiningNode and /// upstream_mining::UpstreamMiningNode begin @@ -109,13 +113,21 @@ async fn main() { } }; - // Scan all the upstreams and map them - let config_file = std::fs::read_to_string(args.config_path.clone()) - .unwrap_or_else(|_| panic!("Can not open {:?}", args.config_path)); - let config = match toml::from_str::(&config_file) { - Ok(cfg) => cfg, + let config_path = args.config_path.to_str().expect("Invalid config path"); + + let config: Configuration = match Config::builder() + .add_source(File::new(config_path, FileFormat::Toml)) + .build() + { + Ok(settings) => match settings.try_deserialize::() { + Ok(c) => c, + Err(e) => { + error!("Failed to deserialize config: {}", e); + return; + } + }, Err(e) => { - error!("Failed to parse config file: {}", e); + error!("Failed to build config: {}", e); return; } }; @@ -126,16 +138,37 @@ async fn main() { lib::initialize_r_logic(&config.upstreams, group_id, config.clone()).await, )) .expect("BUG: Failed to set ROUTING_LOGIC"); - info!("PROXY INITIALIZING"); + + info!("Initializing upstream scanner"); lib::initialize_upstreams(config.min_supported_version, config.max_supported_version).await; - info!("PROXY INITIALIZED"); + info!("Initializing downstream listener"); - // Wait for downstream connection let socket = SocketAddr::new( config.listen_address.parse().unwrap(), config.listen_mining_port, ); + let listener = TcpListener::bind(socket).await.unwrap(); + + info!("Listening for downstream mining connections on {}", socket); + + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + + let (_, res) = tokio::join!( + // Wait for downstream connection + lib::downstream_mining::listen_for_downstream_mining(listener, shutdown_rx), + // handle SIGTERM/QUIT / ctrl+c + tokio::spawn(async { + tokio::signal::ctrl_c() + .await + .expect("Failed to listen to signals"); + let _ = shutdown_tx.send(()); + info!("Interrupt received"); + }) + ); + + if let Err(e) = res { + panic!("Failed to wait for clean exit: {:?}", e); + } - info!("PROXY INITIALIZED"); - crate::lib::downstream_mining::listen_for_downstream_mining(socket).await + info!("Shutdown done"); } diff --git a/roles/pool/Cargo.toml b/roles/pool/Cargo.toml index 3b62c74e1..c253e6b3e 100644 --- a/roles/pool/Cargo.toml +++ b/roles/pool/Cargo.toml @@ -16,14 +16,14 @@ async-channel = "1.5.1" binary_sv2 = { version = "^1.0.0", path = "../../protocols/v2/binary-sv2/binary-sv2" } buffer_sv2 = { version = "^1.0.0", path = "../../utils/buffer" } codec_sv2 = { version = "^1.0.1", path = "../../protocols/v2/codec-sv2", features = ["noise_sv2"] } -const_sv2 = { version = "^1.0.0", path = "../../protocols/v2/const-sv2" } +const_sv2 = { version = "^2.0.0", path = "../../protocols/v2/const-sv2" } network_helpers_sv2 = { version = "2.0.0", path = "../roles-utils/network-helpers", features =["with_tokio","with_buffer_pool"] } noise_sv2 = { version = "1.1.0", path = "../../protocols/v2/noise-sv2" } rand = "0.8.4" roles_logic_sv2 = { version = "^1.0.0", path = "../../protocols/v2/roles-logic-sv2" } serde = { version = "1.0.89", features = ["derive", "alloc"], default-features = false } tokio = { version = "1", features = ["full"] } -toml = { version = "0.5.6", git = "https://github.com/diondokter/toml-rs", default-features = false, rev = "c4161aa" } +ext-config = { version = "0.14.0", features = ["toml"], package = "config" } tracing = { version = "0.1" } tracing-subscriber = "0.3" async-recursion = "1.0.0" diff --git 
a/roles/pool/src/lib/mining_pool/mod.rs b/roles/pool/src/lib/mining_pool/mod.rs index e189c5406..0db3f8a5f 100644 --- a/roles/pool/src/lib/mining_pool/mod.rs +++ b/roles/pool/src/lib/mining_pool/mod.rs @@ -4,7 +4,7 @@ use super::{ }; use async_channel::{Receiver, Sender}; use binary_sv2::U256; -use codec_sv2::{Frame, HandshakeRole, Responder, StandardEitherFrame, StandardSv2Frame}; +use codec_sv2::{HandshakeRole, Responder, StandardEitherFrame, StandardSv2Frame}; use error_handling::handle_result; use key_utils::{Secp256k1PublicKey, Secp256k1SecretKey, SignatureService}; use network_helpers_sv2::noise_connection_tokio::Connection; @@ -657,23 +657,41 @@ impl Pool { #[cfg(test)] mod test { use binary_sv2::{B0255, B064K}; + use ext_config::{Config, File, FileFormat}; use std::convert::TryInto; + use tracing::error; use stratum_common::{ bitcoin, bitcoin::{util::psbt::serialize::Serialize, Transaction, Witness}, }; + use super::Configuration; + // this test is used to verify the `coinbase_tx_prefix` and `coinbase_tx_suffix` values tested against in // message generator `stratum/test/message-generator/test/pool-sri-test-extended.json` #[test] fn test_coinbase_outputs_from_config() { + let config_path = "./config-examples/pool-config-local-tp-example.toml"; + // Load config - let config: super::Configuration = toml::from_str( - &std::fs::read_to_string("./config-examples/pool-config-local-tp-example.toml") - .unwrap(), - ) - .unwrap(); + let config: Configuration = match Config::builder() + .add_source(File::new(&config_path, FileFormat::Toml)) + .build() + { + Ok(settings) => match settings.try_deserialize::() { + Ok(c) => c, + Err(e) => { + error!("Failed to deserialize config: {}", e); + return; + } + }, + Err(e) => { + error!("Failed to build config: {}", e); + return; + } + }; + // template from message generator test (mock TP template) let _extranonce_len = 3; let coinbase_prefix = vec![3, 76, 163, 38, 0]; diff --git a/roles/pool/src/lib/mining_pool/setup_connection.rs b/roles/pool/src/lib/mining_pool/setup_connection.rs index cf2c06022..f0c47e9a8 100644 --- a/roles/pool/src/lib/mining_pool/setup_connection.rs +++ b/roles/pool/src/lib/mining_pool/setup_connection.rs @@ -3,7 +3,6 @@ use super::super::{ mining_pool::{EitherFrame, StdFrame}, }; use async_channel::{Receiver, Sender}; -use codec_sv2::Frame; use roles_logic_sv2::{ common_messages_sv2::{ has_requires_std_job, has_version_rolling, has_work_selection, SetupConnection, diff --git a/roles/pool/src/lib/template_receiver/mod.rs b/roles/pool/src/lib/template_receiver/mod.rs index 49d58e82a..2eeaa554f 100644 --- a/roles/pool/src/lib/template_receiver/mod.rs +++ b/roles/pool/src/lib/template_receiver/mod.rs @@ -4,7 +4,7 @@ use super::{ status, }; use async_channel::{Receiver, Sender}; -use codec_sv2::{Frame, HandshakeRole, Initiator}; +use codec_sv2::{HandshakeRole, Initiator}; use error_handling::handle_result; use key_utils::Secp256k1PublicKey; use network_helpers_sv2::noise_connection_tokio::Connection; diff --git a/roles/pool/src/lib/template_receiver/setup_connection.rs b/roles/pool/src/lib/template_receiver/setup_connection.rs index 60c3cb4f8..6687eadc6 100644 --- a/roles/pool/src/lib/template_receiver/setup_connection.rs +++ b/roles/pool/src/lib/template_receiver/setup_connection.rs @@ -3,7 +3,6 @@ use super::super::{ mining_pool::{EitherFrame, StdFrame}, }; use async_channel::{Receiver, Sender}; -use codec_sv2::Frame; use roles_logic_sv2::{ common_messages_sv2::{Protocol, SetupConnection}, errors::Error, diff --git 
a/roles/pool/src/main.rs b/roles/pool/src/main.rs index 169243c23..55d6e117a 100644 --- a/roles/pool/src/main.rs +++ b/roles/pool/src/main.rs @@ -9,6 +9,7 @@ use lib::{ template_receiver::TemplateRx, }; +use ext_config::{Config, File, FileFormat}; use tokio::select; mod args { @@ -86,17 +87,22 @@ async fn main() { } }; + let config_path = args.config_path.to_str().expect("Invalid config path"); + // Load config - let config: Configuration = match std::fs::read_to_string(&args.config_path) { - Ok(c) => match toml::from_str(&c) { + let config: Configuration = match Config::builder() + .add_source(File::new(config_path, FileFormat::Toml)) + .build() + { + Ok(settings) => match settings.try_deserialize::() { Ok(c) => c, Err(e) => { - error!("Failed to parse config: {}", e); + error!("Failed to deserialize config: {}", e); return; } }, Err(e) => { - error!("Failed to read config: {}", e); + error!("Failed to build config: {}", e); return; } }; diff --git a/roles/roles-utils/network-helpers/Cargo.toml b/roles/roles-utils/network-helpers/Cargo.toml index e2c69806a..b4af8f293 100644 --- a/roles/roles-utils/network-helpers/Cargo.toml +++ b/roles/roles-utils/network-helpers/Cargo.toml @@ -15,7 +15,7 @@ async-channel = { version = "1.8.0", optional = true } tokio = { version = "1", features = ["full"], optional = true } binary_sv2 = { version = "^1.0.0", path = "../../../protocols/v2/binary-sv2/binary-sv2", optional = true } codec_sv2 = { version = "1.0.1", path = "../../../protocols/v2/codec-sv2", features=["noise_sv2"], optional = true } -const_sv2 = {version = "1.0.0", path = "../../../protocols/v2/const-sv2"} +const_sv2 = {version = "2.0.0", path = "../../../protocols/v2/const-sv2"} serde = { version = "1.0.89", features = ["derive"], default-features = false, optional = true } tracing = { version = "0.1" } futures = "0.3.28" diff --git a/roles/test-utils/mining-device/Cargo.toml b/roles/test-utils/mining-device/Cargo.toml index 4ce6c5037..a59719504 100644 --- a/roles/test-utils/mining-device/Cargo.toml +++ b/roles/test-utils/mining-device/Cargo.toml @@ -10,11 +10,10 @@ publish = false stratum-common = { version = "1.0.0", path = "../../../common" } codec_sv2 = { version = "^1.0.1", path = "../../../protocols/v2/codec-sv2", features=["noise_sv2"] } roles_logic_sv2 = { version = "1.0.0", path = "../../../protocols/v2/roles-logic-sv2" } -const_sv2 = { version = "1.0.0", path = "../../../protocols/v2/const-sv2" } +const_sv2 = { version = "2.0.0", path = "../../../protocols/v2/const-sv2" } async-channel = "1.5.1" -async-std={version = "1.8.0", features = ["attributes"]} binary_sv2 = { version = "1.0.0", path = "../../../protocols/v2/binary-sv2/binary-sv2" } -network_helpers_sv2 = { version = "2.0.0", path = "../../roles-utils/network-helpers", features=["async_std"] } +network_helpers_sv2 = { version = "2.0.0", path = "../../roles-utils/network-helpers", features=["tokio"] } buffer_sv2 = { version = "1.0.0", path = "../../../utils/buffer"} async-recursion = "0.3.2" rand = "0.8.4" @@ -24,3 +23,4 @@ clap = { version = "^4.5.4", features = ["derive"] } tracing = { version = "0.1" } tracing-subscriber = "0.3" sha2 = "0.10.6" +tokio = "^1.38.0" diff --git a/roles/test-utils/mining-device/README.md b/roles/test-utils/mining-device/README.md new file mode 100644 index 000000000..4065c2c30 --- /dev/null +++ b/roles/test-utils/mining-device/README.md @@ -0,0 +1,21 @@ +# CPU Sv2 mining device + +Header only sv2 cpu miner. 
+ +``` +Usage: mining-device [OPTIONS] --address-pool + +Options: + -p, --pubkey-pool Pool pub key, when left empty the pool certificate is not checked + -i, --id-device Sometimes used by the pool to identify the device + -a, --address-pool Address of the pool in this format ip:port or domain:port + --handicap This value is used to slow down the cpu miner, it represents the number of micro-seconds that are awaited between hashes [default: 0] + --id-user User id, used when a new channel is opened, it can be used by the pool to identify the miner + -h, --help Print help + -V, --version Print version +``` + +Usage example: +``` +cargo run --release -- --address-pool 127.0.0.1:20000 --id-device device_id::SOLO::bc1qxy2kgdygjrsqtzq2n0yrf2493p83kkfjhx0wlh +``` diff --git a/roles/test-utils/mining-device/src/main.rs b/roles/test-utils/mining-device/src/main.rs index 1bfbf6737..35a31d56a 100644 --- a/roles/test-utils/mining-device/src/main.rs +++ b/roles/test-utils/mining-device/src/main.rs @@ -1,10 +1,17 @@ -use async_std::net::TcpStream; +#![allow(clippy::option_map_unit_fn)] use key_utils::Secp256k1PublicKey; -use network_helpers_sv2::Connection; +use network_helpers_sv2::noise_connection_tokio::Connection; use roles_logic_sv2::utils::Id; -use std::{net::SocketAddr, sync::Arc, thread::sleep, time::Duration}; +use std::{ + net::{SocketAddr, ToSocketAddrs}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; +use tokio::net::TcpStream; -use async_std::net::ToSocketAddrs; use clap::Parser; use rand::{thread_rng, Rng}; use std::time::Instant; @@ -36,7 +43,7 @@ struct Args { address_pool: String, #[arg( long, - help = "This value is used to slow down the cpu miner, it rapresents the number of micro-seconds that are awaited between hashes", + help = "This value is used to slow down the cpu miner, it represents the number of micro-seconds that are awaited between hashes", default_value = "0" )] handicap: u32, @@ -57,14 +64,12 @@ async fn connect( let address = address .clone() .to_socket_addrs() - .await .expect("Invalid pool address, use one of this formats: ip:port, domain:port") .next() .expect("Invalid pool address, use one of this formats: ip:port, domain:port"); info!("Connecting to pool at {}", address); let socket = loop { - let pool = - async_std::future::timeout(Duration::from_secs(5), TcpStream::connect(address)).await; + let pool = tokio::time::timeout(Duration::from_secs(5), TcpStream::connect(address)).await; match pool { Ok(result) => match result { Ok(socket) => break socket, @@ -73,7 +78,7 @@ async fn connect( "Failed to connect to Upstream role at {}, retrying in 5s: {}", address, e ); - sleep(Duration::from_secs(5)); + tokio::time::sleep(Duration::from_secs(5)).await; } }, Err(_) => { @@ -85,15 +90,15 @@ async fn connect( info!("Pool tcp connection established at {}", address); let address = socket.peer_addr().unwrap(); let initiator = Initiator::new(pub_key.map(|e| e.0)); - let (receiver, sender): (Receiver, Sender) = - Connection::new(socket, codec_sv2::HandshakeRole::Initiator(initiator), 10) + let (receiver, sender, _, _): (Receiver, Sender, _, _) = + Connection::new(socket, codec_sv2::HandshakeRole::Initiator(initiator)) .await .unwrap(); info!("Pool noise connection established at {}", address); Device::start(receiver, sender, address, device_id, user_id, handicap).await } -#[async_std::main] +#[tokio::main(flavor = "current_thread")] async fn main() { let args = Args::parse(); tracing_subscriber::fmt::init(); @@ -110,7 +115,7 @@ async fn main() { use 
async_channel::{Receiver, Sender}; use binary_sv2::u256_from_int; -use codec_sv2::{Frame, Initiator, StandardEitherFrame, StandardSv2Frame}; +use codec_sv2::{Initiator, StandardEitherFrame, StandardSv2Frame}; use roles_logic_sv2::{ common_messages_sv2::{Protocol, SetupConnection, SetupConnectionSuccess}, common_properties::{IsMiningUpstream, IsUpstream}, @@ -218,6 +223,12 @@ impl ParseUpstreamCommonMessages for SetupConnectionHandler { } } +#[derive(Debug, Clone)] +struct NewWorkNotifier { + should_send: bool, + sender: Sender<()>, +} + #[derive(Debug)] pub struct Device { #[allow(dead_code)] @@ -230,13 +241,15 @@ pub struct Device { jobs: Vec>, prev_hash: Option>, sequence_numbers: Id, + notify_changes_to_mining_thread: NewWorkNotifier, } fn open_channel(device_id: Option) -> OpenStandardMiningChannel<'static> { let user_identity = device_id.unwrap_or_default().try_into().unwrap(); let id: u32 = 10; info!("Measuring CPU hashrate"); - let nominal_hash_rate = measure_hashrate(5) as f32; + let p = std::thread::available_parallelism().unwrap().get() as u32 - 3; + let nominal_hash_rate = measure_hashrate(5) as f32 * p as f32; info!("Pc hashrate is {}", nominal_hash_rate); info!("MINING DEVICE: send open channel with request id {}", id); OpenStandardMiningChannel { @@ -267,6 +280,7 @@ impl Device { .await; info!("Pool sv2 connection established at {}", addr); let miner = Arc::new(Mutex::new(Miner::new(handicap))); + let (notify_changes_to_mining_thread, update_miners) = async_channel::unbounded(); let self_ = Self { channel_opened: false, receiver: receiver.clone(), @@ -276,6 +290,10 @@ impl Device { prev_hash: None, channel_id: None, sequence_numbers: Id::new(), + notify_changes_to_mining_thread: NewWorkNotifier { + should_send: true, + sender: notify_changes_to_mining_thread, + }, }; let open_channel = MiningDeviceMessages::Mining(Mining::OpenStandardMiningChannel(open_channel(user_id))); @@ -286,24 +304,8 @@ impl Device { let (share_send, share_recv) = async_channel::unbounded(); - let handicap = miner.safe_lock(|m| m.handicap).unwrap(); - std::thread::spawn(move || loop { - std::thread::sleep(std::time::Duration::from_micros(handicap.into())); - if miner.safe_lock(|m| m.next_share()).unwrap().is_valid() { - let nonce = miner.safe_lock(|m| m.header.unwrap().nonce).unwrap(); - let time = miner.safe_lock(|m| m.header.unwrap().time).unwrap(); - let job_id = miner.safe_lock(|m| m.job_id).unwrap(); - let version = miner.safe_lock(|m| m.version).unwrap(); - share_send - .try_send((nonce, job_id.unwrap(), version.unwrap(), time)) - .unwrap(); - } - miner - .safe_lock(|m| m.header.as_mut().map(|h| h.nonce += 1)) - .unwrap(); - }); - - async_std::task::spawn(async move { + start_mining_threads(update_miners, miner, share_send); + tokio::task::spawn(async move { let recv = share_recv.clone(); loop { let (nonce, job_id, version, ntime) = recv.recv().await.unwrap(); @@ -322,6 +324,21 @@ impl Device { MiningRoutingLogic::None, ) .unwrap(); + let mut notify_changes_to_mining_thread = self_mutex + .safe_lock(|s| s.notify_changes_to_mining_thread.clone()) + .unwrap(); + if notify_changes_to_mining_thread.should_send + && (message_type == const_sv2::MESSAGE_TYPE_NEW_MINING_JOB + || message_type == const_sv2::MESSAGE_TYPE_SET_NEW_PREV_HASH + || message_type == const_sv2::MESSAGE_TYPE_SET_TARGET) + { + notify_changes_to_mining_thread + .sender + .send(()) + .await + .unwrap(); + notify_changes_to_mining_thread.should_send = false; + }; match next { SendTo::RelayNewMessageToRemote(_, m) => { let sv2_frame: 
StdFrame = MiningDeviceMessages::Mining(m).try_into().unwrap(); @@ -425,6 +442,7 @@ impl ParseUpstreamMiningMessages<(), NullDownstreamMiningSelector, NoRouting> fo self.miner .safe_lock(|miner| miner.new_target(m.target.to_vec())) .unwrap(); + self.notify_changes_to_mining_thread.should_send = true; Ok(SendTo::None(None)) } @@ -477,6 +495,7 @@ impl ParseUpstreamMiningMessages<(), NullDownstreamMiningSelector, NoRouting> fo .safe_lock(|miner| miner.new_header(p_h, &m)) .unwrap(); self.jobs = vec![m.as_static()]; + self.notify_changes_to_mining_thread.should_send = true; } (true, _) => self.jobs.push(m.as_static()), (false, None) => { @@ -509,6 +528,7 @@ impl ParseUpstreamMiningMessages<(), NullDownstreamMiningSelector, NoRouting> fo .unwrap(); self.jobs = vec![jobs[0].clone()]; self.prev_hash = Some(m.as_static()); + self.notify_changes_to_mining_thread.should_send = true; } _ => panic!(), } @@ -533,6 +553,7 @@ impl ParseUpstreamMiningMessages<(), NullDownstreamMiningSelector, NoRouting> fo self.miner .safe_lock(|miner| miner.new_target(m.maximum_target.to_vec())) .unwrap(); + self.notify_changes_to_mining_thread.should_send = true; Ok(SendTo::None(None)) } @@ -541,7 +562,7 @@ } } -#[derive(Debug)] +#[derive(Debug, Clone)] struct Miner { header: Option<BlockHeader>, target: Option<Uint256>, @@ -669,3 +690,74 @@ fn generate_random_32_byte_array() -> [u8; 32] { rng.fill(&mut arr[..]); arr } + +fn start_mining_threads( + have_new_job: Receiver<()>, + miner: Arc<Mutex<Miner>>, + share_send: Sender<(u32, u32, u32, u32)>, +) { + tokio::task::spawn(async move { + let mut killers: Vec<Arc<AtomicBool>> = vec![]; + loop { + let available_parallelism = u32::max( + 2, + std::thread::available_parallelism().unwrap().get() as u32, + ); + let p = available_parallelism - 1; + let unit = u32::MAX / p; + while have_new_job.recv().await.is_ok() { + while let Some(killer) = killers.pop() { + killer.store(true, Ordering::Relaxed); + } + let miner = miner.safe_lock(|m| m.clone()).unwrap(); + for i in 0..p { + let mut miner = miner.clone(); + let share_send = share_send.clone(); + let killer = Arc::new(AtomicBool::new(false)); + miner.header.as_mut().map(|h| h.nonce = i * unit); + killers.push(killer.clone()); + std::thread::spawn(move || { + mine(miner, share_send, killer); + }); + } + } + } + }); +} + +fn mine(mut miner: Miner, share_send: Sender<(u32, u32, u32, u32)>, kill: Arc<AtomicBool>) { + if miner.handicap != 0 { + loop { + if kill.load(Ordering::Relaxed) { + break; + } + std::thread::sleep(std::time::Duration::from_micros(miner.handicap.into())); + if miner.next_share().is_valid() { + let nonce = miner.header.unwrap().nonce; + let time = miner.header.unwrap().time; + let job_id = miner.job_id.unwrap(); + let version = miner.version; + share_send + .try_send((nonce, job_id, version.unwrap(), time)) + .unwrap(); + } + miner.header.as_mut().map(|h| h.nonce += 1); + } + } else { + loop { + if miner.next_share().is_valid() { + if kill.load(Ordering::Relaxed) { + break; + } + let nonce = miner.header.unwrap().nonce; + let time = miner.header.unwrap().time; + let job_id = miner.job_id.unwrap(); + let version = miner.version; + share_send + .try_send((nonce, job_id, version.unwrap(), time)) + .unwrap(); + } + miner.header.as_mut().map(|h| h.nonce += 1); + } + } +} diff --git a/roles/test-utils/sv1-mining-device/src/job.rs b/roles/test-utils/sv1-mining-device/src/job.rs index ef02b009e..1d6b3d2bc 100644 --- a/roles/test-utils/sv1-mining-device/src/job.rs +++
b/roles/test-utils/sv1-mining-device/src/job.rs @@ -25,9 +25,10 @@ pub(crate) struct Job { impl Job { pub fn from_notify(notify_msg: server_to_client::Notify<'_>, extranonce: Vec<u8>) -> Self { - // TODO: Hard coded for demo. Should be properly translated from received Notify message - // Right now, Notify.job_id is a string, but the Job.job_id is a u32 here. - let job_id = 1u32; + let job_id = notify_msg + .job_id + .parse::<u32>() + .expect("expect valid job_id on String"); // Convert prev hash from Vec<u8> into expected [u32; 8] let prev_hash_vec: Vec<u8> = notify_msg.prev_hash.into(); diff --git a/roles/translator/Cargo.toml b/roles/translator/Cargo.toml index 182370cbd..c7abf7048 100644 --- a/roles/translator/Cargo.toml +++ b/roles/translator/Cargo.toml @@ -18,7 +18,7 @@ async-std = { version = "1.12.0", features = ["attributes"] } binary_sv2 = { version = "^1.0.0", path = "../../protocols/v2/binary-sv2/binary-sv2" } buffer_sv2 = { version = "^1.0.0", path = "../../utils/buffer" } codec_sv2 = { version = "^1.0.1", path = "../../protocols/v2/codec-sv2", features = ["noise_sv2", "with_buffer_pool"] } -framing_sv2 = { version = "^1.1.0", path = "../../protocols/v2/framing-sv2" } +framing_sv2 = { version = "^2.0.0", path = "../../protocols/v2/framing-sv2" } network_helpers_sv2 = { version = "2.0.0", path = "../roles-utils/network-helpers", features=["async_std", "with_buffer_pool"] } once_cell = "1.12.0" roles_logic_sv2 = { version = "^1.0.0", path = "../../protocols/v2/roles-logic-sv2" } @@ -26,7 +26,7 @@ serde = { version = "1.0.89", default-features = false, features = ["derive", "a serde_json = { version = "1.0.64", default-features = false, features = ["alloc"] } futures = "0.3.25" tokio = { version = "1", features = ["full"] } -toml = { version = "0.5.6", git = "https://github.com/diondokter/toml-rs", default-features = false, rev = "c4161aa" } +ext-config = { version = "0.14.0", features = ["toml"], package = "config" } tracing = { version = "0.1" } tracing-subscriber = { version = "0.3" } v1 = { version = "^1.0.0", path = "../../protocols/v1", package="sv1_api" } @@ -34,11 +34,10 @@ error_handling = { version = "1.0.0", path = "../../utils/error-handling" } key-utils = { version = "^1.0.0", path = "../../utils/key-utils" } tokio-util = { version = "0.7.10", features = ["codec"] } async-compat = "0.2.1" - +rand = "0.8.4" [dev-dependencies] -rand = "0.8.4" sha2 = "0.10.6" [features] diff --git a/roles/translator/src/lib/downstream_sv1/diff_management.rs b/roles/translator/src/lib/downstream_sv1/diff_management.rs index eb8f0af38..3ee03c76f 100644 --- a/roles/translator/src/lib/downstream_sv1/diff_management.rs +++ b/roles/translator/src/lib/downstream_sv1/diff_management.rs @@ -320,11 +320,11 @@ mod test { #[test] fn test_diff_management() { let expected_shares_per_minute = 1000.0; - let total_run_time = std::time::Duration::from_secs(11); + let total_run_time = std::time::Duration::from_secs(60); let initial_nominal_hashrate = measure_hashrate(5); let target = match roles_logic_sv2::utils::hash_rate_to_target( initial_nominal_hashrate, - expected_shares_per_minute.into(), + expected_shares_per_minute, ) { Ok(target) => target, Err(_) => panic!(), @@ -342,13 +342,13 @@ } let calculated_share_per_min = count as f32 / (elapsed.as_secs_f32() / 60.0); - // This is the error margin for a confidence of 99% given the expect number of shares per + // This is the error margin for a confidence of 99.99...% given the expected number of shares per // minute TODO the review the math under it - let
error_margin = get_error(expected_shares_per_minute.into()); + let error_margin = get_error(expected_shares_per_minute); let error = (calculated_share_per_min - expected_shares_per_minute as f32).abs(); assert!( error <= error_margin as f32, - "Calculated shares per minute are outside the 99% confidence interval. Error: {:?}, Error margin: {:?}, {:?}", error, error_margin,calculated_share_per_min + "Calculated shares per minute are outside the 99.99...% confidence interval. Error: {:?}, Error margin: {:?}, {:?}", error, error_margin,calculated_share_per_min ); } @@ -379,9 +379,8 @@ mod test { } let elapsed_secs = start_time.elapsed().as_secs_f64(); - let hashrate = hashes as f64 / elapsed_secs; - let nominal_hash_rate = hashrate; - nominal_hash_rate + + hashes as f64 / elapsed_secs } fn hash(share: &mut [u8; 80]) -> Target { @@ -438,6 +437,7 @@ mod test { 0, downstream_conf.clone(), Arc::new(Mutex::new(upstream_config)), + "0".to_string(), ); downstream.difficulty_mgmt.min_individual_miner_hashrate = start_hashrate as f32; diff --git a/roles/translator/src/lib/downstream_sv1/downstream.rs b/roles/translator/src/lib/downstream_sv1/downstream.rs index ddf74fe58..ebe00deee 100644 --- a/roles/translator/src/lib/downstream_sv1/downstream.rs +++ b/roles/translator/src/lib/downstream_sv1/downstream.rs @@ -13,7 +13,7 @@ use async_std::{ }; use error_handling::handle_result; use futures::FutureExt; -use tokio::sync::broadcast; +use tokio::{sync::broadcast, task::AbortHandle}; use super::{kill, DownstreamMessages, SubmitShareWithChannelId, SUBSCRIBE_TIMEOUT_SECS}; @@ -62,6 +62,7 @@ pub struct Downstream { extranonce2_len: usize, pub(super) difficulty_mgmt: DownstreamDifficultyConfig, pub(super) upstream_difficulty_config: Arc>, + last_job_id: String, // we usually receive a String on SV1 messages, no need to cast to u32 } impl Downstream { @@ -78,6 +79,7 @@ impl Downstream { extranonce2_len: usize, difficulty_mgmt: DownstreamDifficultyConfig, upstream_difficulty_config: Arc>, + last_job_id: String, ) -> Self { Downstream { connection_id, @@ -91,6 +93,7 @@ impl Downstream { extranonce2_len, difficulty_mgmt, upstream_difficulty_config, + last_job_id, } } /// Instantiate a new `Downstream`. @@ -107,6 +110,7 @@ impl Downstream { host: String, difficulty_config: DownstreamDifficultyConfig, upstream_difficulty_config: Arc>, + task_collector: Arc>>, ) { let stream = std::sync::Arc::new(stream); @@ -131,6 +135,7 @@ impl Downstream { extranonce2_len, difficulty_mgmt: difficulty_config, upstream_difficulty_config, + last_job_id: "".to_string(), })); let self_ = downstream.clone(); @@ -146,11 +151,12 @@ impl Downstream { let rx_shutdown_clone = rx_shutdown.clone(); let tx_shutdown_clone = tx_shutdown.clone(); let tx_status_reader = tx_status.clone(); + let task_collector_mining_device = task_collector.clone(); // Task to read from SV1 Mining Device Client socket via `socket_reader`. Depending on the // SV1 message received, a message response is sent directly back to the SV1 Downstream // role, or the message is sent upwards to the Bridge for translation into a SV2 message // and then sent to the SV2 Upstream role. 
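The hunks below convert each of these `async_std` tasks to `tokio::task::spawn` and, new in this PR, record every spawned task's `AbortHandle` plus a human-readable label in a shared collector so that the reconnect path can tear the whole task tree down. Reduced to its essentials (the sketch uses `std::sync::Mutex` instead of the `roles_logic_sv2` `Mutex`/`safe_lock` wrapper used in the real code):

```rust
use std::sync::{Arc, Mutex};
use tokio::task::AbortHandle;

type TaskCollector = Arc<Mutex<Vec<(AbortHandle, String)>>>;

/// Spawn a task and remember how to abort it later.
fn spawn_tracked(collector: &TaskCollector, name: &str) {
    let task = tokio::task::spawn(async {
        // long-running task body goes here
    });
    collector
        .lock()
        .unwrap()
        .push((task.abort_handle(), name.to_string()));
}

/// Abort every tracked task, e.g. before reconnecting to the upstream.
fn kill_all(collector: &TaskCollector) {
    let mut tasks = collector.lock().unwrap();
    while let Some((handle, name)) = tasks.pop() {
        handle.abort(); // takes effect at the task's next await point
        eprintln!("killed task: {name}");
    }
}
```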
- let _socket_reader_task = task::spawn(async move { + let socket_reader_task = tokio::task::spawn(async move { let reader = BufReader::new(&*socket_reader); let mut messages = FramedRead::new( async_compat::Compat::new(reader), @@ -201,15 +207,22 @@ impl Downstream { kill(&tx_shutdown_clone).await; warn!("Downstream: Shutting down sv1 downstream reader"); }); + let _ = task_collector_mining_device.safe_lock(|a| { + a.push(( + socket_reader_task.abort_handle(), + "socket_reader_task".to_string(), + )) + }); let rx_shutdown_clone = rx_shutdown.clone(); let tx_shutdown_clone = tx_shutdown.clone(); let tx_status_writer = tx_status.clone(); let host_ = host.clone(); + let task_collector_new_sv1_message_no_transl = task_collector.clone(); // Task to receive SV1 message responses to SV1 messages that do NOT need translation. // These response messages are sent directly to the SV1 Downstream role. - let _socket_writer_task = task::spawn(async move { + let socket_writer_task = tokio::task::spawn(async move { loop { select! { res = receiver_outgoing.recv().fuse() => { @@ -238,11 +251,18 @@ impl Downstream { &host_ ); }); + let _ = task_collector_new_sv1_message_no_transl.safe_lock(|a| { + a.push(( + socket_writer_task.abort_handle(), + "socket_writer_task".to_string(), + )) + }); let tx_status_notify = tx_status; let self_ = downstream.clone(); - let _notify_task = task::spawn(async move { + let task_collector_notify_task = task_collector.clone(); + let notify_task = tokio::task::spawn(async move { let timeout_timer = std::time::Instant::now(); let mut first_sent = false; loop { @@ -271,6 +291,11 @@ impl Downstream { ); let sv1_mining_notify_msg = last_notify.clone().unwrap(); + + self_ + .safe_lock(|s| s.last_job_id = sv1_mining_notify_msg.clone().job_id) + .unwrap(); + let message: json_rpc::Message = sv1_mining_notify_msg.into(); handle_result!( tx_status_notify, @@ -290,9 +315,11 @@ impl Downstream { // if hashrate has changed, update difficulty management, and send new mining.set_difficulty handle_result!(tx_status_notify, Self::try_update_difficulty_settings(downstream.clone()).await); - let sv1_mining_notify_msg = handle_result!(tx_status_notify, res); - let message: json_rpc::Message = sv1_mining_notify_msg.into(); + let message: json_rpc::Message = sv1_mining_notify_msg.clone().into(); + + self_.safe_lock(|s| s.last_job_id = sv1_mining_notify_msg.job_id).unwrap(); + handle_result!(tx_status_notify, Downstream::send_message_downstream(downstream.clone(), message).await); }, _ = rx_shutdown.recv().fuse() => { @@ -318,10 +345,14 @@ impl Downstream { &host ); }); + + let _ = task_collector_notify_task + .safe_lock(|a| a.push((notify_task.abort_handle(), "notify_task".to_string()))); } /// Accept connections from one or more SV1 Downstream roles (SV1 Mining Devices) and create a /// new `Downstream` for each connection. 
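Alongside the task bookkeeping, `accept_connections` below gains the collector parameter, and the following `handle_submit` hunk changes the acceptance rule: a share is forwarded only when it references the `job_id` of the last `mining.notify` sent to that miner, so stale-job shares are rejected at the SV1 edge instead of reaching the bridge. Schematically (standalone types for illustration; the real code matches on `client_to_server::Submit`):

```rust
struct Downstream {
    /// job_id of the most recent mining.notify sent to this miner
    last_job_id: String,
}

impl Downstream {
    /// Accept and forward the share only for the job currently being worked on.
    fn handle_submit(&self, submitted_job_id: &str) -> bool {
        if submitted_job_id == self.last_job_id {
            // forward to the bridge for SV1 -> SV2 translation here
            true
        } else {
            false // stale or unknown job: reject the submit
        }
    }
}
```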
+ #[allow(clippy::too_many_arguments)] pub fn accept_connections( downstream_addr: SocketAddr, tx_sv1_submit: Sender, @@ -330,8 +361,11 @@ impl Downstream { bridge: Arc>, downstream_difficulty_config: DownstreamDifficultyConfig, upstream_difficulty_config: Arc>, + task_collector: Arc>>, ) { - task::spawn(async move { + let task_collector_downstream = task_collector.clone(); + + let accept_connections = tokio::task::spawn(async move { let downstream_listener = TcpListener::bind(downstream_addr).await.unwrap(); let mut downstream_incoming = downstream_listener.incoming(); @@ -358,6 +392,7 @@ impl Downstream { host, downstream_difficulty_config.clone(), upstream_difficulty_config.clone(), + task_collector_downstream.clone(), ) .await; } @@ -367,6 +402,12 @@ impl Downstream { } } }); + let _ = task_collector.safe_lock(|a| { + a.push(( + accept_connections.abort_handle(), + "accept_connections".to_string(), + )) + }); } /// As SV1 messages come in, determines if the message response needs to be translated to SV2 @@ -491,12 +532,12 @@ impl IsServer<'static> for Downstream { /// When miner find the job which meets requested difficulty, it can submit share to the server. /// Only [Submit](client_to_server::Submit) requests for authorized user names can be submitted. fn handle_submit(&self, request: &client_to_server::Submit<'static>) -> bool { - info!("Down: Submitting Share"); + info!("Down: Submitting Share {:?}", request); debug!("Down: Handling mining.submit: {:?}", &request); // TODO: Check if receiving valid shares by adding diff field to Downstream - if self.first_job_received { + if request.job_id == self.last_job_id { let to_send = SubmitShareWithChannelId { channel_id: self.connection_id, share: request.clone(), @@ -504,11 +545,15 @@ impl IsServer<'static> for Downstream { extranonce2_len: self.extranonce2_len, version_rolling_mask: self.version_rolling_mask.clone(), }; + self.tx_sv1_bridge .try_send(DownstreamMessages::SubmitShares(to_send)) .unwrap(); - }; - true + + true + } else { + false + } } /// Indicates to the server that the client supports the mining.set_extranonce method. diff --git a/roles/translator/src/lib/error.rs b/roles/translator/src/lib/error.rs index debad1819..8abd61690 100644 --- a/roles/translator/src/lib/error.rs +++ b/roles/translator/src/lib/error.rs @@ -1,3 +1,4 @@ +use ext_config::ConfigError; use roles_logic_sv2::{ mining_sv2::{ExtendedExtranonce, NewExtendedMiningJob, SetCustomMiningJob}, parsers::Mining, @@ -38,8 +39,8 @@ pub enum Error<'a> { BadCliArgs, /// Errors on bad `serde_json` serialize/deserialize. BadSerdeJson(serde_json::Error), - /// Errors on bad `toml` deserialize. - BadTomlDeserialize(toml::de::Error), + /// Errors on bad `config` TOML deserialize. + BadConfigDeserialize(ConfigError), /// Errors from `binary_sv2` crate. BinarySv2(binary_sv2::Error), /// Errors on bad noise handshake. 
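The `error.rs` hunks that follow replace the `toml::de::Error` variant with the `config` crate's `ConfigError`; because both `.build()` and `.try_deserialize()` return `ConfigError`, a single `From` impl lets the whole loading chain propagate with `?`. A condensed sketch of the plumbing (one-variant enum; the real `Error` carries many more):

```rust
use ext_config::{Config, ConfigError, File, FileFormat};

#[derive(Debug)]
enum Error {
    BadConfigDeserialize(ConfigError),
}

impl From<ConfigError> for Error {
    fn from(e: ConfigError) -> Self {
        Error::BadConfigDeserialize(e)
    }
}

fn load(path: &str) -> Result<std::collections::HashMap<String, String>, Error> {
    // both fallible steps convert into Error::BadConfigDeserialize via `?`
    Ok(Config::builder()
        .add_source(File::new(path, FileFormat::Toml))
        .build()?
        .try_deserialize()?)
}
```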
@@ -83,7 +84,7 @@ impl<'a> fmt::Display for Error<'a> { match self { BadCliArgs => write!(f, "Bad CLI arg input"), BadSerdeJson(ref e) => write!(f, "Bad serde json: `{:?}`", e), - BadTomlDeserialize(ref e) => write!(f, "Bad `toml` deserialize: `{:?}`", e), + BadConfigDeserialize(ref e) => write!(f, "Bad `config` TOML deserialize: `{:?}`", e), BinarySv2(ref e) => write!(f, "Binary SV2 error: `{:?}`", e), CodecNoise(ref e) => write!(f, "Noise error: `{:?}", e), FramingSv2(ref e) => write!(f, "Framing SV2 error: `{:?}`", e), @@ -159,9 +160,9 @@ impl<'a> From for Error<'a> { } } -impl<'a> From for Error<'a> { - fn from(e: toml::de::Error) -> Self { - Error::BadTomlDeserialize(e) +impl<'a> From for Error<'a> { + fn from(e: ConfigError) -> Self { + Error::BadConfigDeserialize(e) } } diff --git a/roles/translator/src/lib/proxy/bridge.rs b/roles/translator/src/lib/proxy/bridge.rs index 22aeaa18f..74db21111 100644 --- a/roles/translator/src/lib/proxy/bridge.rs +++ b/roles/translator/src/lib/proxy/bridge.rs @@ -1,5 +1,4 @@ use async_channel::{Receiver, Sender}; -use async_std::task; use roles_logic_sv2::{ channel_logic::channel_factory::{ExtendedChannelKind, ProxyExtendedChannelFactory, Share}, mining_sv2::{ @@ -9,7 +8,7 @@ use roles_logic_sv2::{ utils::{GroupId, Mutex}, }; use std::sync::Arc; -use tokio::sync::broadcast; +use tokio::{sync::broadcast, task::AbortHandle}; use v1::{client_to_server::Submit, server_to_client, utils::HexU32Be}; use super::super::{ @@ -22,7 +21,7 @@ use super::super::{ }; use error_handling::handle_result; use roles_logic_sv2::{channel_logic::channel_factory::OnNewShare, Error as RolesLogicError}; -use tracing::{debug, error, info}; +use tracing::{debug, error, info, warn}; /// Bridge between the SV2 `Upstream` and SV1 `Downstream` responsible for the following messaging /// translation: @@ -64,6 +63,7 @@ pub struct Bridge { last_p_hash: Option>, target: Arc>>, last_job_id: u32, + task_collector: Arc>>, } impl Bridge { @@ -79,6 +79,7 @@ impl Bridge { extranonces: ExtendedExtranonce, target: Arc>>, up_id: u32, + task_collector: Arc>>, ) -> Arc> { let ids = Arc::new(Mutex::new(GroupId::new())); let share_per_min = 1.0; @@ -107,6 +108,7 @@ impl Bridge { last_p_hash: None, target, last_job_id: 0, + task_collector, })) } @@ -162,10 +164,12 @@ impl Bridge { /// Receives a `DownstreamMessages` message from the `Downstream`, handles based on the /// variant received. fn handle_downstream_messages(self_: Arc>) { + let task_collector_handle_downstream = + self_.safe_lock(|b| b.task_collector.clone()).unwrap(); let (rx_sv1_downstream, tx_status) = self_ .safe_lock(|s| (s.rx_sv1_downstream.clone(), s.tx_status.clone())) .unwrap(); - task::spawn(async move { + let handle_downstream = tokio::task::spawn(async move { loop { let msg = handle_result!(tx_status, rx_sv1_downstream.clone().recv().await); @@ -185,6 +189,12 @@ impl Bridge { }; } }); + let _ = task_collector_handle_downstream.safe_lock(|a| { + a.push(( + handle_downstream.abort_handle(), + "handle_downstream_message".to_string(), + )) + }); } /// receives a `SetDownstreamTarget` and updates the downstream target for the channel #[allow(clippy::result_large_err)] @@ -235,7 +245,7 @@ impl Bridge { match res { Ok(Ok(OnNewShare::SendErrorDownstream(e))) => { - error!( + warn!( "Submit share error {:?}", std::str::from_utf8(&e.error_code.to_vec()[..]) ); @@ -367,6 +377,8 @@ impl Bridge { /// corresponding `job_id` has already been received. 
If this is not the case, an error has /// occurred on the Upstream pool role and the connection will close. fn handle_new_prev_hash(self_: Arc>) { + let task_collector_handle_new_prev_hash = + self_.safe_lock(|b| b.task_collector.clone()).unwrap(); let (tx_sv1_notify, rx_sv2_set_new_prev_hash, tx_status) = self_ .safe_lock(|s| { ( @@ -377,7 +389,7 @@ impl Bridge { }) .unwrap(); debug!("Starting handle_new_prev_hash task"); - task::spawn(async move { + let handle_new_prev_hash = tokio::task::spawn(async move { loop { // Receive `SetNewPrevHash` from `Upstream` let sv2_set_new_prev_hash: SetNewPrevHash = @@ -397,6 +409,12 @@ impl Bridge { ) } }); + let _ = task_collector_handle_new_prev_hash.safe_lock(|a| { + a.push(( + handle_new_prev_hash.abort_handle(), + "handle_new_prev_hash".to_string(), + )) + }); } async fn handle_new_extended_mining_job_( @@ -460,6 +478,8 @@ impl Bridge { /// `SetNewPrevHash` `job_id`, an error has occurred on the Upstream pool role and the /// connection will close. fn handle_new_extended_mining_job(self_: Arc>) { + let task_collector_new_extended_mining_job = + self_.safe_lock(|b| b.task_collector.clone()).unwrap(); let (tx_sv1_notify, rx_sv2_new_ext_mining_job, tx_status) = self_ .safe_lock(|s| { ( @@ -470,7 +490,7 @@ impl Bridge { }) .unwrap(); debug!("Starting handle_new_extended_mining_job task"); - task::spawn(async move { + let handle_new_extended_mining_job = tokio::task::spawn(async move { loop { // Receive `NewExtendedMiningJob` from `Upstream` let sv2_new_extended_mining_job: NewExtendedMiningJob = handle_result!( @@ -494,6 +514,12 @@ impl Bridge { .store(true, std::sync::atomic::Ordering::SeqCst); } }); + let _ = task_collector_new_extended_mining_job.safe_lock(|a| { + a.push(( + handle_new_extended_mining_job.abort_handle(), + "handle_new_extended_mining_job".to_string(), + )) + }); } } pub struct OpenSv1Downstream { @@ -543,6 +569,7 @@ mod test { rx_sv1_notify, }; + let task_collector = Arc::new(Mutex::new(vec![])); let b = Bridge::new( rx_sv1_submit, tx_sv2_submit_shares_ext, @@ -553,6 +580,7 @@ mod test { extranonces, Arc::new(Mutex::new(upstream_target)), 1, + task_collector, ); (b, interface) } @@ -595,7 +623,7 @@ mod test { previous_output: p_out, script_sig: vec![89_u8; 16].into(), sequence: bitcoin::Sequence(0), - witness: Witness::from_vec(vec![]).into(), + witness: Witness::from_vec(vec![]), }; let tx = bitcoin::Transaction { version: 1, diff --git a/roles/translator/src/lib/status.rs b/roles/translator/src/lib/status.rs index 3ecbcc634..e8af6883e 100644 --- a/roles/translator/src/lib/status.rs +++ b/roles/translator/src/lib/status.rs @@ -48,6 +48,7 @@ pub enum State<'a> { DownstreamShutdown(Error<'a>), BridgeShutdown(Error<'a>), UpstreamShutdown(Error<'a>), + UpstreamTryReconnect(Error<'a>), Healthy(String), } @@ -83,13 +84,22 @@ async fn send_status( .await .unwrap_or(()); } - Sender::Upstream(tx) => { - tx.send(Status { - state: State::UpstreamShutdown(e), - }) - .await - .unwrap_or(()); - } + Sender::Upstream(tx) => match e { + Error::ChannelErrorReceiver(_) => { + tx.send(Status { + state: State::UpstreamTryReconnect(e), + }) + .await + .unwrap_or(()); + } + _ => { + tx.send(Status { + state: State::UpstreamShutdown(e), + }) + .await + .unwrap_or(()); + } + }, Sender::TemplateReceiver(tx) => { tx.send(Status { state: State::UpstreamShutdown(e), @@ -113,8 +123,8 @@ pub async fn handle_error( Error::BadCliArgs => send_status(sender, e, error_handling::ErrorBranch::Break).await, // Errors on bad `serde_json` serialize/deserialize. 
Error::BadSerdeJson(_) => send_status(sender, e, error_handling::ErrorBranch::Break).await, - // Errors on bad `toml` deserialize. - Error::BadTomlDeserialize(_) => { + // Errors on bad `config` TOML deserialize. + Error::BadConfigDeserialize(_) => { send_status(sender, e, error_handling::ErrorBranch::Break).await } // Errors from `binary_sv2` crate. diff --git a/roles/translator/src/lib/upstream_sv2/upstream.rs b/roles/translator/src/lib/upstream_sv2/upstream.rs index f6d192f75..613517432 100644 --- a/roles/translator/src/lib/upstream_sv2/upstream.rs +++ b/roles/translator/src/lib/upstream_sv2/upstream.rs @@ -9,9 +9,9 @@ use crate::{ upstream_sv2::{EitherFrame, Message, StdFrame, UpstreamConnection}, }; use async_channel::{Receiver, Sender}; -use async_std::{net::TcpStream, task}; +use async_std::net::TcpStream; use binary_sv2::u256_from_int; -use codec_sv2::{Frame, HandshakeRole, Initiator}; +use codec_sv2::{HandshakeRole, Initiator}; use error_handling::handle_result; use key_utils::Secp256k1PublicKey; use network_helpers_sv2::Connection; @@ -36,8 +36,10 @@ use roles_logic_sv2::{ use std::{ net::SocketAddr, sync::{atomic::AtomicBool, Arc}, - thread::sleep, - time::Duration, +}; +use tokio::{ + task::AbortHandle, + time::{sleep, Duration}, }; use tracing::{error, info, warn}; @@ -98,6 +100,7 @@ pub struct Upstream { // and the upstream just needs to occasionally check if it has changed more than // than the configured percentage pub(super) difficulty_config: Arc>, + task_collector: Arc>>, } impl PartialEq for Upstream { @@ -124,6 +127,7 @@ impl Upstream { tx_status: status::Sender, target: Arc>>, difficulty_config: Arc>, + task_collector: Arc>>, ) -> ProxyResult<'static, Arc>> { // Connect to the SV2 Upstream role retry connection every 5 seconds. 
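Two related resilience changes meet here: `status.rs` (above) starts classifying upstream `ChannelErrorReceiver` failures as `UpstreamTryReconnect` rather than a hard `UpstreamShutdown`, and the upstream connect-retry loop below now awaits `tokio::time::sleep` instead of blocking the thread. When the reconnect state is handled in `main.rs` further down, the restart is preceded by a randomized 0-3000 ms delay so that many proxies do not re-dial the pool in lockstep. The jitter in isolation, assuming a tokio runtime and `rand` 0.8:

```rust
use rand::Rng;
use tokio::time::Duration;

/// Sleep a random 0-3000 ms before re-dialing the upstream, so that many
/// proxies restarted by the same pool outage spread their reconnects out.
async fn reconnect_jitter() {
    let wait_ms: u64 = rand::thread_rng().gen_range(0..=3000);
    tokio::time::sleep(Duration::from_millis(wait_ms)).await;
    // kill_tasks(...) and start(...) follow in the real loop
}
```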
let socket = loop { @@ -135,7 +139,7 @@ impl Upstream { address, e ); - sleep(Duration::from_secs(5)); + sleep(Duration::from_secs(5)).await; } } }; @@ -171,6 +175,7 @@ impl Upstream { tx_status, target, difficulty_config, + task_collector, }))) } @@ -259,6 +264,9 @@ impl Upstream { #[allow(clippy::result_large_err)] pub fn parse_incoming(self_: Arc>) -> ProxyResult<'static, ()> { let clone = self_.clone(); + let task_collector = self_.safe_lock(|s| s.task_collector.clone()).unwrap(); + let collector1 = task_collector.clone(); + let collector2 = task_collector.clone(); let ( tx_frame, tx_sv2_extranonce, @@ -281,16 +289,22 @@ impl Upstream { { let self_ = self_.clone(); let tx_status = tx_status.clone(); - task::spawn(async move { + let start_diff_management = tokio::task::spawn(async move { // No need to start diff management immediatly - async_std::task::sleep(Duration::from_secs(10)).await; + sleep(Duration::from_secs(10)).await; loop { handle_result!(tx_status, Self::try_update_hashrate(self_.clone()).await); } }); + let _ = collector1.safe_lock(|a| { + a.push(( + start_diff_management.abort_handle(), + "start_diff_management".to_string(), + )) + }); } - task::spawn(async move { + let parse_incoming = tokio::task::spawn(async move { loop { // Waiting to receive a message from the SV2 Upstream role let incoming = handle_result!(tx_status, recv.recv().await); @@ -433,6 +447,8 @@ impl Upstream { } } }); + let _ = collector2 + .safe_lock(|a| a.push((parse_incoming.abort_handle(), "parse_incoming".to_string()))); Ok(()) } @@ -459,6 +475,7 @@ impl Upstream { #[allow(clippy::result_large_err)] pub fn handle_submit(self_: Arc>) -> ProxyResult<'static, ()> { + let task_collector = self_.safe_lock(|s| s.task_collector.clone()).unwrap(); let clone = self_.clone(); let (tx_frame, receiver, tx_status) = clone .safe_lock(|s| { @@ -470,7 +487,7 @@ impl Upstream { }) .map_err(|_| PoisonLock)?; - task::spawn(async move { + let handle_submit = tokio::task::spawn(async move { loop { let mut sv2_submit: SubmitSharesExtended = handle_result!(tx_status, receiver.recv().await); @@ -506,6 +523,9 @@ impl Upstream { ); } }); + let _ = task_collector + .safe_lock(|a| a.push((handle_submit.abort_handle(), "handle_submit".to_string()))); + Ok(()) } diff --git a/roles/translator/src/main.rs b/roles/translator/src/main.rs index c1307a5a2..fc8e91de9 100644 --- a/roles/translator/src/main.rs +++ b/roles/translator/src/main.rs @@ -3,36 +3,47 @@ mod args; mod lib; use args::Args; +use async_channel::{bounded, unbounded}; use error::{Error, ProxyResult}; +use futures::{select, FutureExt}; use lib::{downstream_sv1, error, proxy, proxy_config, status, upstream_sv2}; use proxy_config::ProxyConfig; +use rand::Rng; use roles_logic_sv2::utils::Mutex; - -use async_channel::{bounded, unbounded}; -use futures::{select, FutureExt}; use std::{ net::{IpAddr, SocketAddr}, str::FromStr, sync::Arc, }; -use tokio::{sync::broadcast, task}; +use ext_config::{Config, File, FileFormat}; +use tokio::{sync::broadcast, task, task::AbortHandle, time::Duration}; use v1::server_to_client; use crate::status::{State, Status}; -use tracing::{debug, error, info}; +use tracing::{debug, error, info, warn}; /// Process CLI args, if any. 
#[allow(clippy::result_large_err)] fn process_cli_args<'a>() -> ProxyResult<'a, ProxyConfig> { - let args = match Args::from_args() { - Ok(cfg) => cfg, - Err(help) => { - error!("{}", help); - return Err(Error::BadCliArgs); - } - }; - let config_file = std::fs::read_to_string(args.config_path)?; - Ok(toml::from_str::<ProxyConfig>(&config_file)?) + // Parse CLI arguments + let args = Args::from_args().map_err(|help| { + error!("{}", help); + Error::BadCliArgs + })?; + + // Build configuration from the provided file path + let config_path = args.config_path.to_str().ok_or_else(|| { + error!("Invalid configuration path."); + Error::BadCliArgs + })?; + + let settings = Config::builder() + .add_source(File::new(config_path, FileFormat::Toml)) + .build()?; + + // Deserialize settings into ProxyConfig + let config = settings.try_deserialize::<ProxyConfig>()?; + Ok(config) } #[tokio::main] @@ -43,22 +54,121 @@ async fn main() { Ok(p) => p, Err(e) => panic!("failed to load config: {}", e), }; - info!("PC: {:?}", &proxy_config); + info!("Proxy Config: {:?}", &proxy_config); let (tx_status, rx_status) = unbounded(); - // `tx_sv1_bridge` sender is used by `Downstream` to send a `DownstreamMessages` message to - // `Bridge` via the `rx_sv1_downstream` receiver - // (Sender<DownstreamMessages>, Receiver<DownstreamMessages>) - let (tx_sv1_bridge, rx_sv1_downstream) = unbounded(); + let target = Arc::new(Mutex::new(vec![0; 32])); + + // Sender/Receiver to send SV1 `mining.notify` message from the `Bridge` to the `Downstream` + let (tx_sv1_notify, _rx_sv1_notify): ( + broadcast::Sender<server_to_client::Notify<'static>>, + broadcast::Receiver<server_to_client::Notify<'static>>, + ) = broadcast::channel(10); + + let task_collector: Arc<Mutex<Vec<(AbortHandle, String)>>> = Arc::new(Mutex::new(Vec::new())); + + start( + tx_sv1_notify.clone(), + target.clone(), + tx_status.clone(), + task_collector.clone(), + proxy_config.clone(), + ) + .await; + + debug!("Starting up signal listener"); + let task_collector_ = task_collector.clone(); + + let mut interrupt_signal_future = Box::pin(tokio::signal::ctrl_c().fuse()); + debug!("Starting up status listener"); + // Check all tasks if is_finished() is true, if so exit
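The status loop that follows is the heart of the restart behavior: it races the status channel against ctrl-c, logs `Healthy` messages, breaks on fatal shutdowns, and re-runs `start()` on `UpstreamTryReconnect`. Its control flow, distilled (two-variant `Status` for illustration; the real loop also handles the shutdown states):

```rust
use futures::{select, FutureExt};

enum State { Healthy(String), UpstreamTryReconnect(String) }
struct Status { state: State }

async fn status_loop(rx_status: async_channel::Receiver<Status>) {
    let mut interrupt = Box::pin(tokio::signal::ctrl_c().fuse());
    loop {
        let status = select! {
            s = rx_status.recv().fuse() => s,
            _ = interrupt => break, // ctrl-c: leave the loop and exit
        };
        match status.unwrap().state {
            State::Healthy(msg) => println!("healthy: {msg}"),
            State::UpstreamTryReconnect(_) => {
                // jittered backoff, kill_tasks(...), then start(...) again
            }
        }
    }
}
```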
+ loop { + let task_status = select! { + task_status = rx_status.recv().fuse() => task_status, + interrupt_signal = interrupt_signal_future => { + match interrupt_signal { + Ok(()) => { + info!("Interrupt received"); + }, + Err(err) => { + error!("Unable to listen for interrupt signal: {}", err); + // we also shut down in case of error + }, + } + break; + } + }; + let task_status: Status = task_status.unwrap(); + + match task_status.state { + // Should only be sent by the downstream listener + State::DownstreamShutdown(err) => { + error!("SHUTDOWN from: {}", err); + break; + } + State::BridgeShutdown(err) => { + error!("SHUTDOWN from: {}", err); + break; + } + State::UpstreamShutdown(err) => { + error!("SHUTDOWN from: {}", err); + break; + } + State::UpstreamTryReconnect(err) => { + error!("SHUTDOWN from: {}", err); + + // wait a random amount of time between 0 and 3000ms + // if all the downstreams try to reconnect at the same time, the upstream may fail + let mut rng = rand::thread_rng(); + let wait_time = rng.gen_range(0..=3000); + tokio::time::sleep(Duration::from_millis(wait_time)).await; + + // kill all the tasks + let task_collector_aborting = task_collector_.clone(); + kill_tasks(task_collector_aborting.clone()); + + warn!("Trying to reconnect to upstream"); + start( + tx_sv1_notify.clone(), + target.clone(), + tx_status.clone(), + task_collector_.clone(), + proxy_config.clone(), + ) + .await; + } + State::Healthy(msg) => { + info!("HEALTHY message: {}", msg); + } + } + } +} +fn kill_tasks(task_collector: Arc<Mutex<Vec<(AbortHandle, String)>>>) { + let _ = task_collector.safe_lock(|t| { + while let Some(handle) = t.pop() { + handle.0.abort(); + warn!("Killed task: {:?}", handle.1); + } + }); +} + +async fn start<'a>( + tx_sv1_notify: broadcast::Sender<server_to_client::Notify<'static>>, + target: Arc<Mutex<Vec<u8>>>, + tx_status: async_channel::Sender<Status<'static>>, + task_collector: Arc<Mutex<Vec<(AbortHandle, String)>>>, + proxy_config: ProxyConfig, +) { // Sender/Receiver to send a SV2 `SubmitSharesExtended` from the `Bridge` to the `Upstream` // (Sender<SubmitSharesExtended<'static>>, Receiver<SubmitSharesExtended<'static>>) let (tx_sv2_submit_shares_ext, rx_sv2_submit_shares_ext) = bounded(10); - // Sender/Receiver to send a SV2 `SetNewPrevHash` message from the `Upstream` to the `Bridge` - // (Sender<SetNewPrevHash<'static>>, Receiver<SetNewPrevHash<'static>>) - let (tx_sv2_set_new_prev_hash, rx_sv2_set_new_prev_hash) = bounded(10); + // `tx_sv1_bridge` sender is used by `Downstream` to send a `DownstreamMessages` message to + // `Bridge` via the `rx_sv1_downstream` receiver + // (Sender<DownstreamMessages>, Receiver<DownstreamMessages>) + let (tx_sv1_bridge, rx_sv1_downstream) = unbounded(); // Sender/Receiver to send a SV2 `NewExtendedMiningJob` message from the `Upstream` to the // `Bridge` @@ -69,13 +179,10 @@ // passed to the `Downstream` upon a Downstream role connection // (Sender<ExtendedExtranonce>, Receiver<ExtendedExtranonce>) let (tx_sv2_extranonce, rx_sv2_extranonce) = bounded(1); - let target = Arc::new(Mutex::new(vec![0; 32])); - // Sender/Receiver to send SV1 `mining.notify` message from the `Bridge` to the `Downstream` - let (tx_sv1_notify, _rx_sv1_notify): ( - broadcast::Sender<server_to_client::Notify<'static>>, - broadcast::Receiver<server_to_client::Notify<'static>>, - ) = broadcast::channel(10); + // Sender/Receiver to send a SV2 `SetNewPrevHash` message from the `Upstream` to the `Bridge` + // (Sender<SetNewPrevHash<'static>>, Receiver<SetNewPrevHash<'static>>) + let (tx_sv2_set_new_prev_hash, rx_sv2_set_new_prev_hash) = bounded(10); // Format `Upstream` connection address let upstream_addr = SocketAddr::new( @@ -85,7 +192,7 @@ ); let diff_config = Arc::new(Mutex::new(proxy_config.upstream_difficulty_config.clone())); - + let task_collector_upstream = task_collector.clone(); // Instantiate a new `Upstream` (SV2 Pool) let upstream = match upstream_sv2::Upstream::new(
upstream_addr, @@ -98,6 +205,7 @@ async fn main() { status::Sender::Upstream(tx_status.clone()), target.clone(), diff_config.clone(), + task_collector_upstream, ) .await { @@ -107,12 +215,12 @@ async fn main() { return; } }; - + let task_collector_init_task = task_collector.clone(); // Spawn a task to do all of this init work so that the main thread // can listen for signals and failures on the status channel. This // allows for the tproxy to fail gracefully if any of these init tasks //fail - task::spawn(async move { + let task = task::spawn(async move { // Connect to the SV2 Upstream role match upstream_sv2::Upstream::connect( upstream.clone(), @@ -152,6 +260,7 @@ async fn main() { async_std::task::sleep(std::time::Duration::from_millis(100)).await; } + let task_collector_bridge = task_collector_init_task.clone(); // Instantiate a new `Bridge` and begins handling incoming messages let b = proxy::Bridge::new( rx_sv1_downstream, @@ -163,6 +272,7 @@ async fn main() { extended_extranonce, target, up_id, + task_collector_bridge, ); proxy::Bridge::start(b.clone()); @@ -172,6 +282,7 @@ async fn main() { proxy_config.downstream_port, ); + let task_collector_downstream = task_collector_init_task.clone(); // Accept connections from one or more SV1 Downstream roles (SV1 Mining Devices) downstream_sv1::Downstream::accept_connections( downstream_addr, @@ -181,49 +292,8 @@ async fn main() { b, proxy_config.downstream_difficulty_config, diff_config, + task_collector_downstream, ); }); // End of init task - - debug!("Starting up signal listener"); - let mut interrupt_signal_future = Box::pin(tokio::signal::ctrl_c().fuse()); - debug!("Starting up status listener"); - - // Check all tasks if is_finished() is true, if so exit - loop { - let task_status = select! { - task_status = rx_status.recv().fuse() => task_status, - interrupt_signal = interrupt_signal_future => { - match interrupt_signal { - Ok(()) => { - info!("Interrupt received"); - }, - Err(err) => { - error!("Unable to listen for interrupt signal: {}", err); - // we also shut down in case of error - }, - } - break; - } - }; - let task_status: Status = task_status.unwrap(); - - match task_status.state { - // Should only be sent by the downstream listener - State::DownstreamShutdown(err) => { - error!("SHUTDOWN from: {}", err); - break; - } - State::BridgeShutdown(err) => { - error!("SHUTDOWN from: {}", err); - break; - } - State::UpstreamShutdown(err) => { - error!("SHUTDOWN from: {}", err); - break; - } - State::Healthy(msg) => { - info!("HEALTHY message: {}", msg); - } - } - } + let _ = task_collector.safe_lock(|t| t.push((task.abort_handle(), "init task".to_string()))); } diff --git a/rust-toolchain.toml b/rust-toolchain.toml deleted file mode 100644 index ecc22580b..000000000 --- a/rust-toolchain.toml +++ /dev/null @@ -1,4 +0,0 @@ -[toolchain] -channel = "1.75.0" -components = [ "rustfmt", "clippy" ] -profile = "minimal" diff --git a/build-on-all-workspaces.sh b/scripts/build-on-all-workspaces.sh similarity index 100% rename from build-on-all-workspaces.sh rename to scripts/build-on-all-workspaces.sh diff --git a/scripts/build_header.sh b/scripts/build_header.sh new file mode 100755 index 000000000..046723cca --- /dev/null +++ b/scripts/build_header.sh @@ -0,0 +1,16 @@ +#! 
/bin/sh +cargo install --version 0.20.0 cbindgen + +rm -f ./sv2.h +touch ./sv2.h + +dir=${1:-../protocols} + +cd "$dir" + cbindgen --crate const_sv2 >> ../scripts/sv2.h + cbindgen --crate binary_codec_sv2 >> ../scripts/sv2.h + cbindgen --crate common_messages_sv2 >> ../scripts/sv2.h + cbindgen --crate template_distribution_sv2 >> ../scripts/sv2.h + cbindgen --crate codec_sv2 >> ../scripts/sv2.h + cbindgen --crate sv2_ffi >> ../scripts/sv2.h +cd .. diff --git a/clippy-on-all-workspaces.sh b/scripts/clippy-on-all-workspaces.sh similarity index 100% rename from clippy-on-all-workspaces.sh rename to scripts/clippy-on-all-workspaces.sh diff --git a/code-coverage-report.sh b/scripts/code-coverage-report.sh similarity index 100% rename from code-coverage-report.sh rename to scripts/code-coverage-report.sh diff --git a/message-generator-tests.sh b/scripts/message-generator-tests.sh similarity index 100% rename from message-generator-tests.sh rename to scripts/message-generator-tests.sh diff --git a/mg-codecov-tests.sh b/scripts/mg-codecov-tests.sh similarity index 100% rename from mg-codecov-tests.sh rename to scripts/mg-codecov-tests.sh diff --git a/sv2-header-check.sh b/scripts/sv2-header-check.sh similarity index 96% rename from sv2-header-check.sh rename to scripts/sv2-header-check.sh index 6e0469143..b39b0af54 100755 --- a/sv2-header-check.sh +++ b/scripts/sv2-header-check.sh @@ -21,9 +21,10 @@ set -ex # cargo install cbindgen --force bts # cbindgen -V -cd ./protocols/v2/sv2-ffi +echo $PWD +cd protocols/v2/sv2-ffi SHA1_1=$(sha1sum sv2.h) -cd ../../.. +cd ../../../scripts BUILD_SCRIPT="./build_header.sh" sh ./"$BUILD_SCRIPT" diff --git a/sv2-publish.sh b/scripts/sv2-publish.sh similarity index 100% rename from sv2-publish.sh rename to scripts/sv2-publish.sh diff --git a/tarpaulin.sh b/scripts/tarpaulin.sh similarity index 100% rename from tarpaulin.sh rename to scripts/tarpaulin.sh diff --git a/test/config/jds-receive-solution-while-processing-declared-job/jds-config.toml b/test/config/jds-receive-solution-while-processing-declared-job/jds-config.toml new file mode 100644 index 000000000..576b35443 --- /dev/null +++ b/test/config/jds-receive-solution-while-processing-declared-job/jds-config.toml @@ -0,0 +1,20 @@ +# SRI Pool config +authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" +authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" +cert_validity_sec = 3600 + +# list of compressed or uncompressed pubkeys for coinbase payout (only supports 1 item in the array at this point) +coinbase_outputs = [ + { output_script_type = "P2WPKH", output_script_value = "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075" }, +] + +listen_jd_address = "127.0.0.1:34264" + +core_rpc_url = "" +core_rpc_port = 48332 +core_rpc_user = "" +core_rpc_pass = "" +# Time interval used for JDS mempool update +[mempool_update_interval] +unit = "secs" +value = 1 diff --git a/test/config/jds-setupconnection-flag-test/jds-config-with-async-support.toml b/test/config/jds-setupconnection-flag-test/jds-config-with-async-support.toml new file mode 100644 index 000000000..379fee561 --- /dev/null +++ b/test/config/jds-setupconnection-flag-test/jds-config-with-async-support.toml @@ -0,0 +1,23 @@ +# Async Job Support +async_mining_allowed = true + +# SRI Pool config +authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" +authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" +cert_validity_sec = 3600 + +# list of compressed or 
uncompressed pubkeys for coinbase payout (only supports 1 item in the array at this point) +coinbase_outputs = [ + { output_script_type = "P2WPKH", output_script_value = "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075" }, +] + +listen_jd_address = "127.0.0.1:34264" + +core_rpc_url = "" +core_rpc_port = 18332 +core_rpc_user = "" +core_rpc_pass = "" +# Time interval used for JDS mempool update +[mempool_update_interval] +unit = "secs" +value = 1 \ No newline at end of file diff --git a/test/config/jds-setupconnection-flag-test/jds-config-without-async-support.toml b/test/config/jds-setupconnection-flag-test/jds-config-without-async-support.toml new file mode 100644 index 000000000..c84655276 --- /dev/null +++ b/test/config/jds-setupconnection-flag-test/jds-config-without-async-support.toml @@ -0,0 +1,23 @@ +# Async Job Support +async_mining_allowed = false + +# SRI Pool config +authority_public_key = "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" +authority_secret_key = "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" +cert_validity_sec = 3600 + +# list of compressed or uncompressed pubkeys for coinbase payout (only supports 1 item in the array at this point) +coinbase_outputs = [ + { output_script_type = "P2WPKH", output_script_value = "036adc3bdf21e6f9a0f0fb0066bf517e5b7909ed1563d6958a10993849a7554075" }, +] + +listen_jd_address = "127.0.0.1:34264" + +core_rpc_url = "" +core_rpc_port = 18332 +core_rpc_user = "" +core_rpc_pass = "" +# Time interval used for JDS mempool update +[mempool_update_interval] +unit = "secs" +value = 1 \ No newline at end of file diff --git a/test/config/tproxy-config-no-jd-sv1-cpu-md.toml b/test/config/tproxy-config-no-jd-sv1-cpu-md.toml index fdca70399..7c90479f0 100644 --- a/test/config/tproxy-config-no-jd-sv1-cpu-md.toml +++ b/test/config/tproxy-config-no-jd-sv1-cpu-md.toml @@ -35,7 +35,7 @@ coinbase_reward_sat = 5_000_000_000 # Difficulty params [downstream_difficulty_config] # hashes/s of the weakest miner that will be connecting -min_individual_miner_hashrate=100_000.0 +min_individual_miner_hashrate=100.0 # minimum number of shares needed before a mining.set_difficulty is sent for updating targets miner_num_submits_before_update=5 # target number of shares per minute the miner should be sending diff --git a/test/message-generator/messages/common_messages.json b/test/message-generator/messages/common_messages.json index ff5d200dd..65cc32a9a 100644 --- a/test/message-generator/messages/common_messages.json +++ b/test/message-generator/messages/common_messages.json @@ -48,6 +48,22 @@ }, "id": "setup_connection_job_declarator" }, + { + "message": { + "type": "SetupConnection", + "protocol": 1, + "min_version": 2, + "max_version": 2, + "flags": 0, + "endpoint_host": "", + "endpoint_port": 0, + "vendor": "", + "hardware_version": "", + "firmware": "", + "device_id": "" + }, + "id": "setup_connection_job_declarator_with_no_async_flag" + }, { "message": { "type": "SetupConnectionSuccess", diff --git a/test/message-generator/messages/mining_messages.json b/test/message-generator/messages/mining_messages.json index fa7385ba6..79da299c5 100644 --- a/test/message-generator/messages/mining_messages.json +++ b/test/message-generator/messages/mining_messages.json @@ -1,5 +1,30 @@ { "mining_messages": [ + { + "message": { + "type": "NewExtendedMiningJob", + "job_id": 1, + "channel_id": 0, + "min_ntime": [], + "version": 536870912, + "version_rolling_allowed": true, + "merkle_path": [], + "coinbase_tx_prefix": [2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 36, 2, 3, 15, 0], + "coinbase_tx_suffix": [255, 255, 255, 255, 2, 149, 0, 0, 0, 0, 0, 0, 0, 67, 65, 4, 70, 109, 127, 202, 229, 99, 229, 203, 9, 160, 209, 135, 11, 181, 128, 52, 72, 4, 97, 120, 121, 161, 73, 73, 207, 34, 40, 95, 27, 174, 63, 39, 103, 40, 23, 108, 60, 100, 49, 248, 238, 218, 69, 56, 220, 55, 200, 101, 226, 120, 79, 58, 158, 119, 208, 68, 243, 62, 64, 119, 151, 225, 39, 138, 172, 0, 0, 0, 0, 0, 0, 0, 0, 38, 106, 36, 170, 33, 169, 237, 226, 246, 28, 63, 113, 209, 222, 253, 63, 169, 153, 223, 163, 105, 83, 117, 92, 105, 6, 137, 121, 153, 98, 180, 139, 235, 216, 54, 151, 78, 140, 249, 1, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + "id": "new_extended_mining_job" + }, + { + "message":{ + "type": "SetNewPrevHash", + "channel_id": 0, + "job_id": 1, + "prev_hash": [91, 30, 84, 205, 18, 124, 218, 102, 28, 163, 155, 204, 173, 55, 119, 61, 224, 199, 68, 229, 144, 22, 92, 0, 53, 44, 15, 204, 200, 245, 149, 0], + "min_ntime": 1679128496, + "nbits": 545259519 + }, + "id": "set_new_prev_hash" + }, { "message": { "type": "OpenExtendedMiningChannelSuccess", diff --git a/test/message-generator/mock/jdc-mock-flag-0-for-jds-setupconnection-tests.json b/test/message-generator/mock/jdc-mock-flag-0-for-jds-setupconnection-tests.json new file mode 100644 index 000000000..e80141297 --- /dev/null +++ b/test/message-generator/mock/jdc-mock-flag-0-for-jds-setupconnection-tests.json @@ -0,0 +1,46 @@ +{ + "version": "2", + "doc": [ + "This test does", + "Soft mock of JDC", + "Connect to JDS (where it doesn't matter if it supports async jobs or not)", + "Send SetupConnection with flag 0 (no async jobs) and expect SetupConnection.Success with flag 0" + ], + "frame_builders": [ + { + "type": "automatic", + "message_id": "test/message-generator/messages/common_messages.json::setup_connection_job_declarator_with_no_async_flag" + } + ], + "actions": [ + { + "message_ids": ["setup_connection_job_declarator_with_no_async_flag"], + "role": "client", + "results": [ + { + "type": "match_message_field", + "value": [ + "CommonMessages", + "SetupConnectionSuccess", + [ + [ + "flags", + { "U32": 0 } + ] + ] + ] + } + ], + "actiondoc": "This action sends SetupConnection and expects SetupConnection.Success with flag 0" + } + ], + "setup_commands": [], + "execution_commands": [], + "cleanup_commands": [], + "role": "client", + "downstream": { + "ip": "127.0.0.1", + "port": 34264, + "pub_key": "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" + } +} diff --git a/test/message-generator/mock/jdc-mock-flag-1-for-jds-setupconnection-with-async-support.json b/test/message-generator/mock/jdc-mock-flag-1-for-jds-setupconnection-with-async-support.json new file mode 100644 index 000000000..edbd83e13 --- /dev/null +++ b/test/message-generator/mock/jdc-mock-flag-1-for-jds-setupconnection-with-async-support.json @@ -0,0 +1,46 @@ +{ + "version": "2", + "doc": [ + "This test does", + "Soft mock of JDC", + "Connect to JDS (that supports async jobs)", + "Send SetupConnection with flag 1 (async jobs) and expect SetupConnection.Success with flag 1" + ], + "frame_builders": [ + { + "type": "automatic", + "message_id": "test/message-generator/messages/common_messages.json::setup_connection_job_declarator" + } + ], + "actions": [ + { + "message_ids": ["setup_connection_job_declarator"], + "role": "client", + "results": [ + { + "type": "match_message_field", + 
"value": [ + "CommonMessages", + "SetupConnectionSuccess", + [ + [ + "flags", + { "U32": 1 } + ] + ] + ] + } + ], + "actiondoc": "This action sends SetupConnection and expects SetupConnection.Success with flag 1" + } + ], + "setup_commands": [], + "execution_commands": [], + "cleanup_commands": [], + "role": "client", + "downstream": { + "ip": "127.0.0.1", + "port": 34264, + "pub_key": "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" + } +} diff --git a/test/message-generator/mock/jdc-mock-flag-1-for-jds-setupconnection-without-async-support.json b/test/message-generator/mock/jdc-mock-flag-1-for-jds-setupconnection-without-async-support.json new file mode 100644 index 000000000..eeaf19ca5 --- /dev/null +++ b/test/message-generator/mock/jdc-mock-flag-1-for-jds-setupconnection-without-async-support.json @@ -0,0 +1,40 @@ +{ + "version": "2", + "doc": [ + "This test does", + "Soft mock of JDC", + "Connect to JDS (that does not support async jobs)", + "Send SetupConnection with flag 1 (async jobs) and expect SetupConnection.Error" + ], + "frame_builders": [ + { + "type": "automatic", + "message_id": "test/message-generator/messages/common_messages.json::setup_connection_job_declarator" + } + ], + "actions": [ + { + "message_ids": ["setup_connection_job_declarator"], + "role": "client", + "results": [ + { + "type": "match_message_type", + "value": "0x02" + }, + { + "type": "close_connection" + } + ], + "actiondoc": "This action sends SetupConnection and expects SetupConnection.Error" + } + ], + "setup_commands": [], + "execution_commands": [], + "cleanup_commands": [], + "role": "client", + "downstream": { + "ip": "127.0.0.1", + "port": 34264, + "pub_key": "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" + } +} diff --git a/test/message-generator/mock/jdc-mock-jds-receive-solution-while-processing-declared-job.json b/test/message-generator/mock/jdc-mock-jds-receive-solution-while-processing-declared-job.json new file mode 100644 index 000000000..67148be87 --- /dev/null +++ b/test/message-generator/mock/jdc-mock-jds-receive-solution-while-processing-declared-job.json @@ -0,0 +1,146 @@ +{ + "version": "2", + "doc": [ + "This test does", + "Mock JDC", + "Send SetupConnection and await for SetupConnection.Success", + "Send AllocateMiningJobToken0 and await for AllocateMiningJobToken.Success0", + "Send AllocateMiningJobToken1 and await for AllocateMiningJobToken.Success1", + "Send DeclareMiningJob and await for ProvideMissingTransactions", + "Send SubmitSolution and assert that connection was sustained" + ], + "job_declaration_messages": [ + { + "message": { + "type": "AllocateMiningJobToken", + "user_identifier": "", + "request_id": 0, + "coinbase_tx_outputs": [] + }, + "id": "allocate_mining_job_token0" + }, + { + "message": { + "type": "AllocateMiningJobToken", + "user_identifier": "", + "request_id": 1, + "coinbase_tx_outputs": [] + }, + "id": "allocate_mining_job_token1" + }, + { + "message": { + "type": "DeclareMiningJob", + "request_id": 0, + "mining_job_token": [1, 0, 0, 0], + "version": 0, + "coinbase_prefix": [2, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 55, 2, 128, 121, 0, 83, 116, 114, 97, 116, 117, 109, 32, 118, 50, 32, 83, 82, 73, 32, 80, 111, 111, 108], + "coinbase_suffix": [255, 255, 255, 255, 2, 168, 247, 5, 42, 1, 0, 0, 0, 22, 0, 20, 235, 225, 183, 220, 194, 147, 204, 170, 14, 231, 67, 168, 111, 137, 223, 130, 88, 194, 8, 252, 0, 0, 0, 0, 0, 0, 0, 0, 38, 106, 36, 170, 33, 169, 237, 
226, 201, 13, 62, 213, 94, 164, 53, 216, 76, 246, 14, 110, 125, 255, 48, 66, 12, 220, 90, 217, 209, 75, 129, 37, 185, 117, 116, 254, 30, 81, 159, 1, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + "tx_short_hash_nonce": 0, + "tx_short_hash_list": [[95, 135, 113, 8, 147, 179]], + "tx_hash_list_hash": [133, 189, 184, 91, 252, 203, 225, 42, 233, 16, 77, 119, 76, 134, 93, 189, 192, 159, 221, 130, 150, 196, 18, 32, 54, 212, 138, 255, 57, 63, 118, 74], + "excess_data": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + "id": "declare_mining_job" + }, + { + "message": { + "type": "SubmitSolution", + "extranonce": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0], + "prev_hash": [184, 103, 138, 88, 153, 105, 236, 29, 123, 246, 107, 203, 1, 33, 10, 122, 188, 139, 218, 141, 62, 177, 158, 101, 125, 92, 214, 150, 199, 220, 29, 8], + "ntime": 0, + "nonce": 0, + "nbits": 0, + "version": 0 + }, + "id": "submit_solution" + } + ], + "frame_builders": [ + { + "type": "automatic", + "message_id": "test/message-generator/messages/common_messages.json::setup_connection_job_declarator" + }, + { + "type": "automatic", + "message_id": "allocate_mining_job_token0" + }, + { + "type": "automatic", + "message_id": "allocate_mining_job_token1" + }, + { + "type": "automatic", + "message_id": "declare_mining_job" + }, + { + "type": "automatic", + "message_id": "submit_solution" + } + ], + "actions": [ + { + "message_ids": ["setup_connection_job_declarator"], + "role": "client", + "results": [ + { + "type": "match_message_type", + "value": "0x01" + } + ], + "actiondoc": "This action sends SetupConnection and awaits a SetupConnection.Success" + }, + { + "message_ids": ["allocate_mining_job_token0"], + "role": "client", + "results": [ + { + "type": "match_message_type", + "value": "0x51" + } + ], + "actiondoc": "This action sends AllocateMiningJobToken0 and awaits an AllocateMiningJobToken.Success0" + }, + { + "message_ids": ["allocate_mining_job_token1"], + "role": "client", + "results": [ + { + "type": "match_message_type", + "value": "0x51" + } + ], + "actiondoc": "This action sends AllocateMiningJobToken1 and awaits an AllocateMiningJobToken.Success1" + }, + { + "message_ids": ["declare_mining_job"], + "role": "client", + "results": [ + { + "type": "match_message_type", + "value": "0x55" + } + ], + "actiondoc": "This action sends DeclareMiningJob and awaits a ProvideMissingTransactions" + }, + { + "message_ids": ["submit_solution"], + "role": "client", + "results": [ + { + "type": "sustain_connection" + } + ], + "actiondoc": "This action sends SubmitSolution and asserts that the connection was sustained" + } + ], + "setup_commands": [], + "execution_commands": [], + "cleanup_commands": [], + "role": "client", + "downstream": { + "ip": "127.0.0.1", + "port": 34264, + "pub_key": "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72" + } +} diff --git a/test/message-generator/mock/upstream-mock.json b/test/message-generator/mock/upstream-mock.json new file mode 100644 index 000000000..094d2dc67 --- /dev/null +++ b/test/message-generator/mock/upstream-mock.json @@ -0,0 +1,97 @@ +{ + "version": "2", + "doc": [ + "This test does", + "Mock an Upstream", + "Start listening on port 34254", + "Receive SetupConnection", + "Send SetupConnection.Success", + "Receive OpenExtendedMiningChannel", + "Send OpenExtendedMiningChannel.Success", + "Send 
NewExtendedMiningJob" + ], + "frame_builders": [ + { + "type": "automatic", + "message_id": "test/message-generator/messages/common_messages.json::setup_connection_success_tproxy" + }, + { + "type": "automatic", + "message_id": "test/message-generator/messages/mining_messages.json::open_extended_mining_channel_success" + }, + { + "type": "automatic", + "message_id": "test/message-generator/messages/mining_messages.json::set_custom_mining_job_success" + }, + { + "type": "automatic", + "message_id": "test/message-generator/messages/mining_messages.json::submit_shares_error" + }, + { + "type": "automatic", + "message_id": "test/message-generator/messages/mining_messages.json::new_extended_mining_job" + }, + { + "type": "automatic", + "message_id": "test/message-generator/messages/mining_messages.json::set_new_prev_hash" + } + ], + "actions": [ + { + "message_ids": [], + "role": "server", + "results": [ + { + "type": "match_message_type", + "value": "0x00" + } + ], + "actiondoc": "Checks that a SetupConnection message is received from Downstream" + }, + { + "message_ids": ["setup_connection_success_tproxy"], + "role": "server", + "results": [ + { + "type": "match_message_type", + "value": "0x13" + } + ], + "actiondoc": "Sends SetupConnection.Success to Downstream, then checks that a OpenExtendedMiningChannel is sent from Downstream" + }, + { + "message_ids": ["open_extended_mining_channel_success"], + "role": "server", + "results": [], + "actiondoc": "Sends OpenExtendedMiningChannel.Success to Downstream" + }, + { + "message_ids": ["new_extended_mining_job"], + "role": "server", + "results": [], + "actiondoc": "Sends NewExtendedMiningJob to Downstream" + }, + { + "message_ids": ["set_new_prev_hash"], + "role": "server", + "results": [], + "actiondoc": "Sends SetNewPrevHash to Downstream" + } + ], + "setup_commands": [], + "execution_commands": [], + "cleanup_commands": [ + { + "command": "sleep", + "args": ["10000"], + "conditions": "None" + } + ], + "role": "server", + "upstream": { + "ip": "127.0.0.1", + "port": 34254, + "pub_key": "9auqWEzQDVyd2oe1JVGFLMLHZtCo2FFqZwtKA5gd9xbuEu7PH72", + "secret_key": "mkDLTBBRxdBv998612qipDYoTK3YUrqLe8uWw7gu3iXbSrn2n" + } +} diff --git a/test/message-generator/test/jds-receive-solution-while-processing-declared-job/jds-receive-solution-while-processing-declared-job.json b/test/message-generator/test/jds-receive-solution-while-processing-declared-job/jds-receive-solution-while-processing-declared-job.json new file mode 100644 index 000000000..27807bb82 --- /dev/null +++ b/test/message-generator/test/jds-receive-solution-while-processing-declared-job/jds-receive-solution-while-processing-declared-job.json @@ -0,0 +1,79 @@ +{ + "version": "2", + "doc": [ + "This test does", + "Launch real JDS", + "Mock a JDC that sends SubmitSolution before ProvideMissingTransactionsSuccess" + ], + "frame_builders": [ + ], + "actions": [ + ], + "setup_commands": [ + { + "command": "cargo", + "args": [ + "llvm-cov", + "--no-report", + "run", + "-p", + "jd_server", + "--", + "-c", + "../test/config/jds-receive-solution-while-processing-declared-job/jds-config.toml" + ], + "conditions": { + "WithConditions": { + "conditions": [ + { + "output_string": "JD INITIALIZED", + "output_location": "StdOut", + "late_condition": false, + "condition": true + } + ], + "timer_secs": 300, + "warn_no_panic": false + } + } + }, + { + "command": "cargo", + "args": [ + "run", + "../../test/message-generator/mock/jdc-mock-jds-receive-solution-while-processing-declared-job.json" + ], + + 
"conditions": { + "WithConditions": { + "conditions": [ + { + "output_string": "TEST FAIL", + "output_location": "StdErr", + "condition": false, + "late_condition": false + } + ], + "timer_secs": 600, + "warn_no_panic": true + } + } + }, + { + "command": "sleep", + "args": ["1000"], + "conditions": "None" + } + ], + "execution_commands": [ + ], + "cleanup_commands": [ + { + "command": "pkill", + "args": ["-f", "jd_server", "-SIGINT"], + "late_condition": false, + "conditions": "None" + } + ], + "role": "none" +} diff --git a/test/message-generator/test/jds-receive-solution-while-processing-declared-job/jds-receive-solution-while-processing-declared-job.sh b/test/message-generator/test/jds-receive-solution-while-processing-declared-job/jds-receive-solution-while-processing-declared-job.sh new file mode 100755 index 000000000..39c8559ef --- /dev/null +++ b/test/message-generator/test/jds-receive-solution-while-processing-declared-job/jds-receive-solution-while-processing-declared-job.sh @@ -0,0 +1,9 @@ +cd roles +cargo llvm-cov --no-report -p jd_server + +cd ../utils/message-generator/ +cargo build + +RUST_LOG=debug cargo run ../../test/message-generator/test/jds-receive-solution-while-processing-declared-job/jds-receive-solution-while-processing-declared-job.json || { echo 'mg test failed' ; exit 1; } + +sleep 10 diff --git a/test/message-generator/test/jds-setupconnection-with-async-support/jds-setupconnection-with-async-support.json b/test/message-generator/test/jds-setupconnection-with-async-support/jds-setupconnection-with-async-support.json new file mode 100644 index 000000000..c0d781d69 --- /dev/null +++ b/test/message-generator/test/jds-setupconnection-with-async-support/jds-setupconnection-with-async-support.json @@ -0,0 +1,92 @@ +{ + "version": "2", + "doc": [ + "This test does", + "Launch the jd-server" + ], + "frame_builders": [ + ], + "actions": [ + ], + "setup_commands": [ + { + "command": "cargo", + "args": [ + "llvm-cov", + "--no-report", + "run", + "-p", + "jd_server", + "--", + "-c", + "../test/config/jds-setupconnection-flag-test/jds-config-with-async-support.toml" + ], + "conditions": { + "WithConditions": { + "conditions": [ + { + "output_string": "JD INITIALIZED", + "output_location": "StdOut", + "late_condition": false, + "condition": true + } + ], + "timer_secs": 300, + "warn_no_panic": false + } + } + }, + { + "command": "cargo", + "args": [ + "run", + "../../test/message-generator/mock/jdc-mock-flag-0-for-jds-setupconnection-tests.json" + ], + "conditions": { + "WithConditions": { + "conditions": [ + { + "output_string": "TEST FAILED", + "output_location": "StdErr", + "late_condition": false, + "condition": false + } + ], + "timer_secs": 600, + "warn_no_panic": false + } + } + }, + { + "command": "cargo", + "args": [ + "run", + "../../test/message-generator/mock/jdc-mock-flag-1-for-jds-setupconnection-with-async-support.json" + ], + "conditions": { + "WithConditions": { + "conditions": [ + { + "output_string": "TEST FAILED", + "output_location": "StdErr", + "late_condition": false, + "condition": false + } + ], + "timer_secs": 600, + "warn_no_panic": false + } + } + } + ], + "execution_commands": [ + ], + "cleanup_commands": [ + { + "command": "pkill", + "args": ["-f", "jd_server", "-SIGINT"], + "conditions": "None" + } + ], + "role": "none" +} diff --git a/test/message-generator/test/jds-setupconnection-with-async-support/jds-setupconnection-with-async-support.sh 
b/test/message-generator/test/jds-setupconnection-with-async-support/jds-setupconnection-with-async-support.sh new file mode 100755 index 000000000..87dcf97e9 --- /dev/null +++ b/test/message-generator/test/jds-setupconnection-with-async-support/jds-setupconnection-with-async-support.sh @@ -0,0 +1,9 @@ +cd roles +cargo llvm-cov --no-report -p jd_server + +cd ../utils/message-generator/ +cargo build + +RUST_LOG=debug cargo run ../../test/message-generator/test/jds-setupconnection-with-async-support/jds-setupconnection-with-async-support.json || { echo 'mg test failed' ; exit 1; } + +sleep 10 diff --git a/test/message-generator/test/jds-setupconnection-without-async-support/jds-setupconnection-without-async-support.json b/test/message-generator/test/jds-setupconnection-without-async-support/jds-setupconnection-without-async-support.json new file mode 100644 index 000000000..15eb2e978 --- /dev/null +++ b/test/message-generator/test/jds-setupconnection-without-async-support/jds-setupconnection-without-async-support.json @@ -0,0 +1,92 @@ +{ + "version": "2", + "doc": [ + "This test does", + "Launch the jd-server" + ], + "frame_builders": [ + ], + "actions": [ + ], + "setup_commands": [ + { + "command": "cargo", + "args": [ + "llvm-cov", + "--no-report", + "run", + "-p", + "jd_server", + "--", + "-c", + "../test/config/jds-setupconnection-flag-test/jds-config-without-async-support.toml" + ], + "conditions": { + "WithConditions": { + "conditions": [ + { + "output_string": "JD INITIALIZED", + "output_location": "StdOut", + "late_condition": false, + "condition": true + } + ], + "timer_secs": 300, + "warn_no_panic": false + } + } + }, + { + "command": "cargo", + "args": [ + "run", + "../../test/message-generator/mock/jdc-mock-flag-0-for-jds-setupconnection-tests.json" + ], + "conditions": { + "WithConditions": { + "conditions": [ + { + "output_string": "TEST FAILED", + "output_location": "StdErr", + "late_condition": false, + "condition": false + } + ], + "timer_secs": 600, + "warn_no_panic": false + } + } + }, + { + "command": "cargo", + "args": [ + "run", + "../../test/message-generator/mock/jdc-mock-flag-1-for-jds-setupconnection-without-async-support.json" + ], + "conditions": { + "WithConditions": { + "conditions": [ + { + "output_string": "TEST FAILED", + "output_location": "StdErr", + "late_condition": false, + "condition": false + } + ], + "timer_secs": 600, + "warn_no_panic": false + } + } + } + ], + "execution_commands": [ + ], + "cleanup_commands": [ + { + "command": "pkill", + "args": ["-f", "jd_server", "-SIGINT"], + "conditions": "None" + } + ], + "role": "none" +} diff --git a/test/message-generator/test/jds-setupconnection-without-async-support/jds-setupconnection-without-async-support.sh b/test/message-generator/test/jds-setupconnection-without-async-support/jds-setupconnection-without-async-support.sh new file mode 100755 index 000000000..151cec4ff --- /dev/null +++ b/test/message-generator/test/jds-setupconnection-without-async-support/jds-setupconnection-without-async-support.sh @@ -0,0 +1,9 @@ +cd roles +cargo llvm-cov --no-report -p jd_server + +cd ../utils/message-generator/ +cargo build + +RUST_LOG=debug cargo run ../../test/message-generator/test/jds-setupconnection-without-async-support/jds-setupconnection-without-async-support.json || { echo 'mg test failed' ; exit 1; } + +sleep 10 diff --git a/test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.json b/test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.json 
new file mode 100644 index 000000000..79833f2c0 --- /dev/null +++ b/test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.json @@ -0,0 +1,105 @@ +{ + "version": "1", + "doc": [ + "This test does", + "Mock an Upstream", + "Run tProxy", + "tProxy receives NewExtendedMiningJob (with job_id = 1)", + "tProxy receives mining.authorize", + "tProxy sends mining.notify", + "tProxy receives mining.submit (old/invalid share with job_id=0)" + ], + "sv1_messages": [ + { + "message": { + "id": 1, + "method": "mining.authorize", + "params": ["username", "password"] + }, + "id": "mining.authorize" + }, + { + "message": { + "id": 0, + "method": "mining.submit", + "params": ["username", "0", "0000000000000000", "641577b0", "7a600640"] + }, + "id": "mining.submit" + } + ], + "frame_builders": [ + ], + "actions": [ + { + "message_ids": ["mining.authorize"], + "results": [ + { + "type": "match_message_id", + "value": 1 + } + ] + }, + { + "message_ids": ["mining.submit"], + "results": [ + { + "type": "match_message_field", + "value": [ + "mining.submit", + [ + [ + "result", + false + ] + ] + ] + } + ], + "actiondoc": "Checks that the mining.submit request (with wrong job_id) generates a response with false, indicating that the share was rejected" + } + ], + "setup_commands": [ + { + "command": "cargo", + "args": [ + "run", + "../../test/message-generator/mock/upstream-mock.json" + ], + "conditions": "None" + }, + { + "command": "cargo", + "args": [ + "run", + "-p", + "translator_sv2", + "--", + "-c", + "../test/config/tproxy-config-no-jd-sv1-cpu-md.toml" + ], + "conditions": { + "WithConditions": { + "conditions": [ + { + "output_string": "", + "output_location": "StdOut", + "late_condition": false, + "condition": true + } + ], + "timer_secs": 260, + "warn_no_panic": false + } + } + } + ], + "execution_commands": [ + ], + "cleanup_commands": [ + ], + "role": "client", + "downstream": { + "ip": "0.0.0.0", + "port": 34255 + } +} diff --git a/test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.sh b/test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.sh new file mode 100755 index 000000000..30149552d --- /dev/null +++ b/test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.sh @@ -0,0 +1,10 @@ +cd roles + +cargo build -p translator_sv2 + +cd ../utils/message-generator/ +cargo build + +RUST_LOG=debug cargo run ../../test/message-generator/test/translation-proxy-old-share/translation-proxy-old-share.json || { echo 'mg test failed' ; exit 1; } + +sleep 10 diff --git a/utils/Cargo.lock b/utils/Cargo.lock index 3c7e5d36c..6c368c956 100644 --- a/utils/Cargo.lock +++ b/utils/Cargo.lock @@ -167,7 +167,7 @@ dependencies = [ [[package]] name = "buffer_sv2" -version = "1.0.0" +version = "1.1.0" dependencies = [ "aes-gcm", "criterion", @@ -190,9 +190,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.97" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" +checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" [[package]] name = "cfg-if" @@ -287,9 +287,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crypto-common" @@ -343,9 +343,9 @@ dependencies = [ [[package]] name = "either" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" [[package]] name = "error_handling" @@ -448,7 +448,7 @@ dependencies = [ [[package]] name = "key-utils" -version = "1.0.0" +version = "1.1.0" dependencies = [ "bs58", "secp256k1 0.28.2", @@ -464,9 +464,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.154" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "log" @@ -509,9 +509,9 @@ checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "plotters" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" dependencies = [ "num-traits", "plotters-backend", @@ -522,15 +522,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" +checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" [[package]] name = "plotters-svg" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" dependencies = [ "plotters-backend", ] @@ -555,9 +555,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" dependencies = [ "unicode-ident", ] @@ -705,9 +705,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.201" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" dependencies = [ "serde_derive", ] @@ -724,13 +724,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.201" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.62", + "syn 2.0.66", ] [[package]] @@ -794,9 +794,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.62" +version = "2.0.66" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f660c3bfcefb88c538776b6685a0c472e3128b51e74d48793dc2a488196e8eb" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" dependencies = [ "proc-macro2", "quote", @@ -902,7 +902,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.62", + "syn 2.0.66", "wasm-bindgen-shared", ] @@ -924,7 +924,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.62", + "syn 2.0.66", "wasm-bindgen-backend", "wasm-bindgen-shared", ] diff --git a/utils/Cargo.toml b/utils/Cargo.toml index 194360857..aa22ee1ae 100644 --- a/utils/Cargo.toml +++ b/utils/Cargo.toml @@ -2,7 +2,6 @@ name = "stratum_v2_utils" version = "1.0.0" authors = ["The Stratum v2 Developers"] edition = "2021" -rust-version = "1.75.0" description = "The Stratum protocol defines how miners, proxies, and pools communicate to contribute hashrate to the Bitcoin network. Stratum v2 is a robust set of primitives which anyone can use to expand the protocol or implement a role." documentation = "https://github.com/stratum-mining/stratum" readme = "README.md" diff --git a/utils/buffer/Cargo.toml b/utils/buffer/Cargo.toml index 798ba3c63..b3a8f543d 100644 --- a/utils/buffer/Cargo.toml +++ b/utils/buffer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "buffer_sv2" -version = "1.0.0" +version = "1.1.0" authors = ["fi3 "] edition = "2018" description = "buffer" diff --git a/utils/buffer/src/buffer.rs b/utils/buffer/src/buffer.rs index 9e611969e..f4b0dab50 100644 --- a/utils/buffer/src/buffer.rs +++ b/utils/buffer/src/buffer.rs @@ -70,6 +70,11 @@ impl Buffer for BufferFromSystemMemory { fn danger_set_start(&mut self, index: usize) { self.start = index; } + + #[inline] + fn is_droppable(&self) -> bool { + true + } } #[cfg(test)] @@ -101,6 +106,9 @@ impl Buffer for TestBufferFromMemory { fn danger_set_start(&mut self, _index: usize) { todo!() } + fn is_droppable(&self) -> bool { + true + } } impl AsRef<[u8]> for BufferFromSystemMemory { diff --git a/utils/buffer/src/buffer_pool/mod.rs b/utils/buffer/src/buffer_pool/mod.rs index a28fa46de..eae9c41ab 100644 --- a/utils/buffer/src/buffer_pool/mod.rs +++ b/utils/buffer/src/buffer_pool/mod.rs @@ -673,12 +673,25 @@ impl Buffer for BufferPool { fn danger_set_start(&mut self, index: usize) { self.start = index; } + + #[inline(always)] + fn is_droppable(&self) -> bool { + self.shared_state.load(Ordering::Relaxed) == 0 + } } #[cfg(not(test))] impl Drop for BufferPool { fn drop(&mut self) { - while self.shared_state.load(Ordering::Relaxed) != 0 {} + while self.shared_state.load(Ordering::Relaxed) != 0 { + std::hint::spin_loop(); + } + } +} + +impl BufferPool { + pub fn droppable(&self) -> bool { + self.shared_state.load(Ordering::Relaxed) == 0 } } diff --git a/utils/buffer/src/lib.rs b/utils/buffer/src/lib.rs index e184422d6..be1d3f8c3 100644 --- a/utils/buffer/src/lib.rs +++ b/utils/buffer/src/lib.rs @@ -85,4 +85,5 @@ pub trait Buffer { fn is_empty(&self) -> bool { self.len() == 0 } + fn is_droppable(&self) -> bool; } diff --git a/utils/message-generator/Cargo.toml b/utils/message-generator/Cargo.toml index b1a8a119d..f97d74f0d 100644 --- a/utils/message-generator/Cargo.toml +++ b/utils/message-generator/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" async-channel = "1.8.0" binary_sv2 = { version = "1.0.0", path = "../../protocols/v2/binary-sv2/binary-sv2", features = ["with_serde"] } codec_sv2 = { version = "1.0.0", path = 
"../../protocols/v2/codec-sv2", features = ["noise_sv2","with_buffer_pool","with_serde"] } -const_sv2 = { version = "1.0.0", path = "../../protocols/v2/const-sv2" } +const_sv2 = { version = "2.0.0", path = "../../protocols/v2/const-sv2" } load_file = "1.0.1" network_helpers_sv2 = { version = "2.0.0", path = "../../roles/roles-utils/network-helpers", features = ["with_tokio","with_serde"] } roles_logic_sv2 = { version = "1.0.0", path = "../../protocols/v2/roles-logic-sv2", features = ["with_serde"] } diff --git a/utils/message-generator/src/executor.rs b/utils/message-generator/src/executor.rs index b69bce0e6..22843e03a 100644 --- a/utils/message-generator/src/executor.rs +++ b/utils/message-generator/src/executor.rs @@ -7,7 +7,7 @@ use crate::{ }; use async_channel::{Receiver, Sender}; use binary_sv2::Serialize; -use codec_sv2::{Frame, StandardEitherFrame as EitherFrame, Sv2Frame}; +use codec_sv2::{StandardEitherFrame as EitherFrame, Sv2Frame}; use roles_logic_sv2::parsers::{self, AnyMessage}; use std::{collections::HashMap, convert::TryInto, sync::Arc}; @@ -199,35 +199,21 @@ impl Executor { result ); - // If the connection should drop at this point then let's just break the loop - // Can't do anything else after the connection drops. - if *result == ActionResult::CloseConnection { - info!( - "Waiting 1 sec to make sure that remote have time to close the connection" - ); - tokio::time::sleep(std::time::Duration::from_millis(1000)).await; - recv.recv() - .await - .expect_err("Expecting the connection to be closed: wasn't"); - success = true; - break; - } - - let message = match recv.recv().await { - Ok(message) => message, - Err(_) => { - success = false; - error!("Connection closed before receiving the message"); - break; - } - }; - - let mut message: Sv2Frame, _> = message.try_into().unwrap(); - debug!("RECV {:#?}", message); - let header = message.get_header().unwrap(); - let payload = message.payload(); match result { ActionResult::MatchMessageType(message_type) => { + let message = match recv.recv().await { + Ok(message) => message, + Err(_) => { + success = false; + error!("Connection closed before receiving the message"); + break; + } + }; + + let message: Sv2Frame, _> = message.try_into().unwrap(); + debug!("RECV {:#?}", message); + let header = message.get_header().unwrap(); + if header.msg_type() != *message_type { error!( "WRONG MESSAGE TYPE expected: {} received: {}", @@ -245,6 +231,20 @@ impl Executor { message_type, field_data, // Vec<(String, Sv2Type)> )) => { + let message = match recv.recv().await { + Ok(message) => message, + Err(_) => { + success = false; + error!("Connection closed before receiving the message"); + break; + } + }; + + let mut message: Sv2Frame, _> = + message.try_into().unwrap(); + debug!("RECV {:#?}", message); + let header = message.get_header().unwrap(); + let payload = message.payload(); if subprotocol.as_str() == "CommonMessages" { match (header.msg_type(), payload).try_into() { Ok(roles_logic_sv2::parsers::CommonMessages::SetupConnection(m)) => { @@ -532,6 +532,20 @@ impl Executor { message_type: _, fields, } => { + let message = match recv.recv().await { + Ok(message) => message, + Err(_) => { + success = false; + error!("Connection closed before receiving the message"); + break; + } + }; + + let mut message: Sv2Frame, _> = + message.try_into().unwrap(); + debug!("RECV {:#?}", message); + let header = message.get_header().unwrap(); + let payload = message.payload(); if subprotocol.as_str() == "CommonMessages" { match (header.msg_type(), 
payload).try_into() { Ok(parsers::CommonMessages::SetupConnection(m)) => { @@ -730,6 +744,19 @@ impl Executor { }; } ActionResult::MatchMessageLen(message_len) => { + let message = match recv.recv().await { + Ok(message) => message, + Err(_) => { + success = false; + error!("Connection closed before receiving the message"); + break; + } + }; + + let mut message: Sv2Frame<AnyMessage<'static>, _> = + message.try_into().unwrap(); + debug!("RECV {:#?}", message); + let payload = message.payload(); if payload.len() != *message_len { error!( "WRONG MESSAGE len expected: {} received: {}", @@ -741,6 +768,18 @@ } } ActionResult::MatchExtensionType(ext_type) => { + let message = match recv.recv().await { + Ok(message) => message, + Err(_) => { + success = false; + error!("Connection closed before receiving the message"); + break; + } + }; + + let message: Sv2Frame<AnyMessage<'static>, _> = message.try_into().unwrap(); + debug!("RECV {:#?}", message); + let header = message.get_header().unwrap(); if header.ext_type() != *ext_type { error!( "WRONG EXTENSION TYPE expected: {} received: {}", @@ -752,7 +791,26 @@ } } ActionResult::CloseConnection => { - todo!() + info!( + "Waiting 1 sec to make sure that the remote has time to close the connection" + ); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + if !recv.is_closed() { + error!("Expected connection to close, but it didn't. Test failed."); + success = false; + break; + } + } + ActionResult::SustainConnection => { + info!( + "Waiting 1 sec to make sure that the remote keeps the connection open" + ); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + if recv.is_closed() { + error!("Expected connection to be sustained, but it was closed. Test failed."); + success = false; + break; + } } ActionResult::None => todo!(), } diff --git a/utils/message-generator/src/main.rs b/utils/message-generator/src/main.rs index 695b3919c..327d50cbc 100644 --- a/utils/message-generator/src/main.rs +++ b/utils/message-generator/src/main.rs @@ -191,6 +191,7 @@ enum ActionResult { MatchMessageLen(usize), MatchExtensionType(u16), CloseConnection, + SustainConnection, None, } @@ -225,6 +226,7 @@ impl std::fmt::Display for ActionResult { write!(f, "MatchExtensionType: {}", extension_type) } ActionResult::CloseConnection => write!(f, "Close connection"), + ActionResult::SustainConnection => write!(f, "Sustain connection"), ActionResult::GetMessageField { subprotocol, fields, @@ -451,7 +453,7 @@ mod test { into_static::into_static, net::{setup_as_downstream, setup_as_upstream}, }; - use codec_sv2::{Frame, Sv2Frame}; + use codec_sv2::Sv2Frame; use roles_logic_sv2::{ mining_sv2::{ CloseChannel, NewExtendedMiningJob, OpenExtendedMiningChannel, diff --git a/utils/message-generator/src/parser/actions.rs b/utils/message-generator/src/parser/actions.rs index ce84c7adf..23bf6188b 100644 --- a/utils/message-generator/src/parser/actions.rs +++ b/utils/message-generator/src/parser/actions.rs @@ -91,6 +91,7 @@ impl Sv2ActionParser { "close_connection" => { action_results.push(ActionResult::CloseConnection); } + "sustain_connection" => action_results.push(ActionResult::SustainConnection), "none" => { action_results.push(ActionResult::None); } diff --git a/utils/message-generator/src/parser/frames.rs b/utils/message-generator/src/parser/frames.rs index 633163200..cd4c2c582 100644 --- a/utils/message-generator/src/parser/frames.rs +++ b/utils/message-generator/src/parser/frames.rs @@ -1,5 +1,5 @@ use super::sv2_messages::{message_from_path, ReplaceField}; -use 
codec_sv2::{buffer_sv2::Slice, Frame as _Frame, Sv2Frame}; +use codec_sv2::{buffer_sv2::Slice, Sv2Frame}; use roles_logic_sv2::parsers::AnyMessage; use serde_json::{Map, Value}; use std::{collections::HashMap, convert::TryInto}; diff --git a/utils/message-generator/src/parser/sv2_messages.rs b/utils/message-generator/src/parser/sv2_messages.rs index 4919fc324..091d0c58c 100644 --- a/utils/message-generator/src/parser/sv2_messages.rs +++ b/utils/message-generator/src/parser/sv2_messages.rs @@ -372,6 +372,8 @@ pub enum JobDeclaration<'a> { ProvideMissingTransactions(ProvideMissingTransactions<'a>), #[serde(borrow)] ProvideMissingTransactionsSuccess(ProvideMissingTransactionsSuccess<'a>), + #[serde(borrow)] + SubmitSolution(SubmitSolutionJd<'a>), } impl<'a> From<JobDeclaration<'a>> for roles_logic_sv2::parsers::JobDeclaration<'a> { @@ -390,6 +392,7 @@ impl<'a> From<JobDeclaration<'a>> for roles_logic_sv2::parsers::JobDeclaration<'a> { JobDeclaration::ProvideMissingTransactionsSuccess(m) => { Self::ProvideMissingTransactionsSuccess(m) } + JobDeclaration::SubmitSolution(m) => Self::SubmitSolution(m), } } }
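
A note on the buffer_sv2 change above: the new `is_droppable` exposes exactly the condition that the spin-loop `Drop` waits on, so callers can test for safe teardown without blocking. A minimal, self-contained sketch of the same pattern, using only std; `Pool` and its `shared_state` here are illustrative stand-ins, not the buffer_sv2 API:

```rust
use std::sync::{
    atomic::{AtomicU8, Ordering},
    Arc,
};

// Illustrative stand-in for BufferPool's shared state: non-zero while any
// slice handed out by the pool is still alive.
struct Pool {
    shared_state: Arc<AtomicU8>,
}

impl Pool {
    // Mirrors the new `is_droppable`: true only when no outstanding slice
    // still references the pool's memory.
    fn is_droppable(&self) -> bool {
        self.shared_state.load(Ordering::Relaxed) == 0
    }
}

impl Drop for Pool {
    fn drop(&mut self) {
        // Mirrors the patched Drop: still a busy-wait, but it now yields to
        // the CPU via spin_loop() instead of a hot empty loop.
        while self.shared_state.load(Ordering::Relaxed) != 0 {
            std::hint::spin_loop();
        }
    }
}

fn main() {
    let state = Arc::new(AtomicU8::new(1)); // one slice outstanding
    let pool = Pool { shared_state: state.clone() };
    assert!(!pool.is_droppable());

    state.store(0, Ordering::Relaxed); // the slice is released
    assert!(pool.is_droppable());
    // `pool` now drops without spinning.
}
```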
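On the executor.rs change: the receive-and-decode preamble now has to live inside each `ActionResult` arm, because `close_connection` and `sustain_connection` must inspect the connection without consuming a frame. If a follow-up wanted to deduplicate the five copies, a hedged sketch of one way to factor the preamble out, using simplified stand-in types (`Frame` and `next_frame` are illustrative, not the real `Sv2Frame`/`EitherFrame` machinery):

```rust
// Simplified model of the executor loop: a frame source plus the subset of
// result kinds that actually read a frame.
struct Frame {
    msg_type: u8,
    ext_type: u16,
}

enum ActionResult {
    MatchMessageType(u8),
    MatchExtensionType(u16),
}

// The repeated "recv or report a closed connection" preamble, written once.
// `pop()` stands in for `recv.recv().await` plus the frame decode.
fn next_frame(source: &mut Vec<Frame>) -> Result<Frame, String> {
    source
        .pop()
        .ok_or_else(|| "Connection closed before receiving the message".to_string())
}

fn run(results: &[ActionResult], mut source: Vec<Frame>) -> Result<(), String> {
    for result in results {
        match result {
            ActionResult::MatchMessageType(expected) => {
                let frame = next_frame(&mut source)?;
                if frame.msg_type != *expected {
                    return Err(format!(
                        "WRONG MESSAGE TYPE expected: {} received: {}",
                        expected, frame.msg_type
                    ));
                }
            }
            ActionResult::MatchExtensionType(expected) => {
                let frame = next_frame(&mut source)?;
                if frame.ext_type != *expected {
                    return Err(format!(
                        "WRONG EXTENSION TYPE expected: {} received: {}",
                        expected, frame.ext_type
                    ));
                }
            }
        }
    }
    Ok(())
}

fn main() {
    let frames = vec![Frame { msg_type: 0x01, ext_type: 0 }];
    assert!(run(&[ActionResult::MatchMessageType(0x01)], frames).is_ok());
}
```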
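And on the `close_connection` / `sustain_connection` semantics: both arms wait a one-second grace period and then check only whether the receiver is closed, which is what lets `sustain_connection` assert that the JDS kept the connection alive after `SubmitSolution`. A small model of that check, assuming `async-channel` and `tokio` (with the `macros`, `rt`, and `time` features) as dependencies; `expect_closed` is an illustrative helper, not part of the message generator:

```rust
use std::time::Duration;

// Models the executor's CloseConnection / SustainConnection checks: wait a
// grace period, then inspect the channel state without consuming a message.
async fn expect_closed(recv: &async_channel::Receiver<u8>) -> bool {
    tokio::time::sleep(Duration::from_secs(1)).await;
    recv.is_closed()
}

#[tokio::main]
async fn main() {
    let (tx, rx) = async_channel::unbounded::<u8>();

    // sustain_connection: the sender is still alive, so the channel is open.
    assert!(!expect_closed(&rx).await);

    // close_connection: dropping the only sender closes the channel.
    drop(tx);
    assert!(expect_closed(&rx).await);
}
```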