Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Setup Initial Jenkins Git Structure #744

Merged
merged 38 commits into from
Jun 21, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
38 commits
Select commit Hold shift + click to select a range
7e6f30e
Add in default jenkinsFile
lewijacn Jun 17, 2024
14ad3a9
Test 2
lewijacn Jun 17, 2024
2fea84a
Test 3
lewijacn Jun 17, 2024
dcf9096
Update agent
lewijacn Jun 17, 2024
a63765e
Load pipeline
lewijacn Jun 17, 2024
ab555d6
Load pipeline 2
lewijacn Jun 17, 2024
97dd360
Add shared library
lewijacn Jun 19, 2024
dd676fd
Update agent default pipeline
lewijacn Jun 19, 2024
b34da81
Shared library changes
lewijacn Jun 19, 2024
3a9d6fc
Update shared lib
lewijacn Jun 19, 2024
f5226c1
Test parameters
lewijacn Jun 19, 2024
6c5c2c4
Update vars
lewijacn Jun 19, 2024
1a98a1c
Update vars
lewijacn Jun 19, 2024
708df68
Test shared lib
lewijacn Jun 19, 2024
3f6b900
Test shared lib
lewijacn Jun 19, 2024
1abb6a4
Test shared lib
lewijacn Jun 19, 2024
24c0a37
Test shared lib
lewijacn Jun 19, 2024
63eae8a
Test shared lib
lewijacn Jun 19, 2024
81563a1
Test shared lib
lewijacn Jun 19, 2024
ff62416
Test shared lib
lewijacn Jun 19, 2024
3c7ad9d
Test shared lib
lewijacn Jun 19, 2024
ff1d9ac
Update pipeline files for main
lewijacn Jun 19, 2024
13865ac
Merge remote-tracking branch 'origin/main' into checkin-jenkinsfile
lewijacn Jun 19, 2024
958fe73
Update comments
lewijacn Jun 19, 2024
6d5a519
Update for testing
lewijacn Jun 19, 2024
94071fc
Update backfill test
lewijacn Jun 19, 2024
ca798d5
Common functions added to console library
lewijacn Jun 20, 2024
b08cdc1
Minor fixes
lewijacn Jun 20, 2024
32dd6f7
PR feedback part 1
lewijacn Jun 21, 2024
9b5993a
PR feedback part 2
lewijacn Jun 21, 2024
8a3013c
Merge remote-tracking branch 'origin/main' into checkin-jenkinsfile
lewijacn Jun 21, 2024
02d9551
Remove user string
lewijacn Jun 21, 2024
93563ed
Lint fixes
lewijacn Jun 21, 2024
fc62873
Add basic cli test cases
lewijacn Jun 21, 2024
14c4cee
Lint fixes
lewijacn Jun 21, 2024
cc11974
Merge remote-tracking branch 'origin/main' into checkin-jenkinsfile
lewijacn Jun 21, 2024
2ca87d0
PR feedback adjustments
lewijacn Jun 21, 2024
fd9ba8d
Merge remote-tracking branch 'origin/main' into checkin-jenkinsfile
lewijacn Jun 21, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@ services:
# this is a convenience thing for testing -- it should be removed before this makes it to prod.
- ./lib/console_link:/root/lib/console_link
environment:
# Copy local AWS env variables to Docker container
# Copy local AWS env to Docker container
#- ~/.aws:/root/.aws
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
- AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,48 @@
click.echo(logic_clusters.cat_indices(ctx.env.target_cluster))


@cluster_group.command(name="connection-check")
@click.pass_obj
def connection_check_cmd(ctx):
    """Checks if a connection can be established to source and target clusters"""
    # Report both clusters in a fixed order: source first, then target.
    for label, target in (("SOURCE CLUSTER", ctx.env.source_cluster),
                          ("TARGET CLUSTER", ctx.env.target_cluster)):
        click.echo(label)
        click.echo(logic_clusters.connection_check(target))


@cluster_group.command(name="run-test-benchmarks")
@click.pass_obj
def run_test_benchmarks_cmd(ctx):
    """Run a series of OpenSearch Benchmark workloads against the source cluster"""
    # Delegate to the logic layer and surface whatever it returns to the user.
    outcome = logic_clusters.run_test_benchmarks(ctx.env.source_cluster)
    click.echo(outcome)


@cluster_group.command(name="clear-indices")
@click.option("--acknowledge-risk", is_flag=True, show_default=True, default=False,
              help="Flag to acknowledge risk and skip confirmation")
@click.option('--cluster',
              type=click.Choice(['source', 'target'], case_sensitive=False),
              help="Cluster to perform clear indices action on",
              required=True)
@click.pass_obj
def clear_indices_cmd(ctx, acknowledge_risk, cluster):
    """[Caution] Clear indices on a source or target cluster"""
    cluster_name = cluster.lower()
    cluster_focus = ctx.env.source_cluster if cluster_name == 'source' else ctx.env.target_cluster
    # Fast path: the user explicitly accepted the risk up front, so skip the prompt.
    if acknowledge_risk:
        click.echo("Performing clear indices operation...")
        click.echo(logic_clusters.clear_indices(cluster_focus))
        return
    # Destructive operation: require interactive confirmation before proceeding.
    if click.confirm(f'Clearing indices WILL result in the loss of all data on the {cluster_name} cluster. '
                     f'Are you sure you want to continue?'):
        click.echo(f"Performing clear indices operation on {cluster_name} cluster...")
        click.echo(logic_clusters.clear_indices(cluster_focus))
    else:
        click.echo("Aborting command.")

# ##################### REPLAYER ###################


@cli.group(name="replayer")
@click.pass_obj
def replayer_group(ctx):
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,53 @@
from console_link.models.cluster import Cluster
from console_link.models.cluster import Cluster, HttpMethod
from dataclasses import dataclass
import logging

logger = logging.getLogger(__name__)


@dataclass
class ConnectionResult:
    """Outcome of a connectivity probe against a cluster (see connection_check)."""
    # Human-readable description of the attempt's result.
    connection_message: str
    # True when the cluster root endpoint responded successfully.
    connection_established: bool
    # Version string reported by the cluster; None when the connection failed.
    cluster_version: str


def cat_indices(cluster: Cluster, as_json=False):
    """Return the cluster's index listing via the _cat/indices API.

    :param cluster: the cluster to query
    :param as_json: when True, request JSON and return the parsed body;
        otherwise return the raw verbose (``?v``, with column headers) response bytes
    """
    # Removed a dead duplicate assignment left over from a merge: the suffix is
    # computed exactly once here.
    as_json_suffix = "?format=json" if as_json else "?v"
    cat_indices_path = f"/_cat/indices{as_json_suffix}"
    r = cluster.call_api(cat_indices_path)
    return r.json() if as_json else r.content


def connection_check(cluster: Cluster) -> ConnectionResult:
    """Probe the cluster root endpoint and report whether it is reachable.

    :param cluster: the cluster to probe
    :return: a ConnectionResult; on success it carries the version number
        parsed from the root endpoint response, on failure the version is None
    """
    cluster_details_path = "/"
    try:
        # Short timeout so an unreachable cluster fails fast.
        r = cluster.call_api(cluster_details_path, timeout=3)
    except Exception as e:
        # BUG FIX: use the module-level logger instead of the root logger
        # (the module defines `logger = logging.getLogger(__name__)`).
        logger.debug(f"Unable to access cluster: {cluster} with exception: {e}")
        return ConnectionResult(connection_message=f"Unable to connect to cluster with error: {e}",
                                connection_established=False,
                                cluster_version=None)
    response_json = r.json()
    return ConnectionResult(connection_message="Successfully connected!",
                            connection_established=True,
                            cluster_version=response_json['version']['number'])


def run_test_benchmarks(cluster: Cluster):
    """Execute the standard set of OpenSearch Benchmark test workloads against the cluster."""
    # Run the fixed workload suite in a deterministic order.
    for workload_name in ("geonames", "http_logs", "nested", "nyc_taxis"):
        cluster.execute_benchmark_workload(workload=workload_name)


# As a default we exclude system indices and searchguard indices
def clear_indices(cluster: Cluster):
    """[Destructive] Delete all indices on the cluster except system (.*),
    searchguard*, and sg7* indices.

    :param cluster: the cluster whose indices will be deleted
    :return: the raw response body from the DELETE call
    """
    # Wildcard delete with exclusions for system and security-plugin indices.
    clear_indices_path = "/*,-.*,-searchguard*,-sg7*"
    response = cluster.call_api(clear_indices_path, method=HttpMethod.DELETE)
    return response.content
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from requests.auth import HTTPBasicAuth
from cerberus import Validator
import logging
import subprocess
from console_link.models.schema_tools import contains_one_of

requests.packages.urllib3.disable_warnings() # ignore: type
Expand Down Expand Up @@ -84,7 +85,7 @@
elif 'sigv4' in config:
self.auth_type = AuthMethod.SIGV4

def call_api(self, path, method: HttpMethod = HttpMethod.GET) -> requests.Response:
def call_api(self, path, method: HttpMethod = HttpMethod.GET, timeout=None) -> requests.Response:
"""
Calls an API on the cluster.
"""
Expand All @@ -105,7 +106,30 @@
f"{self.endpoint}{path}",
verify=(not self.allow_insecure),
auth=auth,
timeout=timeout
)
logger.debug(f"Cluster API call request: {r.request}")
r.raise_for_status()
return r

def execute_benchmark_workload(self, workload: str,
                               workload_params='target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,'
                                               'search_clients:1'):
    """Run an OpenSearch Benchmark workload in test mode against this cluster.

    :param workload: name of the benchmark workload (e.g. "geonames")
    :param workload_params: comma-separated key:value overrides passed to
        opensearch-benchmark's --workload-params
    :raises NotImplementedError: for auth configurations benchmark runs do not support
    """
    client_option_parts = []
    if not self.allow_insecure:
        client_option_parts.append("use_ssl:true,verify_certs:false")
    if self.auth_type == AuthMethod.BASIC_AUTH:
        if self.auth_details['password'] is not None:
            client_option_parts.append(f"basic_auth_user:{self.auth_details['username']},"
                                       f"basic_auth_password:{self.auth_details['password']}")
        else:
            raise NotImplementedError(f"Auth type {self.auth_type} with AWS Secret ARN is not currently supported "
                                      f"for executing benchmark workloads")
    elif self.auth_type == AuthMethod.SIGV4:
        raise NotImplementedError(f"Auth type {self.auth_type} is not currently supported for executing "
                                  f"benchmark workloads")
    # BUG FIX: the options were previously concatenated with += and no separator,
    # producing e.g. "...verify_certs:falsebasic_auth_user:..." when both SSL and
    # basic-auth options applied. Joining with "," keeps the option list well-formed.
    client_options = ",".join(client_option_parts)
    logger.info(f"Running opensearch-benchmark with '{workload}' workload")
    # NOTE(review): shell=True with interpolated credentials exposes the password on the
    # process command line (visible via `ps`); consider an argument list with shell=False.
    # Output intentionally streams to stdout so the user sees benchmark progress.
    subprocess.run(f"opensearch-benchmark execute-test --distribution-version=1.0.0 "
                   f"--target-host={self.endpoint} --workload={workload} --pipeline=benchmark-only --test-mode "
                   f"--kill-running-processes --workload-params={workload_params} "
                   f"--client-options={client_options}", shell=True)
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,44 @@ def test_cli_cluster_cat_indices(runner, env, mocker):
mock.assert_called()


def test_cli_cluster_connection_check(runner, env, mocker):
    """connection-check should probe both the source and target clusters."""
    mock = mocker.patch('console_link.logic.clusters.connection_check')
    result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'connection-check'],
                           catch_exceptions=True)
    assert result.exit_code == 0
    assert 'SOURCE CLUSTER' in result.output
    assert 'TARGET CLUSTER' in result.output
    # The command checks both clusters, so the logic function must run exactly twice.
    # (The old comment said "two times" but assert_called() only verified >= 1 call.)
    assert mock.call_count == 2


def test_cli_cluster_run_test_benchmarks(runner, env, mocker):
    """run-test-benchmarks should delegate exactly once to the logic layer."""
    benchmarks_mock = mocker.patch('console_link.logic.clusters.run_test_benchmarks')
    result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'run-test-benchmarks'],
                           catch_exceptions=True)
    assert result.exit_code == 0
    benchmarks_mock.assert_called_once()


def test_cli_cluster_clear_indices(runner, env, mocker):
    """With --acknowledge-risk the confirmation prompt is skipped and clear runs."""
    clear_mock = mocker.patch('console_link.logic.clusters.clear_indices')
    invoke_args = ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'clear-indices',
                   '--cluster', 'source', '--acknowledge-risk']
    result = runner.invoke(cli, invoke_args, catch_exceptions=True)
    clear_mock.assert_called_once()
    assert result.exit_code == 0


def test_cli_cluster_clear_indices_no_acknowledge(runner, env, mocker):
    """Without --acknowledge-risk (and no confirmation) the clear operation must not run."""
    clear_mock = mocker.patch('console_link.logic.clusters.clear_indices')
    invoke_args = ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'clear-indices',
                   '--cluster', 'source']
    runner.invoke(cli, invoke_args, catch_exceptions=True)
    clear_mock.assert_not_called()


def test_cli_with_metrics_get_data(runner, env, mocker):
mock = mocker.patch('console_link.models.metrics_source.PrometheusMetricsSource.get_metrics')
result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'metrics', 'list'],
Expand All @@ -69,10 +107,10 @@ def test_cli_with_backfill_describe(runner, env, mocker):

def test_cli_snapshot_create(runner, env, mocker):
mock = mocker.patch('console_link.logic.snapshot.create')

# Set the mock return value
mock.return_value = SnapshotStatus.COMPLETED, "Snapshot created successfully."

# Test snapshot creation
result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'snapshot', 'create'],
catch_exceptions=True)
Expand All @@ -87,16 +125,16 @@ def test_cli_snapshot_create(runner, env, mocker):
@pytest.mark.skip(reason="Not implemented yet")
def test_cli_snapshot_status(runner, env, mocker):
    """snapshot status should surface the status string returned by the logic layer."""
    status_mock = mocker.patch('console_link.logic.snapshot.status')
    # Stub the logic layer to report a completed snapshot.
    status_mock.return_value = SnapshotStatus.COMPLETED, "Snapshot status: COMPLETED"
    result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'snapshot', 'status'],
                           catch_exceptions=True)
    assert result.exit_code == 0
    assert "Snapshot status: COMPLETED" in result.output
    status_mock.assert_called_once()
Expand Down Expand Up @@ -124,7 +162,7 @@ def test_cli_cat_indices_e2e(runner, env):
text=target_cat_indices)
result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'cat-indices'],
catch_exceptions=True)

assert result.exit_code == 0
assert 'SOURCE CLUSTER' in result.output
assert 'TARGET CLUSTER' in result.output
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ export class MigrationConsoleStack extends MigrationServiceCore {
let servicePortMappings: PortMapping[]|undefined
let serviceDiscoveryPort: number|undefined
let serviceDiscoveryEnabled = false
let imageCommand: string[]|undefined
let imageCommand = ['/bin/sh', '-c', '/root/loadServicesFromParameterStore.sh']

const osClusterEndpoint = getMigrationStringParameterValue(this, {
...props,
Expand Down
70 changes: 70 additions & 0 deletions jenkins/migrationIntegPipelines/ec2SourceE2EPipeline.groovy
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
// Jenkins pipeline: end-to-end EC2-source integration test for opensearch-migrations.
// Note:
// 1. We are using an existing common VPC that we provide through a 'vpcId' parameter on the pipeline for now until we move
// to a proper Jenkins accounts and can create a setup without public subnets as well as request an extension to allow more than 5 VPCs per region
// 2. There is still a manual step needed on the EC2 source load balancer to replace its security group rule which allows all traffic (0.0.0.0/0) to
// allow traffic for the relevant service security group. This needs a better story around accepting user security groups in our Migration CDK.

// Identifiers selecting the CDK context blocks below and the deployment stage name.
def sourceContextId = 'source-single-node-ec2'
def migrationContextId = 'migration-default'
def gitUrl = 'https://github.com/opensearch-project/opensearch-migrations.git'
def gitBranch = 'main'
def stageId = 'aws-integ'
// CDK context for the single-node Elasticsearch 7.10.2 source on EC2 (capture proxy enabled).
// NOTE(review): '$vpcId' is assumed to be injected as a Jenkins pipeline parameter — confirm the job defines it.
def source_cdk_context = """
{
"source-single-node-ec2": {
"suffix": "ec2-source-<STAGE>",
"networkStackSuffix": "ec2-source-<STAGE>",
"vpcId": "$vpcId",
"distVersion": "7.10.2",
"cidr": "12.0.0.0/16",
"distributionUrl": "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-7.10.2-linux-x86_64.tar.gz",
"captureProxyEnabled": true,
"securityDisabled": true,
"minDistribution": false,
"cpuArch": "x64",
"isInternal": true,
"singleNodeCluster": true,
"networkAvailabilityZones": 2,
"dataNodeCount": 1,
"managerNodeCount": 0,
"serverAccessType": "ipv4",
"restrictServerAccessTo": "0.0.0.0/0"
}
}
"""
// CDK context for the migration infrastructure deployed against the source cluster above.
def migration_cdk_context = """
{
"migration-default": {
"stage": "<STAGE>",
"vpcId": "$vpcId",
"engineVersion": "OS_2.11",
"domainName": "os-cluster-<STAGE>",
"dataNodeCount": 2,
"openAccessPolicyEnabled": true,
"domainRemovalPolicy": "DESTROY",
"artifactBucketRemovalPolicy": "DESTROY",
"trafficReplayerExtraArgs": "--speedup-factor 10.0",
"fetchMigrationEnabled": true,
"reindexFromSnapshotServiceEnabled": true,
"sourceClusterEndpoint": "<SOURCE_CLUSTER_ENDPOINT>",
"dpPipelineTemplatePath": "../../../test/dp_pipeline_aws_integ.yaml",
"migrationConsoleEnableOSI": true,
"migrationAPIEnabled": true
}
}
"""

// Shared library providing the defaultIntegPipeline entry point.
@Library("migrations-shared-lib@main")_

defaultIntegPipeline(
sourceContext: source_cdk_context,
migrationContext: migration_cdk_context,
sourceContextId: sourceContextId,
migrationContextId: migrationContextId,
gitUrl: gitUrl,
gitBranch: gitBranch,
stageId: stageId
// Kept as a reference showing that individual pipeline steps can be overridden.
//deployStep: {
// echo 'Custom Test Step'
//}
)
66 changes: 66 additions & 0 deletions jenkins/migrationIntegPipelines/rfsBackfillE2EPipeline.groovy
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
// Jenkins pipeline: end-to-end reindex-from-snapshot (RFS) backfill integration test.
// Note:
// 1. We are using an existing common VPC that we provide through a 'vpcId' parameter on the pipeline for now until we move
// to a proper Jenkins accounts and can create a setup without public subnets as well as request an extension to allow more than 5 VPCs per region
// 2. There is still a manual step needed on the EC2 source load balancer to replace its security group rule which allows all traffic (0.0.0.0/0) to
// allow traffic for the relevant service security group. This needs a better story around accepting user security groups in our Migration CDK.

// Identifiers selecting the CDK context blocks below and the deployment stage name.
def sourceContextId = 'source-single-node-ec2'
def migrationContextId = 'migration-rfs'
def gitUrl = 'https://github.com/opensearch-project/opensearch-migrations.git'
def gitBranch = 'main'
def stageId = 'rfs-integ'
// CDK context for the single-node Elasticsearch 7.10.2 source on EC2 (no capture proxy for RFS).
// NOTE(review): '$vpcId' is assumed to be injected as a Jenkins pipeline parameter — confirm the job defines it.
def source_cdk_context = """
{
"source-single-node-ec2": {
"suffix": "ec2-source-<STAGE>",
"networkStackSuffix": "ec2-source-<STAGE>",
"vpcId": "$vpcId",
"distVersion": "7.10.2",
"distributionUrl": "https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-7.10.2-linux-x86_64.tar.gz",
"captureProxyEnabled": false,
"securityDisabled": true,
"minDistribution": false,
"cpuArch": "x64",
"isInternal": true,
"singleNodeCluster": true,
"networkAvailabilityZones": 2,
"dataNodeCount": 1,
"managerNodeCount": 0,
"serverAccessType": "ipv4",
"restrictServerAccessTo": "0.0.0.0/0"
}
}
"""
// CDK context for the RFS migration infrastructure (replayer disabled, RFS enabled).
def migration_cdk_context = """
{
"migration-rfs": {
"stage": "<STAGE>",
"vpcId": "$vpcId",
"engineVersion": "OS_2.11",
"domainName": "os-cluster-<STAGE>",
"dataNodeCount": 2,
"openAccessPolicyEnabled": true,
"domainRemovalPolicy": "DESTROY",
"artifactBucketRemovalPolicy": "DESTROY",
"kafkaBrokerServiceEnabled": true,
"trafficReplayerServiceEnabled": false,
"reindexFromSnapshotServiceEnabled": true,
"sourceClusterEndpoint": "<SOURCE_CLUSTER_ENDPOINT>"
}
}
"""

// Shared library providing the defaultIntegPipeline entry point.
@Library("migrations-shared-lib@main")_

defaultIntegPipeline(
sourceContext: source_cdk_context,
migrationContext: migration_cdk_context,
sourceContextId: sourceContextId,
migrationContextId: migrationContextId,
gitUrl: gitUrl,
gitBranch: gitBranch,
stageId: stageId,
// Override the final pipeline step: RFS runs have no replayer shutdown to perform.
finishStep: {
echo 'Skipping step for RFS'
}
)
Loading
Loading