Skip to content

Commit

Permalink
ci: add selfhost multi-machine integration test (#3283)
Browse files Browse the repository at this point in the history
  • Loading branch information
lqy222 authored and dl239 committed Sep 19, 2023
1 parent bafee9c commit 1cb6184
Show file tree
Hide file tree
Showing 12 changed files with 575 additions and 17 deletions.
396 changes: 396 additions & 0 deletions .github/workflows/selfhost_intergration.yml

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions cases/integration_test/ddl/test_options.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -358,7 +358,7 @@ cases:
-
id: 22
desc: test-case
mode: standalone-unsupport
mode: standalone-unsupport,disk-unsupport
inputs:
-
columns : ["c1 string","c2 smallint","c3 int","c4 bigint","c5 float","c6 double","c7 timestamp","c8 date","c9 bool"]
Expand Down Expand Up @@ -397,7 +397,7 @@ cases:
-
id: 24
desc: 没有partitionnum和replicanum,指定distribution
mode: standalone-unsupport
mode: standalone-unsupport,disk-unsupport
inputs:
- name: t3
sql: |
Expand Down
45 changes: 39 additions & 6 deletions cases/integration_test/expression/test_condition.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -252,7 +252,12 @@ cases:
sql: |
select col1,ifnull(col2,"abc") as e1 from {0};
expect:
success: false
columns: ["col1 int", "e1 string"]
order: col1
rows:
- [1, '0']
- [2, 'abc']
- [3, '1']
- id: 10
desc: IFNULL-表达式
sqlDialect: ["HybridSQL"]
Expand Down Expand Up @@ -285,7 +290,12 @@ cases:
sql: |
select col1,ifnull(col2 /0 ,100) as e3 from {0};
expect:
success: false
columns: ["col1 int", "e3 double"]
order: col1
rows:
- [1, 100]
- [2, 100]
- [3, 100]
- id: 11-2
mode: cli-unsupport
desc: NVL is synonyms to ifnull
Expand Down Expand Up @@ -317,7 +327,12 @@ cases:
sql: |
select col1,nvl(col2 /0 ,100) as e3 from {0};
expect:
success: false
columns: ["col1 int", "e3 double"]
order: col1
rows:
- [1, 100]
- [2, 100]
- [3, 100]
- id: 12
desc: IFNULL-兼容类型
sqlDialect: ["HybridSQL"]
Expand All @@ -331,7 +346,13 @@ cases:
sql: |
select col1,ifnull(col2,100) as e1 from {0};
expect:
success: false
columns: ["col1 int", "e1 bigint"]
order: col1
rows:
- [1, 0]
- [2, 100]
- [3, 1]

- id: 13
desc: IFNULL-浮点型
sqlDialect: ["HybridSQL"]
Expand All @@ -345,7 +366,13 @@ cases:
sql: |
select col1,ifnull(col2,1.1) as e2 from {0};
expect:
success: false
columns: ["col1 int", "e2 double"]
order: col1
rows:
- [1, 0]
- [2, 1.1]
- [3, 1]


- id: NVL2-1
desc: NVL2
Expand Down Expand Up @@ -378,7 +405,13 @@ cases:
sql: |
select col1,nvl2(col2, "abc", col1 + 1) as e1 from {0};
expect:
success: false
columns: ["col1 int", "e1 string"]
order: col1
rows:
- [1, 'abc']
- [2, '3']
- [3, 'abc']


- id: NVL2-3
desc: NVL2, sub expression
Expand Down
2 changes: 1 addition & 1 deletion cases/integration_test/expression/test_like.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -496,7 +496,7 @@ cases:
columns : ["id bigint","c1 string","c7 timestamp"]
indexs: ["index1:id:c7"]
rows:
- [1,'\\\%a_b',1590738990000]
- [1,'\\%a_b',1590738990000]
- [2,'\\\aabb',1590738991000]
- [3,"_a%_b",1590738992000]
- [4,"ba_c",1590738993000]
Expand Down
2 changes: 2 additions & 0 deletions cases/integration_test/long_window/test_long_window.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -322,6 +322,7 @@ cases:
id: 10
version: 0.6.1
desc: delete pk
mode: cluster-unsupport
longWindow: w1:2s
inputs:
-
Expand Down Expand Up @@ -359,6 +360,7 @@ cases:
id: 11
version: 0.6.1
desc: delete 组合索引
mode: cluster-unsupport
longWindow: w1:2s
inputs:
-
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@
<maven.compiler.source>8</maven.compiler.source>
<maven.compiler.target>8</maven.compiler.target>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<openmldb.batch.version>0.7.0</openmldb.batch.version>
<openmldb.native.version>0.7.0-allinone</openmldb.native.version>
<openmldb.batch.version>0.7.0-SNAPSHOT</openmldb.batch.version>
<openmldb.native.version>0.7.0-SNAPSHOT</openmldb.native.version>
<spark.version>2.2.0</spark.version>
<suiteXmlFile>test_suite/test_tmp.xml</suiteXmlFile>
<aspectj.version>1.8.9</aspectj.version>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,8 @@ public class OpenMLDBConfig {
}

public static boolean isCluster() {
return OpenMLDBGlobalVar.env.equals("cluster");

return OpenMLDBGlobalVar.env.equals("cluster")||OpenMLDBGlobalVar.env.equals("deploy");
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ public void beforeTest(@Optional("qa") String env,@Optional("main") String versi
openMLDBDeploy.setCluster(false);
OpenMLDBGlobalVar.mainInfo = openMLDBDeploy.deployCluster(2, 3);
}else if(env.equalsIgnoreCase("deploy")){
OpenMLDBGlobalVar.mainInfo = YamlUtil.getObject("out/openmldb_info.yaml",OpenMLDBInfo.class);
OpenMLDBGlobalVar.mainInfo = YamlUtil.getObject(Tool.openMLDBDir().getAbsolutePath()+"/out/openmldb_info.yaml",OpenMLDBInfo.class);
} else if(env.equalsIgnoreCase("yarn")) {
OpenMLDBDeploy openMLDBDeploy = new OpenMLDBDeploy(version);
openMLDBDeploy.setOpenMLDBPath(openMLDBPath);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@
public class TestExternalFunction extends OpenMLDBTest {

@Story("ExternalFunction")
@Test(enabled = true)
@Test(enabled = false)
public void testFunctionMethods() {
Statement statement = executor.getStatement();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
<maven.compiler.source>8</maven.compiler.source>
<maven.compiler.target>8</maven.compiler.target>

<openmldb.jdbc.version>0.6.4</openmldb.jdbc.version>
<openmldb.jdbc.version>0.7.0-SNAPSHOT</openmldb.jdbc.version>
<openmldb.navtive.version>0.7.0-SNAPSHOT</openmldb.navtive.version>
</properties>

Expand Down
126 changes: 126 additions & 0 deletions test/steps/format_config.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
#! /usr/bin/env bash

# Generate the deployment configuration (conf/hosts, conf/openmldb-env.sh and,
# optionally, taskmanager/tablet extras) for a self-hosted multi-machine
# integration-test run.
#
# Arguments:
#   $1 rootPath   - deploy root; must contain conf/openmldb-env.sh
#   $2 jobName    - CI job name, used to build a unique per-run directory name
#   $3 portFrom   - low end (inclusive) of the candidate port range
#   $4 portTo     - high end (inclusive) of the candidate port range
#   $5 Type       - "java" additionally emits out/openmldb_info.yaml for SDK tests
#   $6 Dependency - "ssd" or "hadoop" enables the matching extra configuration

#set DeployDir
rootPath="$1"
jobName="$2"
portFrom="$3"
portTo="$4"
Type="$5"
Dependency="$6"
version=$(grep 'OPENMLDB_VERSION' "$rootPath"/conf/openmldb-env.sh | awk -F= '{print $2}')
curTime=$(date "+%m%d%H%M")
dirName="${jobName}-${version}-${curTime}"

#set Deploy Host and Ports
Hosts=(node-3 node-4 node-1)

# Pick free ports on each host: candidate range minus the ports `ss` reports
# as in use, shuffled, keeping the first N.
# The remote command is wrapped in DOUBLE quotes so $portFrom/$portTo expand
# locally; awk's own field reference must therefore be escaped as \$4, or the
# local shell substitutes positional parameter $4 (portTo) and the in-use-port
# filter silently stops working.
AvaNode1Ports=$(ssh "${Hosts[0]}" "comm -23 <(seq $portFrom $portTo | sort) <(sudo ss -Htan | awk '{print \$4}' | cut -d':' -f2 | sort -u) | shuf | head -n 8")
AvaNode2Ports=$(ssh "${Hosts[1]}" "comm -23 <(seq $portFrom $portTo | sort) <(sudo ss -Htan | awk '{print \$4}' | cut -d':' -f2 | sort -u) | shuf | head -n 2")
AvaNode3Ports=$(ssh "${Hosts[2]}" "comm -23 <(seq $portFrom $portTo | sort) <(sudo ss -Htan | awk '{print \$4}' | cut -d':' -f2 | sort -u) | shuf | head -n 1")


taskmanagerHost=$(hostname | awk -F"." '{print $1}' )

taskmanagerPort=$(ssh "${taskmanagerHost}" "comm -23 <(seq $portFrom $portTo | sort) <(sudo ss -Htan | awk '{print \$4}' | cut -d':' -f2 | sort -u) | shuf | head -n 1")


# Slice the newline-separated port lists into individual component ports.
# Fields 3 and 5 of AvaNode1Ports are intentionally unused (5 was once the
# taskmanager port, see the commented assignment below).
tablet1Port=$(echo "$AvaNode1Ports" | awk 'BEGIN{ RS="";FS="\n"}{print $1}')
tablet2Port=$(echo "$AvaNode2Ports" | awk 'BEGIN{ RS="";FS="\n"}{print $1}')
tablet3Port=$(echo "$AvaNode3Ports" | awk 'BEGIN{ RS="";FS="\n"}{print $1}')
ns1Port=$(echo "$AvaNode1Ports" | awk 'BEGIN{ RS="";FS="\n"}{print $2}')
ns2Port=$(echo "$AvaNode2Ports" | awk 'BEGIN{ RS="";FS="\n"}{print $2}')
apiserverPort=$(echo "$AvaNode1Ports" | awk 'BEGIN{ RS="";FS="\n"}{print $4}')
#taskmanagerPort=$(echo $AvaNode1Ports | awk '{print $5}')
zookeeperPort1=$(echo "$AvaNode1Ports" | awk 'BEGIN{ RS="";FS="\n"}{print $6}')
zookeeperPort2=$(echo "$AvaNode1Ports" | awk 'BEGIN{ RS="";FS="\n"}{print $7}')
zookeeperPort3=$(echo "$AvaNode1Ports" | awk 'BEGIN{ RS="";FS="\n"}{print $8}')

# write addr to hosts
cat >"$rootPath"/conf/hosts<<EOF
[tablet]
${Hosts[0]}:$tablet1Port /tmp/$dirName/tablet
${Hosts[1]}:$tablet2Port /tmp/$dirName/tablet
${Hosts[2]}:$tablet3Port /tmp/$dirName/tablet
[nameserver]
${Hosts[0]}:$ns1Port /tmp/$dirName/ns
${Hosts[1]}:$ns2Port /tmp/$dirName/ns
[apiserver]
${Hosts[0]}:$apiserverPort /tmp/$dirName/apiserver
[taskmanager]
${taskmanagerHost}:$taskmanagerPort /tmp/$dirName/taskmanager
[zookeeper]
${Hosts[0]}:$zookeeperPort1:$zookeeperPort2:$zookeeperPort3 /tmp/$dirName/zk
EOF

#write openmldb.env.sh
cat >"$rootPath"/conf/openmldb-env.sh<<EOF
export OPENMLDB_VERSION=$version
export OPENMLDB_MODE=\${OPENMLDB_MODE:=cluster}
export OPENMLDB_USE_EXISTING_ZK_CLUSTER=false
export OPENMLDB_ZK_HOME=
export OPENMLDB_ZK_CLUSTER=
export OPENMLDB_ZK_ROOT_PATH=/openmldb-$dirName
export OPENMLDB_HOME=
export SPARK_HOME=/tmp/spark/spark-$dirName
export CLEAR_OPENMLDB_INSTALL_DIR=true
EOF

# Emit the cluster description consumed by the java SDK integration tests.
if [ "$Type" = "java" ]; then
    mkdir -p out
    touch out/openmldb_info.yaml
    cat >out/openmldb_info.yaml<<EOF
deployType: CLUSTER
zk_cluster: "${Hosts[0]}:$zookeeperPort1"
zk_root_path: "/openmldb-$dirName"
basePath: "$rootPath/tmp"
openMLDBPath: "/tmp/$dirName/tablet/bin/openmldb"
apiServerEndpoints:
  - "${Hosts[0]}:$apiserverPort"
tabletEndpoints:
  - "${Hosts[0]}:$tablet1Port"
  - "${Hosts[1]}:$tablet2Port"
  - "${Hosts[2]}:$tablet3Port"
EOF
fi


if [ "$Dependency" = "ssd" ]; then
    mkdir -p /mnt/nvmessd/qytest/"$dirName"
    # NOTE(review): the directory created above is /mnt/nvmessd/qytest/... but
    # the flags below point at /mnt/nvmessd/selfintegration/... — confirm which
    # path the tablets actually use; one of the two looks stale.
    cat >>"$rootPath"/conf/tablet.flags.template<<EOF
--ssd_root_path=/mnt/nvmessd/selfintegration/$dirName/db
--recycle_bin_ssd_root_path=/mnt/nvmessd/selfintegration/$dirName/recycle_ssd
EOF
    # comment out the node-1 tablet: node-1 has no SSD
    # out/openmldb_info.yaml only exists when Type=java, so guard the sed to
    # avoid a hard error on the missing file.
    if [ -f out/openmldb_info.yaml ]; then
        sed -i "s/.*node-1.*/#&/g" out/openmldb_info.yaml
    fi
    sed -i "s/.*node-1.*/#&/g" "$rootPath"/conf/hosts
fi

if [ "$Dependency" = "hadoop" ]; then
    cat >"$rootPath"/conf/taskmanager.properties<<EOF
server.host=${Hosts[0]}
zookeeper.cluster=${Hosts[0]}:$zookeeperPort1
zookeeper.root_path=/openmldb-$dirName
server.port=$taskmanagerPort
job.log.path=./logs/
spark.home=/tmp/spark/spark-$dirName
spark.master=yarn-client
offline.data.prefix=hdfs://node-1/openmldb_integration_test/
spark.default.conf=spark.hadoop.yarn.timeline-service.enabled=false
hadoop.conf.dir=/mnt/hdd0/denglong/openmldb_runner_worker/hadoop
hadoop.user.name=root
external.function.dir=/tmp/
EOF
fi



# if [ "$Dependency" = "kafka" ]; then
# install kafka & deploy connector with kafka address and openmldb address
# cat >$rootPath/test/integration-test/openmldb-test-java/openmldb-ecosystem/src/test/resources/kafka_test_cases.ymls<<EOF
# // "bootstrap.servers": node-4:49092,
# // "connect.listeners": http://:8083,
# // apiserver.address: ${Hosts[0]}:$apiserverPort
# // "connection.url": "jdbc:openmldb:///kafka_test?zk=127.0.0.1:2181&zkPath=/openmldb"
# // zk_root_path: "/openmldb-$dirName"
# EOF
# fi
4 changes: 2 additions & 2 deletions test/test-tool/openmldb-deploy/cases/test_upgrade.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,13 +82,13 @@ def test_upgrade(self, replica_num, partition_num):
key_num = 50000
for i in range(key_num):
key = "key" + str(i)
self.cursor.execute(f"insert into {table_name} values (\'{key}\', \'col2\')");
self.cursor.execute(f"insert into {table_name} values (\'{key}\', \'col2\');")
result = self.cursor.execute(f"select * from {table_name}")
data = result.fetchall()
assert len(data) == key_num

status, cnt = self.get_leader_cnt("test", table_name, case_conf.conf["components"]["tablet"][0])
assert status.OK() and cnt > 0
assert status.OK()
status, unalive_cnt = self.get_unalive_cnt("test", table_name)
assert status.OK() and unalive_cnt == 0

Expand Down

0 comments on commit 1cb6184

Please sign in to comment.