diff --git a/Makefile b/Makefile
index e725d7c..7843c52 100644
--- a/Makefile
+++ b/Makefile
@@ -17,6 +17,7 @@ BIN_NAME = ingress-perf
 BIN_PATH = $(BIN_DIR)/$(BIN_NAME)
 SOURCES = $(shell find . -type f -name "*.go")
 CGO = 0
+VEGETA_VERSION := 12.11.0
 
 .PHONY: build lint clean
 
@@ -30,12 +31,14 @@ $(BIN_PATH): $(SOURCES)
 container-build: build
 	@echo "Building the container image"
 	$(CONTAINER_BUILD) -f containers/Containerfile \
+		--build-arg VEGETA_VERSION=$(VEGETA_VERSION) \
 		-t $(CONTAINER_NS)/$(BIN_NAME) ./containers
 
 gha-build:
 	@echo "Building Multi-architecture container Images"
 	$(CONTAINER_BUILD) -f containers/Containerfile \
 		--platform=linux/amd64,linux/arm64,linux/ppc64le,linux/s390x \
+		--build-arg VEGETA_VERSION=$(VEGETA_VERSION) \
 		-t $(CONTAINER_NS)/$(BIN_NAME) ./containers --manifest=$(CONTAINER_NS)/$(BIN_NAME):latest
 
 gha-push: gha-build
diff --git a/README.md b/README.md
index 965a6ff..557f042 100644
--- a/README.md
+++ b/README.md
@@ -8,25 +8,29 @@ OCP Ingress performance ultimate tool!
 
 Ingress-perf configuration is defined in a YAML file, holding an array of the following structure. [Examples directory](./config)
 
-| Field Name       | Type            | Description                                                                                                 | Default Value |
-|------------------|-----------------|-------------------------------------------------------------------------------------------------------------|---------------|
-| `termination`    | `string`        | Defines the type of benchmark termination. Allowed values are `http`, `edge`, `reencrypt` and `reencrypt`. | N/A           |
-| `connections`    | `int`           | Defines the number of connections per client.                                                               | `0`           |
-| `samples`        | `int`           | Defines the number of samples per scenario.                                                                 | `0`           |
-| `duration`       | `time.Duration` | Defines the duration of each sample.                                                                        | `""`          |
-| `path`           | `string`        | Defines the scenario endpoint path, for example: `/1024.html`, `/2048.html`.                                | `""`          |
-| `concurrency`    | `int32`         | Defines the number of clients that will concurrently run the benchmark scenario.                            | `0`           |
-| `tool`           | `string`        | Defines the tool to run the benchmark scenario.                                                             | `""`          |
-| `serverReplicas` | `int32`         | Defines the number of server (nginx) replicas backed by the routes.                                         | `0`           |
-| `tuningPatch`    | `string`        | Defines a JSON merge tuning patch for the default `IngressController` object.                               | `""`          |
-| `delay`          | `time.Duration` | Defines a delay between samples.                                                                            | `0s`          |
-| `warmup`         | `bool`          | Enables warmup: indexing will be disabled in this scenario.                                                  | `false`       |
-| `requestTimeout` | `time.Duration` | Request timeout                                                                                              | `1s`          |
-| `procs `         | `int`           | Number of processes to trigger in each of the client pods                                                    | `1`           |
+| Field Name       | Type            | Description                                                                                | Default Value   | Tools          |
+|------------------|-----------------|---------------------------------------------------------------------------------------------|-----------------|----------------|
+| `termination`    | `string`        | Benchmark termination. Allowed values are `http`, `edge` and `reencrypt`.                  | N/A             | `wrk`,`vegeta` |
+| `connections`    | `int`           | Number of connections per client.                                                          | `0`             | `wrk`,`vegeta` |
+| `samples`        | `int`           | Number of samples per scenario.                                                            | `0`             | `wrk`,`vegeta` |
+| `duration`       | `time.Duration` | Duration of each sample.                                                                   | `""`            | `wrk`,`vegeta` |
+| `path`           | `string`        | Scenario endpoint path, for example: `/1024.html`, `/2048.html`.                           | `""`            | `wrk`,`vegeta` |
+| `concurrency`    | `int32`         | Number of clients that will concurrently run the benchmark scenario.                       | `0`             | `wrk`,`vegeta` |
+| `tool`           | `string`        | Tool to run the benchmark scenario.                                                        | `""`            | `wrk`,`vegeta` |
+| `serverReplicas` | `int32`         | Number of server (nginx) replicas backed by the routes.                                    | `0`             | `wrk`,`vegeta` |
+| `tuningPatch`    | `string`        | Defines a JSON merge tuning patch for the default `IngressController` object.              | `""`            | `wrk`,`vegeta` |
+| `delay`          | `time.Duration` | Delay between samples.                                                                     | `0s`            | `wrk`,`vegeta` |
+| `warmup`         | `bool`          | Enables warmup: indexing will be disabled in this scenario.                                | `false`         | `wrk`,`vegeta` |
+| `requestTimeout` | `time.Duration` | Request timeout.                                                                           | `1s`            | `wrk`,`vegeta` |
+| `procs`          | `int`           | Number of processes to trigger in each of the client pods.                                 | `1`             | `wrk`,`vegeta` |
+| `threads`        | `int`           | Number of threads/workers per process. Only applies when not using a fixed number of RPS.  | `#cores`        | `vegeta`       |
+| `keepalive`      | `bool`          | Use HTTP keep-alive connections.                                                           | `true`          | `vegeta`       |
+| `requestRate`    | `int`           | Number of requests per second.                                                             | `0` (unlimited) | `vegeta`       |
 
 ## Supported tools
 
-- wrk: HTTP benchmarking tool. https://github.com/wg/wrk
+- wrk: HTTP benchmarking tool. https://github.com/wg/wrk. Supported architectures: amd64, arm64, ppc64le, s390x
+- vegeta: It's over 9000! https://github.com/tsenart/vegeta. Supported architectures: amd64
 
 ## Running
diff --git a/config/vegeta.yml b/config/vegeta.yml
new file mode 100644
index 0000000..03caae4
--- /dev/null
+++ b/config/vegeta.yml
@@ -0,0 +1,14 @@
+# vim: expandtab shiftwidth=2 softtabstop=2
+
+- termination: http
+  connections: 200
+  samples: 1
+  duration: 30s
+  path: /128.html
+  concurrency: 2
+  tool: vegeta
+  serverReplicas: 9
+  requestTimeout: 2s
+  warmup: false
+  keepalive: false
+  threads: 50
diff --git a/containers/Containerfile b/containers/Containerfile
index f8d4724..5ef9c37 100644
--- a/containers/Containerfile
+++ b/containers/Containerfile
@@ -4,6 +4,8 @@ RUN git clone https://github.com/wg/wrk.git --depth=1
 RUN cd wrk && make -j $(nproc)
 
 FROM registry.access.redhat.com/ubi8/ubi:latest
+ARG VEGETA_VERSION
 RUN dnf install -y iproute procps-ng
+RUN curl -sS -L https://github.com/tsenart/vegeta/releases/download/v${VEGETA_VERSION}/vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz | tar xz -C /usr/bin/
 COPY --from=builder /wrk/wrk /usr/bin/wrk
 COPY json.lua json.lua
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 3ded5fa..49f9002 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -16,6 +16,7 @@ package config
 
 import (
 	"os"
+	"runtime"
 	"time"
 
 	yaml "gopkg.in/yaml.v3"
@@ -28,6 +29,8 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		Warmup:         false, // Disable warmup by default
 		RequestTimeout: time.Second,
 		Procs:          1,
+		Keepalive:      true,
+		Threads:        runtime.NumCPU(), // As many threads as the number of logical CPU cores
 	}
 	if err := unmarshal(&defaultCfg); err != nil {
 		return err
diff --git a/pkg/config/types.go b/pkg/config/types.go
index c0a5d9c..2dd25b0 100644
--- a/pkg/config/types.go
+++ b/pkg/config/types.go
@@ -41,9 +41,15 @@ type Config struct {
 	// Tuning defines a tuning patch for the default IngressController object
 	Tuning string `yaml:"tuningPatch" json:"tuningPatch"`
 	// Delay defines a delay between samples
-	Delay time.Duration `yaml:"delay"`
+	Delay time.Duration `yaml:"delay" json:"delay"`
 	// Warmup enables warmup: Indexing will be disabled in this scenario. Default is false
 	Warmup bool `yaml:"warmup" json:"-"`
 	// RequestTimeout defines the tool request timeout
-	RequestTimeout time.Duration `yaml:"requestTimeout"`
+	RequestTimeout time.Duration `yaml:"requestTimeout" json:"requestTimeout"`
+	// RequestRate defines the number of requests per second
+	RequestRate int `yaml:"requestRate" json:"requestRate"`
+	// Keepalive enables HTTP keep-alive connections
+	Keepalive bool `yaml:"keepalive" json:"keepalive"`
+	// Threads defines the number of threads/workers per client process
+	Threads int `yaml:"threads" json:"threads"`
 }
diff --git a/pkg/runner/exec.go b/pkg/runner/exec.go
index fb4bd4c..51d958e 100644
--- a/pkg/runner/exec.go
+++ b/pkg/runner/exec.go
@@ -85,6 +85,7 @@ func runBenchmark(cfg config.Config, clusterMetadata tools.ClusterMetadata) ([]t
 	for _, pod := range clientPods {
 		for i := 0; i < cfg.Procs; i++ {
 			func(p corev1.Pod) {
+				log.Debugf("Running %v in client pods", tool.Cmd())
 				errGroup.Go(func() error {
 					return exec(context.TODO(), tool, p, &result)
 				})
diff --git a/pkg/runner/tools/types.go b/pkg/runner/tools/types.go
index a2112f0..a0ac874 100644
--- a/pkg/runner/tools/types.go
+++ b/pkg/runner/tools/types.go
@@ -33,22 +33,23 @@
 }
 
 type PodResult struct {
-	Name         string  `json:"pod"`
-	Node         string  `json:"node"`
-	InstanceType string  `json:"instanceType"`
-	AvgRps       float64 `json:"rps"`
-	StdevRps     float64 `json:"rps_stdev"`
-	StdevLatency float64 `json:"stdev_lat"`
-	AvgLatency   float64 `json:"avg_lat_us"`
-	MaxLatency   float64 `json:"max_lat_us"`
-	P90Latency   int64   `json:"p90_lat_us"`
-	P95Latency   int64   `json:"p95_lat_us"`
-	P99Latency   int64   `json:"p99_lat_us"`
-	HTTPErrors   int64   `json:"http_errors"`
-	ReadErrors   int64   `json:"read_errors"`
-	WriteErrors  int64   `json:"write_errors"`
-	Requests     int64   `json:"requests"`
-	Timeouts     int64   `json:"timeouts"`
+	Name         string      `json:"pod"`
+	Node         string      `json:"node"`
+	InstanceType string      `json:"instanceType"`
+	AvgRps       float64     `json:"rps"`
+	StdevRps     float64     `json:"rps_stdev"`
+	StdevLatency float64     `json:"stdev_lat"`
+	AvgLatency   float64     `json:"avg_lat_us"`
+	MaxLatency   float64     `json:"max_lat_us"`
+	P90Latency   int64       `json:"p90_lat_us"`
+	P95Latency   int64       `json:"p95_lat_us"`
+	P99Latency   int64       `json:"p99_lat_us"`
+	HTTPErrors   int64       `json:"http_errors"`
+	ReadErrors   int64       `json:"read_errors"`
+	WriteErrors  int64       `json:"write_errors"`
+	Requests     int64       `json:"requests"`
+	Timeouts     int64       `json:"timeouts"`
+	StatusCodes  map[int]int `json:"status_codes"`
 }
 
 type Result struct {
@@ -71,5 +72,30 @@
 	Requests int64  `json:"requests"`
 	Timeouts int64  `json:"timeouts"`
 	Version  string `json:"version"`
+	StatusCodes map[int]int `json:"status_codes"`
 	ClusterMetadata
 }
+
+type VegetaResult struct {
+	Latencies struct {
+		Total      float64 `json:"total"`
+		AvgLatency float64 `json:"mean"`
+		P50Latency float64 `json:"50th"`
+		P90Latency float64 `json:"90th"`
+		P95Latency float64 `json:"95th"`
+		P99Latency float64 `json:"99th"`
+		MaxLatency float64 `json:"max"`
+		MinLatency float64 `json:"min"`
+	} `json:"latencies"`
+	BytesIn struct {
+		Total float64 `json:"total"`
+		Mean  float64 `json:"mean"`
+	} `json:"bytes_in"`
+	BytesOut struct {
+		Total float64 `json:"total"`
+		Mean  float64 `json:"mean"`
+	} `json:"bytes_out"`
+	Requests    int64       `json:"requests"`
+	Throughput  float64     `json:"throughput"`
+	StatusCodes map[int]int `json:"status_codes"`
+}
diff --git a/pkg/runner/tools/vegeta.go b/pkg/runner/tools/vegeta.go
new file mode 100644
index 0000000..6570a2d
--- /dev/null
+++ b/pkg/runner/tools/vegeta.go
@@ -0,0 +1,106 @@
+// Copyright 2023 The ingress-perf Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/cloud-bulldozer/ingress-perf/pkg/config"
+)
+
+type vegeta struct {
+	cmd []string
+	res VegetaResult
+}
+
+func init() {
+	toolMap["vegeta"] = Vegeta
+}
+
+func Vegeta(cfg config.Config, ep string) Tool {
+	endpoint := fmt.Sprintf("echo GET %v", ep)
+	vegetaCmd := fmt.Sprintf("vegeta attack -insecure -max-connections=%d -duration=%v -timeout=%v -keepalive=%v", cfg.Connections, cfg.Duration, cfg.RequestTimeout, cfg.Keepalive)
+	if cfg.RequestRate > 0 {
+		vegetaCmd += fmt.Sprintf(" -rate %d", cfg.RequestRate)
+	} else {
+		vegetaCmd += fmt.Sprintf(" -rate=0 -workers=%d -max-workers=%d", cfg.Threads, cfg.Threads)
+	}
+	newVegeta := &vegeta{
+		cmd: []string{"bash", "-c", fmt.Sprintf("%v | %v > /tmp/result; vegeta report -type json /tmp/result", endpoint, vegetaCmd)},
+		res: VegetaResult{},
+	}
+	return newVegeta
+}
+
+func (v *vegeta) Cmd() []string {
+	return v.cmd
+}
+
+/* Example JSON output
+{
+  "latencies": {
+    "total": 1256079085,
+    "mean": 31401977,
+    "50th": 24082627,
+    "90th": 56335116,
+    "95th": 66540881,
+    "99th": 77088475,
+    "max": 77088475,
+    "min": 16256151
+  },
+  "bytes_in": {
+    "total": 29211360,
+    "mean": 730284
+  },
+  "bytes_out": {
+    "total": 0,
+    "mean": 0
+  },
+  "earliest": "2023-09-28T12:04:38.399615001+02:00",
+  "latest": "2023-09-28T12:04:42.300039403+02:00",
+  "end": "2023-09-28T12:04:42.364625089+02:00",
+  "duration": 3900424402,
+  "wait": 64585686,
+  "requests": 40,
+  "rate": 10.255294264770113,
+  "throughput": 10.088246716208607,
+  "success": 1,
+  "status_codes": {
+    "200": 40
+  },
+  "errors": []
+}
+*/
+
+func (v *vegeta) ParseResult(stdout, _ string) (PodResult, error) {
+	var podResult PodResult
+	err := json.Unmarshal([]byte(stdout), &v.res)
+	if err != nil {
+		return podResult, err
+	}
+	podResult = PodResult{
+		AvgRps:      v.res.Throughput,
+		AvgLatency:  v.res.Latencies.AvgLatency / 1e3,
+		MaxLatency:  v.res.Latencies.MaxLatency / 1e3,
+		P90Latency:  int64(v.res.Latencies.P90Latency / 1e3),
+		P95Latency:  int64(v.res.Latencies.P95Latency / 1e3),
+		P99Latency:  int64(v.res.Latencies.P99Latency / 1e3),
+		Requests:    v.res.Requests,
+		StatusCodes: v.res.StatusCodes,
+	}
+	return podResult, nil
+}
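
For reviewers, a minimal sketch (not part of the patch) of how the new `tools.Vegeta` constructor assembles the client command for the `config/vegeta.yml` scenario added above. The standalone `main` package and the endpoint URL are illustrative placeholders; the types, fields, and functions used are the ones introduced or referenced in this diff.

```go
package main

import (
	"fmt"
	"time"

	"github.com/cloud-bulldozer/ingress-perf/pkg/config"
	"github.com/cloud-bulldozer/ingress-perf/pkg/runner/tools"
)

func main() {
	// Mirrors config/vegeta.yml: 200 connections, 30s samples, 2s timeout,
	// keepalive disabled and 50 workers (no fixed request rate).
	cfg := config.Config{
		Connections:    200,
		Duration:       30 * time.Second,
		RequestTimeout: 2 * time.Second,
		Keepalive:      false,
		Threads:        50,
	}
	// The endpoint is a made-up placeholder for a route URL.
	tool := tools.Vegeta(cfg, "http://app.example.com/128.html")
	// Prints the bash command executed in each client pod, i.e.
	// echo GET <endpoint> | vegeta attack ... > /tmp/result; vegeta report -type json /tmp/result
	fmt.Println(tool.Cmd())
}
```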
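
And a small sketch of how the sample `vegeta report -type json` output documented in `vegeta.go` maps onto the exported `VegetaResult` struct, including the nanosecond-to-microsecond conversion that `ParseResult` applies for the `*_lat_us` fields of `PodResult`. The trimmed JSON literal is copied from the comment block in the patch.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/cloud-bulldozer/ingress-perf/pkg/runner/tools"
)

// Trimmed copy of the sample report embedded as a comment in vegeta.go.
const report = `{
  "latencies": {"total": 1256079085, "mean": 31401977, "90th": 56335116, "95th": 66540881, "99th": 77088475, "max": 77088475, "min": 16256151},
  "bytes_in": {"total": 29211360, "mean": 730284},
  "bytes_out": {"total": 0, "mean": 0},
  "requests": 40,
  "throughput": 10.088246716208607,
  "status_codes": {"200": 40}
}`

func main() {
	var res tools.VegetaResult
	if err := json.Unmarshal([]byte(report), &res); err != nil {
		log.Fatal(err)
	}
	// Vegeta reports latencies in nanoseconds; ParseResult divides by 1e3
	// to express them in microseconds, matching PodResult's JSON fields.
	fmt.Printf("rps=%.2f avg_lat_us=%.0f p99_lat_us=%.0f codes=%v\n",
		res.Throughput, res.Latencies.AvgLatency/1e3, res.Latencies.P99Latency/1e3, res.StatusCodes)
}
```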