From 33c4a438fee3fe4f0992d08533d792523c5d1d13 Mon Sep 17 00:00:00 2001
From: Gaius
Date: Wed, 12 Jan 2022 13:58:20 +0800
Subject: [PATCH] Refactor scheduler service (#958)

Signed-off-by: Gaius
---
 cdn/supervisor/task/manager.go                |   10 +-
 client/config/dynconfig_test.go               |   10 +-
 cmd/dependency/dependency.go                  |    9 -
 cmd/scheduler/cmd/root.go                     |   15 +-
 cmd/scheduler/main.go                         |    1 -
 docs/en/deployment/configuration/cdn.yaml     |    2 -
 docs/en/deployment/configuration/manager.yaml |    4 -
 .../deployment/configuration/scheduler.yaml   |  131 +-
 docs/zh-CN/deployment/configuration/cdn.yaml  |    2 -
 .../deployment/configuration/manager.yaml     |    5 +-
 .../deployment/configuration/scheduler.yaml   |  164 ++-
 go.mod                                        |    8 +-
 go.sum                                        |   11 +-
 internal/dflog/logger.go                      |   16 +-
 internal/job/queue_test.go                    |    4 +-
 manager/job/preheat.go                        |    4 +-
 pkg/container/list/sorted_list_test.go        |   18 +-
 pkg/container/list/sorted_unique_list_test.go |   18 +-
 pkg/container/set/safe_set_test.go            |   16 +-
 pkg/container/set/set_test.go                 |   14 +-
 pkg/dfpath/dfpath_test.go                     |    8 +-
 pkg/dfpath/mocks/dfpath_mock.go               |  146 +++
 pkg/gc/gc.go                                  |    4 +-
 .../supervisor/mocks => pkg/gc}/gc_mock.go    |    9 +-
 pkg/gc/gc_test.go                             |   40 +-
 pkg/gc/{mocks => }/logger_mock.go             |    6 +-
 pkg/gc/{mocks => }/runner_mock.go             |    6 +-
 pkg/reachable/reachable_test.go               |    2 +-
 pkg/rpc/base/base.pb.go                       |    7 +-
 pkg/rpc/base/common/common.go                 |    2 +-
 pkg/rpc/cdnsystem/mocks/cdnsystem_mock.go     |  420 +++++++
 pkg/rpc/cdnsystem/server/mocks/server_mock.go |   66 +
 .../dfdaemon/cdnsystem/mocks/client_mock.go   |  106 ++
 pkg/rpc/dfdaemon/client/mocks/client_mock.go  |  113 ++
 pkg/rpc/dfdaemon/mocks/dfdaemon_mock.go       |  456 +++++++
 pkg/rpc/dfdaemon/server/mocks/server_mock.go  |   80 ++
 .../rpc/manager/client/mocks/client_mock.go   |    2 +-
 pkg/rpc/manager/mocks/manager_mock.go         |  589 +++++++++
 pkg/rpc/manager/server/mocks/server_mock.go   |  125 ++
 pkg/rpc/scheduler/client/mocks/client_mock.go |  143 +++
 .../client/mocks/peer_packet_stream_mock.go   |   64 +
 pkg/rpc/scheduler/mocks/scheduler_mock.go     |  519 ++++++++
 pkg/rpc/scheduler/scheduler.go                |    2 +-
 pkg/rpc/scheduler/server/mocks/server_mock.go |   93 ++
 pkg/util/structutils/struct_utils_test.go     |    4 +-
 scheduler/config/config.go                    |  362 ++++--
 scheduler/config/config_test.go               |  159 +--
 scheduler/config/constants_otel.go            |   62 -
 scheduler/config/dynconfig.go                 |   86 +-
 scheduler/config/dynconfig_test.go            |  114 +-
 scheduler/config/mocks/dyncofig_mock.go       |  197 +++
 scheduler/config/testdata/scheduler.yaml      |   62 +-
 .../core/evaluator/evaluator_base_test.go     |  667 ----------
 scheduler/core/events.go                      |  438 -------
 scheduler/core/monitor.go                     |  126 --
 .../core/scheduler/basic/basic_scheduler.go   |  269 ----
 scheduler/core/scheduler/scheduler.go         |   70 --
 scheduler/core/service.go                     |  366 ------
 scheduler/core/worker.go                      |  101 --
 scheduler/job/job.go                          |   55 +-
 scheduler/metrics/metrics.go                  |    4 +-
 scheduler/resource/cdn.go                     |  295 +++++
 scheduler/resource/cdn_mock.go                |  173 +++
 scheduler/resource/cdn_test.go                |  404 ++++++
 scheduler/resource/host.go                    |  171 +++
 scheduler/resource/host_manager.go            |   69 ++
 scheduler/resource/host_manager_mock.go       |   88 ++
 scheduler/resource/host_manager_test.go       |  199 +++
 scheduler/resource/host_test.go               |  401 ++++++
 scheduler/resource/peer.go                    |  421 +++++++
 scheduler/resource/peer_manager.go            |  153 +++
 scheduler/resource/peer_manager_mock.go       |  102 ++
 scheduler/resource/peer_manager_test.go       |  411 ++++++
 scheduler/resource/peer_test.go               | 1002 +++++++++++++++
 scheduler/resource/resource.go                |   98 ++
 scheduler/resource/resource_mock.go           |   90 ++
 scheduler/resource/resource_test.go           |  122 ++
 scheduler/resource/task.go                    |  232 ++++
 scheduler/resource/task_manager.go            |  114 ++
 scheduler/resource/task_manager_mock.go       |  102 ++
 scheduler/resource/task_manager_test.go       |  376 ++++++
 scheduler/resource/task_test.go               |  688 ++++++++++
 scheduler/rpcserver/rpcserver.go              |  228 ++--
 scheduler/rpcserver/rpcserver_test.go         |  622 ++++++++++
 scheduler/scheduler.go                        |  127 +-
 .../evaluator/evaluator.go                    |    9 +-
 .../evaluator/evaluator_base.go               |  130 +-
 .../evaluator/evaluator_base_test.go          |  700 +++++++++++
 .../evaluator/evaluator_test.go               |    2 +-
 .../{core => scheduler}/evaluator/plugin.go   |    0
 .../evaluator/plugin_test.go                  |    2 +-
 .../evaluator/testdata/main.go                |   13 +-
 .../evaluator/testdata/plugin/evaluator.go    |   10 +-
 scheduler/scheduler/mocks/scheduler_mock.go   |   67 +
 scheduler/scheduler/scheduler.go              |  177 +++
 scheduler/scheduler/scheduler_test.go         |  418 +++++++
 scheduler/service/callback.go                 |  293 +++++
 scheduler/service/callback_test.go            | 1102 +++++++++++++++++
 scheduler/service/mocks/callback_mock.go      |  158 +++
 scheduler/service/mocks/service_mock.go       |  162 +++
 scheduler/service/service.go                  |  232 ++++
 scheduler/service/service_test.go             |  773 ++++++++++++
 scheduler/supervisor/cdn.go                   |  378 ------
 scheduler/supervisor/cdn_test.go              |  521 --------
 scheduler/supervisor/host.go                  |  187 ---
 scheduler/supervisor/host_test.go             |  222 ----
 scheduler/supervisor/mocks/cdn_mock.go        |  135 --
 scheduler/supervisor/mocks/host_mock.go       |   74 --
 scheduler/supervisor/mocks/peer_mock.go       |  103 --
 scheduler/supervisor/mocks/task_mock.go       |   89 --
 scheduler/supervisor/peer.go                  |  631 ----------
 scheduler/supervisor/peer_test.go             |  613 ---------
 scheduler/supervisor/task.go                  |  390 ------
 scheduler/supervisor/task_test.go             |  569 ---------
 114 files changed, 14402 insertions(+), 7058 deletions(-)
 create mode 100644 pkg/dfpath/mocks/dfpath_mock.go
 rename {scheduler/supervisor/mocks => pkg/gc}/gc_mock.go (92%)
 rename pkg/gc/{mocks => }/logger_mock.go (95%)
 rename pkg/gc/{mocks => }/runner_mock.go (92%)
 create mode 100644 pkg/rpc/cdnsystem/mocks/cdnsystem_mock.go
 create mode 100644 pkg/rpc/cdnsystem/server/mocks/server_mock.go
 create mode 100644 pkg/rpc/dfdaemon/cdnsystem/mocks/client_mock.go
 create mode 100644 pkg/rpc/dfdaemon/client/mocks/client_mock.go
 create mode 100644 pkg/rpc/dfdaemon/mocks/dfdaemon_mock.go
 create mode 100644 pkg/rpc/dfdaemon/server/mocks/server_mock.go
 rename scheduler/config/mocks/manager_client_mock.go => pkg/rpc/manager/client/mocks/client_mock.go (98%)
 create mode 100644 pkg/rpc/manager/mocks/manager_mock.go
 create mode 100644 pkg/rpc/manager/server/mocks/server_mock.go
 create mode 100644 pkg/rpc/scheduler/client/mocks/client_mock.go
 create mode 100644 pkg/rpc/scheduler/client/mocks/peer_packet_stream_mock.go
 create mode 100644 pkg/rpc/scheduler/mocks/scheduler_mock.go
 create mode 100644 pkg/rpc/scheduler/server/mocks/server_mock.go
 delete mode 100644 scheduler/config/constants_otel.go
 create mode 100644 scheduler/config/mocks/dyncofig_mock.go
 delete mode 100644 scheduler/core/evaluator/evaluator_base_test.go
 delete mode 100644 scheduler/core/events.go
 delete mode 100644 scheduler/core/monitor.go
 delete mode 100644 scheduler/core/scheduler/basic/basic_scheduler.go
 delete mode 100644 scheduler/core/scheduler/scheduler.go
 delete mode 100644 scheduler/core/service.go
 delete mode 100644 scheduler/core/worker.go
 create mode 100644 scheduler/resource/cdn.go
 create mode 100644 scheduler/resource/cdn_mock.go
 create mode 100644 scheduler/resource/cdn_test.go
 create mode 100644 scheduler/resource/host.go
 create mode 100644 scheduler/resource/host_manager.go
 create mode 100644 scheduler/resource/host_manager_mock.go
 create mode 100644 scheduler/resource/host_manager_test.go
 create mode 100644 scheduler/resource/host_test.go
 create mode 100644 scheduler/resource/peer.go
 create mode 100644 scheduler/resource/peer_manager.go
 create mode 100644 scheduler/resource/peer_manager_mock.go
 create mode 100644 scheduler/resource/peer_manager_test.go
 create mode 100644 scheduler/resource/peer_test.go
 create mode 100644 scheduler/resource/resource.go
 create mode 100644 scheduler/resource/resource_mock.go
 create mode 100644 scheduler/resource/resource_test.go
 create mode 100644 scheduler/resource/task.go
 create mode 100644 scheduler/resource/task_manager.go
 create mode 100644 scheduler/resource/task_manager_mock.go
 create mode 100644 scheduler/resource/task_manager_test.go
 create mode 100644 scheduler/resource/task_test.go
 create mode 100644 scheduler/rpcserver/rpcserver_test.go
 rename scheduler/{core => scheduler}/evaluator/evaluator.go (82%)
 rename scheduler/{core => scheduler}/evaluator/evaluator_base.go (54%)
 create mode 100644 scheduler/scheduler/evaluator/evaluator_base_test.go
 rename scheduler/{core => scheduler}/evaluator/evaluator_test.go (97%)
 rename scheduler/{core => scheduler}/evaluator/plugin.go (100%)
 rename scheduler/{core => scheduler}/evaluator/plugin_test.go (97%)
 rename scheduler/{core => scheduler}/evaluator/testdata/main.go (71%)
 rename scheduler/{core => scheduler}/evaluator/testdata/plugin/evaluator.go (74%)
 create mode 100644 scheduler/scheduler/mocks/scheduler_mock.go
 create mode 100644 scheduler/scheduler/scheduler.go
 create mode 100644 scheduler/scheduler/scheduler_test.go
 create mode 100644 scheduler/service/callback.go
 create mode 100644 scheduler/service/callback_test.go
 create mode 100644 scheduler/service/mocks/callback_mock.go
 create mode 100644 scheduler/service/mocks/service_mock.go
 create mode 100644 scheduler/service/service.go
 create mode 100644 scheduler/service/service_test.go
 delete mode 100644 scheduler/supervisor/cdn.go
 delete mode 100644 scheduler/supervisor/cdn_test.go
 delete mode 100644 scheduler/supervisor/host.go
 delete mode 100644 scheduler/supervisor/host_test.go
 delete mode 100644 scheduler/supervisor/mocks/cdn_mock.go
 delete mode 100644 scheduler/supervisor/mocks/host_mock.go
 delete mode 100644 scheduler/supervisor/mocks/peer_mock.go
 delete mode 100644 scheduler/supervisor/mocks/task_mock.go
 delete mode 100644 scheduler/supervisor/peer.go
 delete mode 100644 scheduler/supervisor/peer_test.go
 delete mode 100644 scheduler/supervisor/task.go
 delete mode 100644 scheduler/supervisor/task_test.go

diff --git a/cdn/supervisor/task/manager.go b/cdn/supervisor/task/manager.go
index 0d83bd7aa84..01cec2e93a2 100644
--- a/cdn/supervisor/task/manager.go
+++ b/cdn/supervisor/task/manager.go
@@ -232,7 +232,7 @@ const (
 )
 
 func (tm *manager) GC() error {
-	logger.MetaGCLogger.Info("start the task meta gc job")
+	logger.GCLogger.Info("start the task meta gc job")
 	startTime := time.Now()
 	var gcTasks []string
 	var remainingTasks []string
@@ -247,17 +247,17 @@
 		}
 		gcTasks = append(gcTasks, taskID)
 		// gc task memory data
-		logger.MetaGCLogger.With("type", "meta").Infof("gc task: %s", taskID)
+		logger.GCLogger.With("type", "meta").Infof("gc task: %s", taskID)
 		tm.deleteTask(taskID)
 		return true
 	})
 	// slow GC detected, report it with a log warning
 	if timeDuring := time.Since(startTime); timeDuring > gcTasksTimeout {
-		logger.MetaGCLogger.With("type", "meta").Warnf("gc tasks: %d cost: %.3f", len(gcTasks),
-			timeDuring.Seconds())
+		logger.GCLogger.With("type", "meta").Warnf("gc tasks: %d cost: %.3f", len(gcTasks),
+			timeDuring.Seconds())
 	}
-	logger.MetaGCLogger.With("type", "meta").Infof("%d tasks were successfully cleared, leaving %d tasks remaining", len(gcTasks),
+	logger.GCLogger.With("type", "meta").Infof("%d tasks were successfully cleared, leaving %d tasks remaining", len(gcTasks),
 		len(remainingTasks))
-	logger.MetaGCLogger.With("type", "meta").Debugf("tasks %s were successfully cleared, leaving tasks %s remaining", gcTasks, remainingTasks)
+	logger.GCLogger.With("type", "meta").Debugf("tasks %s were successfully cleared, leaving tasks %s remaining", gcTasks, remainingTasks)
 	return nil
 }

diff --git a/client/config/dynconfig_test.go b/client/config/dynconfig_test.go
index be196373509..00453b6ca8f 100644
--- a/client/config/dynconfig_test.go
+++ b/client/config/dynconfig_test.go
@@ -43,7 +43,7 @@ func TestDynconfigNewDynconfig(t *testing.T) {
 		expect func(t *testing.T, err error)
 	}{
 		{
-			name:   "new dynconfig succeeded",
+			name:   "new dynconfig",
 			expire: 10 * time.Second,
 			hostOption: HostOption{
 				Hostname: "foo",
@@ -135,7 +135,7 @@ func TestDynconfigGet(t *testing.T) {
 		expect func(t *testing.T, dynconfig Dynconfig, data *DynconfigData)
 	}{
 		{
-			name:   "get dynconfig cache data succeeded",
+			name:   "get dynconfig cache data",
 			expire: 10 * time.Second,
 			hostOption: HostOption{
 				Hostname: "foo",
@@ -172,7 +172,7 @@
 			},
 		},
 		{
-			name:   "get dynconfig data succeeded",
+			name:   "get dynconfig data",
 			expire: 10 * time.Millisecond,
 			hostOption: HostOption{
 				Hostname: "foo",
@@ -317,7 +317,7 @@ func TestDynconfigGetSchedulers(t *testing.T) {
 		expect func(t *testing.T, dynconfig Dynconfig, data *DynconfigData)
 	}{
 		{
-			name:   "get cache schedulers succeeded",
+			name:   "get cache schedulers",
 			expire: 10 * time.Second,
 			hostOption: HostOption{
 				Hostname: "foo",
@@ -354,7 +354,7 @@
 			},
 		},
 		{
-			name:   "get schedulers succeeded",
+			name:   "get schedulers",
 			expire: 10 * time.Millisecond,
 			hostOption: HostOption{
 				Hostname: "foo",

diff --git a/cmd/dependency/dependency.go b/cmd/dependency/dependency.go
index bff19b71331..2f95310625c 100644
--- a/cmd/dependency/dependency.go
+++ b/cmd/dependency/dependency.go
@@ -162,15 +162,6 @@ func SetupQuitSignalHandler(handler func()) {
 	}()
 }
 
-func GetConfigPath(name string) string {
-	cfgFile := viper.GetString("config")
-	if cfgFile != "" {
-		return cfgFile
-	}
-
-	return filepath.Join(dfpath.DefaultConfigDir, fmt.Sprintf("%s.yaml", name))
-}
-
 // initConfig reads in config file and ENV variables if set.
 func initConfig(useConfigFile bool, name string, config interface{}) {
 	// Use config file and read once.
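[Editor's note] The manager.go hunk above folds the per-kind MetaGCLogger into a single GCLogger, carrying the GC kind as a structured "type" field instead of a dedicated logger variable. A minimal sketch of that logging pattern, assuming a plain zap SugaredLogger; the setup code here is illustrative and is not the project's dflog wiring:

package main

import "go.uber.org/zap"

// GCLogger stands in for the shared logger in internal/dflog: one logger
// for all GC jobs, with the kind ("meta", "storage", ...) attached as a
// structured field at the call site.
var GCLogger *zap.SugaredLogger

func main() {
	base, _ := zap.NewDevelopment()
	defer base.Sync()
	GCLogger = base.Sugar()

	// Hypothetical task IDs, for illustration only.
	gcTasks := []string{"task-1", "task-2"}
	GCLogger.With("type", "meta").Infof("%d tasks were successfully cleared", len(gcTasks))
}

With one logger plus a field, log pipelines can filter on type=meta versus type=storage without the code maintaining a logger variable per GC kind.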
diff --git a/cmd/scheduler/cmd/root.go b/cmd/scheduler/cmd/root.go index 3dcc03876ae..9cd8870227f 100644 --- a/cmd/scheduler/cmd/root.go +++ b/cmd/scheduler/cmd/root.go @@ -17,6 +17,7 @@ package cmd import ( + "context" "os" "github.com/pkg/errors" @@ -46,6 +47,9 @@ generate and maintain a P2P network during the download process, and push suitab DisableAutoGenTag: true, SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Initialize dfpath d, err := initDfpath(cfg.Server) if err != nil { @@ -62,12 +66,7 @@ generate and maintain a P2P network during the download process, and push suitab return err } - // Convert redis host config - if err := cfg.Convert(); err != nil { - return err - } - - return runScheduler(d) + return runScheduler(ctx, d) }, } @@ -100,7 +99,7 @@ func initDfpath(cfg *config.ServerConfig) (dfpath.Dfpath, error) { return dfpath.New(options...) } -func runScheduler(d dfpath.Dfpath) error { +func runScheduler(ctx context.Context, d dfpath.Dfpath) error { logger.Infof("Version:\n%s", version.Version()) // scheduler config values @@ -111,7 +110,7 @@ func runScheduler(d dfpath.Dfpath) error { ff := dependency.InitMonitor(cfg.Verbose, cfg.PProfPort, cfg.Telemetry) defer ff() - svr, err := scheduler.New(cfg, d) + svr, err := scheduler.New(ctx, cfg, d) if err != nil { return err } diff --git a/cmd/scheduler/main.go b/cmd/scheduler/main.go index a0a3440208d..a6a5422fa9b 100644 --- a/cmd/scheduler/main.go +++ b/cmd/scheduler/main.go @@ -18,7 +18,6 @@ package main import ( "d7y.io/dragonfly/v2/cmd/scheduler/cmd" - _ "d7y.io/dragonfly/v2/scheduler/core/scheduler/basic" ) func main() { diff --git a/docs/en/deployment/configuration/cdn.yaml b/docs/en/deployment/configuration/cdn.yaml index b5f53c93582..48aae45f079 100644 --- a/docs/en/deployment/configuration/cdn.yaml +++ b/docs/en/deployment/configuration/cdn.yaml @@ -1,5 +1,3 @@ -# This file is the template of cdn system configuration file. -# You can configure your cdn system by change the parameter according your requirement. base: # listenPort is the port cdn server listens on. # default: 8003 diff --git a/docs/en/deployment/configuration/manager.yaml b/docs/en/deployment/configuration/manager.yaml index 7faf208aee8..2d689a06a3f 100644 --- a/docs/en/deployment/configuration/manager.yaml +++ b/docs/en/deployment/configuration/manager.yaml @@ -1,6 +1,3 @@ -# This file is the template of manager configuration file. -# You can configure your manager by change the parameter according your requirement. ---- # current server info used for server server: # grpc server configure @@ -34,7 +31,6 @@ database: host: dragonfly port: 6379 db: 0 - # manager server cache # cache: # # redis cache configure diff --git a/docs/en/deployment/configuration/scheduler.yaml b/docs/en/deployment/configuration/scheduler.yaml index ec07a036615..5c2ab73725f 100644 --- a/docs/en/deployment/configuration/scheduler.yaml +++ b/docs/en/deployment/configuration/scheduler.yaml @@ -1,5 +1,21 @@ -# This file is the template of scheduler configuration file. -# You can configure your scheduler by change the parameter according your requirement. +# server scheduler instance configuration +server: + # # ip + # ip: 127.0.0.1 + # # host + # host: localhost + # port is the ip and port scheduler server listens on. 
+  port: 8002
+  # listenLimit limits the number of concurrent requests
+  listenLimit: 1000
+  # cacheDir is dynconfig cache storage directory
+  # in linux, default value is /var/cache/dragonfly
+  # in macos(just for testing), default value is /Users/$USER/.dragonfly/cache
+  cacheDir: ""
+  # logDir is the log storage directory
+  # in linux, default value is /var/log/dragonfly
+  # in macos(just for testing), default value is /Users/$USER/.dragonfly/logs
+  logDir: ""
 
 # scheduler policy configuration
 scheduler:
@@ -10,66 +26,41 @@ scheduler:
   # and the compiled `d7y-scheduler-plugin-evaluator.so` file is added to
   # the dragonfly working directory plugins
   algorithm: default
-  # workerNum is the number of goroutines that perform scheduling tasks
-  # default:
-  workerNum: 12
-  # backSourceCount is the number of back-to-origin clients when the CDN is disable or unavailable
-  # default: 3
+  # backSourceCount is the number of back-to-source clients when the CDN is unavailable
   backSourceCount: 3
-  # accessWindow is access interval window that can trigger CDN back to the source again
-  # default: 3m
-  accessWindow: 3m
-  # candidateParentCount is number of candidate parent nodes
-  # default: 10
-  candidateParentCount: 10
-  # scheduler is currently effective scheduling policy
-  # default: basic
-  scheduler: basic
-  # openMonitor Whether to enable monitoring, currently only the current peer list status information is monitored
-  # default: false
-  openMonitor: false
+  # retryLimit is the maximum number of scheduling retries
+  retryLimit: 10
+  # retryInterval is the interval between scheduling retries
+  retryInterval: 1s
   # gc metadata configuration
   gc:
     # peerGCInterval peer's gc interval
     peerGCInterval: 1m
     # peerTTL peer's TTL duration
-    peerTTL: 10m
-    # peerTTI peer's TTI duration
-    peerTTI: 3m
+    peerTTL: 5m
     # taskGCInterval task's gc interval
     taskGCInterval: 1m
     # taskTTL task's TTL duration
    taskTTL: 10m
-    # taskTTI task's TTI duration
-    taskTTI: 3m
-
-# server scheduler instance configuration
-server:
-  # ip
-  # ip: 127.0.0.1
-  # host
-  # host: localhost
-  # ListenPort is the ip and port scheduler server listens on.
- # default: 8002 - port: 8002 - - # cacheDir is dynconfig cache storage directory - # in linux, default value is /var/cache/dragonfly - # in macos(just for testing), default value is /Users/$USER/.dragonfly/cache - cacheDir: "" - - # logDir is the log storage directory - # in linux, default value is /var/log/dragonfly - # in macos(just for testing), default value is /Users/$USER/.dragonfly/logs - logDir: "" # dynamic data configuration dynConfig: - # dynamic data source type - type: manager + # dynamic config refresh interval + refreshInterval: 5 * time.Minute + +# scheduler host configuration +host: + # idc is the idc of scheduler instance + idc: "" + # netTopology is the net topology of scheduler instance + netTopology: "" + # location is the location of scheduler instance + location: "" # manager configuration manager: + # scheduler enable contact with manager + enable: true # addr manager access address addr: 127.0.0.1:65003 # schedulerClusterID cluster id to which scheduler instance belongs @@ -79,21 +70,16 @@ manager: # interval interval: 5s -# host scheduler host configuration -host: - # location is the location of scheduler instance - location: "" - # idc is the idc of scheduler instance - idc: "" - # machinery async job configuration, see https://github.com/RichardKnop/machinery job: - # globalWorkerNum - globalWorkerNum: 2 - # schedulerWorkerNum - schedulerWorkerNum: 3 - # localWorkerNum - localWorkerNum: 3 + # scheduler enable job service + enable: true + # number of workers in global queue + globalWorkerNum: 1 + # number of workers in scheduler queue + schedulerWorkerNum: 1 + # number of workers in local queue + localWorkerNum: 5 # redis configuration redis: # host @@ -103,20 +89,23 @@ job: # password password: "" # brokerDB - brokerDB: "" + brokerDB: 1 # backendDB - backendDB: "" + backendDB: 2 -# whether to disable CDN -# default: false -disableCDN: false +# enable prometheus metrics +metrics: + # scheduler enable metrics service + enable: false + # metrics service address + addr: ":8000" + # enable peer host metrics + enablePeerHost: false # console shows log on console -# default: false console: false # Whether to enable debug level logger and enable pprof -# default: false verbose: false # listen port for pprof, only valid when the verbose option is true @@ -124,14 +113,4 @@ verbose: false pprofPort: -1 # jaeger endpoint url, like: http://jaeger.dragonfly.svc:14268/api/traces -# default: "" jaeger: "" - -# service name used in tracer -# default: dragonfly-scheduler -service-name: dragonfly-scheduler - -# enable prometheus metrics -# metrics: -# # metrics service address -# addr: ":8000" diff --git a/docs/zh-CN/deployment/configuration/cdn.yaml b/docs/zh-CN/deployment/configuration/cdn.yaml index e9e2d544b90..1e92355d55d 100644 --- a/docs/zh-CN/deployment/configuration/cdn.yaml +++ b/docs/zh-CN/deployment/configuration/cdn.yaml @@ -1,5 +1,3 @@ -# CDN 配置模版 -# 你可以通过修改这里的配置项来自定义你的 CDN base: # CDN 服务监听的端口 # 默认值:8003 diff --git a/docs/zh-CN/deployment/configuration/manager.yaml b/docs/zh-CN/deployment/configuration/manager.yaml index 60930e484cb..0a7f1355e86 100644 --- a/docs/zh-CN/deployment/configuration/manager.yaml +++ b/docs/zh-CN/deployment/configuration/manager.yaml @@ -1,6 +1,4 @@ -# 此文件是 manager 的配置文件模板,你可以通过根据需要改变对应的值来配置 manager 服务。 ---- -# 当前的服务配置 +# 服务配置 server: # grpc 服务配置 grpc: @@ -32,7 +30,6 @@ database: host: dragonfly port: 6379 db: 0 - # 缓存配置 # cache: # # redis 缓存配置 diff --git a/docs/zh-CN/deployment/configuration/scheduler.yaml 
b/docs/zh-CN/deployment/configuration/scheduler.yaml index 6ffd78214d3..32c4b12afda 100644 --- a/docs/zh-CN/deployment/configuration/scheduler.yaml +++ b/docs/zh-CN/deployment/configuration/scheduler.yaml @@ -1,4 +1,21 @@ -# scheduler 配置文件模版, 您可以根据您的需求更改这里的参数来配置您的 Scheduler +# scheduler 服务实例配置信息 +server: + # # 服务 IP + # ip: 127.0.0.1 + # # 服务地址 + # host: localhost + # 服务监听端口 + port: + # 限制请求并发数 + listenLimit: 1000 + # daemon 动态配置缓存目录 + # linux 上默认目录 /var/cache/dragonfly + # macos(仅开发、测试), 默认目录是 /Users/$USER/.dragonfly/cache + cacheDir: "" + # daemon 日志目录 + # linux 上默认目录 /var/log/dragonfly + # macos(仅开发、测试), 默认目录是 /Users/$USER/.dragonfly/logs + logDir: "" # scheduler 调度策略配置 scheduler: @@ -7,132 +24,91 @@ scheduler: # 也支持用户 plugin 扩展的方式,值为 "plugin" # 并且在 dragonfly 工作目录 plugins 中添加编译好的 `d7y-scheduler-plugin-evaluator.so` 文件 algorithm: default - # workerNum 执行调度任务处理的 goroutine 数量 - # default: 默认机器可用的 CPU 数量 - workerNum: 12 - # backSourceCount 当 CDN 禁用或不可用时,回源客户端的数量 - # default: 3 + # 单个任务允许客户端回源的数量 backSourceCount: 3 - # accessWindow 可触发 CDN 回源的时间间隔窗口 - # default: 3m - accessWindow: 3m - # candidateParentCount 候选父节点数量, - # default: 10 - candidateParentCount: 10 - # scheduler 当前生效的 scheduler 调度策略 - # default: basic - scheduler: basic - # cdnLoad CDN 节点可以提供上传的最大负载 - # default: 100 - cdnLoad: 100 - # clientLoad 普通客户端节点可以提供上传的最大负载 - # default: 10 - clientLoad: 10 - # openMonitor 是否打开监控,目前只监控了当前的 peer 列表状态信息 - # default: false - openMonitor: false - # gc 元数据回收策略 + # 调度重试次数限制 + retryLimit: 10 + # 调度重试时间间隔 + retryInterval: 1s + # 数据回收策略 gc: - # peerGCInterval peer 的回收间隔 + # peer 的回收间隔 peerGCInterval: 1m - # peerTTL peer 的离开时间, 距离上次 peer 的访问时间超过改值则 peer 会被设置成 leave 状态 - peerTTL: 10m - # peerTTI peer 的 TTI 时间,距离上次 peer 的访问时间超过改值则 peer 会被设置成 zombie 状态 - peerTTI: 3m - # taskGCInterval task 的回收间隔 + # 不活跃的 peer 的存活时间 + peerTTL: 5m + # task 的回收间隔 taskGCInterval: 1m - # taskTTL task 的离开时间, 距离上次 task 的访问时间超过改值则 task 会被设置成 leave 状态 + # 不活跃的 task 的存活时间 taskTTL: 10m - # taskTTI 的 TTI 时间,距离上次 task 的访问时间超过改值则 task 会被设置成 zombie 状态 - taskTTI: 3m - -# server scheduler 服务实例配置信息 -server: - # ip - # ip: 127.0.0.1 - # host - # host: localhost - # ListenPort is the ip and port scheduler server listens on. 
- # default: 8002 - port: - # daemon 动态配置缓存目录 - # linux 上默认目录 /var/cache/dragonfly - # macos(仅开发、测试), 默认目录是 /Users/$USER/.dragonfly/cache - cacheDir: "" - - # daemon 日志目录 - # linux 上默认目录 /var/log/dragonfly - # macos(仅开发、测试), 默认目录是 /Users/$USER/.dragonfly/logs - logDir: "" # 动态数据配置 dynConfig: - # type 动态数据来源类型 - type: manager + # 动态数据刷新间隔时间 + refreshInterval: 5 * time.Minute + +# 实例主机信息 +host: + # 实例所在机房 + idc: "" + # 实例网络拓扑信息 + netTopology: "" + # 实例所在的地理位置信息 + location: "" # manager 配置 manager: - # addr manager 访问地址 + # 启动与 manager 的连接 + enable: true + # manager 访问地址 addr: "" - # schedulerClusterID 要注册的 scheduler 集群 ID + # 注册的 scheduler 集群 ID schedulerClusterID: - # keepAlive 和 manager 保持心跳的配置 + # manager 心跳配置 keepAlive: - # interval 保持心跳的时间间隔 + # 保持心跳的时间间隔 interval: 5s -# scheduler 自身主机信息 -host: - # location scheduler 实例所在的地理位置信息 - location: "" - # idc scheduler 实例所在机房 - idc: "" - # machinery 异步任务配置,配置参考 https://github.com/RichardKnop/machinery job: - # globalWorkerNum - globalWorkerNum: 2 - # schedulerWorkerNum - schedulerWorkerNum: 3 - # localWorkerNum - localWorkerNum: 3 - # redis configuration + # 启动 job 服务 + enable: true + # global 通道 worker 数量 + globalWorkerNum: 1 + # scheduler 通道 worker 数量 + schedulerWorkerNum: 1 + # local 通道 worker 数量 + localWorkerNum: 5 + # redis 配置 redis: - # host redis 服务实例地址 + # 服务地址 host: "" - # port redis 服务实例端口 + # 服务端口 port: 6379 - # password + # 密码 password: "" - # brokerDB - brokerDB: "" - # backendDB - backendDB: "" + # broker 数据库 + brokerDB: 1 + # backend 数据库 + backendDB: 2 -# disableCDN 是否停用 CDN -# default: false -disableCDN: false +# 开启数据收集服务 +metrics: + # 启动数据收集服务 + enable: false + # 数据服务地址 + addr: ":8000" + # 开机收集 peer host 数据 + enablePeerHost: false # Console 是否在控制台程序中显示日志 -# default: false console: false # verbose 是否使用调试级别的日志、是否启用 pprof。 -# default: false verbose: false # pprofPort pprof 监听的端口,仅在 verbose 为 true 时可用 -# default is -1. If it is 0, pprof will use a random port. 
pprofPort: -1 # jaeger 地址 # 默认使用空字符串(不配置 jaeger), 例如: http://jaeger.dragonfly.svc:14268/api/traces jaeger: "" - -# tracer 中使用的 service-name -# 默认值:dragonfly-cdn -service-name: dragonfly-scheduler -# 开启数据收集服务 -# metrics: -# # 数据服务地址 -# addr: ":8000" diff --git a/go.mod b/go.mod index e09d27c1a7a..fe57054364a 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/agiledragon/gomonkey/v2 v2.3.0 github.com/aliyun/aliyun-oss-go-sdk v2.1.6+incompatible github.com/appleboy/gin-jwt/v2 v2.6.5-0.20210827121450-79689222c755 + github.com/bits-and-blooms/bitset v1.2.1 github.com/casbin/casbin/v2 v2.34.1 github.com/casbin/gorm-adapter/v3 v3.3.2 github.com/colinmarc/hdfs/v2 v2.2.0 @@ -37,10 +38,10 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/jarcoal/httpmock v1.0.8 + github.com/looplab/fsm v0.3.0 github.com/mcuadros/go-gin-prometheus v0.1.0 github.com/mitchellh/mapstructure v1.4.1 github.com/montanaflynn/stats v0.6.6 - github.com/olekukonko/tablewriter v0.0.5 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.14.0 github.com/opencontainers/go-digest v1.0.0 @@ -66,6 +67,7 @@ require ( go.uber.org/atomic v1.9.0 go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 + golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 @@ -79,7 +81,6 @@ require ( gorm.io/gorm v1.21.15 gorm.io/plugin/soft_delete v1.0.2 k8s.io/apimachinery v0.20.6 - k8s.io/client-go v11.0.0+incompatible moul.io/zapgorm2 v1.1.0 ) @@ -103,7 +104,6 @@ require ( github.com/fsnotify/fsnotify v1.4.9 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-echarts/go-echarts/v2 v2.2.3 // indirect - github.com/go-logr/logr v0.2.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.19.5 // indirect @@ -191,7 +191,6 @@ require ( golang.org/x/exp v0.0.0-20201221025956-e89b829e73ea // indirect golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect golang.org/x/mod v0.4.2 // indirect - golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.4 // indirect @@ -204,5 +203,4 @@ require ( gorm.io/driver/postgres v1.0.8 // indirect gorm.io/driver/sqlserver v1.0.4 // indirect gorm.io/plugin/dbresolver v1.1.0 // indirect - k8s.io/klog/v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 249c667e155..ffd95775b81 100644 --- a/go.sum +++ b/go.sum @@ -94,6 +94,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.1 h1:M+/hrU9xlMp7t4TyTDQW97d3tRPVuKFC6zBEK16QnXY= +github.com/bits-and-blooms/bitset v1.2.1/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= 
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0= @@ -209,7 +211,6 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -567,6 +568,8 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/looplab/fsm v0.3.0 h1:kIgNS3Yyud1tyxhG8kDqh853B7QqwnlWdgL3TD2s3Sw= +github.com/looplab/fsm v0.3.0/go.mod h1:PmD3fFvQEIsjMEfvZdrCDZ6y8VwKTwWNjlpEr6IKPO4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= @@ -593,7 +596,6 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.3 h1:j7a/xn1U6TKA/PHHxqZuzh64CdtRc7rU9M+AvkOl5bA= @@ -636,8 +638,6 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1391,11 +1391,8 @@ honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/apimachinery v0.20.6 h1:R5p3SlhaABYShQSO6LpPsYHjV05Q+79eBUR0Ut/f4tk= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/client-go v11.0.0+incompatible 
h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= -k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= moul.io/zapgorm2 v1.1.0 h1:qwAlMBYf+qJkJ7PAzJl4oCe6eS6QGiKAXUPeis0+RBE= diff --git a/internal/dflog/logger.go b/internal/dflog/logger.go index 13d7bf81eb4..8c2dfec5818 100644 --- a/internal/dflog/logger.go +++ b/internal/dflog/logger.go @@ -26,7 +26,7 @@ import ( var ( CoreLogger *zap.SugaredLogger GrpcLogger *zap.SugaredLogger - MetaGCLogger *zap.SugaredLogger + GCLogger *zap.SugaredLogger StorageGCLogger *zap.SugaredLogger JobLogger *zap.SugaredLogger KeepAliveLogger *zap.SugaredLogger @@ -56,7 +56,7 @@ func SetCoreLogger(log *zap.SugaredLogger) { } func SetGCLogger(log *zap.SugaredLogger) { - MetaGCLogger = log + GCLogger = log } func SetStorageGCLogger(log *zap.SugaredLogger) { @@ -94,21 +94,27 @@ func With(args ...interface{}) *SugaredLoggerOnWith { } } +func WithHostID(hostID string) *SugaredLoggerOnWith { + return &SugaredLoggerOnWith{ + withArgs: []interface{}{"hostID", hostID}, + } +} + func WithTaskID(taskID string) *SugaredLoggerOnWith { return &SugaredLoggerOnWith{ - withArgs: []interface{}{"taskId", taskID}, + withArgs: []interface{}{"taskID", taskID}, } } func WithTaskAndPeerID(taskID, peerID string) *SugaredLoggerOnWith { return &SugaredLoggerOnWith{ - withArgs: []interface{}{"taskId", taskID, "peerID", peerID}, + withArgs: []interface{}{"taskID", taskID, "peerID", peerID}, } } func WithTaskIDAndURL(taskID, url string) *SugaredLoggerOnWith { return &SugaredLoggerOnWith{ - withArgs: []interface{}{"taskId", taskID, "url", url}, + withArgs: []interface{}{"taskID", taskID, "url", url}, } } diff --git a/internal/job/queue_test.go b/internal/job/queue_test.go index e1594d13ffc..a7dc25b4c2b 100644 --- a/internal/job/queue_test.go +++ b/internal/job/queue_test.go @@ -30,7 +30,7 @@ func TestJobGetSchedulerQueue(t *testing.T) { expect func(t *testing.T, result Queue, err error) }{ { - name: "get scheduler queue succeeded", + name: "get scheduler queue", clusterID: 1, hostname: "foo", expect: func(t *testing.T, result Queue, err error) { @@ -74,7 +74,7 @@ func TestJobGetCDNQueue(t *testing.T) { expect func(t *testing.T, result Queue, err error) }{ { - name: "get cdn queue succeeded", + name: "get cdn queue", clusterID: 1, hostname: "foo", expect: func(t *testing.T, result Queue, err error) { diff --git a/manager/job/preheat.go b/manager/job/preheat.go index 18cc7b45962..89155a671f1 100644 --- a/manager/job/preheat.go +++ b/manager/job/preheat.go @@ -42,7 +42,7 @@ import ( "d7y.io/dragonfly/v2/pkg/util/net/httputils" ) -var tracer = otel.Tracer("sender") +var tracer = otel.Tracer("manager") type PreheatType string @@ -160,7 +160,7 @@ func (p *preheat) createGroupJob(ctx context.Context, files []*internaljob.Prehe return nil, err } - logger.Infof("create preheat group job succeeded, group uuid: %s, urls: %s", group.GroupUUID, urls) + logger.Infof("create preheat group job successfully, group uuid: %s, urls: %s", group.GroupUUID, urls) return &internaljob.GroupJobState{ GroupUUID: group.GroupUUID, State: 
machineryv1tasks.StatePending, diff --git a/pkg/container/list/sorted_list_test.go b/pkg/container/list/sorted_list_test.go index 2201cecc55b..9d411626bca 100644 --- a/pkg/container/list/sorted_list_test.go +++ b/pkg/container/list/sorted_list_test.go @@ -37,7 +37,7 @@ func TestSortedListInsert(t *testing.T) { expect func(t *testing.T, l SortedList, items ...Item) }{ { - name: "insert values succeeded", + name: "insert values", mock: func(m ...*mocks.MockItemMockRecorder) {}, expect: func(t *testing.T, l SortedList, items ...Item) { assert := assert.New(t) @@ -47,7 +47,7 @@ func TestSortedListInsert(t *testing.T) { }, }, { - name: "insert multi value succeeded", + name: "insert multi value", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -130,7 +130,7 @@ func TestSortedListRemove(t *testing.T) { expect func(t *testing.T, l SortedList, items ...Item) }{ { - name: "remove values succeeded", + name: "remove values", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -216,7 +216,7 @@ func TestSortedListContains(t *testing.T) { expect func(t *testing.T, l SortedList, items ...Item) }{ { - name: "contains values succeeded", + name: "contains values", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -288,7 +288,7 @@ func TestSortedListLen(t *testing.T) { expect func(t *testing.T, l SortedList, items ...Item) }{ { - name: "get length succeeded", + name: "get length", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -360,7 +360,7 @@ func TestSortedListRange(t *testing.T) { expect func(t *testing.T, l SortedList, items ...Item) }{ { - name: "range succeeded", + name: "range values", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -382,7 +382,7 @@ func TestSortedListRange(t *testing.T) { }, }, { - name: "range multi values succeeded", + name: "range multi values", mock: func(m ...*mocks.MockItemMockRecorder) { for i := range m { m[i].SortedValue().Return(i).AnyTimes() @@ -523,7 +523,7 @@ func TestSortedListReverseRange(t *testing.T) { expect func(t *testing.T, l SortedList, items ...Item) }{ { - name: "reverse range succeeded", + name: "reverse range values", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -545,7 +545,7 @@ func TestSortedListReverseRange(t *testing.T) { }, }, { - name: "reverse range multi values succeeded", + name: "reverse range multi values", mock: func(m ...*mocks.MockItemMockRecorder) { for i := range m { m[i].SortedValue().Return(i).AnyTimes() diff --git a/pkg/container/list/sorted_unique_list_test.go b/pkg/container/list/sorted_unique_list_test.go index 221908a5c74..3622d5ca3ce 100644 --- a/pkg/container/list/sorted_unique_list_test.go +++ b/pkg/container/list/sorted_unique_list_test.go @@ -35,7 +35,7 @@ func TestSortedUniqueListInsert(t *testing.T) { expect func(t *testing.T, ul SortedUniqueList, items ...Item) }{ { - name: "insert values succeeded", + name: "insert values", mock: func(m ...*mocks.MockItemMockRecorder) {}, expect: func(t *testing.T, ul SortedUniqueList, items ...Item) { assert := assert.New(t) @@ -45,7 +45,7 @@ func TestSortedUniqueListInsert(t *testing.T) { }, }, { - name: "insert multi value succeeded", + name: "insert multi values", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( 
m[0].SortedValue().Return(0).Times(1), @@ -129,7 +129,7 @@ func TestSortedUniqueListRemove(t *testing.T) { expect func(t *testing.T, ul SortedUniqueList, items ...Item) }{ { - name: "remove values succeeded", + name: "remove values", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -217,7 +217,7 @@ func TestSortedUniqueListContains(t *testing.T) { expect func(t *testing.T, ul SortedUniqueList, items ...Item) }{ { - name: "contains values succeeded", + name: "contains values", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -292,7 +292,7 @@ func TestSortedUniqueListLen(t *testing.T) { expect func(t *testing.T, ul SortedUniqueList, items ...Item) }{ { - name: "get length succeeded", + name: "get length", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -380,7 +380,7 @@ func TestSortedUniqueListRange(t *testing.T) { expect func(t *testing.T, ul SortedUniqueList, items ...Item) }{ { - name: "range succeeded", + name: "range values", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -402,7 +402,7 @@ func TestSortedUniqueListRange(t *testing.T) { }, }, { - name: "range multi values succeeded", + name: "range multi values", mock: func(m ...*mocks.MockItemMockRecorder) { for i := range m { m[i].SortedValue().Return(i).AnyTimes() @@ -551,7 +551,7 @@ func TestSortedUniqueListReverseRange(t *testing.T) { expect func(t *testing.T, ul SortedUniqueList, items ...Item) }{ { - name: "reverse range succeeded", + name: "reverse range values", mock: func(m ...*mocks.MockItemMockRecorder) { gomock.InOrder( m[0].SortedValue().Return(0).Times(1), @@ -573,7 +573,7 @@ func TestSortedUniqueListReverseRange(t *testing.T) { }, }, { - name: "reverse range multi values succeeded", + name: "reverse range multi values", mock: func(m ...*mocks.MockItemMockRecorder) { for i := range m { m[i].SortedValue().Return(i).AnyTimes() diff --git a/pkg/container/set/safe_set_test.go b/pkg/container/set/safe_set_test.go index 101d9ec5a7f..c6f30f3dc89 100644 --- a/pkg/container/set/safe_set_test.go +++ b/pkg/container/set/safe_set_test.go @@ -34,7 +34,7 @@ func TestSafeSetAdd(t *testing.T) { expect func(t *testing.T, ok bool, s Set, value interface{}) }{ { - name: "add value succeeded", + name: "add value", value: "foo", expect: func(t *testing.T, ok bool, s Set, value interface{}) { assert := assert.New(t) @@ -92,7 +92,7 @@ func TestSafeSetDelete(t *testing.T) { expect func(t *testing.T, s Set, value interface{}) }{ { - name: "delete value succeeded", + name: "delete value", value: "foo", expect: func(t *testing.T, s Set, value interface{}) { assert := assert.New(t) @@ -151,7 +151,7 @@ func TestSafeSetContains(t *testing.T) { expect func(t *testing.T, s Set, value interface{}) }{ { - name: "contains value succeeded", + name: "contains value", value: "foo", expect: func(t *testing.T, s Set, value interface{}) { assert := assert.New(t) @@ -205,7 +205,7 @@ func TestSetSafeLen(t *testing.T) { expect func(t *testing.T, s Set) }{ { - name: "get length succeeded", + name: "get length", expect: func(t *testing.T, s Set) { assert := assert.New(t) s.Add("foo") @@ -259,7 +259,7 @@ func TestSafeSetValues(t *testing.T) { expect func(t *testing.T, s Set) }{ { - name: "get values succeeded", + name: "get values", expect: func(t *testing.T, s Set) { assert := assert.New(t) s.Add("foo") @@ -274,7 +274,7 @@ func 
TestSafeSetValues(t *testing.T) { }, }, { - name: "get multi values succeeded", + name: "get multi values", expect: func(t *testing.T, s Set) { assert := assert.New(t) s.Add("foo") @@ -323,7 +323,7 @@ func TestSafeSetRange(t *testing.T) { expect func(t *testing.T, s Set) }{ { - name: "range succeeded", + name: "range values", expect: func(t *testing.T, s Set) { assert := assert.New(t) s.Add("foo") @@ -334,7 +334,7 @@ func TestSafeSetRange(t *testing.T) { }, }, { - name: "range failed", + name: "range values failed", expect: func(t *testing.T, s Set) { assert := assert.New(t) s.Add("foo") diff --git a/pkg/container/set/set_test.go b/pkg/container/set/set_test.go index db2d5b8184f..4e7af8576ec 100644 --- a/pkg/container/set/set_test.go +++ b/pkg/container/set/set_test.go @@ -29,7 +29,7 @@ func TestSetAdd(t *testing.T) { expect func(t *testing.T, ok bool, s Set, value interface{}) }{ { - name: "add value succeeded", + name: "add value", value: "foo", expect: func(t *testing.T, ok bool, s Set, value interface{}) { assert := assert.New(t) @@ -64,7 +64,7 @@ func TestSetDelete(t *testing.T) { expect func(t *testing.T, s Set, value interface{}) }{ { - name: "delete value succeeded", + name: "delete value", value: "foo", expect: func(t *testing.T, s Set, value interface{}) { assert := assert.New(t) @@ -99,7 +99,7 @@ func TestSetContains(t *testing.T) { expect func(t *testing.T, s Set, value interface{}) }{ { - name: "contains value succeeded", + name: "contains value", value: "foo", expect: func(t *testing.T, s Set, value interface{}) { assert := assert.New(t) @@ -131,7 +131,7 @@ func TestSetLen(t *testing.T) { expect func(t *testing.T, s Set) }{ { - name: "get length succeeded", + name: "get length", expect: func(t *testing.T, s Set) { assert := assert.New(t) s.Add("foo") @@ -161,7 +161,7 @@ func TestSetValues(t *testing.T) { expect func(t *testing.T, s Set) }{ { - name: "get values succeeded", + name: "get values", expect: func(t *testing.T, s Set) { assert := assert.New(t) s.Add("foo") @@ -176,7 +176,7 @@ func TestSetValues(t *testing.T) { }, }, { - name: "get multi values succeeded", + name: "get multi values", expect: func(t *testing.T, s Set) { assert := assert.New(t) s.Add("foo") @@ -201,7 +201,7 @@ func TestSetRange(t *testing.T) { expect func(t *testing.T, s Set) }{ { - name: "range succeeded", + name: "range values", expect: func(t *testing.T, s Set) { assert := assert.New(t) s.Add("foo") diff --git a/pkg/dfpath/dfpath_test.go b/pkg/dfpath/dfpath_test.go index ca112b3a9e4..16c17769f19 100644 --- a/pkg/dfpath/dfpath_test.go +++ b/pkg/dfpath/dfpath_test.go @@ -29,7 +29,7 @@ func TestNew(t *testing.T) { expect func(t *testing.T, options []Option) }{ { - name: "new dfpath succeeded", + name: "new dfpath", expect: func(t *testing.T, options []Option) { assert := assert.New(t) d, err := New(options...) 
@@ -41,7 +41,7 @@ func TestNew(t *testing.T) { }, }, { - name: "new dfpath succeeded by workHome", + name: "new dfpath by workHome", options: []Option{WithWorkHome("foo")}, expect: func(t *testing.T, options []Option) { assert := assert.New(t) @@ -54,7 +54,7 @@ func TestNew(t *testing.T) { }, }, { - name: "new dfpath succeeded by cacheDir", + name: "new dfpath by cacheDir", options: []Option{WithCacheDir("foo")}, expect: func(t *testing.T, options []Option) { assert := assert.New(t) @@ -67,7 +67,7 @@ func TestNew(t *testing.T) { }, }, { - name: "new dfpath succeeded by logDir", + name: "new dfpath by logDir", options: []Option{WithLogDir("foo")}, expect: func(t *testing.T, options []Option) { assert := assert.New(t) diff --git a/pkg/dfpath/mocks/dfpath_mock.go b/pkg/dfpath/mocks/dfpath_mock.go new file mode 100644 index 00000000000..9ed3410b5ab --- /dev/null +++ b/pkg/dfpath/mocks/dfpath_mock.go @@ -0,0 +1,146 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/dfpath/dfpath.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockDfpath is a mock of Dfpath interface. +type MockDfpath struct { + ctrl *gomock.Controller + recorder *MockDfpathMockRecorder +} + +// MockDfpathMockRecorder is the mock recorder for MockDfpath. +type MockDfpathMockRecorder struct { + mock *MockDfpath +} + +// NewMockDfpath creates a new mock instance. +func NewMockDfpath(ctrl *gomock.Controller) *MockDfpath { + mock := &MockDfpath{ctrl: ctrl} + mock.recorder = &MockDfpathMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDfpath) EXPECT() *MockDfpathMockRecorder { + return m.recorder +} + +// CacheDir mocks base method. +func (m *MockDfpath) CacheDir() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CacheDir") + ret0, _ := ret[0].(string) + return ret0 +} + +// CacheDir indicates an expected call of CacheDir. +func (mr *MockDfpathMockRecorder) CacheDir() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CacheDir", reflect.TypeOf((*MockDfpath)(nil).CacheDir)) +} + +// DaemonLockPath mocks base method. +func (m *MockDfpath) DaemonLockPath() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DaemonLockPath") + ret0, _ := ret[0].(string) + return ret0 +} + +// DaemonLockPath indicates an expected call of DaemonLockPath. +func (mr *MockDfpathMockRecorder) DaemonLockPath() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DaemonLockPath", reflect.TypeOf((*MockDfpath)(nil).DaemonLockPath)) +} + +// DaemonSockPath mocks base method. +func (m *MockDfpath) DaemonSockPath() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DaemonSockPath") + ret0, _ := ret[0].(string) + return ret0 +} + +// DaemonSockPath indicates an expected call of DaemonSockPath. +func (mr *MockDfpathMockRecorder) DaemonSockPath() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DaemonSockPath", reflect.TypeOf((*MockDfpath)(nil).DaemonSockPath)) +} + +// DataDir mocks base method. +func (m *MockDfpath) DataDir() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DataDir") + ret0, _ := ret[0].(string) + return ret0 +} + +// DataDir indicates an expected call of DataDir. 
+func (mr *MockDfpathMockRecorder) DataDir() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DataDir", reflect.TypeOf((*MockDfpath)(nil).DataDir)) +} + +// DfgetLockPath mocks base method. +func (m *MockDfpath) DfgetLockPath() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DfgetLockPath") + ret0, _ := ret[0].(string) + return ret0 +} + +// DfgetLockPath indicates an expected call of DfgetLockPath. +func (mr *MockDfpathMockRecorder) DfgetLockPath() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DfgetLockPath", reflect.TypeOf((*MockDfpath)(nil).DfgetLockPath)) +} + +// LogDir mocks base method. +func (m *MockDfpath) LogDir() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LogDir") + ret0, _ := ret[0].(string) + return ret0 +} + +// LogDir indicates an expected call of LogDir. +func (mr *MockDfpathMockRecorder) LogDir() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogDir", reflect.TypeOf((*MockDfpath)(nil).LogDir)) +} + +// PluginDir mocks base method. +func (m *MockDfpath) PluginDir() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PluginDir") + ret0, _ := ret[0].(string) + return ret0 +} + +// PluginDir indicates an expected call of PluginDir. +func (mr *MockDfpathMockRecorder) PluginDir() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PluginDir", reflect.TypeOf((*MockDfpath)(nil).PluginDir)) +} + +// WorkHome mocks base method. +func (m *MockDfpath) WorkHome() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WorkHome") + ret0, _ := ret[0].(string) + return ret0 +} + +// WorkHome indicates an expected call of WorkHome. +func (mr *MockDfpathMockRecorder) WorkHome() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WorkHome", reflect.TypeOf((*MockDfpath)(nil).WorkHome)) +} diff --git a/pkg/gc/gc.go b/pkg/gc/gc.go index e28bedd40ed..1d195da43d1 100644 --- a/pkg/gc/gc.go +++ b/pkg/gc/gc.go @@ -17,7 +17,7 @@ package gc import ( - "errors" + "fmt" "sync" "time" @@ -86,7 +86,7 @@ func (g gc) Add(t Task) error { func (g gc) Run(id string) error { v, ok := g.tasks.Load(id) if !ok { - return errors.New("can not find the task") + return fmt.Errorf("can not find task %s", id) } go g.run(v.(Task)) diff --git a/scheduler/supervisor/mocks/gc_mock.go b/pkg/gc/gc_mock.go similarity index 92% rename from scheduler/supervisor/mocks/gc_mock.go rename to pkg/gc/gc_mock.go index 547f124b1a3..8661a50d44d 100644 --- a/scheduler/supervisor/mocks/gc_mock.go +++ b/pkg/gc/gc_mock.go @@ -1,13 +1,12 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: d7y.io/dragonfly/v2/pkg/gc (interfaces: GC) +// Source: pkg/gc/gc.go -// Package mocks is a generated GoMock package. -package mocks +// Package gc is a generated GoMock package. +package gc import ( reflect "reflect" - gc "d7y.io/dragonfly/v2/pkg/gc" gomock "github.com/golang/mock/gomock" ) @@ -35,7 +34,7 @@ func (m *MockGC) EXPECT() *MockGCMockRecorder { } // Add mocks base method. 
-func (m *MockGC) Add(arg0 gc.Task) error { +func (m *MockGC) Add(arg0 Task) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Add", arg0) ret0, _ := ret[0].(error) diff --git a/pkg/gc/gc_test.go b/pkg/gc/gc_test.go index 77c7ff5e7c7..0e6d70b804a 100644 --- a/pkg/gc/gc_test.go +++ b/pkg/gc/gc_test.go @@ -24,8 +24,6 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - - "d7y.io/dragonfly/v2/pkg/gc/mocks" ) func TestGCAdd(t *testing.T) { @@ -35,7 +33,7 @@ func TestGCAdd(t *testing.T) { expect func(t *testing.T, err error) }{ { - name: "new GC instance succeeded", + name: "new GC", task: Task{ ID: "gc", Interval: 2 * time.Second, @@ -87,8 +85,8 @@ func TestGCAdd(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctl := gomock.NewController(t) - mockLogger := mocks.NewMockLogger(ctl) - mockRunner := mocks.NewMockRunner(ctl) + mockLogger := NewMockLogger(ctl) + mockRunner := NewMockRunner(ctl) gc := New(WithLogger(mockLogger)) @@ -106,16 +104,16 @@ func TestGCRun(t *testing.T) { tests := []struct { name string task Task - run func(gc GC, id string, ml *mocks.MockLogger, mr *mocks.MockRunner, t *testing.T) + run func(gc GC, id string, ml *MockLogger, mr *MockRunner, t *testing.T) }{ { - name: "run task succeeded", + name: "run task", task: Task{ ID: "foo", Interval: 2 * time.Hour, Timeout: 1 * time.Hour, }, - run: func(gc GC, id string, ml *mocks.MockLogger, mr *mocks.MockRunner, t *testing.T) { + run: func(gc GC, id string, ml *MockLogger, mr *MockRunner, t *testing.T) { var wg sync.WaitGroup wg.Add(3) defer wg.Wait() @@ -138,7 +136,7 @@ func TestGCRun(t *testing.T) { Interval: 2 * time.Hour, Timeout: 1 * time.Hour, }, - run: func(gc GC, id string, ml *mocks.MockLogger, mr *mocks.MockRunner, t *testing.T) { + run: func(gc GC, id string, ml *MockLogger, mr *MockRunner, t *testing.T) { var wg sync.WaitGroup wg.Add(4) defer wg.Wait() @@ -163,9 +161,9 @@ func TestGCRun(t *testing.T) { Interval: 2 * time.Hour, Timeout: 1 * time.Hour, }, - run: func(gc GC, id string, ml *mocks.MockLogger, mr *mocks.MockRunner, t *testing.T) { + run: func(gc GC, id string, ml *MockLogger, mr *MockRunner, t *testing.T) { assert := assert.New(t) - assert.EqualError(gc.Run("bar"), "can not find the task") + assert.EqualError(gc.Run("bar"), "can not find task bar") }, }, } @@ -173,8 +171,8 @@ func TestGCRun(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctl := gomock.NewController(t) - mockLogger := mocks.NewMockLogger(ctl) - mockRunner := mocks.NewMockRunner(ctl) + mockLogger := NewMockLogger(ctl) + mockRunner := NewMockRunner(ctl) gc := New(WithLogger(mockLogger)) if err := gc.Add(Task{ @@ -196,10 +194,10 @@ func TestGCRunAll(t *testing.T) { name string task1 Task task2 Task - run func(gc GC, ml *mocks.MockLogger, mr *mocks.MockRunner) + run func(gc GC, ml *MockLogger, mr *MockRunner) }{ { - name: "run task succeeded", + name: "run task", task1: Task{ ID: "foo", Interval: 2 * time.Hour, @@ -210,7 +208,7 @@ func TestGCRunAll(t *testing.T) { Interval: 2 * time.Hour, Timeout: 1 * time.Hour, }, - run: func(gc GC, ml *mocks.MockLogger, mr *mocks.MockRunner) { + run: func(gc GC, ml *MockLogger, mr *MockRunner) { var wg sync.WaitGroup wg.Add(3) defer wg.Wait() @@ -236,7 +234,7 @@ func TestGCRunAll(t *testing.T) { Interval: 2 * time.Hour, Timeout: 1 * time.Hour, }, - run: func(gc GC, ml *mocks.MockLogger, mr *mocks.MockRunner) { + run: func(gc GC, ml *MockLogger, mr *MockRunner) { var wg sync.WaitGroup wg.Add(4) defer wg.Wait() @@ -257,8 
+255,8 @@ func TestGCRunAll(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctl := gomock.NewController(t) - mockLogger := mocks.NewMockLogger(ctl) - mockRunner := mocks.NewMockRunner(ctl) + mockLogger := NewMockLogger(ctl) + mockRunner := NewMockRunner(ctl) gc := New(WithLogger(mockLogger)) @@ -278,8 +276,8 @@ func TestGCRunAll(t *testing.T) { func TestGCServe(t *testing.T) { ctl := gomock.NewController(t) - mockLogger := mocks.NewMockLogger(ctl) - mockRunner := mocks.NewMockRunner(ctl) + mockLogger := NewMockLogger(ctl) + mockRunner := NewMockRunner(ctl) var wg sync.WaitGroup wg.Add(1) diff --git a/pkg/gc/mocks/logger_mock.go b/pkg/gc/logger_mock.go similarity index 95% rename from pkg/gc/mocks/logger_mock.go rename to pkg/gc/logger_mock.go index 6f60d868765..3ca8708f6ab 100644 --- a/pkg/gc/mocks/logger_mock.go +++ b/pkg/gc/logger_mock.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: ../../pkg/gc/logger.go +// Source: pkg/gc/logger.go -// Package mocks is a generated GoMock package. -package mocks +// Package gc is a generated GoMock package. +package gc import ( reflect "reflect" diff --git a/pkg/gc/mocks/runner_mock.go b/pkg/gc/runner_mock.go similarity index 92% rename from pkg/gc/mocks/runner_mock.go rename to pkg/gc/runner_mock.go index 2ba451c0345..11fc07c5728 100644 --- a/pkg/gc/mocks/runner_mock.go +++ b/pkg/gc/runner_mock.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: ../../pkg/gc/task.go +// Source: pkg/gc/task.go -// Package mocks is a generated GoMock package. -package mocks +// Package gc is a generated GoMock package. +package gc import ( reflect "reflect" diff --git a/pkg/reachable/reachable_test.go b/pkg/reachable/reachable_test.go index 7c161f96f0a..74336752394 100644 --- a/pkg/reachable/reachable_test.go +++ b/pkg/reachable/reachable_test.go @@ -39,7 +39,7 @@ func TestReachableCheck(t *testing.T) { expect func(t *testing.T, err error) }{ { - name: "check address succeeded", + name: "check address", address: ":3000", network: "tcp", timeout: 1 * time.Second, diff --git a/pkg/rpc/base/base.pb.go b/pkg/rpc/base/base.pb.go index 20a3a5c710d..45f5cfd8bcb 100644 --- a/pkg/rpc/base/base.pb.go +++ b/pkg/rpc/base/base.pb.go @@ -22,11 +22,12 @@ package base import ( + reflect "reflect" + sync "sync" + _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" ) const ( @@ -64,7 +65,7 @@ const ( Code_SchedNeedBackSource Code = 5001 // client should try to download from source Code_SchedPeerGone Code = 5002 // client should disconnect from scheduler Code_SchedPeerNotFound Code = 5004 // peer not found in scheduler - Code_SchedPeerPieceResultReportFail Code = 5005 // report piece + Code_SchedPeerPieceResultReportFail Code = 5005 // report piece failed Code_SchedTaskStatusError Code = 5006 // task status is fail // cdnsystem response error 6000-6999 Code_CDNError Code = 6000 diff --git a/pkg/rpc/base/common/common.go b/pkg/rpc/base/common/common.go index 73b05553556..3f08599faf8 100644 --- a/pkg/rpc/base/common/common.go +++ b/pkg/rpc/base/common/common.go @@ -26,7 +26,7 @@ import ( ) var EndOfPiece = int32(1) << 30 -var ZeroOfPiece = int32(-1) +var BeginOfPiece = int32(-1) // CdnSuffix represents cdn peer id suffix var CdnSuffix = "_CDN" diff --git a/pkg/rpc/cdnsystem/mocks/cdnsystem_mock.go b/pkg/rpc/cdnsystem/mocks/cdnsystem_mock.go new file mode 
100644 index 00000000000..4d170f6ca63 --- /dev/null +++ b/pkg/rpc/cdnsystem/mocks/cdnsystem_mock.go @@ -0,0 +1,420 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/cdnsystem/cdnsystem_grpc.pb.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + base "d7y.io/dragonfly/v2/pkg/rpc/base" + cdnsystem "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" +) + +// MockSeederClient is a mock of SeederClient interface. +type MockSeederClient struct { + ctrl *gomock.Controller + recorder *MockSeederClientMockRecorder +} + +// MockSeederClientMockRecorder is the mock recorder for MockSeederClient. +type MockSeederClientMockRecorder struct { + mock *MockSeederClient +} + +// NewMockSeederClient creates a new mock instance. +func NewMockSeederClient(ctrl *gomock.Controller) *MockSeederClient { + mock := &MockSeederClient{ctrl: ctrl} + mock.recorder = &MockSeederClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSeederClient) EXPECT() *MockSeederClientMockRecorder { + return m.recorder +} + +// GetPieceTasks mocks base method. +func (m *MockSeederClient) GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) + ret0, _ := ret[0].(*base.PiecePacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPieceTasks indicates an expected call of GetPieceTasks. +func (mr *MockSeederClientMockRecorder) GetPieceTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockSeederClient)(nil).GetPieceTasks), varargs...) +} + +// ObtainSeeds mocks base method. +func (m *MockSeederClient) ObtainSeeds(ctx context.Context, in *cdnsystem.SeedRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_ObtainSeedsClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ObtainSeeds", varargs...) + ret0, _ := ret[0].(cdnsystem.Seeder_ObtainSeedsClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ObtainSeeds indicates an expected call of ObtainSeeds. +func (mr *MockSeederClientMockRecorder) ObtainSeeds(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockSeederClient)(nil).ObtainSeeds), varargs...) +} + +// MockSeeder_ObtainSeedsClient is a mock of Seeder_ObtainSeedsClient interface. +type MockSeeder_ObtainSeedsClient struct { + ctrl *gomock.Controller + recorder *MockSeeder_ObtainSeedsClientMockRecorder +} + +// MockSeeder_ObtainSeedsClientMockRecorder is the mock recorder for MockSeeder_ObtainSeedsClient. +type MockSeeder_ObtainSeedsClientMockRecorder struct { + mock *MockSeeder_ObtainSeedsClient +} + +// NewMockSeeder_ObtainSeedsClient creates a new mock instance. 
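+//
+// In tests, the client mock above and this stream mock are typically wired
+// together. A minimal sketch, assuming ctl is a *gomock.Controller from
+// gomock.NewController(t) and the test imports "io"; the PieceSeed literal
+// is illustrative, not part of the generated file:
+//
+//	stream := mocks.NewMockSeeder_ObtainSeedsClient(ctl)
+//	stream.EXPECT().Recv().Return(&cdnsystem.PieceSeed{Done: true}, nil)
+//	stream.EXPECT().Recv().Return(nil, io.EOF)
+//	seeder := mocks.NewMockSeederClient(ctl)
+//	seeder.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(stream, nil)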
+func NewMockSeeder_ObtainSeedsClient(ctrl *gomock.Controller) *MockSeeder_ObtainSeedsClient { + mock := &MockSeeder_ObtainSeedsClient{ctrl: ctrl} + mock.recorder = &MockSeeder_ObtainSeedsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSeeder_ObtainSeedsClient) EXPECT() *MockSeeder_ObtainSeedsClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method. +func (m *MockSeeder_ObtainSeedsClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockSeeder_ObtainSeedsClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockSeeder_ObtainSeedsClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockSeeder_ObtainSeedsClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Header)) +} + +// Recv mocks base method. +func (m *MockSeeder_ObtainSeedsClient) Recv() (*cdnsystem.PieceSeed, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*cdnsystem.PieceSeed) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m_2 *MockSeeder_ObtainSeedsClient) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockSeeder_ObtainSeedsClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).RecvMsg), m) +} + +// SendMsg mocks base method. +func (m_2 *MockSeeder_ObtainSeedsClient) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockSeeder_ObtainSeedsClientMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).SendMsg), m) +} + +// Trailer mocks base method. 
+func (m *MockSeeder_ObtainSeedsClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. +func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Trailer)) +} + +// MockSeederServer is a mock of SeederServer interface. +type MockSeederServer struct { + ctrl *gomock.Controller + recorder *MockSeederServerMockRecorder +} + +// MockSeederServerMockRecorder is the mock recorder for MockSeederServer. +type MockSeederServerMockRecorder struct { + mock *MockSeederServer +} + +// NewMockSeederServer creates a new mock instance. +func NewMockSeederServer(ctrl *gomock.Controller) *MockSeederServer { + mock := &MockSeederServer{ctrl: ctrl} + mock.recorder = &MockSeederServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSeederServer) EXPECT() *MockSeederServerMockRecorder { + return m.recorder +} + +// GetPieceTasks mocks base method. +func (m *MockSeederServer) GetPieceTasks(arg0 context.Context, arg1 *base.PieceTaskRequest) (*base.PiecePacket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1) + ret0, _ := ret[0].(*base.PiecePacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPieceTasks indicates an expected call of GetPieceTasks. +func (mr *MockSeederServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockSeederServer)(nil).GetPieceTasks), arg0, arg1) +} + +// ObtainSeeds mocks base method. +func (m *MockSeederServer) ObtainSeeds(arg0 *cdnsystem.SeedRequest, arg1 cdnsystem.Seeder_ObtainSeedsServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ObtainSeeds", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ObtainSeeds indicates an expected call of ObtainSeeds. +func (mr *MockSeederServerMockRecorder) ObtainSeeds(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockSeederServer)(nil).ObtainSeeds), arg0, arg1) +} + +// mustEmbedUnimplementedSeederServer mocks base method. +func (m *MockSeederServer) mustEmbedUnimplementedSeederServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedSeederServer") +} + +// mustEmbedUnimplementedSeederServer indicates an expected call of mustEmbedUnimplementedSeederServer. +func (mr *MockSeederServerMockRecorder) mustEmbedUnimplementedSeederServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedSeederServer", reflect.TypeOf((*MockSeederServer)(nil).mustEmbedUnimplementedSeederServer)) +} + +// MockUnsafeSeederServer is a mock of UnsafeSeederServer interface. +type MockUnsafeSeederServer struct { + ctrl *gomock.Controller + recorder *MockUnsafeSeederServerMockRecorder +} + +// MockUnsafeSeederServerMockRecorder is the mock recorder for MockUnsafeSeederServer. +type MockUnsafeSeederServerMockRecorder struct { + mock *MockUnsafeSeederServer +} + +// NewMockUnsafeSeederServer creates a new mock instance. 
+func NewMockUnsafeSeederServer(ctrl *gomock.Controller) *MockUnsafeSeederServer { + mock := &MockUnsafeSeederServer{ctrl: ctrl} + mock.recorder = &MockUnsafeSeederServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUnsafeSeederServer) EXPECT() *MockUnsafeSeederServerMockRecorder { + return m.recorder +} + +// mustEmbedUnimplementedSeederServer mocks base method. +func (m *MockUnsafeSeederServer) mustEmbedUnimplementedSeederServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedSeederServer") +} + +// mustEmbedUnimplementedSeederServer indicates an expected call of mustEmbedUnimplementedSeederServer. +func (mr *MockUnsafeSeederServerMockRecorder) mustEmbedUnimplementedSeederServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedSeederServer", reflect.TypeOf((*MockUnsafeSeederServer)(nil).mustEmbedUnimplementedSeederServer)) +} + +// MockSeeder_ObtainSeedsServer is a mock of Seeder_ObtainSeedsServer interface. +type MockSeeder_ObtainSeedsServer struct { + ctrl *gomock.Controller + recorder *MockSeeder_ObtainSeedsServerMockRecorder +} + +// MockSeeder_ObtainSeedsServerMockRecorder is the mock recorder for MockSeeder_ObtainSeedsServer. +type MockSeeder_ObtainSeedsServerMockRecorder struct { + mock *MockSeeder_ObtainSeedsServer +} + +// NewMockSeeder_ObtainSeedsServer creates a new mock instance. +func NewMockSeeder_ObtainSeedsServer(ctrl *gomock.Controller) *MockSeeder_ObtainSeedsServer { + mock := &MockSeeder_ObtainSeedsServer{ctrl: ctrl} + mock.recorder = &MockSeeder_ObtainSeedsServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSeeder_ObtainSeedsServer) EXPECT() *MockSeeder_ObtainSeedsServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockSeeder_ObtainSeedsServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockSeeder_ObtainSeedsServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).Context)) +} + +// RecvMsg mocks base method. +func (m_2 *MockSeeder_ObtainSeedsServer) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockSeeder_ObtainSeedsServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).RecvMsg), m) +} + +// Send mocks base method. +func (m *MockSeeder_ObtainSeedsServer) Send(arg0 *cdnsystem.PieceSeed) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockSeeder_ObtainSeedsServerMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).Send), arg0) +} + +// SendHeader mocks base method. 
+func (m *MockSeeder_ObtainSeedsServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. +func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockSeeder_ObtainSeedsServer) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SendMsg), m) +} + +// SetHeader mocks base method. +func (m *MockSeeder_ObtainSeedsServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockSeeder_ObtainSeedsServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. +func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SetTrailer), arg0) +} diff --git a/pkg/rpc/cdnsystem/server/mocks/server_mock.go b/pkg/rpc/cdnsystem/server/mocks/server_mock.go new file mode 100644 index 00000000000..aef82933959 --- /dev/null +++ b/pkg/rpc/cdnsystem/server/mocks/server_mock.go @@ -0,0 +1,66 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/cdnsystem/server/server.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + base "d7y.io/dragonfly/v2/pkg/rpc/base" + cdnsystem "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" + gomock "github.com/golang/mock/gomock" +) + +// MockSeederServer is a mock of SeederServer interface. +type MockSeederServer struct { + ctrl *gomock.Controller + recorder *MockSeederServerMockRecorder +} + +// MockSeederServerMockRecorder is the mock recorder for MockSeederServer. +type MockSeederServerMockRecorder struct { + mock *MockSeederServer +} + +// NewMockSeederServer creates a new mock instance. +func NewMockSeederServer(ctrl *gomock.Controller) *MockSeederServer { + mock := &MockSeederServer{ctrl: ctrl} + mock.recorder = &MockSeederServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSeederServer) EXPECT() *MockSeederServerMockRecorder { + return m.recorder +} + +// GetPieceTasks mocks base method. 
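+//
+// A typical expectation on this server-side mock, as a sketch (assuming
+// ctl is a *gomock.Controller; the PiecePacket literal is illustrative):
+//
+//	srv := mocks.NewMockSeederServer(ctl)
+//	srv.EXPECT().
+//		GetPieceTasks(gomock.Any(), gomock.Any()).
+//		Return(&base.PiecePacket{TaskId: "task-1"}, nil)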
+func (m *MockSeederServer) GetPieceTasks(arg0 context.Context, arg1 *base.PieceTaskRequest) (*base.PiecePacket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1) + ret0, _ := ret[0].(*base.PiecePacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPieceTasks indicates an expected call of GetPieceTasks. +func (mr *MockSeederServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockSeederServer)(nil).GetPieceTasks), arg0, arg1) +} + +// ObtainSeeds mocks base method. +func (m *MockSeederServer) ObtainSeeds(arg0 context.Context, arg1 *cdnsystem.SeedRequest, arg2 chan<- *cdnsystem.PieceSeed) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ObtainSeeds", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ObtainSeeds indicates an expected call of ObtainSeeds. +func (mr *MockSeederServerMockRecorder) ObtainSeeds(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockSeederServer)(nil).ObtainSeeds), arg0, arg1, arg2) +} diff --git a/pkg/rpc/dfdaemon/cdnsystem/mocks/client_mock.go b/pkg/rpc/dfdaemon/cdnsystem/mocks/client_mock.go new file mode 100644 index 00000000000..654f6f35ddd --- /dev/null +++ b/pkg/rpc/dfdaemon/cdnsystem/mocks/client_mock.go @@ -0,0 +1,106 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/cdnsystem/client/client.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + dfnet "d7y.io/dragonfly/v2/internal/dfnet" + base "d7y.io/dragonfly/v2/pkg/rpc/base" + cdnsystem "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" + client "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" +) + +// MockCdnClient is a mock of CdnClient interface. +type MockCdnClient struct { + ctrl *gomock.Controller + recorder *MockCdnClientMockRecorder +} + +// MockCdnClientMockRecorder is the mock recorder for MockCdnClient. +type MockCdnClientMockRecorder struct { + mock *MockCdnClient +} + +// NewMockCdnClient creates a new mock instance. +func NewMockCdnClient(ctrl *gomock.Controller) *MockCdnClient { + mock := &MockCdnClient{ctrl: ctrl} + mock.recorder = &MockCdnClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCdnClient) EXPECT() *MockCdnClientMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockCdnClient) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockCdnClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockCdnClient)(nil).Close)) +} + +// GetPieceTasks mocks base method. +func (m *MockCdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, addr, req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) 
+ ret0, _ := ret[0].(*base.PiecePacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPieceTasks indicates an expected call of GetPieceTasks. +func (mr *MockCdnClientMockRecorder) GetPieceTasks(ctx, addr, req interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, addr, req}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockCdnClient)(nil).GetPieceTasks), varargs...) +} + +// ObtainSeeds mocks base method. +func (m *MockCdnClient) ObtainSeeds(ctx context.Context, sr *cdnsystem.SeedRequest, opts ...grpc.CallOption) (*client.PieceSeedStream, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, sr} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ObtainSeeds", varargs...) + ret0, _ := ret[0].(*client.PieceSeedStream) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ObtainSeeds indicates an expected call of ObtainSeeds. +func (mr *MockCdnClientMockRecorder) ObtainSeeds(ctx, sr interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, sr}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockCdnClient)(nil).ObtainSeeds), varargs...) +} + +// UpdateState mocks base method. +func (m *MockCdnClient) UpdateState(addrs []dfnet.NetAddr) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdateState", addrs) +} + +// UpdateState indicates an expected call of UpdateState. +func (mr *MockCdnClientMockRecorder) UpdateState(addrs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateState", reflect.TypeOf((*MockCdnClient)(nil).UpdateState), addrs) +} diff --git a/pkg/rpc/dfdaemon/client/mocks/client_mock.go b/pkg/rpc/dfdaemon/client/mocks/client_mock.go new file mode 100644 index 00000000000..c100e9c051c --- /dev/null +++ b/pkg/rpc/dfdaemon/client/mocks/client_mock.go @@ -0,0 +1,113 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/dfdaemon/client/client.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + dfnet "d7y.io/dragonfly/v2/internal/dfnet" + base "d7y.io/dragonfly/v2/pkg/rpc/base" + dfdaemon "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" + client "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" +) + +// MockDaemonClient is a mock of DaemonClient interface. +type MockDaemonClient struct { + ctrl *gomock.Controller + recorder *MockDaemonClientMockRecorder +} + +// MockDaemonClientMockRecorder is the mock recorder for MockDaemonClient. +type MockDaemonClientMockRecorder struct { + mock *MockDaemonClient +} + +// NewMockDaemonClient creates a new mock instance. +func NewMockDaemonClient(ctrl *gomock.Controller) *MockDaemonClient { + mock := &MockDaemonClient{ctrl: ctrl} + mock.recorder = &MockDaemonClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDaemonClient) EXPECT() *MockDaemonClientMockRecorder { + return m.recorder +} + +// CheckHealth mocks base method. 
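+//
+// Callers probe daemons through this method, so tests usually stub a plain
+// success or failure. A sketch, assuming ctl is a *gomock.Controller:
+//
+//	dc := mocks.NewMockDaemonClient(ctl)
+//	dc.EXPECT().CheckHealth(gomock.Any(), gomock.Any()).Return(nil)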
+func (m *MockDaemonClient) CheckHealth(ctx context.Context, target dfnet.NetAddr, opts ...grpc.CallOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, target} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CheckHealth", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// CheckHealth indicates an expected call of CheckHealth. +func (mr *MockDaemonClientMockRecorder) CheckHealth(ctx, target interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, target}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckHealth", reflect.TypeOf((*MockDaemonClient)(nil).CheckHealth), varargs...) +} + +// Close mocks base method. +func (m *MockDaemonClient) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockDaemonClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDaemonClient)(nil).Close)) +} + +// Download mocks base method. +func (m *MockDaemonClient) Download(ctx context.Context, req *dfdaemon.DownRequest, opts ...grpc.CallOption) (*client.DownResultStream, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Download", varargs...) + ret0, _ := ret[0].(*client.DownResultStream) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Download indicates an expected call of Download. +func (mr *MockDaemonClientMockRecorder) Download(ctx, req interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, req}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockDaemonClient)(nil).Download), varargs...) +} + +// GetPieceTasks mocks base method. +func (m *MockDaemonClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, addr, ptr} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) + ret0, _ := ret[0].(*base.PiecePacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPieceTasks indicates an expected call of GetPieceTasks. +func (mr *MockDaemonClientMockRecorder) GetPieceTasks(ctx, addr, ptr interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, addr, ptr}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockDaemonClient)(nil).GetPieceTasks), varargs...) +} diff --git a/pkg/rpc/dfdaemon/mocks/dfdaemon_mock.go b/pkg/rpc/dfdaemon/mocks/dfdaemon_mock.go new file mode 100644 index 00000000000..7815e88f14e --- /dev/null +++ b/pkg/rpc/dfdaemon/mocks/dfdaemon_mock.go @@ -0,0 +1,456 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/dfdaemon/dfdaemon_grpc.pb.go + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + base "d7y.io/dragonfly/v2/pkg/rpc/base" + dfdaemon "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// MockDaemonClient is a mock of DaemonClient interface. +type MockDaemonClient struct { + ctrl *gomock.Controller + recorder *MockDaemonClientMockRecorder +} + +// MockDaemonClientMockRecorder is the mock recorder for MockDaemonClient. +type MockDaemonClientMockRecorder struct { + mock *MockDaemonClient +} + +// NewMockDaemonClient creates a new mock instance. +func NewMockDaemonClient(ctrl *gomock.Controller) *MockDaemonClient { + mock := &MockDaemonClient{ctrl: ctrl} + mock.recorder = &MockDaemonClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDaemonClient) EXPECT() *MockDaemonClientMockRecorder { + return m.recorder +} + +// CheckHealth mocks base method. +func (m *MockDaemonClient) CheckHealth(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CheckHealth", varargs...) + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CheckHealth indicates an expected call of CheckHealth. +func (mr *MockDaemonClientMockRecorder) CheckHealth(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckHealth", reflect.TypeOf((*MockDaemonClient)(nil).CheckHealth), varargs...) +} + +// Download mocks base method. +func (m *MockDaemonClient) Download(ctx context.Context, in *dfdaemon.DownRequest, opts ...grpc.CallOption) (dfdaemon.Daemon_DownloadClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Download", varargs...) + ret0, _ := ret[0].(dfdaemon.Daemon_DownloadClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Download indicates an expected call of Download. +func (mr *MockDaemonClientMockRecorder) Download(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockDaemonClient)(nil).Download), varargs...) +} + +// GetPieceTasks mocks base method. +func (m *MockDaemonClient) GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) + ret0, _ := ret[0].(*base.PiecePacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPieceTasks indicates an expected call of GetPieceTasks. +func (mr *MockDaemonClientMockRecorder) GetPieceTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockDaemonClient)(nil).GetPieceTasks), varargs...) 
+} + +// MockDaemon_DownloadClient is a mock of Daemon_DownloadClient interface. +type MockDaemon_DownloadClient struct { + ctrl *gomock.Controller + recorder *MockDaemon_DownloadClientMockRecorder +} + +// MockDaemon_DownloadClientMockRecorder is the mock recorder for MockDaemon_DownloadClient. +type MockDaemon_DownloadClientMockRecorder struct { + mock *MockDaemon_DownloadClient +} + +// NewMockDaemon_DownloadClient creates a new mock instance. +func NewMockDaemon_DownloadClient(ctrl *gomock.Controller) *MockDaemon_DownloadClient { + mock := &MockDaemon_DownloadClient{ctrl: ctrl} + mock.recorder = &MockDaemon_DownloadClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDaemon_DownloadClient) EXPECT() *MockDaemon_DownloadClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method. +func (m *MockDaemon_DownloadClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockDaemon_DownloadClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockDaemon_DownloadClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockDaemon_DownloadClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockDaemon_DownloadClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockDaemon_DownloadClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Header)) +} + +// Recv mocks base method. +func (m *MockDaemon_DownloadClient) Recv() (*dfdaemon.DownResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*dfdaemon.DownResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockDaemon_DownloadClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m_2 *MockDaemon_DownloadClient) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockDaemon_DownloadClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).RecvMsg), m) +} + +// SendMsg mocks base method. 
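+//
+// Download is a server-streaming RPC, so tests usually script this stream
+// as a sequence of results followed by io.EOF. A sketch, assuming ctl is a
+// *gomock.Controller and "io" is imported; the DownResult literal is
+// illustrative:
+//
+//	dl := mocks.NewMockDaemon_DownloadClient(ctl)
+//	gomock.InOrder(
+//		dl.EXPECT().Recv().Return(&dfdaemon.DownResult{Done: true}, nil),
+//		dl.EXPECT().Recv().Return(nil, io.EOF),
+//	)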
+func (m_2 *MockDaemon_DownloadClient) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockDaemon_DownloadClientMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).SendMsg), m) +} + +// Trailer mocks base method. +func (m *MockDaemon_DownloadClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. +func (mr *MockDaemon_DownloadClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Trailer)) +} + +// MockDaemonServer is a mock of DaemonServer interface. +type MockDaemonServer struct { + ctrl *gomock.Controller + recorder *MockDaemonServerMockRecorder +} + +// MockDaemonServerMockRecorder is the mock recorder for MockDaemonServer. +type MockDaemonServerMockRecorder struct { + mock *MockDaemonServer +} + +// NewMockDaemonServer creates a new mock instance. +func NewMockDaemonServer(ctrl *gomock.Controller) *MockDaemonServer { + mock := &MockDaemonServer{ctrl: ctrl} + mock.recorder = &MockDaemonServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDaemonServer) EXPECT() *MockDaemonServerMockRecorder { + return m.recorder +} + +// CheckHealth mocks base method. +func (m *MockDaemonServer) CheckHealth(arg0 context.Context, arg1 *emptypb.Empty) (*emptypb.Empty, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CheckHealth", arg0, arg1) + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CheckHealth indicates an expected call of CheckHealth. +func (mr *MockDaemonServerMockRecorder) CheckHealth(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckHealth", reflect.TypeOf((*MockDaemonServer)(nil).CheckHealth), arg0, arg1) +} + +// Download mocks base method. +func (m *MockDaemonServer) Download(arg0 *dfdaemon.DownRequest, arg1 dfdaemon.Daemon_DownloadServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Download", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Download indicates an expected call of Download. +func (mr *MockDaemonServerMockRecorder) Download(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockDaemonServer)(nil).Download), arg0, arg1) +} + +// GetPieceTasks mocks base method. +func (m *MockDaemonServer) GetPieceTasks(arg0 context.Context, arg1 *base.PieceTaskRequest) (*base.PiecePacket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1) + ret0, _ := ret[0].(*base.PiecePacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPieceTasks indicates an expected call of GetPieceTasks. 
+func (mr *MockDaemonServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockDaemonServer)(nil).GetPieceTasks), arg0, arg1) +} + +// mustEmbedUnimplementedDaemonServer mocks base method. +func (m *MockDaemonServer) mustEmbedUnimplementedDaemonServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedDaemonServer") +} + +// mustEmbedUnimplementedDaemonServer indicates an expected call of mustEmbedUnimplementedDaemonServer. +func (mr *MockDaemonServerMockRecorder) mustEmbedUnimplementedDaemonServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedDaemonServer", reflect.TypeOf((*MockDaemonServer)(nil).mustEmbedUnimplementedDaemonServer)) +} + +// MockUnsafeDaemonServer is a mock of UnsafeDaemonServer interface. +type MockUnsafeDaemonServer struct { + ctrl *gomock.Controller + recorder *MockUnsafeDaemonServerMockRecorder +} + +// MockUnsafeDaemonServerMockRecorder is the mock recorder for MockUnsafeDaemonServer. +type MockUnsafeDaemonServerMockRecorder struct { + mock *MockUnsafeDaemonServer +} + +// NewMockUnsafeDaemonServer creates a new mock instance. +func NewMockUnsafeDaemonServer(ctrl *gomock.Controller) *MockUnsafeDaemonServer { + mock := &MockUnsafeDaemonServer{ctrl: ctrl} + mock.recorder = &MockUnsafeDaemonServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUnsafeDaemonServer) EXPECT() *MockUnsafeDaemonServerMockRecorder { + return m.recorder +} + +// mustEmbedUnimplementedDaemonServer mocks base method. +func (m *MockUnsafeDaemonServer) mustEmbedUnimplementedDaemonServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedDaemonServer") +} + +// mustEmbedUnimplementedDaemonServer indicates an expected call of mustEmbedUnimplementedDaemonServer. +func (mr *MockUnsafeDaemonServerMockRecorder) mustEmbedUnimplementedDaemonServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedDaemonServer", reflect.TypeOf((*MockUnsafeDaemonServer)(nil).mustEmbedUnimplementedDaemonServer)) +} + +// MockDaemon_DownloadServer is a mock of Daemon_DownloadServer interface. +type MockDaemon_DownloadServer struct { + ctrl *gomock.Controller + recorder *MockDaemon_DownloadServerMockRecorder +} + +// MockDaemon_DownloadServerMockRecorder is the mock recorder for MockDaemon_DownloadServer. +type MockDaemon_DownloadServerMockRecorder struct { + mock *MockDaemon_DownloadServer +} + +// NewMockDaemon_DownloadServer creates a new mock instance. +func NewMockDaemon_DownloadServer(ctrl *gomock.Controller) *MockDaemon_DownloadServer { + mock := &MockDaemon_DownloadServer{ctrl: ctrl} + mock.recorder = &MockDaemon_DownloadServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDaemon_DownloadServer) EXPECT() *MockDaemon_DownloadServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockDaemon_DownloadServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. 
+func (mr *MockDaemon_DownloadServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).Context)) +} + +// RecvMsg mocks base method. +func (m_2 *MockDaemon_DownloadServer) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockDaemon_DownloadServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).RecvMsg), m) +} + +// Send mocks base method. +func (m *MockDaemon_DownloadServer) Send(arg0 *dfdaemon.DownResult) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockDaemon_DownloadServerMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).Send), arg0) +} + +// SendHeader mocks base method. +func (m *MockDaemon_DownloadServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. +func (mr *MockDaemon_DownloadServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockDaemon_DownloadServer) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockDaemon_DownloadServerMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SendMsg), m) +} + +// SetHeader mocks base method. +func (m *MockDaemon_DownloadServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockDaemon_DownloadServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockDaemon_DownloadServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. 
+func (mr *MockDaemon_DownloadServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SetTrailer), arg0) +} diff --git a/pkg/rpc/dfdaemon/server/mocks/server_mock.go b/pkg/rpc/dfdaemon/server/mocks/server_mock.go new file mode 100644 index 00000000000..1392961b505 --- /dev/null +++ b/pkg/rpc/dfdaemon/server/mocks/server_mock.go @@ -0,0 +1,80 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/dfdaemon/server/server.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + base "d7y.io/dragonfly/v2/pkg/rpc/base" + dfdaemon "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" + gomock "github.com/golang/mock/gomock" +) + +// MockDaemonServer is a mock of DaemonServer interface. +type MockDaemonServer struct { + ctrl *gomock.Controller + recorder *MockDaemonServerMockRecorder +} + +// MockDaemonServerMockRecorder is the mock recorder for MockDaemonServer. +type MockDaemonServerMockRecorder struct { + mock *MockDaemonServer +} + +// NewMockDaemonServer creates a new mock instance. +func NewMockDaemonServer(ctrl *gomock.Controller) *MockDaemonServer { + mock := &MockDaemonServer{ctrl: ctrl} + mock.recorder = &MockDaemonServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDaemonServer) EXPECT() *MockDaemonServerMockRecorder { + return m.recorder +} + +// CheckHealth mocks base method. +func (m *MockDaemonServer) CheckHealth(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CheckHealth", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// CheckHealth indicates an expected call of CheckHealth. +func (mr *MockDaemonServerMockRecorder) CheckHealth(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckHealth", reflect.TypeOf((*MockDaemonServer)(nil).CheckHealth), arg0) +} + +// Download mocks base method. +func (m *MockDaemonServer) Download(arg0 context.Context, arg1 *dfdaemon.DownRequest, arg2 chan<- *dfdaemon.DownResult) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Download", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// Download indicates an expected call of Download. +func (mr *MockDaemonServerMockRecorder) Download(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockDaemonServer)(nil).Download), arg0, arg1, arg2) +} + +// GetPieceTasks mocks base method. +func (m *MockDaemonServer) GetPieceTasks(arg0 context.Context, arg1 *base.PieceTaskRequest) (*base.PiecePacket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1) + ret0, _ := ret[0].(*base.PiecePacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPieceTasks indicates an expected call of GetPieceTasks. 
+func (mr *MockDaemonServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockDaemonServer)(nil).GetPieceTasks), arg0, arg1) +} diff --git a/scheduler/config/mocks/manager_client_mock.go b/pkg/rpc/manager/client/mocks/client_mock.go similarity index 98% rename from scheduler/config/mocks/manager_client_mock.go rename to pkg/rpc/manager/client/mocks/client_mock.go index 672b783953e..8b204e265fc 100644 --- a/scheduler/config/mocks/manager_client_mock.go +++ b/pkg/rpc/manager/client/mocks/client_mock.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: ../../pkg/rpc/manager/client/client.go +// Source: pkg/rpc/manager/client/client.go // Package mocks is a generated GoMock package. package mocks diff --git a/pkg/rpc/manager/mocks/manager_mock.go b/pkg/rpc/manager/mocks/manager_mock.go new file mode 100644 index 00000000000..f3c03793e9f --- /dev/null +++ b/pkg/rpc/manager/mocks/manager_mock.go @@ -0,0 +1,589 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/manager/manager_grpc.pb.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + manager "d7y.io/dragonfly/v2/pkg/rpc/manager" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// MockManagerClient is a mock of ManagerClient interface. +type MockManagerClient struct { + ctrl *gomock.Controller + recorder *MockManagerClientMockRecorder +} + +// MockManagerClientMockRecorder is the mock recorder for MockManagerClient. +type MockManagerClientMockRecorder struct { + mock *MockManagerClient +} + +// NewMockManagerClient creates a new mock instance. +func NewMockManagerClient(ctrl *gomock.Controller) *MockManagerClient { + mock := &MockManagerClient{ctrl: ctrl} + mock.recorder = &MockManagerClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManagerClient) EXPECT() *MockManagerClientMockRecorder { + return m.recorder +} + +// GetCDN mocks base method. +func (m *MockManagerClient) GetCDN(ctx context.Context, in *manager.GetCDNRequest, opts ...grpc.CallOption) (*manager.CDN, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetCDN", varargs...) + ret0, _ := ret[0].(*manager.CDN) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCDN indicates an expected call of GetCDN. +func (mr *MockManagerClientMockRecorder) GetCDN(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCDN", reflect.TypeOf((*MockManagerClient)(nil).GetCDN), varargs...) +} + +// GetScheduler mocks base method. +func (m *MockManagerClient) GetScheduler(ctx context.Context, in *manager.GetSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetScheduler", varargs...) + ret0, _ := ret[0].(*manager.Scheduler) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetScheduler indicates an expected call of GetScheduler. 
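+//
+// The scheduler's dynconfig fetches its configuration through this call,
+// so tests commonly stub it. A sketch, assuming ctl is a
+// *gomock.Controller; the Scheduler literal is illustrative:
+//
+//	mc := mocks.NewMockManagerClient(ctl)
+//	mc.EXPECT().
+//		GetScheduler(gomock.Any(), gomock.Any()).
+//		Return(&manager.Scheduler{HostName: "scheduler-1"}, nil)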
+func (mr *MockManagerClientMockRecorder) GetScheduler(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerClient)(nil).GetScheduler), varargs...) +} + +// KeepAlive mocks base method. +func (m *MockManagerClient) KeepAlive(ctx context.Context, opts ...grpc.CallOption) (manager.Manager_KeepAliveClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "KeepAlive", varargs...) + ret0, _ := ret[0].(manager.Manager_KeepAliveClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// KeepAlive indicates an expected call of KeepAlive. +func (mr *MockManagerClientMockRecorder) KeepAlive(ctx interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerClient)(nil).KeepAlive), varargs...) +} + +// ListSchedulers mocks base method. +func (m *MockManagerClient) ListSchedulers(ctx context.Context, in *manager.ListSchedulersRequest, opts ...grpc.CallOption) (*manager.ListSchedulersResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListSchedulers", varargs...) + ret0, _ := ret[0].(*manager.ListSchedulersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSchedulers indicates an expected call of ListSchedulers. +func (mr *MockManagerClientMockRecorder) ListSchedulers(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerClient)(nil).ListSchedulers), varargs...) +} + +// UpdateCDN mocks base method. +func (m *MockManagerClient) UpdateCDN(ctx context.Context, in *manager.UpdateCDNRequest, opts ...grpc.CallOption) (*manager.CDN, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateCDN", varargs...) + ret0, _ := ret[0].(*manager.CDN) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateCDN indicates an expected call of UpdateCDN. +func (mr *MockManagerClientMockRecorder) UpdateCDN(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockManagerClient)(nil).UpdateCDN), varargs...) +} + +// UpdateScheduler mocks base method. +func (m *MockManagerClient) UpdateScheduler(ctx context.Context, in *manager.UpdateSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateScheduler", varargs...) + ret0, _ := ret[0].(*manager.Scheduler) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateScheduler indicates an expected call of UpdateScheduler. 
+func (mr *MockManagerClientMockRecorder) UpdateScheduler(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerClient)(nil).UpdateScheduler), varargs...) +} + +// MockManager_KeepAliveClient is a mock of Manager_KeepAliveClient interface. +type MockManager_KeepAliveClient struct { + ctrl *gomock.Controller + recorder *MockManager_KeepAliveClientMockRecorder +} + +// MockManager_KeepAliveClientMockRecorder is the mock recorder for MockManager_KeepAliveClient. +type MockManager_KeepAliveClientMockRecorder struct { + mock *MockManager_KeepAliveClient +} + +// NewMockManager_KeepAliveClient creates a new mock instance. +func NewMockManager_KeepAliveClient(ctrl *gomock.Controller) *MockManager_KeepAliveClient { + mock := &MockManager_KeepAliveClient{ctrl: ctrl} + mock.recorder = &MockManager_KeepAliveClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManager_KeepAliveClient) EXPECT() *MockManager_KeepAliveClientMockRecorder { + return m.recorder +} + +// CloseAndRecv mocks base method. +func (m *MockManager_KeepAliveClient) CloseAndRecv() (*emptypb.Empty, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseAndRecv") + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CloseAndRecv indicates an expected call of CloseAndRecv. +func (mr *MockManager_KeepAliveClientMockRecorder) CloseAndRecv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).CloseAndRecv)) +} + +// CloseSend mocks base method. +func (m *MockManager_KeepAliveClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockManager_KeepAliveClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockManager_KeepAliveClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockManager_KeepAliveClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockManager_KeepAliveClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockManager_KeepAliveClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Header)) +} + +// RecvMsg mocks base method. 
+func (m_2 *MockManager_KeepAliveClient) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockManager_KeepAliveClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).RecvMsg), m) +} + +// Send mocks base method. +func (m *MockManager_KeepAliveClient) Send(arg0 *manager.KeepAliveRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockManager_KeepAliveClientMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Send), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockManager_KeepAliveClient) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockManager_KeepAliveClientMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).SendMsg), m) +} + +// Trailer mocks base method. +func (m *MockManager_KeepAliveClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. +func (mr *MockManager_KeepAliveClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Trailer)) +} + +// MockManagerServer is a mock of ManagerServer interface. +type MockManagerServer struct { + ctrl *gomock.Controller + recorder *MockManagerServerMockRecorder +} + +// MockManagerServerMockRecorder is the mock recorder for MockManagerServer. +type MockManagerServerMockRecorder struct { + mock *MockManagerServer +} + +// NewMockManagerServer creates a new mock instance. +func NewMockManagerServer(ctrl *gomock.Controller) *MockManagerServer { + mock := &MockManagerServer{ctrl: ctrl} + mock.recorder = &MockManagerServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManagerServer) EXPECT() *MockManagerServerMockRecorder { + return m.recorder +} + +// GetCDN mocks base method. +func (m *MockManagerServer) GetCDN(arg0 context.Context, arg1 *manager.GetCDNRequest) (*manager.CDN, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCDN", arg0, arg1) + ret0, _ := ret[0].(*manager.CDN) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCDN indicates an expected call of GetCDN. +func (mr *MockManagerServerMockRecorder) GetCDN(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCDN", reflect.TypeOf((*MockManagerServer)(nil).GetCDN), arg0, arg1) +} + +// GetScheduler mocks base method. 
+func (m *MockManagerServer) GetScheduler(arg0 context.Context, arg1 *manager.GetSchedulerRequest) (*manager.Scheduler, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetScheduler", arg0, arg1) + ret0, _ := ret[0].(*manager.Scheduler) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetScheduler indicates an expected call of GetScheduler. +func (mr *MockManagerServerMockRecorder) GetScheduler(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerServer)(nil).GetScheduler), arg0, arg1) +} + +// KeepAlive mocks base method. +func (m *MockManagerServer) KeepAlive(arg0 manager.Manager_KeepAliveServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "KeepAlive", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// KeepAlive indicates an expected call of KeepAlive. +func (mr *MockManagerServerMockRecorder) KeepAlive(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerServer)(nil).KeepAlive), arg0) +} + +// ListSchedulers mocks base method. +func (m *MockManagerServer) ListSchedulers(arg0 context.Context, arg1 *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSchedulers", arg0, arg1) + ret0, _ := ret[0].(*manager.ListSchedulersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSchedulers indicates an expected call of ListSchedulers. +func (mr *MockManagerServerMockRecorder) ListSchedulers(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerServer)(nil).ListSchedulers), arg0, arg1) +} + +// UpdateCDN mocks base method. +func (m *MockManagerServer) UpdateCDN(arg0 context.Context, arg1 *manager.UpdateCDNRequest) (*manager.CDN, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateCDN", arg0, arg1) + ret0, _ := ret[0].(*manager.CDN) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateCDN indicates an expected call of UpdateCDN. +func (mr *MockManagerServerMockRecorder) UpdateCDN(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockManagerServer)(nil).UpdateCDN), arg0, arg1) +} + +// UpdateScheduler mocks base method. +func (m *MockManagerServer) UpdateScheduler(arg0 context.Context, arg1 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateScheduler", arg0, arg1) + ret0, _ := ret[0].(*manager.Scheduler) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateScheduler indicates an expected call of UpdateScheduler. +func (mr *MockManagerServerMockRecorder) UpdateScheduler(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerServer)(nil).UpdateScheduler), arg0, arg1) +} + +// mustEmbedUnimplementedManagerServer mocks base method. +func (m *MockManagerServer) mustEmbedUnimplementedManagerServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedManagerServer") +} + +// mustEmbedUnimplementedManagerServer indicates an expected call of mustEmbedUnimplementedManagerServer. 
+func (mr *MockManagerServerMockRecorder) mustEmbedUnimplementedManagerServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedManagerServer", reflect.TypeOf((*MockManagerServer)(nil).mustEmbedUnimplementedManagerServer)) +} + +// MockUnsafeManagerServer is a mock of UnsafeManagerServer interface. +type MockUnsafeManagerServer struct { + ctrl *gomock.Controller + recorder *MockUnsafeManagerServerMockRecorder +} + +// MockUnsafeManagerServerMockRecorder is the mock recorder for MockUnsafeManagerServer. +type MockUnsafeManagerServerMockRecorder struct { + mock *MockUnsafeManagerServer +} + +// NewMockUnsafeManagerServer creates a new mock instance. +func NewMockUnsafeManagerServer(ctrl *gomock.Controller) *MockUnsafeManagerServer { + mock := &MockUnsafeManagerServer{ctrl: ctrl} + mock.recorder = &MockUnsafeManagerServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUnsafeManagerServer) EXPECT() *MockUnsafeManagerServerMockRecorder { + return m.recorder +} + +// mustEmbedUnimplementedManagerServer mocks base method. +func (m *MockUnsafeManagerServer) mustEmbedUnimplementedManagerServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedManagerServer") +} + +// mustEmbedUnimplementedManagerServer indicates an expected call of mustEmbedUnimplementedManagerServer. +func (mr *MockUnsafeManagerServerMockRecorder) mustEmbedUnimplementedManagerServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedManagerServer", reflect.TypeOf((*MockUnsafeManagerServer)(nil).mustEmbedUnimplementedManagerServer)) +} + +// MockManager_KeepAliveServer is a mock of Manager_KeepAliveServer interface. +type MockManager_KeepAliveServer struct { + ctrl *gomock.Controller + recorder *MockManager_KeepAliveServerMockRecorder +} + +// MockManager_KeepAliveServerMockRecorder is the mock recorder for MockManager_KeepAliveServer. +type MockManager_KeepAliveServerMockRecorder struct { + mock *MockManager_KeepAliveServer +} + +// NewMockManager_KeepAliveServer creates a new mock instance. +func NewMockManager_KeepAliveServer(ctrl *gomock.Controller) *MockManager_KeepAliveServer { + mock := &MockManager_KeepAliveServer{ctrl: ctrl} + mock.recorder = &MockManager_KeepAliveServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManager_KeepAliveServer) EXPECT() *MockManager_KeepAliveServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockManager_KeepAliveServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockManager_KeepAliveServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).Context)) +} + +// Recv mocks base method. +func (m *MockManager_KeepAliveServer) Recv() (*manager.KeepAliveRequest, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*manager.KeepAliveRequest) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. 
+func (mr *MockManager_KeepAliveServerMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m_2 *MockManager_KeepAliveServer) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockManager_KeepAliveServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).RecvMsg), m) +} + +// SendAndClose mocks base method. +func (m *MockManager_KeepAliveServer) SendAndClose(arg0 *emptypb.Empty) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendAndClose", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendAndClose indicates an expected call of SendAndClose. +func (mr *MockManager_KeepAliveServerMockRecorder) SendAndClose(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAndClose", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendAndClose), arg0) +} + +// SendHeader mocks base method. +func (m *MockManager_KeepAliveServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. +func (mr *MockManager_KeepAliveServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockManager_KeepAliveServer) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockManager_KeepAliveServerMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendMsg), m) +} + +// SetHeader mocks base method. +func (m *MockManager_KeepAliveServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockManager_KeepAliveServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockManager_KeepAliveServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. 
+func (mr *MockManager_KeepAliveServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SetTrailer), arg0) +} diff --git a/pkg/rpc/manager/server/mocks/server_mock.go b/pkg/rpc/manager/server/mocks/server_mock.go new file mode 100644 index 00000000000..013fd619ebc --- /dev/null +++ b/pkg/rpc/manager/server/mocks/server_mock.go @@ -0,0 +1,125 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/manager/server/server.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + manager "d7y.io/dragonfly/v2/pkg/rpc/manager" + gomock "github.com/golang/mock/gomock" +) + +// MockManagerServer is a mock of ManagerServer interface. +type MockManagerServer struct { + ctrl *gomock.Controller + recorder *MockManagerServerMockRecorder +} + +// MockManagerServerMockRecorder is the mock recorder for MockManagerServer. +type MockManagerServerMockRecorder struct { + mock *MockManagerServer +} + +// NewMockManagerServer creates a new mock instance. +func NewMockManagerServer(ctrl *gomock.Controller) *MockManagerServer { + mock := &MockManagerServer{ctrl: ctrl} + mock.recorder = &MockManagerServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManagerServer) EXPECT() *MockManagerServerMockRecorder { + return m.recorder +} + +// GetCDN mocks base method. +func (m *MockManagerServer) GetCDN(arg0 context.Context, arg1 *manager.GetCDNRequest) (*manager.CDN, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCDN", arg0, arg1) + ret0, _ := ret[0].(*manager.CDN) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCDN indicates an expected call of GetCDN. +func (mr *MockManagerServerMockRecorder) GetCDN(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCDN", reflect.TypeOf((*MockManagerServer)(nil).GetCDN), arg0, arg1) +} + +// GetScheduler mocks base method. +func (m *MockManagerServer) GetScheduler(arg0 context.Context, arg1 *manager.GetSchedulerRequest) (*manager.Scheduler, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetScheduler", arg0, arg1) + ret0, _ := ret[0].(*manager.Scheduler) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetScheduler indicates an expected call of GetScheduler. +func (mr *MockManagerServerMockRecorder) GetScheduler(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerServer)(nil).GetScheduler), arg0, arg1) +} + +// KeepAlive mocks base method. +func (m *MockManagerServer) KeepAlive(arg0 manager.Manager_KeepAliveServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "KeepAlive", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// KeepAlive indicates an expected call of KeepAlive. +func (mr *MockManagerServerMockRecorder) KeepAlive(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerServer)(nil).KeepAlive), arg0) +} + +// ListSchedulers mocks base method. 
+func (m *MockManagerServer) ListSchedulers(arg0 context.Context, arg1 *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSchedulers", arg0, arg1) + ret0, _ := ret[0].(*manager.ListSchedulersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSchedulers indicates an expected call of ListSchedulers. +func (mr *MockManagerServerMockRecorder) ListSchedulers(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerServer)(nil).ListSchedulers), arg0, arg1) +} + +// UpdateCDN mocks base method. +func (m *MockManagerServer) UpdateCDN(arg0 context.Context, arg1 *manager.UpdateCDNRequest) (*manager.CDN, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateCDN", arg0, arg1) + ret0, _ := ret[0].(*manager.CDN) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateCDN indicates an expected call of UpdateCDN. +func (mr *MockManagerServerMockRecorder) UpdateCDN(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCDN", reflect.TypeOf((*MockManagerServer)(nil).UpdateCDN), arg0, arg1) +} + +// UpdateScheduler mocks base method. +func (m *MockManagerServer) UpdateScheduler(arg0 context.Context, arg1 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateScheduler", arg0, arg1) + ret0, _ := ret[0].(*manager.Scheduler) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateScheduler indicates an expected call of UpdateScheduler. +func (mr *MockManagerServerMockRecorder) UpdateScheduler(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerServer)(nil).UpdateScheduler), arg0, arg1) +} diff --git a/pkg/rpc/scheduler/client/mocks/client_mock.go b/pkg/rpc/scheduler/client/mocks/client_mock.go new file mode 100644 index 00000000000..c1cbc535449 --- /dev/null +++ b/pkg/rpc/scheduler/client/mocks/client_mock.go @@ -0,0 +1,143 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/scheduler/client/client.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + dfnet "d7y.io/dragonfly/v2/internal/dfnet" + scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + client "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" +) + +// MockSchedulerClient is a mock of SchedulerClient interface. +type MockSchedulerClient struct { + ctrl *gomock.Controller + recorder *MockSchedulerClientMockRecorder +} + +// MockSchedulerClientMockRecorder is the mock recorder for MockSchedulerClient. +type MockSchedulerClientMockRecorder struct { + mock *MockSchedulerClient +} + +// NewMockSchedulerClient creates a new mock instance. +func NewMockSchedulerClient(ctrl *gomock.Controller) *MockSchedulerClient { + mock := &MockSchedulerClient{ctrl: ctrl} + mock.recorder = &MockSchedulerClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSchedulerClient) EXPECT() *MockSchedulerClientMockRecorder { + return m.recorder +} + +// Close mocks base method. 
+func (m *MockSchedulerClient) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockSchedulerClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockSchedulerClient)(nil).Close)) +} + +// LeaveTask mocks base method. +func (m *MockSchedulerClient) LeaveTask(arg0 context.Context, arg1 *scheduler.PeerTarget, arg2 ...grpc.CallOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "LeaveTask", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// LeaveTask indicates an expected call of LeaveTask. +func (mr *MockSchedulerClientMockRecorder) LeaveTask(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveTask", reflect.TypeOf((*MockSchedulerClient)(nil).LeaveTask), varargs...) +} + +// RegisterPeerTask mocks base method. +func (m *MockSchedulerClient) RegisterPeerTask(arg0 context.Context, arg1 *scheduler.PeerTaskRequest, arg2 ...grpc.CallOption) (*scheduler.RegisterResult, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RegisterPeerTask", varargs...) + ret0, _ := ret[0].(*scheduler.RegisterResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RegisterPeerTask indicates an expected call of RegisterPeerTask. +func (mr *MockSchedulerClientMockRecorder) RegisterPeerTask(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPeerTask", reflect.TypeOf((*MockSchedulerClient)(nil).RegisterPeerTask), varargs...) +} + +// ReportPeerResult mocks base method. +func (m *MockSchedulerClient) ReportPeerResult(arg0 context.Context, arg1 *scheduler.PeerResult, arg2 ...grpc.CallOption) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReportPeerResult", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReportPeerResult indicates an expected call of ReportPeerResult. +func (mr *MockSchedulerClientMockRecorder) ReportPeerResult(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeerResult", reflect.TypeOf((*MockSchedulerClient)(nil).ReportPeerResult), varargs...) +} + +// ReportPieceResult mocks base method. +func (m *MockSchedulerClient) ReportPieceResult(arg0 context.Context, arg1 string, arg2 *scheduler.PeerTaskRequest, arg3 ...grpc.CallOption) (client.PeerPacketStream, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReportPieceResult", varargs...) + ret0, _ := ret[0].(client.PeerPacketStream) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReportPieceResult indicates an expected call of ReportPieceResult. 
+func (mr *MockSchedulerClientMockRecorder) ReportPieceResult(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPieceResult", reflect.TypeOf((*MockSchedulerClient)(nil).ReportPieceResult), varargs...) +} + +// UpdateState mocks base method. +func (m *MockSchedulerClient) UpdateState(addrs []dfnet.NetAddr) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdateState", addrs) +} + +// UpdateState indicates an expected call of UpdateState. +func (mr *MockSchedulerClientMockRecorder) UpdateState(addrs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateState", reflect.TypeOf((*MockSchedulerClient)(nil).UpdateState), addrs) +} diff --git a/pkg/rpc/scheduler/client/mocks/peer_packet_stream_mock.go b/pkg/rpc/scheduler/client/mocks/peer_packet_stream_mock.go new file mode 100644 index 00000000000..952de067483 --- /dev/null +++ b/pkg/rpc/scheduler/client/mocks/peer_packet_stream_mock.go @@ -0,0 +1,64 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/scheduler/client/peer_packet_stream.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + + scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + gomock "github.com/golang/mock/gomock" +) + +// MockPeerPacketStream is a mock of PeerPacketStream interface. +type MockPeerPacketStream struct { + ctrl *gomock.Controller + recorder *MockPeerPacketStreamMockRecorder +} + +// MockPeerPacketStreamMockRecorder is the mock recorder for MockPeerPacketStream. +type MockPeerPacketStreamMockRecorder struct { + mock *MockPeerPacketStream +} + +// NewMockPeerPacketStream creates a new mock instance. +func NewMockPeerPacketStream(ctrl *gomock.Controller) *MockPeerPacketStream { + mock := &MockPeerPacketStream{ctrl: ctrl} + mock.recorder = &MockPeerPacketStreamMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPeerPacketStream) EXPECT() *MockPeerPacketStreamMockRecorder { + return m.recorder +} + +// Recv mocks base method. +func (m *MockPeerPacketStream) Recv() (*scheduler.PeerPacket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*scheduler.PeerPacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockPeerPacketStreamMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockPeerPacketStream)(nil).Recv)) +} + +// Send mocks base method. +func (m *MockPeerPacketStream) Send(pr *scheduler.PieceResult) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", pr) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockPeerPacketStreamMockRecorder) Send(pr interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockPeerPacketStream)(nil).Send), pr) +} diff --git a/pkg/rpc/scheduler/mocks/scheduler_mock.go b/pkg/rpc/scheduler/mocks/scheduler_mock.go new file mode 100644 index 00000000000..9f0e27f5589 --- /dev/null +++ b/pkg/rpc/scheduler/mocks/scheduler_mock.go @@ -0,0 +1,519 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: pkg/rpc/scheduler/scheduler_grpc.pb.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// MockSchedulerClient is a mock of SchedulerClient interface. +type MockSchedulerClient struct { + ctrl *gomock.Controller + recorder *MockSchedulerClientMockRecorder +} + +// MockSchedulerClientMockRecorder is the mock recorder for MockSchedulerClient. +type MockSchedulerClientMockRecorder struct { + mock *MockSchedulerClient +} + +// NewMockSchedulerClient creates a new mock instance. +func NewMockSchedulerClient(ctrl *gomock.Controller) *MockSchedulerClient { + mock := &MockSchedulerClient{ctrl: ctrl} + mock.recorder = &MockSchedulerClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSchedulerClient) EXPECT() *MockSchedulerClientMockRecorder { + return m.recorder +} + +// LeaveTask mocks base method. +func (m *MockSchedulerClient) LeaveTask(ctx context.Context, in *scheduler.PeerTarget, opts ...grpc.CallOption) (*emptypb.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "LeaveTask", varargs...) + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LeaveTask indicates an expected call of LeaveTask. +func (mr *MockSchedulerClientMockRecorder) LeaveTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveTask", reflect.TypeOf((*MockSchedulerClient)(nil).LeaveTask), varargs...) +} + +// RegisterPeerTask mocks base method. +func (m *MockSchedulerClient) RegisterPeerTask(ctx context.Context, in *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (*scheduler.RegisterResult, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RegisterPeerTask", varargs...) + ret0, _ := ret[0].(*scheduler.RegisterResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RegisterPeerTask indicates an expected call of RegisterPeerTask. +func (mr *MockSchedulerClientMockRecorder) RegisterPeerTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPeerTask", reflect.TypeOf((*MockSchedulerClient)(nil).RegisterPeerTask), varargs...) +} + +// ReportPeerResult mocks base method. +func (m *MockSchedulerClient) ReportPeerResult(ctx context.Context, in *scheduler.PeerResult, opts ...grpc.CallOption) (*emptypb.Empty, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReportPeerResult", varargs...) + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReportPeerResult indicates an expected call of ReportPeerResult. 
+func (mr *MockSchedulerClientMockRecorder) ReportPeerResult(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeerResult", reflect.TypeOf((*MockSchedulerClient)(nil).ReportPeerResult), varargs...) +} + +// ReportPieceResult mocks base method. +func (m *MockSchedulerClient) ReportPieceResult(ctx context.Context, opts ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReportPieceResult", varargs...) + ret0, _ := ret[0].(scheduler.Scheduler_ReportPieceResultClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReportPieceResult indicates an expected call of ReportPieceResult. +func (mr *MockSchedulerClientMockRecorder) ReportPieceResult(ctx interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPieceResult", reflect.TypeOf((*MockSchedulerClient)(nil).ReportPieceResult), varargs...) +} + +// MockScheduler_ReportPieceResultClient is a mock of Scheduler_ReportPieceResultClient interface. +type MockScheduler_ReportPieceResultClient struct { + ctrl *gomock.Controller + recorder *MockScheduler_ReportPieceResultClientMockRecorder +} + +// MockScheduler_ReportPieceResultClientMockRecorder is the mock recorder for MockScheduler_ReportPieceResultClient. +type MockScheduler_ReportPieceResultClientMockRecorder struct { + mock *MockScheduler_ReportPieceResultClient +} + +// NewMockScheduler_ReportPieceResultClient creates a new mock instance. +func NewMockScheduler_ReportPieceResultClient(ctrl *gomock.Controller) *MockScheduler_ReportPieceResultClient { + mock := &MockScheduler_ReportPieceResultClient{ctrl: ctrl} + mock.recorder = &MockScheduler_ReportPieceResultClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScheduler_ReportPieceResultClient) EXPECT() *MockScheduler_ReportPieceResultClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method. +func (m *MockScheduler_ReportPieceResultClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockScheduler_ReportPieceResultClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockScheduler_ReportPieceResultClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Context)) +} + +// Header mocks base method. 
+func (m *MockScheduler_ReportPieceResultClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Header)) +} + +// Recv mocks base method. +func (m *MockScheduler_ReportPieceResultClient) Recv() (*scheduler.PeerPacket, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*scheduler.PeerPacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m_2 *MockScheduler_ReportPieceResultClient) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockScheduler_ReportPieceResultClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).RecvMsg), m) +} + +// Send mocks base method. +func (m *MockScheduler_ReportPieceResultClient) Send(arg0 *scheduler.PieceResult) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Send), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockScheduler_ReportPieceResultClient) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockScheduler_ReportPieceResultClientMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).SendMsg), m) +} + +// Trailer mocks base method. +func (m *MockScheduler_ReportPieceResultClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. +func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Trailer)) +} + +// MockSchedulerServer is a mock of SchedulerServer interface. +type MockSchedulerServer struct { + ctrl *gomock.Controller + recorder *MockSchedulerServerMockRecorder +} + +// MockSchedulerServerMockRecorder is the mock recorder for MockSchedulerServer. 
+type MockSchedulerServerMockRecorder struct { + mock *MockSchedulerServer +} + +// NewMockSchedulerServer creates a new mock instance. +func NewMockSchedulerServer(ctrl *gomock.Controller) *MockSchedulerServer { + mock := &MockSchedulerServer{ctrl: ctrl} + mock.recorder = &MockSchedulerServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSchedulerServer) EXPECT() *MockSchedulerServerMockRecorder { + return m.recorder +} + +// LeaveTask mocks base method. +func (m *MockSchedulerServer) LeaveTask(arg0 context.Context, arg1 *scheduler.PeerTarget) (*emptypb.Empty, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LeaveTask", arg0, arg1) + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LeaveTask indicates an expected call of LeaveTask. +func (mr *MockSchedulerServerMockRecorder) LeaveTask(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveTask", reflect.TypeOf((*MockSchedulerServer)(nil).LeaveTask), arg0, arg1) +} + +// RegisterPeerTask mocks base method. +func (m *MockSchedulerServer) RegisterPeerTask(arg0 context.Context, arg1 *scheduler.PeerTaskRequest) (*scheduler.RegisterResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterPeerTask", arg0, arg1) + ret0, _ := ret[0].(*scheduler.RegisterResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RegisterPeerTask indicates an expected call of RegisterPeerTask. +func (mr *MockSchedulerServerMockRecorder) RegisterPeerTask(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPeerTask", reflect.TypeOf((*MockSchedulerServer)(nil).RegisterPeerTask), arg0, arg1) +} + +// ReportPeerResult mocks base method. +func (m *MockSchedulerServer) ReportPeerResult(arg0 context.Context, arg1 *scheduler.PeerResult) (*emptypb.Empty, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReportPeerResult", arg0, arg1) + ret0, _ := ret[0].(*emptypb.Empty) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReportPeerResult indicates an expected call of ReportPeerResult. +func (mr *MockSchedulerServerMockRecorder) ReportPeerResult(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeerResult", reflect.TypeOf((*MockSchedulerServer)(nil).ReportPeerResult), arg0, arg1) +} + +// ReportPieceResult mocks base method. +func (m *MockSchedulerServer) ReportPieceResult(arg0 scheduler.Scheduler_ReportPieceResultServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReportPieceResult", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReportPieceResult indicates an expected call of ReportPieceResult. +func (mr *MockSchedulerServerMockRecorder) ReportPieceResult(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPieceResult", reflect.TypeOf((*MockSchedulerServer)(nil).ReportPieceResult), arg0) +} + +// mustEmbedUnimplementedSchedulerServer mocks base method. +func (m *MockSchedulerServer) mustEmbedUnimplementedSchedulerServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedSchedulerServer") +} + +// mustEmbedUnimplementedSchedulerServer indicates an expected call of mustEmbedUnimplementedSchedulerServer. 
+func (mr *MockSchedulerServerMockRecorder) mustEmbedUnimplementedSchedulerServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedSchedulerServer", reflect.TypeOf((*MockSchedulerServer)(nil).mustEmbedUnimplementedSchedulerServer)) +} + +// MockUnsafeSchedulerServer is a mock of UnsafeSchedulerServer interface. +type MockUnsafeSchedulerServer struct { + ctrl *gomock.Controller + recorder *MockUnsafeSchedulerServerMockRecorder +} + +// MockUnsafeSchedulerServerMockRecorder is the mock recorder for MockUnsafeSchedulerServer. +type MockUnsafeSchedulerServerMockRecorder struct { + mock *MockUnsafeSchedulerServer +} + +// NewMockUnsafeSchedulerServer creates a new mock instance. +func NewMockUnsafeSchedulerServer(ctrl *gomock.Controller) *MockUnsafeSchedulerServer { + mock := &MockUnsafeSchedulerServer{ctrl: ctrl} + mock.recorder = &MockUnsafeSchedulerServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUnsafeSchedulerServer) EXPECT() *MockUnsafeSchedulerServerMockRecorder { + return m.recorder +} + +// mustEmbedUnimplementedSchedulerServer mocks base method. +func (m *MockUnsafeSchedulerServer) mustEmbedUnimplementedSchedulerServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedSchedulerServer") +} + +// mustEmbedUnimplementedSchedulerServer indicates an expected call of mustEmbedUnimplementedSchedulerServer. +func (mr *MockUnsafeSchedulerServerMockRecorder) mustEmbedUnimplementedSchedulerServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedSchedulerServer", reflect.TypeOf((*MockUnsafeSchedulerServer)(nil).mustEmbedUnimplementedSchedulerServer)) +} + +// MockScheduler_ReportPieceResultServer is a mock of Scheduler_ReportPieceResultServer interface. +type MockScheduler_ReportPieceResultServer struct { + ctrl *gomock.Controller + recorder *MockScheduler_ReportPieceResultServerMockRecorder +} + +// MockScheduler_ReportPieceResultServerMockRecorder is the mock recorder for MockScheduler_ReportPieceResultServer. +type MockScheduler_ReportPieceResultServerMockRecorder struct { + mock *MockScheduler_ReportPieceResultServer +} + +// NewMockScheduler_ReportPieceResultServer creates a new mock instance. +func NewMockScheduler_ReportPieceResultServer(ctrl *gomock.Controller) *MockScheduler_ReportPieceResultServer { + mock := &MockScheduler_ReportPieceResultServer{ctrl: ctrl} + mock.recorder = &MockScheduler_ReportPieceResultServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScheduler_ReportPieceResultServer) EXPECT() *MockScheduler_ReportPieceResultServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockScheduler_ReportPieceResultServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).Context)) +} + +// Recv mocks base method. 
+func (m *MockScheduler_ReportPieceResultServer) Recv() (*scheduler.PieceResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*scheduler.PieceResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m_2 *MockScheduler_ReportPieceResultServer) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockScheduler_ReportPieceResultServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).RecvMsg), m) +} + +// Send mocks base method. +func (m *MockScheduler_ReportPieceResultServer) Send(arg0 *scheduler.PeerPacket) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).Send), arg0) +} + +// SendHeader mocks base method. +func (m *MockScheduler_ReportPieceResultServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. +func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockScheduler_ReportPieceResultServer) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SendMsg), m) +} + +// SetHeader mocks base method. +func (m *MockScheduler_ReportPieceResultServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockScheduler_ReportPieceResultServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. 
+func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SetTrailer), arg0) +} diff --git a/pkg/rpc/scheduler/scheduler.go b/pkg/rpc/scheduler/scheduler.go index bcfcc20c825..eda94ac2aa0 100644 --- a/pkg/rpc/scheduler/scheduler.go +++ b/pkg/rpc/scheduler/scheduler.go @@ -26,7 +26,7 @@ func NewZeroPieceResult(taskID, peerID string) *PieceResult { TaskId: taskID, SrcPid: peerID, PieceInfo: &base.PieceInfo{ - PieceNum: common.ZeroOfPiece, + PieceNum: common.BeginOfPiece, RangeStart: 0, RangeSize: 0, PieceMd5: "", diff --git a/pkg/rpc/scheduler/server/mocks/server_mock.go b/pkg/rpc/scheduler/server/mocks/server_mock.go new file mode 100644 index 00000000000..9e89ad4de5f --- /dev/null +++ b/pkg/rpc/scheduler/server/mocks/server_mock.go @@ -0,0 +1,93 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: pkg/rpc/scheduler/server/server.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + gomock "github.com/golang/mock/gomock" +) + +// MockSchedulerServer is a mock of SchedulerServer interface. +type MockSchedulerServer struct { + ctrl *gomock.Controller + recorder *MockSchedulerServerMockRecorder +} + +// MockSchedulerServerMockRecorder is the mock recorder for MockSchedulerServer. +type MockSchedulerServerMockRecorder struct { + mock *MockSchedulerServer +} + +// NewMockSchedulerServer creates a new mock instance. +func NewMockSchedulerServer(ctrl *gomock.Controller) *MockSchedulerServer { + mock := &MockSchedulerServer{ctrl: ctrl} + mock.recorder = &MockSchedulerServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSchedulerServer) EXPECT() *MockSchedulerServerMockRecorder { + return m.recorder +} + +// LeaveTask mocks base method. +func (m *MockSchedulerServer) LeaveTask(arg0 context.Context, arg1 *scheduler.PeerTarget) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LeaveTask", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// LeaveTask indicates an expected call of LeaveTask. +func (mr *MockSchedulerServerMockRecorder) LeaveTask(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveTask", reflect.TypeOf((*MockSchedulerServer)(nil).LeaveTask), arg0, arg1) +} + +// RegisterPeerTask mocks base method. +func (m *MockSchedulerServer) RegisterPeerTask(arg0 context.Context, arg1 *scheduler.PeerTaskRequest) (*scheduler.RegisterResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterPeerTask", arg0, arg1) + ret0, _ := ret[0].(*scheduler.RegisterResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RegisterPeerTask indicates an expected call of RegisterPeerTask. +func (mr *MockSchedulerServerMockRecorder) RegisterPeerTask(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPeerTask", reflect.TypeOf((*MockSchedulerServer)(nil).RegisterPeerTask), arg0, arg1) +} + +// ReportPeerResult mocks base method. 
+func (m *MockSchedulerServer) ReportPeerResult(arg0 context.Context, arg1 *scheduler.PeerResult) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReportPeerResult", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReportPeerResult indicates an expected call of ReportPeerResult. +func (mr *MockSchedulerServerMockRecorder) ReportPeerResult(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeerResult", reflect.TypeOf((*MockSchedulerServer)(nil).ReportPeerResult), arg0, arg1) +} + +// ReportPieceResult mocks base method. +func (m *MockSchedulerServer) ReportPieceResult(arg0 scheduler.Scheduler_ReportPieceResultServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReportPieceResult", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReportPieceResult indicates an expected call of ReportPieceResult. +func (mr *MockSchedulerServerMockRecorder) ReportPieceResult(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPieceResult", reflect.TypeOf((*MockSchedulerServer)(nil).ReportPieceResult), arg0) +} diff --git a/pkg/util/structutils/struct_utils_test.go b/pkg/util/structutils/struct_utils_test.go index 2be7022b97b..6b1606cb78e 100644 --- a/pkg/util/structutils/struct_utils_test.go +++ b/pkg/util/structutils/struct_utils_test.go @@ -29,7 +29,7 @@ func TestStructToMap(t *testing.T) { expect func(*testing.T, map[string]interface{}, error) }{ { - name: "conver struct to map succeeded", + name: "convert struct to map", s: struct { Name string Age float64 @@ -62,7 +62,7 @@ func TestStructToMap(t *testing.T) { }, }, { - name: "conver nil to map succeeded", + name: "convert nil to map", s: nil, expect: func(t *testing.T, m map[string]interface{}, err error) { assert := assert.New(t) diff --git a/scheduler/config/config.go b/scheduler/config/config.go index d3c80178875..1f45dfa7680 100644 --- a/scheduler/config/config.go +++ b/scheduler/config/config.go @@ -17,137 +17,268 @@ package config import ( - "net" - "runtime" "time" "github.com/pkg/errors" "d7y.io/dragonfly/v2/cmd/dependency/base" - dc "d7y.io/dragonfly/v2/internal/dynconfig" "d7y.io/dragonfly/v2/pkg/util/hostutils" "d7y.io/dragonfly/v2/pkg/util/net/iputils" ) type Config struct { + // Base options base.Options `yaml:",inline" mapstructure:",squash"` - Scheduler *SchedulerConfig `yaml:"scheduler" mapstructure:"scheduler"` - Server *ServerConfig `yaml:"server" mapstructure:"server"` - DynConfig *DynConfig `yaml:"dynConfig" mapstructure:"dynConfig"` - Manager *ManagerConfig `yaml:"manager" mapstructure:"manager"` - Host *HostConfig `yaml:"host" mapstructure:"host"` - Job *JobConfig `yaml:"job" mapstructure:"job"` - Metrics *MetricsConfig `yaml:"metrics" mapstructure:"metrics"` - DisableCDN bool `yaml:"disableCDN" mapstructure:"disableCDN"` + + // Scheduler configuration + Scheduler *SchedulerConfig `yaml:"scheduler" mapstructure:"scheduler"` + + // Server configuration + Server *ServerConfig `yaml:"server" mapstructure:"server"` + + // Dynconfig configuration + DynConfig *DynConfig `yaml:"dynConfig" mapstructure:"dynConfig"` + + // Manager configuration + Manager *ManagerConfig `yaml:"manager" mapstructure:"manager"` + + // Host configuration + Host *HostConfig `yaml:"host" mapstructure:"host"` + + // Job configuration + Job *JobConfig `yaml:"job" mapstructure:"job"` + + // Metrics configuration + Metrics *MetricsConfig `yaml:"metrics" mapstructure:"metrics"` } +// New creates the
 func New() *Config {
 	return &Config{
+		Server: &ServerConfig{
+			IP: iputils.IPv4,
+			Host: hostutils.FQDNHostname,
+			Port: 8002,
+			ListenLimit: 1000,
+		},
 		Scheduler: &SchedulerConfig{
-			Algorithm: "default",
-			WorkerNum: runtime.GOMAXPROCS(0),
-			BackSourceCount: 3,
-			AccessWindow: 3 * time.Minute,
-			CandidateParentCount: 10,
-			Scheduler: "basic",
-			CDNLoad: 100,
-			ClientLoad: 10,
-			OpenMonitor: false,
+			Algorithm: "default",
+			BackSourceCount: 3,
+			RetryLimit: 10,
+			RetryInterval: 1 * time.Second,
 			GC: &GCConfig{
 				PeerGCInterval: 1 * time.Minute,
+				PeerTTL: 5 * time.Minute,
 				TaskGCInterval: 1 * time.Minute,
-				PeerTTL: 10 * time.Minute,
-				PeerTTI: 3 * time.Minute,
 				TaskTTL: 10 * time.Minute,
-				TaskTTI: 3 * time.Minute,
 			},
 		},
-		Server: &ServerConfig{
-			IP: iputils.IPv4,
-			Host: hostutils.FQDNHostname,
-			Port: 8002,
-		},
 		DynConfig: &DynConfig{
-			Type: dc.LocalSourceType,
-			ExpireTime: 30 * time.Second,
-			CDNDirPath: "",
-			Data: &DynconfigData{
-				CDNs: []*CDN{
-					{
-						HostName: "localhost",
-						IP: "127.0.0.1",
-						Port: 8003,
-						DownloadPort: 8001,
-						SecurityGroup: "",
-						Location: "",
-						IDC: "",
-					},
-				},
-			},
+			RefreshInterval: 5 * time.Minute,
 		},
+		Host: &HostConfig{},
 		Manager: &ManagerConfig{
-			Addr: "",
-			SchedulerClusterID: 0,
+			Enable: true,
+			SchedulerClusterID: 1,
 			KeepAlive: KeepAliveConfig{
 				Interval: 5 * time.Second,
 			},
 		},
-		Host: &HostConfig{
-			Location: "",
-			IDC: "",
-		},
 		Job: &JobConfig{
+			Enable: true,
 			GlobalWorkerNum: 10,
 			SchedulerWorkerNum: 10,
 			LocalWorkerNum: 10,
 			Redis: &RedisConfig{
-				Host: "",
 				Port: 6379,
-				Password: "",
 				BrokerDB: 1,
 				BackendDB: 2,
 			},
 		},
-		DisableCDN: false,
+		Metrics: &MetricsConfig{
+			Enable: false,
+			EnablePeerHost: false,
+		},
 	}
 }

+// Validate config parameters
 func (c *Config) Validate() error {
-	if c.DynConfig.CDNDirPath == "" {
-		if c.DynConfig.Type == dc.LocalSourceType && c.DynConfig.Data == nil {
-			return errors.New("dynconfig is LocalSourceType type requires parameter data")
-		}
+	if c.Server.IP == "" {
+		return errors.New("server requires parameter ip")
 	}

-	if c.DynConfig.Type == dc.ManagerSourceType {
-		if c.DynConfig.ExpireTime == 0 {
-			return errors.New("dynconfig is ManagerSourceType type requires parameter expireTime")
-		}
+	if c.Server.Host == "" {
+		return errors.New("server requires parameter host")
+	}
+
+	if c.Server.Port <= 0 {
+		return errors.New("server requires parameter port")
+	}
+
+	if c.Server.ListenLimit <= 0 {
+		return errors.New("server requires parameter listenLimit")
+	}
+
+	if c.Scheduler.Algorithm == "" {
+		return errors.New("scheduler requires parameter algorithm")
+	}
+
+	if c.Scheduler.RetryLimit <= 0 {
+		return errors.New("scheduler requires parameter retryLimit")
+	}
+
+	if c.Scheduler.RetryInterval <= 0 {
+		return errors.New("scheduler requires parameter retryInterval")
+	}
+
+	if c.Scheduler.GC.PeerGCInterval <= 0 {
+		return errors.New("scheduler requires parameter peerGCInterval")
+	}
+
+	if c.Scheduler.GC.PeerTTL <= 0 {
+		return errors.New("scheduler requires parameter peerTTL")
+	}
+
+	if c.Scheduler.GC.TaskGCInterval <= 0 {
+		return errors.New("scheduler requires parameter taskGCInterval")
+	}
+
+	if c.Scheduler.GC.TaskTTL <= 0 {
+		return errors.New("scheduler requires parameter taskTTL")
+	}
+
+	if c.DynConfig.RefreshInterval <= 0 {
+		return errors.New("dynconfig requires parameter refreshInterval")
+	}
+
+	if c.Manager.Enable {
 		if c.Manager.Addr == "" {
-			return errors.New("dynconfig is ManagerSourceType type requires parameter manager addr")
+			return errors.New("manager requires parameter addr")
 		}

 		if c.Manager.SchedulerClusterID == 0 {
-			return errors.New("dynconfig is ManagerSourceType type requires parameter manager schedulerClusterID")
+			return errors.New("manager requires parameter schedulerClusterID")
+		}
+
+		if c.Manager.KeepAlive.Interval <= 0 {
+			return errors.New("manager requires parameter keepAlive interval")
 		}
 	}

-	return nil
-}
+	if c.Job.Enable {
+		if c.Job.GlobalWorkerNum == 0 {
+			return errors.New("job requires parameter globalWorkerNum")
+		}
+
+		if c.Job.SchedulerWorkerNum == 0 {
+			return errors.New("job requires parameter schedulerWorkerNum")
+		}
+
+		if c.Job.LocalWorkerNum == 0 {
+			return errors.New("job requires parameter localWorkerNum")
+		}
+
+		if c.Job.Redis.Host == "" {
+			return errors.New("job requires parameter redis host")
+		}
+
+		if c.Job.Redis.Port <= 0 {
+			return errors.New("job requires parameter redis port")
+		}
+
+		if c.Job.Redis.BrokerDB <= 0 {
+			return errors.New("job requires parameter redis brokerDB")
+		}

-func (c *Config) Convert() error {
-	if c.Manager.Addr != "" && c.Job.Redis.Host == "" {
-		host, _, err := net.SplitHostPort(c.Manager.Addr)
-		if err != nil {
-			return err
+		if c.Job.Redis.BackendDB <= 0 {
+			return errors.New("job requires parameter redis backendDB")
 		}
-		c.Job.Redis.Host = host
 	}
+
+	if c.Metrics.Enable {
+		if c.Metrics.Addr == "" {
+			return errors.New("metrics requires parameter addr")
+		}
+	}
+
 	return nil
 }
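For orientation, a minimal sketch of how the new New/Validate pair is meant to be driven by a caller. The call site below is an assumption for illustration, not part of this patch; note that New enables Manager and Job by default, so Manager.Addr and Job.Redis.Host must be populated before Validate passes:

	package main

	import (
		"log"

		"d7y.io/dragonfly/v2/scheduler/config"
	)

	func main() {
		cfg := config.New()
		cfg.Manager.Addr = "127.0.0.1:65003" // required while Manager.Enable is true
		cfg.Job.Redis.Host = "127.0.0.1"     // required while Job.Enable is true
		if err := cfg.Validate(); err != nil {
			log.Fatalf("invalid scheduler config: %v", err)
		}
	}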

+type ServerConfig struct {
+	// Server ip
+	IP string `yaml:"ip" mapstructure:"ip"`
+
+	// Server hostname
+	Host string `yaml:"host" mapstructure:"host"`
+
+	// Server port
+	Port int `yaml:"port" mapstructure:"port"`
+
+	// Limit the number of requests
+	ListenLimit int `yaml:"listenLimit" mapstructure:"listenLimit"`
+
+	// Server dynamic config cache directory
+	CacheDir string `yaml:"cacheDir" mapstructure:"cacheDir"`
+
+	// Server log directory
+	LogDir string `yaml:"logDir" mapstructure:"logDir"`
+}
+
+type SchedulerConfig struct {
+	// Scheduling algorithm used by the scheduler
+	Algorithm string `yaml:"algorithm" mapstructure:"algorithm"`
+
+	// Maximum number of clients allowed to back-to-source for a single task
+	BackSourceCount int `yaml:"backSourceCount" mapstructure:"backSourceCount"`
+
+	// Maximum number of scheduling retries
+	RetryLimit int `yaml:"retryLimit" mapstructure:"retryLimit"`
+
+	// Retry scheduling interval
+	RetryInterval time.Duration `yaml:"retryInterval" mapstructure:"retryInterval"`
+
+	// Task and peer gc configuration
+	GC *GCConfig `yaml:"gc" mapstructure:"gc"`
+}
+
+type GCConfig struct {
+	// Peer gc interval
+	PeerGCInterval time.Duration `yaml:"peerGCInterval" mapstructure:"peerGCInterval"`
+
+	// Peer time to live
+	PeerTTL time.Duration `yaml:"peerTTL" mapstructure:"peerTTL"`
+
+	// Task gc interval
+	TaskGCInterval time.Duration `yaml:"taskGCInterval" mapstructure:"taskGCInterval"`
+
+	// Task time to live
+	TaskTTL time.Duration `yaml:"taskTTL" mapstructure:"taskTTL"`
+}
+
+type DynConfig struct {
+	// RefreshInterval is the refresh interval for the manager cache.
+	RefreshInterval time.Duration `yaml:"refreshInterval" mapstructure:"refreshInterval"`
+
+	// CDNDir is the cdn dir path.
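+	// When set, CDN entries are loaded from the yaml files under this
+	// directory instead of from the manager; see getCDNFromDirPath in
+	// scheduler/config/dynconfig.go below.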
+	CDNDir string `yaml:"cdnDir" mapstructure:"cdnDir"`
+}
+
+type HostConfig struct {
+	// IDC for scheduler
+	IDC string `mapstructure:"idc" yaml:"idc"`
+
+	// NetTopology for scheduler
+	NetTopology string `mapstructure:"netTopology" yaml:"netTopology"`
+
+	// Location for scheduler
+	Location string `mapstructure:"location" yaml:"location"`
+}
+
 type ManagerConfig struct {
+	// Enable contact with manager
+	Enable bool `yaml:"enable" mapstructure:"enable"`
+
 	// Addr is manager address.
 	Addr string `yaml:"addr" mapstructure:"addr"`
@@ -163,76 +294,47 @@ type KeepAliveConfig struct {
 	Interval time.Duration `yaml:"interval" mapstructure:"interval"`
 }

-type DynConfig struct {
-	// Type is dynconfig source type.
-	Type dc.SourceType `yaml:"type" mapstructure:"type"`
+type JobConfig struct {
+	// Enable job service
+	Enable bool `yaml:"enable" mapstructure:"enable"`

-	// ExpireTime is expire time for manager cache.
-	ExpireTime time.Duration `yaml:"expireTime" mapstructure:"expireTime"`
+	// Number of workers in global queue
+	GlobalWorkerNum uint `yaml:"globalWorkerNum" mapstructure:"globalWorkerNum"`

-	// CDNDirPath is cdn dir.
-	CDNDirPath string `yaml:"cdnDirPath" mapstructure:"cdnDirPath"`
+	// Number of workers in scheduler queue
+	SchedulerWorkerNum uint `yaml:"schedulerWorkerNum" mapstructure:"schedulerWorkerNum"`

-	// Data is dynconfig local data.
-	Data *DynconfigData `yaml:"data" mapstructure:"data"`
-}
+	// Number of workers in local queue
+	LocalWorkerNum uint `yaml:"localWorkerNum" mapstructure:"localWorkerNum"`

-type SchedulerConfig struct {
-	Algorithm string `yaml:"algorithm" mapstructure:"algorithm"`
-	WorkerNum int `yaml:"workerNum" mapstructure:"workerNum"`
-	BackSourceCount int32 `yaml:"backSourceCount" mapstructure:"backSourceCount"`
-	// AccessWindow should less than CDN task expireTime
-	AccessWindow time.Duration `yaml:"accessWindow" mapstructure:"accessWindow"`
-	CandidateParentCount int `yaml:"candidateParentCount" mapstructure:"candidateParentCount"`
-	Scheduler string `yaml:"scheduler" mapstructure:"scheduler"`
-	CDNLoad int `yaml:"cdnLoad" mapstructure:"cdnLoad"`
-	ClientLoad int32 `yaml:"clientLoad" mapstructure:"clientLoad"`
-	OpenMonitor bool `yaml:"openMonitor" mapstructure:"openMonitor"`
-	GC *GCConfig `yaml:"gc" mapstructure:"gc"`
+	// Redis configuration
+	Redis *RedisConfig `yaml:"redis" mapstructure:"redis"`
 }

-type ServerConfig struct {
-	IP string `yaml:"ip" mapstructure:"ip"`
-	Host string `yaml:"host" mapstructure:"host"`
-	Port int `yaml:"port" mapstructure:"port"`
-	CacheDir string `yaml:"cacheDir" mapstructure:"cacheDir"`
-	LogDir string `yaml:"logDir" mapstructure:"logDir"`
-}
+type RedisConfig struct {
+	// Server hostname
+	Host string `yaml:"host" mapstructure:"host"`

-type GCConfig struct {
-	PeerGCInterval time.Duration `yaml:"peerGCInterval" mapstructure:"peerGCInterval"`
-	// PeerTTL is advised to set the time to be smaller than the expire time of a task in the CDN
-	PeerTTL time.Duration `yaml:"peerTTL" mapstructure:"peerTTL"`
-	PeerTTI time.Duration `yaml:"peerTTI" mapstructure:"peerTTI"`
-	TaskGCInterval time.Duration `yaml:"taskGCInterval" mapstructure:"taskGCInterval"`
-	TaskTTL time.Duration `yaml:"taskTTL" mapstructure:"taskTTL"`
-	TaskTTI time.Duration `yaml:"taskTTI" mapstructure:"taskTTI"`
-}
+	// Server port
+	Port int `yaml:"port" mapstructure:"port"`

-type MetricsConfig struct {
-	Addr string `yaml:"addr" mapstructure:"addr"`
-	EnablePeerHost bool `yaml:"enablePeerHost" mapstructure:"enablePeerHost"`
-}
+	// Server password
+	Password string `yaml:"password" mapstructure:"password"`

-type HostConfig struct {
-	// Location for scheduler
-	Location string `mapstructure:"location" yaml:"location"`
+	// Broker database number
+	BrokerDB int `yaml:"brokerDB" mapstructure:"brokerDB"`

-	// IDC for scheduler
-	IDC string `mapstructure:"idc" yaml:"idc"`
+	// Backend database number
+	BackendDB int `yaml:"backendDB" mapstructure:"backendDB"`
 }

-type RedisConfig struct {
-	Host string `yaml:"host" mapstructure:"host"`
-	Port int `yaml:"port" mapstructure:"port"`
-	Password string `yaml:"password" mapstructure:"password"`
-	BrokerDB int `yaml:"brokerDB" mapstructure:"brokerDB"`
-	BackendDB int `yaml:"backendDB" mapstructure:"backendDB"`
-}
+type MetricsConfig struct {
+	// Enable metrics service
+	Enable bool `yaml:"enable" mapstructure:"enable"`

-type JobConfig struct {
-	GlobalWorkerNum uint `yaml:"globalWorkerNum" mapstructure:"globalWorkerNum"`
-	SchedulerWorkerNum uint `yaml:"schedulerWorkerNum" mapstructure:"schedulerWorkerNum"`
-	LocalWorkerNum uint `yaml:"localWorkerNum" mapstructure:"localWorkerNum"`
-	Redis *RedisConfig `yaml:"redis" mapstructure:"redis"`
+	// Metrics service address
+	Addr string `yaml:"addr" mapstructure:"addr"`
+
+	// Enable peer host metrics
+	EnablePeerHost bool `yaml:"enablePeerHost" mapstructure:"enablePeerHost"`
 }
diff --git a/scheduler/config/config_test.go b/scheduler/config/config_test.go
index 969bd9b3ae2..f7401cb160d 100644
--- a/scheduler/config/config_test.go
+++ b/scheduler/config/config_test.go
@@ -18,64 +18,73 @@ package config

 import (
 	"os"
-	"reflect"
 	"testing"
 	"time"

 	"github.com/mitchellh/mapstructure"
 	testifyassert "github.com/stretchr/testify/assert"
 	"gopkg.in/yaml.v3"
-
-	dc "d7y.io/dragonfly/v2/internal/dynconfig"
 )

-func TestSchedulerConfig_Load(t *testing.T) {
+func TestConfig_Load(t *testing.T) {
 	assert := testifyassert.New(t)

 	config := &Config{
-		DynConfig: &DynConfig{
-			Type: dc.LocalSourceType,
-			ExpireTime: 1000,
-			CDNDirPath: "tmp",
+		Server: &ServerConfig{
+			IP: "127.0.0.1",
+			Host: "foo",
+			Port: 8002,
+			ListenLimit: 1000,
+			CacheDir: "foo",
+			LogDir: "bar",
 		},
 		Scheduler: &SchedulerConfig{
-			Algorithm: "default",
-			WorkerNum: 8,
+			Algorithm: "default",
+			BackSourceCount: 3,
+			RetryLimit: 10,
+			RetryInterval: 1 * time.Second,
+			GC: &GCConfig{
+				PeerGCInterval: 1 * time.Minute,
+				PeerTTL: 5 * time.Minute,
+				TaskGCInterval: 1 * time.Minute,
+				TaskTTL: 10 * time.Minute,
+			},
 		},
-		Server: &ServerConfig{
-			IP: "127.0.0.1",
-			Host: "foo",
-			Port: 8002,
-			CacheDir: "foo",
-			LogDir: "foo",
+		DynConfig: &DynConfig{
+			RefreshInterval: 5 * time.Minute,
+			CDNDir: "foo",
+		},
+		Host: &HostConfig{
+			IDC: "foo",
+			NetTopology: "bar",
+			Location: "baz",
 		},
 		Manager: &ManagerConfig{
+			Enable: true,
 			Addr: "127.0.0.1:65003",
 			SchedulerClusterID: 1,
 			KeepAlive: KeepAliveConfig{
-				Interval: 1 * time.Second,
+				Interval: 5 * time.Second,
 			},
 		},
-		Host: &HostConfig{
-			IDC: "foo",
-			Location: "bar",
-		},
 		Job: &JobConfig{
+			Enable: true,
 			GlobalWorkerNum: 1,
 			SchedulerWorkerNum: 1,
 			LocalWorkerNum: 5,
 			Redis: &RedisConfig{
 				Host: "127.0.0.1",
 				Port: 6379,
-				Password: "password",
+				Password: "foo",
 				BrokerDB: 1,
 				BackendDB: 2,
 			},
 		},
 		Metrics: &MetricsConfig{
-			Addr: ":8000",
+			Enable: false,
+			Addr: ":8000",
+			EnablePeerHost: false,
 		},
-		DisableCDN: true,
 	}

 	schedulerConfigYAML := &Config{}
@@ -88,106 +97,6 @@ func TestSchedulerConfig_Load(t *testing.T) {
 	if err := mapstructure.Decode(dataYAML, &schedulerConfigYAML); err != nil {
 		t.Fatal(err)
 	}
-	assert.True(reflect.DeepEqual(config,
schedulerConfigYAML)) -} -func TestConvert(t *testing.T) { - tests := []struct { - name string - value *Config - expect func(t *testing.T, cfg *Config, err error) - }{ - { - name: "convert common config", - value: &Config{ - Manager: &ManagerConfig{ - Addr: "127.0.0.1:65003", - }, - Job: &JobConfig{ - Redis: &RedisConfig{ - Host: "", - }, - }, - }, - expect: func(t *testing.T, cfg *Config, err error) { - assert := testifyassert.New(t) - assert.Equal("127.0.0.1", cfg.Job.Redis.Host) - }, - }, - { - name: "convert config when host not empty", - value: &Config{ - Manager: &ManagerConfig{ - Addr: "127.0.0.1:65003", - }, - Job: &JobConfig{ - Redis: &RedisConfig{ - Host: "111.111.11.1", - }, - }, - }, - expect: func(t *testing.T, cfg *Config, err error) { - assert := testifyassert.New(t) - assert.Equal("111.111.11.1", cfg.Job.Redis.Host) - }, - }, - { - name: "convert config when manager addr is empty", - value: &Config{ - Manager: &ManagerConfig{ - Addr: "", - }, - Job: &JobConfig{ - Redis: &RedisConfig{ - Host: "111.111.11.1", - }, - }, - }, - expect: func(t *testing.T, cfg *Config, err error) { - assert := testifyassert.New(t) - assert.Equal("111.111.11.1", cfg.Job.Redis.Host) - }, - }, - { - name: "convert config when manager host is empty", - value: &Config{ - Manager: &ManagerConfig{ - Addr: ":65003", - }, - Job: &JobConfig{ - Redis: &RedisConfig{ - Host: "", - }, - }, - }, - expect: func(t *testing.T, cfg *Config, err error) { - assert := testifyassert.New(t) - assert.Equal("", cfg.Job.Redis.Host) - }, - }, - { - name: "convert config when manager host is localhost", - value: &Config{ - Manager: &ManagerConfig{ - Addr: "localhost:65003", - }, - Job: &JobConfig{ - Redis: &RedisConfig{ - Host: "", - }, - }, - }, - expect: func(t *testing.T, cfg *Config, err error) { - assert := testifyassert.New(t) - assert.Equal("localhost", cfg.Job.Redis.Host) - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - err := tc.value.Convert() - tc.expect(t, tc.value, err) - }) - } + assert.EqualValues(config, schedulerConfigYAML) } diff --git a/scheduler/config/constants_otel.go b/scheduler/config/constants_otel.go deleted file mode 100644 index a9ae117e5dd..00000000000 --- a/scheduler/config/constants_otel.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package config - -import "go.opentelemetry.io/otel/attribute" - -const ( - AttributePeerRegisterRequest = attribute.Key("d7y.peer.register.request") - AttributeTaskSizeScope = attribute.Key("d7y.task.size.scope") - AttributeSinglePiece = attribute.Key("d7y.peer.single.piece") - AttributePieceReceived = attribute.Key("d7y.peer.piece.received") - AttributeLeavePeerID = attribute.Key("d7y.leave.peer.id") - AttributeLeaveTaskID = attribute.Key("d7y.leave.task.id") - AttributeReportPeerID = attribute.Key("d7y.report.peer.id") - AttributePeerDownloadSuccess = attribute.Key("d7y.peer.download.success") - AttributeDownloadFileURL = attribute.Key("d7y.file.url") - AttributeContentLength = attribute.Key("d7y.source.content.length") - AttributePeerDownloadResult = attribute.Key("d7y.peer.download.result") - AttributeSchedulePacket = attribute.Key("d7y.schedule.packet") - AttributeTaskID = attribute.Key("d7y.peer.task.id") - AttributePeerID = attribute.Key("d7y.peer.id") - AttributeCDNSeedRequest = attribute.Key("d7y.cdn.seed.request") - AttributeNeedSeedCDN = attribute.Key("d7y.need.seed.cdn") - AttributeTaskStatus = attribute.Key("d7y.task.status") - AttributeLastTriggerTime = attribute.Key("d7y.task.last.trigger.time") - AttributeClientBackSource = attribute.Key("d7y.need.client.back-source") - AttributeTriggerCDNError = attribute.Key("d7y.trigger.cdn.error") -) - -const ( - SpanPeerRegister = "peer-register" - SpanTriggerCDNSeed = "trigger-cdn-seed" - SpanReportPieceResult = "report-piece-result" - SpanReportPeerResult = "report-peer-result" - SpanPeerLeave = "peer-leave" - SpanPreheat = "preheat" -) - -const ( - EventSmallTaskSelectParentFail = "small-task-select-parent-fail" - EventPeerNotFound = "peer-not-found" - EventHostNotFound = "host-not-found" - EventCreateCDNPeer = "create-cdn-peer" - EventCDNPieceReceived = "receive-cdn-piece" - EventPeerDownloaded = "downloaded" - EventDownloadTinyFile = "download-tiny-file" - EventCDNFailBackClientSource = "cdn-fail-back-client-source" -) diff --git a/scheduler/config/dynconfig.go b/scheduler/config/dynconfig.go index b25f3a7b778..0ae36dfca44 100644 --- a/scheduler/config/dynconfig.go +++ b/scheduler/config/dynconfig.go @@ -30,8 +30,11 @@ import ( ) var ( + // Cache filename cacheFileName = "scheduler_dynconfig" - watchInterval = 1 * time.Second + + // Notify observer interval + watchInterval = 10 * time.Second ) type DynconfigData struct { @@ -40,15 +43,14 @@ type DynconfigData struct { } type CDN struct { - ID uint `yaml:"id" mapstructure:"id" json:"id"` - HostName string `yaml:"hostname" mapstructure:"hostname" json:"host_name"` - IP string `yaml:"ip" mapstructure:"ip" json:"ip"` - Port int32 `yaml:"port" mapstructure:"port" json:"port"` - DownloadPort int32 `yaml:"downloadPort" mapstructure:"downloadPort" json:"download_port"` - SecurityGroup string `yaml:"securityGroup" mapstructure:"securityGroup" json:"security_group"` - Location string `yaml:"location" mapstructure:"location" json:"location"` - IDC string `yaml:"idc" mapstructure:"idc" json:"idc"` - CDNCluster *CDNCluster `yaml:"cdnCluster" mapstructure:"cdnCluster" json:"cdn_cluster"` + ID uint `yaml:"id" mapstructure:"id" json:"id"` + Hostname string `yaml:"hostname" mapstructure:"hostname" json:"host_name"` + IP string `yaml:"ip" mapstructure:"ip" json:"ip"` + Port int32 `yaml:"port" mapstructure:"port" json:"port"` + DownloadPort int32 `yaml:"downloadPort" mapstructure:"downloadPort" json:"download_port"` + Location string `yaml:"location" mapstructure:"location" json:"location"` 
+ IDC string `yaml:"idc" mapstructure:"idc" json:"idc"` + CDNCluster *CDNCluster `yaml:"cdnCluster" mapstructure:"cdnCluster" json:"cdn_cluster"` } type CDNCluster struct { @@ -109,30 +111,36 @@ type Observer interface { type dynconfig struct { *dc.Dynconfig - observers map[Observer]struct{} - done chan bool - cdnDirPath string - cachePath string - sourceType dc.SourceType + observers map[Observer]struct{} + done chan bool + cdnDir string + cachePath string } // TODO(Gaius) Rely on manager to delete cdnDirPath -func NewDynconfig(sourceType dc.SourceType, cacheDir string, cdnDirPath string, options ...dc.Option) (DynconfigInterface, error) { +func NewDynconfig(rawManagerClient managerclient.Client, cacheDir string, cfg *Config) (DynconfigInterface, error) { cachePath := filepath.Join(cacheDir, cacheFileName) d := &dynconfig{ - observers: map[Observer]struct{}{}, - done: make(chan bool), - cdnDirPath: cdnDirPath, - sourceType: sourceType, - cachePath: cachePath, + observers: map[Observer]struct{}{}, + done: make(chan bool), + cdnDir: cfg.DynConfig.CDNDir, + cachePath: cachePath, } - options = append(options, dc.WithCachePath(cachePath)) - client, err := dc.New(sourceType, options...) - if err != nil { - return nil, err + if rawManagerClient != nil { + client, err := dc.New( + dc.ManagerSourceType, + dc.WithCachePath(cachePath), + dc.WithExpireTime(cfg.DynConfig.RefreshInterval), + dc.WithManagerClient(newManagerClient(rawManagerClient, cfg)), + ) + if err != nil { + return nil, err + } + + d.Dynconfig = client } - d.Dynconfig = client + return d, nil } @@ -192,7 +200,7 @@ func (d *dynconfig) GetCDNClusterConfig(id uint) (types.CDNClusterConfig, bool) func (d *dynconfig) Get() (*DynconfigData, error) { var config DynconfigData - if d.cdnDirPath != "" { + if d.cdnDir != "" { cdns, err := d.getCDNFromDirPath() if err != nil { return nil, err @@ -201,28 +209,14 @@ func (d *dynconfig) Get() (*DynconfigData, error) { return &config, nil } - if d.sourceType == dc.ManagerSourceType { - if err := d.Unmarshal(&config); err != nil { - return nil, err - } - return &config, nil - } - - if err := d.Unmarshal(&struct { - Dynconfig *DynConfig `yaml:"dynconfig" mapstructure:"dynconfig"` - }{ - Dynconfig: &DynConfig{ - Data: &config, - }, - }); err != nil { + if err := d.Unmarshal(&config); err != nil { return nil, err } - return &config, nil } func (d *dynconfig) getCDNFromDirPath() ([]*CDN, error) { - files, err := os.ReadDir(d.cdnDirPath) + files, err := os.ReadDir(d.cdnDir) if err != nil { return nil, err } @@ -234,7 +228,7 @@ func (d *dynconfig) getCDNFromDirPath() ([]*CDN, error) { continue } - p := filepath.Join(d.cdnDirPath, file.Name()) + p := filepath.Join(d.cdnDir, file.Name()) if file.Type()&os.ModeSymlink != 0 { stat, err := os.Stat(p) if err != nil { @@ -289,7 +283,6 @@ func (d *dynconfig) Serve() error { } go d.watch() - return nil } @@ -317,12 +310,13 @@ func (d *dynconfig) Stop() error { return nil } +// Manager client for dynconfig type managerClient struct { managerclient.Client config *Config } -func NewManagerClient(client managerclient.Client, cfg *Config) dc.ManagerClient { +func newManagerClient(client managerclient.Client, cfg *Config) dc.ManagerClient { return &managerClient{ Client: client, config: cfg, diff --git a/scheduler/config/dynconfig_test.go b/scheduler/config/dynconfig_test.go index 20351368595..5dc0fed91a9 100644 --- a/scheduler/config/dynconfig_test.go +++ b/scheduler/config/dynconfig_test.go @@ -26,26 +26,34 @@ import ( "github.com/pkg/errors" 
"github.com/stretchr/testify/assert" - dc "d7y.io/dragonfly/v2/internal/dynconfig" "d7y.io/dragonfly/v2/pkg/rpc/manager" - "d7y.io/dragonfly/v2/scheduler/config/mocks" + "d7y.io/dragonfly/v2/pkg/rpc/manager/client/mocks" ) -func TestDynconfigGet_ManagerSourceType(t *testing.T) { +func TestDynconfig_GetManagerSourceType(t *testing.T) { mockCacheDir := t.TempDir() + mockConfig := &Config{ + DynConfig: &DynConfig{}, + Server: &ServerConfig{ + Host: "localhost", + }, + Manager: &ManagerConfig{ + SchedulerClusterID: 1, + }, + } mockCachePath := filepath.Join(mockCacheDir, cacheFileName) tests := []struct { - name string - expire time.Duration - sleep func() - cleanFileCache func(t *testing.T) - mock func(m *mocks.MockClientMockRecorder) - expect func(t *testing.T, data *DynconfigData, err error) + name string + refreshInterval time.Duration + sleep func() + cleanFileCache func(t *testing.T) + mock func(m *mocks.MockClientMockRecorder) + expect func(t *testing.T, data *DynconfigData, err error) }{ { - name: "get dynconfig success", - expire: 10 * time.Second, + name: "get dynconfig success", + refreshInterval: 10 * time.Second, cleanFileCache: func(t *testing.T) { if err := os.Remove(mockCachePath); err != nil { t.Fatal(err) @@ -66,15 +74,15 @@ func TestDynconfigGet_ManagerSourceType(t *testing.T) { }, expect: func(t *testing.T, data *DynconfigData, err error) { assert := assert.New(t) - assert.Equal(data.CDNs[0].HostName, "foo") + assert.Equal(data.CDNs[0].Hostname, "foo") assert.Equal(data.CDNs[0].IP, "127.0.0.1") assert.Equal(data.CDNs[0].Port, int32(8001)) assert.Equal(data.CDNs[0].DownloadPort, int32(8003)) }, }, { - name: "client failed to return for the second time", - expire: 10 * time.Millisecond, + name: "refresh dynconfig", + refreshInterval: 10 * time.Millisecond, cleanFileCache: func(t *testing.T) { if err := os.Remove(mockCachePath); err != nil { t.Fatal(err) @@ -100,7 +108,7 @@ func TestDynconfigGet_ManagerSourceType(t *testing.T) { }, expect: func(t *testing.T, data *DynconfigData, err error) { assert := assert.New(t) - assert.Equal(data.CDNs[0].HostName, "foo") + assert.Equal(data.CDNs[0].Hostname, "foo") assert.Equal(data.CDNs[0].IP, "127.0.0.1") assert.Equal(data.CDNs[0].Port, int32(8001)) assert.Equal(data.CDNs[0].DownloadPort, int32(8003)) @@ -115,13 +123,8 @@ func TestDynconfigGet_ManagerSourceType(t *testing.T) { mockManagerClient := mocks.NewMockClient(ctl) tc.mock(mockManagerClient.EXPECT()) - d, err := NewDynconfig(dc.ManagerSourceType, mockCacheDir, "", []dc.Option{ - dc.WithManagerClient(NewManagerClient(mockManagerClient, &Config{ - Manager: &ManagerConfig{SchedulerClusterID: uint(1)}, - Server: &ServerConfig{Host: "foo"}, - })), - dc.WithExpireTime(tc.expire), - }...) 
+ mockConfig.DynConfig.RefreshInterval = tc.refreshInterval + d, err := NewDynconfig(mockManagerClient, mockCacheDir, mockConfig) if err != nil { t.Fatal(err) } @@ -134,62 +137,7 @@ func TestDynconfigGet_ManagerSourceType(t *testing.T) { } } -func TestDynconfigGet_LocalSourceType(t *testing.T) { - mockCacheDir := t.TempDir() - - tests := []struct { - name string - configPath string - expect func(t *testing.T, data *DynconfigData, err error) - }{ - { - name: "get CDN from local config", - configPath: filepath.Join("./testdata", "dynconfig", "scheduler.yaml"), - expect: func(t *testing.T, data *DynconfigData, err error) { - assert := assert.New(t) - assert.Equal( - &DynconfigData{ - CDNs: []*CDN{ - { - HostName: "foo", - IP: "127.0.0.1", - Port: 8001, - DownloadPort: 8003, - }, - { - HostName: "bar", - IP: "127.0.0.1", - Port: 8001, - DownloadPort: 8003, - }, - }, - }, data) - }, - }, - { - name: "directory does not exist", - configPath: filepath.Join("./testdata", "foo"), - expect: func(t *testing.T, data *DynconfigData, err error) { - assert := assert.New(t) - assert.EqualError(err, "open testdata/foo: no such file or directory") - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - d, err := NewDynconfig(dc.LocalSourceType, mockCacheDir, "", dc.WithLocalConfigPath(tc.configPath)) - if err != nil { - t.Fatal(err) - } - - data, err := d.Get() - tc.expect(t, data, err) - }) - } -} - -func TestDynconfigGetCDNFromDirPath(t *testing.T) { +func TestDynconfig_GetCDNFromDirPath(t *testing.T) { mockCacheDir := t.TempDir() tests := []struct { @@ -202,8 +150,8 @@ func TestDynconfigGetCDNFromDirPath(t *testing.T) { cdnDirPath: filepath.Join("./testdata", "dynconfig", "cdn"), expect: func(t *testing.T, data *DynconfigData, err error) { assert := assert.New(t) - assert.Equal(data.CDNs[0].HostName, "foo") - assert.Equal(data.CDNs[1].HostName, "bar") + assert.Equal(data.CDNs[0].Hostname, "foo") + assert.Equal(data.CDNs[1].Hostname, "bar") assert.Equal(data.CDNs[0].Port, int32(8001)) assert.Equal(data.CDNs[1].Port, int32(8001)) assert.Equal(data.CDNs[0].DownloadPort, int32(8003)) @@ -223,7 +171,11 @@ func TestDynconfigGetCDNFromDirPath(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - d, err := NewDynconfig(dc.LocalSourceType, mockCacheDir, tc.cdnDirPath, dc.WithLocalConfigPath("./testdata/scheduler.yaml")) + d, err := NewDynconfig(nil, mockCacheDir, &Config{ + DynConfig: &DynConfig{ + CDNDir: tc.cdnDirPath, + }, + }) if err != nil { t.Fatal(err) } diff --git a/scheduler/config/mocks/dyncofig_mock.go b/scheduler/config/mocks/dyncofig_mock.go new file mode 100644 index 00000000000..2477e8b5cb4 --- /dev/null +++ b/scheduler/config/mocks/dyncofig_mock.go @@ -0,0 +1,197 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: scheduler/config/dynconfig.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + + types "d7y.io/dragonfly/v2/manager/types" + config "d7y.io/dragonfly/v2/scheduler/config" + gomock "github.com/golang/mock/gomock" +) + +// MockDynconfigInterface is a mock of DynconfigInterface interface. +type MockDynconfigInterface struct { + ctrl *gomock.Controller + recorder *MockDynconfigInterfaceMockRecorder +} + +// MockDynconfigInterfaceMockRecorder is the mock recorder for MockDynconfigInterface. +type MockDynconfigInterfaceMockRecorder struct { + mock *MockDynconfigInterface +} + +// NewMockDynconfigInterface creates a new mock instance. 
+func NewMockDynconfigInterface(ctrl *gomock.Controller) *MockDynconfigInterface { + mock := &MockDynconfigInterface{ctrl: ctrl} + mock.recorder = &MockDynconfigInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDynconfigInterface) EXPECT() *MockDynconfigInterfaceMockRecorder { + return m.recorder +} + +// Deregister mocks base method. +func (m *MockDynconfigInterface) Deregister(arg0 config.Observer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Deregister", arg0) +} + +// Deregister indicates an expected call of Deregister. +func (mr *MockDynconfigInterfaceMockRecorder) Deregister(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Deregister", reflect.TypeOf((*MockDynconfigInterface)(nil).Deregister), arg0) +} + +// Get mocks base method. +func (m *MockDynconfigInterface) Get() (*config.DynconfigData, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get") + ret0, _ := ret[0].(*config.DynconfigData) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockDynconfigInterfaceMockRecorder) Get() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDynconfigInterface)(nil).Get)) +} + +// GetCDNClusterConfig mocks base method. +func (m *MockDynconfigInterface) GetCDNClusterConfig(arg0 uint) (types.CDNClusterConfig, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCDNClusterConfig", arg0) + ret0, _ := ret[0].(types.CDNClusterConfig) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetCDNClusterConfig indicates an expected call of GetCDNClusterConfig. +func (mr *MockDynconfigInterfaceMockRecorder) GetCDNClusterConfig(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCDNClusterConfig", reflect.TypeOf((*MockDynconfigInterface)(nil).GetCDNClusterConfig), arg0) +} + +// GetSchedulerClusterClientConfig mocks base method. +func (m *MockDynconfigInterface) GetSchedulerClusterClientConfig() (types.SchedulerClusterClientConfig, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSchedulerClusterClientConfig") + ret0, _ := ret[0].(types.SchedulerClusterClientConfig) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetSchedulerClusterClientConfig indicates an expected call of GetSchedulerClusterClientConfig. +func (mr *MockDynconfigInterfaceMockRecorder) GetSchedulerClusterClientConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchedulerClusterClientConfig", reflect.TypeOf((*MockDynconfigInterface)(nil).GetSchedulerClusterClientConfig)) +} + +// GetSchedulerClusterConfig mocks base method. +func (m *MockDynconfigInterface) GetSchedulerClusterConfig() (types.SchedulerClusterConfig, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSchedulerClusterConfig") + ret0, _ := ret[0].(types.SchedulerClusterConfig) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetSchedulerClusterConfig indicates an expected call of GetSchedulerClusterConfig. +func (mr *MockDynconfigInterfaceMockRecorder) GetSchedulerClusterConfig() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchedulerClusterConfig", reflect.TypeOf((*MockDynconfigInterface)(nil).GetSchedulerClusterConfig)) +} + +// Notify mocks base method. 
+func (m *MockDynconfigInterface) Notify() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Notify") + ret0, _ := ret[0].(error) + return ret0 +} + +// Notify indicates an expected call of Notify. +func (mr *MockDynconfigInterfaceMockRecorder) Notify() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Notify", reflect.TypeOf((*MockDynconfigInterface)(nil).Notify)) +} + +// Register mocks base method. +func (m *MockDynconfigInterface) Register(arg0 config.Observer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Register", arg0) +} + +// Register indicates an expected call of Register. +func (mr *MockDynconfigInterfaceMockRecorder) Register(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockDynconfigInterface)(nil).Register), arg0) +} + +// Serve mocks base method. +func (m *MockDynconfigInterface) Serve() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Serve") + ret0, _ := ret[0].(error) + return ret0 +} + +// Serve indicates an expected call of Serve. +func (mr *MockDynconfigInterfaceMockRecorder) Serve() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Serve", reflect.TypeOf((*MockDynconfigInterface)(nil).Serve)) +} + +// Stop mocks base method. +func (m *MockDynconfigInterface) Stop() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stop") + ret0, _ := ret[0].(error) + return ret0 +} + +// Stop indicates an expected call of Stop. +func (mr *MockDynconfigInterfaceMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockDynconfigInterface)(nil).Stop)) +} + +// MockObserver is a mock of Observer interface. +type MockObserver struct { + ctrl *gomock.Controller + recorder *MockObserverMockRecorder +} + +// MockObserverMockRecorder is the mock recorder for MockObserver. +type MockObserverMockRecorder struct { + mock *MockObserver +} + +// NewMockObserver creates a new mock instance. +func NewMockObserver(ctrl *gomock.Controller) *MockObserver { + mock := &MockObserver{ctrl: ctrl} + mock.recorder = &MockObserverMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockObserver) EXPECT() *MockObserverMockRecorder { + return m.recorder +} + +// OnNotify mocks base method. +func (m *MockObserver) OnNotify(arg0 *config.DynconfigData) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnNotify", arg0) +} + +// OnNotify indicates an expected call of OnNotify. 
+func (mr *MockObserverMockRecorder) OnNotify(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnNotify", reflect.TypeOf((*MockObserver)(nil).OnNotify), arg0) +} diff --git a/scheduler/config/testdata/scheduler.yaml b/scheduler/config/testdata/scheduler.yaml index aa7c7c4ead2..e91e04e571a 100644 --- a/scheduler/config/testdata/scheduler.yaml +++ b/scheduler/config/testdata/scheduler.yaml @@ -1,55 +1,51 @@ -dynconfig: - type: local - expireTime: 1000 - cdnDirPath: tmp +server: + ip: 127.0.0.1 + host: foo + port: 8002 + listenLimit: 1000 + cacheDir: foo + logDir: bar scheduler: - workerNum: 8 - workerJobPoolSize: 10000 - senderNum: 10 - senderJobPoolSize: 10000 algorithm: default + backSourceCount: 3 + retryLimit: 10 + retryInterval: 1000000000 + gc: + peerGCInterval: 60000000000 + peerTTL: 300000000000 + taskGCInterval: 60000000000 + taskTTL: 600000000000 -server: - ip: "127.0.0.1" - host: "foo" - port: 8002 - cacheDir: "foo" - logDir: "foo" - -cdn: - servers: - - name: "cdn" - ip: "127.0.0.1" - rpcPort: 8003 - downloadPort: 8001 +dynconfig: + refreshInterval: 300000000000 + cdnDir: foo -gc: - taskDelay: 3600000 - peerTaskDelay: 3600000 +host: + idc: foo + netTopology: bar + location: baz manager: + enable: true addr: 127.0.0.1:65003 schedulerClusterID: 1 keepAlive: - interval: 1000000000 - -host: - idc: foo - location: bar + interval: 5000000000 job: + enable: true globalWorkerNum: 1 schedulerWorkerNum: 1 localWorkerNum: 5 redis: - host: "127.0.0.1" + host: 127.0.0.1 port: 6379 - password: "password" + password: foo brokerDB: 1 backendDB: 2 metrics: + enable: false addr: ":8000" - -disableCDN: true + enablePeerHost: false diff --git a/scheduler/core/evaluator/evaluator_base_test.go b/scheduler/core/evaluator/evaluator_base_test.go deleted file mode 100644 index c5753897a89..00000000000 --- a/scheduler/core/evaluator/evaluator_base_test.go +++ /dev/null @@ -1,667 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package evaluator - -import ( - "testing" - - "github.com/google/uuid" - "github.com/stretchr/testify/assert" - - "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/util/mathutils" - "d7y.io/dragonfly/v2/scheduler/supervisor" -) - -const ( - cdnHostType = "cdn" - clientHostType = "client" -) - -const ( - mockIP = "127.0.0.1" - mockTaskURL = "https://example.com" -) - -type factor struct { - hostType string - securityDomain string - idc string - location string - netTopology string - totalUploadLoad uint32 - currentUploadLoad uint32 - finishedPieceCount int32 - hostUUID string - taskPieceCount int32 -} - -func TestEvaluatorEvaluate(t *testing.T) { - tests := []struct { - name string - parent *factor - child *factor - expect func(t *testing.T, v float64) - }{ - { - name: "evaluate succeeded with cdn peer", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 0, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0.6))) - }, - }, - { - name: "evaluate with different securityDomain", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foz", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 0, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0))) - }, - }, - { - name: "evaluate with empty securityDomain", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 0, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0.6))) - }, - }, - { - name: "evaluate with different idc", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 0, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "baz", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0.45))) - }, - }, - { - name: "evaluate with different location", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d", - netTopology: "a|b|c|d|e", - totalUploadLoad: 100, - currentUploadLoad: 0, - 
finishedPieceCount: 0, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0.59))) - }, - }, - { - name: "evaluate with empty location", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foo", - idc: "bar", - location: "", - netTopology: "a|b|c|d|e", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 0, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0.55))) - }, - }, - { - name: "evaluate with excessive location", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e|f", - netTopology: "a|b|c|d|e", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 0, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e|f", - netTopology: "a|b|c|d|e", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0.6))) - }, - }, - { - name: "evaluate with different netTopology", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 0, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0.58))) - }, - }, - { - name: "evaluate with empty netTopology", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 0, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0.5))) - }, - }, - { - name: "evaluate with excessive netTopology", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e|f", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 0, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e|f", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0.6))) - }, - }, - { 
- name: "evaluate with task piece count", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e|f", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 20, - hostUUID: "example", - taskPieceCount: 100, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e|f", - finishedPieceCount: 0, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(0.68))) - }, - }, - { - name: "evaluate without task piece count", - parent: &factor{ - hostType: cdnHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e|f", - totalUploadLoad: 100, - currentUploadLoad: 0, - finishedPieceCount: 20, - hostUUID: "example", - taskPieceCount: 0, - }, - child: &factor{ - hostType: clientHostType, - securityDomain: "foo", - idc: "bar", - location: "a|b|c|d|e", - netTopology: "a|b|c|d|e|f", - finishedPieceCount: 10, - hostUUID: "example", - }, - expect: func(t *testing.T, v float64) { - assert := assert.New(t) - assert.True(mathutils.EqualFloat64(v, float64(4.6))) - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - task := supervisor.NewTask(idgen.TaskID(mockTaskURL, nil), mockTaskURL, nil) - - parentHost := supervisor.NewClientHost( - tc.parent.hostUUID, "", "", 0, 0, - tc.parent.securityDomain, - tc.parent.location, - tc.parent.idc, - supervisor.WithNetTopology(tc.parent.netTopology), - supervisor.WithTotalUploadLoad(tc.parent.totalUploadLoad), - ) - parentHost.CurrentUploadLoad.Store(tc.parent.currentUploadLoad) - parent := supervisor.NewPeer(idgen.PeerID(mockIP), task, parentHost) - parent.TotalPieceCount.Store(tc.parent.finishedPieceCount) - - childHost := supervisor.NewClientHost( - tc.parent.hostUUID, "", "", 0, 0, - tc.child.securityDomain, - tc.child.location, - tc.child.idc, - supervisor.WithNetTopology(tc.child.netTopology), - ) - child := supervisor.NewPeer(idgen.PeerID(mockIP), task, childHost) - child.TotalPieceCount.Store(tc.child.finishedPieceCount) - - e := NewEvaluatorBase() - tc.expect(t, e.Evaluate(parent, child, tc.parent.taskPieceCount)) - }) - } -} - -func TestEvaluatorNeedAdjustParent(t *testing.T) { - tests := []struct { - name string - parent *factor - child *factor - expect func(t *testing.T, e Evaluator, parent *supervisor.Peer, child *supervisor.Peer) - }{ - { - name: "peer is CDN", - parent: &factor{ - hostType: clientHostType, - }, - child: &factor{ - hostType: cdnHostType, - }, - expect: func(t *testing.T, e Evaluator, parent *supervisor.Peer, child *supervisor.Peer) { - assert := assert.New(t) - assert.Equal(e.NeedAdjustParent(child), false) - }, - }, - { - name: "peer has no parent", - parent: &factor{ - hostType: clientHostType, - }, - child: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, parent *supervisor.Peer, child *supervisor.Peer) { - assert := assert.New(t) - assert.Equal(e.NeedAdjustParent(child), true) - }, - }, - { - name: "peer has done", - parent: &factor{ - hostType: clientHostType, - }, - child: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, parent *supervisor.Peer, child *supervisor.Peer) { - assert := assert.New(t) - assert.Equal(e.NeedAdjustParent(child), true) - }, - }, - { - name: "parent has leaved", - parent: &factor{ - hostType: clientHostType, - }, - 
child: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, parent *supervisor.Peer, child *supervisor.Peer) { - assert := assert.New(t) - child.SetParent(parent) - parent.Leave() - assert.Equal(e.NeedAdjustParent(child), true) - }, - }, - { - name: "empty costs", - parent: &factor{ - hostType: clientHostType, - }, - child: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, parent *supervisor.Peer, child *supervisor.Peer) { - assert := assert.New(t) - child.SetParent(parent) - assert.Equal(e.NeedAdjustParent(child), false) - }, - }, - { - name: "costs are not normal distribution and peer should not be scheduler", - parent: &factor{ - hostType: clientHostType, - }, - child: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, parent *supervisor.Peer, child *supervisor.Peer) { - assert := assert.New(t) - child.SetParent(parent) - child.SetPieceCosts([]int{1, 2, 3, 4, 5, 6, 7, 8, 9}...) - assert.Equal(e.NeedAdjustParent(child), false) - }, - }, - { - name: "costs are not normal distribution and peer should be scheduler", - parent: &factor{ - hostType: clientHostType, - }, - child: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, parent *supervisor.Peer, child *supervisor.Peer) { - assert := assert.New(t) - child.SetParent(parent) - child.SetPieceCosts([]int{1, 2, 3, 4, 5, 6, 7, 8, 23}...) - assert.Equal(e.NeedAdjustParent(child), true) - }, - }, - { - name: "costs are normal distribution and peer should not be scheduler", - parent: &factor{ - hostType: clientHostType, - }, - child: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, parent *supervisor.Peer, child *supervisor.Peer) { - assert := assert.New(t) - child.SetParent(parent) - child.SetPieceCosts([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 5}...) - assert.Equal(e.NeedAdjustParent(child), false) - }, - }, - { - name: "costs are normal distribution and peer should be scheduler", - parent: &factor{ - hostType: clientHostType, - }, - child: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, parent *supervisor.Peer, child *supervisor.Peer) { - assert := assert.New(t) - child.SetParent(parent) - child.SetPieceCosts([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 15}...) 
- assert.Equal(e.NeedAdjustParent(child), true) - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - task := supervisor.NewTask(idgen.TaskID(mockTaskURL, nil), mockTaskURL, nil) - - parentHost := supervisor.NewClientHost(uuid.NewString(), "", "", 0, 0, "", "", "") - parent := supervisor.NewPeer(idgen.PeerID(mockIP), task, parentHost) - - var child *supervisor.Peer - if tc.child.hostType == cdnHostType { - childHost := supervisor.NewCDNHost(uuid.NewString(), "", "", 0, 0, "", "", "") - child = supervisor.NewPeer(idgen.CDNPeerID(mockIP), task, childHost) - } else { - childHost := supervisor.NewClientHost(uuid.NewString(), "", "", 0, 0, "", "", "") - child = supervisor.NewPeer(idgen.PeerID(mockIP), task, childHost) - } - - e := NewEvaluatorBase() - tc.expect(t, e, parent, child) - }) - } -} - -func TestEvaluatorIsBadNode(t *testing.T) { - tests := []struct { - name string - peer *factor - expect func(t *testing.T, e Evaluator, peer *supervisor.Peer) - }{ - { - name: "peer is bad", - peer: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, peer *supervisor.Peer) { - assert := assert.New(t) - peer.SetStatus(supervisor.PeerStatusFail) - assert.Equal(e.IsBadNode(peer), true) - }, - }, - { - name: "peer is CDN", - peer: &factor{ - hostType: cdnHostType, - }, - expect: func(t *testing.T, e Evaluator, peer *supervisor.Peer) { - assert := assert.New(t) - assert.Equal(e.IsBadNode(peer), false) - }, - }, - { - name: "empty costs", - peer: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, peer *supervisor.Peer) { - assert := assert.New(t) - assert.Equal(e.IsBadNode(peer), false) - }, - }, - { - name: "costs length is available and peer is not bad node", - peer: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, peer *supervisor.Peer) { - assert := assert.New(t) - peer.SetPieceCosts([]int{1, 2, 3, 4, 5, 6, 7, 8, 9}...) - assert.Equal(e.IsBadNode(peer), false) - }, - }, - { - name: "costs length is available and peer is bad node", - peer: &factor{ - hostType: clientHostType, - }, - expect: func(t *testing.T, e Evaluator, peer *supervisor.Peer) { - assert := assert.New(t) - peer.SetPieceCosts([]int{1, 2, 3, 4, 5, 6, 7, 8, 181}...) - assert.Equal(e.IsBadNode(peer), true) - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - task := supervisor.NewTask(idgen.TaskID(mockTaskURL, nil), mockTaskURL, nil) - - var peer *supervisor.Peer - if tc.peer.hostType == cdnHostType { - childHost := supervisor.NewCDNHost(uuid.NewString(), "", "", 0, 0, "", "", "") - peer = supervisor.NewPeer(idgen.CDNPeerID(mockIP), task, childHost) - } else { - childHost := supervisor.NewClientHost(uuid.NewString(), "", "", 0, 0, "", "", "") - peer = supervisor.NewPeer(idgen.PeerID(mockIP), task, childHost) - } - - e := NewEvaluatorBase() - tc.expect(t, e, peer) - }) - } -} diff --git a/scheduler/core/events.go b/scheduler/core/events.go deleted file mode 100644 index a6f627ddc71..00000000000 --- a/scheduler/core/events.go +++ /dev/null @@ -1,438 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package core - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/trace" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/util/workqueue" - - "d7y.io/dragonfly/v2/internal/dferrors" - logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/container/list" - "d7y.io/dragonfly/v2/pkg/rpc/base" - schedulerRPC "d7y.io/dragonfly/v2/pkg/rpc/scheduler" - "d7y.io/dragonfly/v2/scheduler/config" - "d7y.io/dragonfly/v2/scheduler/core/scheduler" - "d7y.io/dragonfly/v2/scheduler/supervisor" -) - -type event interface { - hashKey() string - apply(s *state) -} - -type rsPeer struct { - times int32 - peer *supervisor.Peer - blankParents sets.String -} - -type state struct { - sched scheduler.Scheduler - peerManager supervisor.PeerManager - cdn supervisor.CDN - waitScheduleParentPeerQueue workqueue.DelayingInterface -} - -func newState(sched scheduler.Scheduler, peerManager supervisor.PeerManager, cdn supervisor.CDN, wsdq workqueue.DelayingInterface) *state { - return &state{ - sched: sched, - peerManager: peerManager, - cdn: cdn, - waitScheduleParentPeerQueue: wsdq, - } -} - -type reScheduleParentEvent struct { - rsPeer *rsPeer -} - -var _ event = reScheduleParentEvent{} - -func (e reScheduleParentEvent) apply(s *state) { - rsPeer := e.rsPeer - rsPeer.times = rsPeer.times + 1 - peer := rsPeer.peer - if peer.Task.IsFail() { - if err := peer.CloseChannelWithError(dferrors.New(base.Code_SchedTaskStatusError, "schedule task status failed")); err != nil { - logger.WithTaskAndPeerID(peer.Task.ID, peer.ID).Warnf("close peer channel failed: %v", err) - } - return - } - oldParent, ok := peer.GetParent() - blankParents := rsPeer.blankParents - if ok && !blankParents.Has(oldParent.ID) { - logger.WithTaskAndPeerID(peer.Task.ID, - peer.ID).Warnf("reScheduleParent: peer already schedule a parent %s and new parent is not in blank parents", oldParent.ID) - return - } - - parent, candidates, hasParent := s.sched.ScheduleParent(peer, blankParents) - if !hasParent { - if peer.Task.CanBackToSource() && !peer.Task.ContainsBackToSourcePeer(peer.ID) { - if peer.CloseChannelWithError(dferrors.Newf(base.Code_SchedNeedBackSource, "peer %s need back source", peer.ID)) == nil { - peer.Task.AddBackToSourcePeer(peer.ID) - } - return - } - logger.Errorf("reScheduleParent: failed to schedule parent to peer %s, reschedule it later", peer.ID) - s.waitScheduleParentPeerQueue.AddAfter(rsPeer, time.Second) - return - } - - // TODO if parentPeer is equal with oldParent, need schedule again ? 
- if err := peer.SendSchedulePacket(constructSuccessPeerPacket(peer, parent, candidates)); err != nil { - sendErrorHandler(err, s, peer) - } -} - -func (e reScheduleParentEvent) hashKey() string { - return e.rsPeer.peer.Task.ID -} - -type startReportPieceResultEvent struct { - ctx context.Context - peer *supervisor.Peer -} - -var _ event = startReportPieceResultEvent{} - -func (e startReportPieceResultEvent) apply(s *state) { - span := trace.SpanFromContext(e.ctx) - if parent, ok := e.peer.GetParent(); ok { - e.peer.Log().Warnf("startReportPieceResultEvent: no need schedule parent because peer already had parent %s", parent.ID) - if err := e.peer.SendSchedulePacket(constructSuccessPeerPacket(e.peer, parent, nil)); err != nil { - sendErrorHandler(err, s, e.peer) - } - return - } - if e.peer.Task.ContainsBackToSourcePeer(e.peer.ID) { - e.peer.Log().Info("startReportPieceResultEvent: no need schedule parent because peer is back source peer") - return - } - - parent, candidates, hasParent := s.sched.ScheduleParent(e.peer, sets.NewString()) - // No parent node is currently available - if !hasParent { - if e.peer.Task.CanBackToSource() && !e.peer.Task.ContainsBackToSourcePeer(e.peer.ID) { - span.SetAttributes(config.AttributeClientBackSource.Bool(true)) - if e.peer.CloseChannelWithError(dferrors.Newf(base.Code_SchedNeedBackSource, "peer %s need back source", e.peer.ID)) == nil { - e.peer.Task.AddBackToSourcePeer(e.peer.ID) - } - logger.WithTaskAndPeerID(e.peer.Task.ID, - e.peer.ID).Info("startReportPieceResultEvent: peer need back source because no parent node is available for scheduling") - return - } - e.peer.Log().Warnf("startReportPieceResultEvent: no parent node is currently available,reschedule it later") - s.waitScheduleParentPeerQueue.AddAfter(&rsPeer{peer: e.peer}, time.Second) - return - } - if err := e.peer.SendSchedulePacket(constructSuccessPeerPacket(e.peer, parent, candidates)); err != nil { - sendErrorHandler(err, s, e.peer) - } -} - -func (e startReportPieceResultEvent) hashKey() string { - return e.peer.Task.ID -} - -type peerDownloadPieceSuccessEvent struct { - ctx context.Context - peer *supervisor.Peer - pr *schedulerRPC.PieceResult -} - -var _ event = peerDownloadPieceSuccessEvent{} - -func (e peerDownloadPieceSuccessEvent) apply(s *state) { - e.peer.UpdateProgress(e.pr.FinishedCount, int(e.pr.EndTime-e.pr.BeginTime)) - if e.peer.Task.ContainsBackToSourcePeer(e.peer.ID) { - e.peer.Task.GetOrAddPiece(e.pr.PieceInfo) - if !e.peer.Task.CanSchedule() { - e.peer.Log().Warnf("peerDownloadPieceSuccessEvent: update task status seeding") - e.peer.Task.SetStatus(supervisor.TaskStatusSeeding) - } - return - } - - var candidates []*supervisor.Peer - parentPeer, ok := s.peerManager.Get(e.pr.DstPid) - if !ok { - e.peer.Log().Warnf("parent peer %s not found", e.pr.DstPid) - return - } - - if parentPeer.IsLeave() { - e.peer.Log().Warnf("peerDownloadPieceSuccessEvent: need reschedule parent for peer because it's parent is already left") - e.peer.ReplaceParent(nil) - var hasParent bool - parentPeer, candidates, hasParent = s.sched.ScheduleParent(e.peer, sets.NewString(parentPeer.ID)) - if !hasParent { - e.peer.Log().Warnf("peerDownloadPieceSuccessEvent: no parent node is currently available, " + - "reschedule it later") - s.waitScheduleParentPeerQueue.AddAfter(&rsPeer{peer: e.peer, blankParents: sets.NewString(parentPeer.ID)}, time.Second) - return - } - } - - if oldParent, ok := e.peer.GetParent(); e.pr.DstPid != e.peer.ID && (!ok || oldParent.ID != e.pr.DstPid) { - 
logger.WithTaskAndPeerID(e.peer.Task.ID, e.peer.ID).Debugf("parent peerID is not same as DestPid, replace it's parent node with %s", - e.pr.DstPid) - e.peer.ReplaceParent(parentPeer) - } - - parentPeer.Touch() - if parentPeer.ID == e.pr.DstPid { - return - } - - // TODO if parentPeer is equal with oldParent, need schedule again ? - if err := e.peer.SendSchedulePacket(constructSuccessPeerPacket(e.peer, parentPeer, candidates)); err != nil { - sendErrorHandler(err, s, e.peer) - } -} - -func (e peerDownloadPieceSuccessEvent) hashKey() string { - return e.peer.Task.ID -} - -type peerDownloadPieceFailEvent struct { - ctx context.Context - peer *supervisor.Peer - pr *schedulerRPC.PieceResult -} - -var _ event = peerDownloadPieceFailEvent{} - -func (e peerDownloadPieceFailEvent) apply(s *state) { - if e.peer.Task.ContainsBackToSourcePeer(e.peer.ID) { - return - } - switch e.pr.Code { - case base.Code_ClientWaitPieceReady: - return - // FIXME check dst peer healthy before delete the peer - case base.Code_ClientPieceRequestFail, base.Code_PeerTaskNotFound: - s.peerManager.Delete(e.pr.DstPid) - case base.Code_CDNTaskNotFound, base.Code_CDNError, base.Code_CDNTaskDownloadFail: - s.peerManager.Delete(e.pr.DstPid) - go func() { - if _, err := s.cdn.StartSeedTask(e.ctx, e.peer.Task); err != nil { - e.peer.Log().Errorf("peerDownloadPieceFailEvent: seed task failed: %v", err) - } - }() - default: - e.peer.Log().Debugf("report piece download fail message, piece result %s", e.pr.String()) - } - s.waitScheduleParentPeerQueue.Add(&rsPeer{peer: e.peer, blankParents: sets.NewString(e.pr.DstPid)}) -} -func (e peerDownloadPieceFailEvent) hashKey() string { - return e.peer.Task.ID -} - -type taskSeedFailEvent struct { - task *supervisor.Task -} - -var _ event = taskSeedFailEvent{} - -func (e taskSeedFailEvent) apply(s *state) { - handleCDNSeedTaskFail(e.task) -} - -func (e taskSeedFailEvent) hashKey() string { - return e.task.ID -} - -type peerDownloadSuccessEvent struct { - peer *supervisor.Peer - peerResult *schedulerRPC.PeerResult -} - -var _ event = peerDownloadSuccessEvent{} - -func (e peerDownloadSuccessEvent) apply(s *state) { - e.peer.SetStatus(supervisor.PeerStatusSuccess) - if e.peer.Task.ContainsBackToSourcePeer(e.peer.ID) && !e.peer.Task.IsSuccess() { - e.peer.Task.UpdateSuccess(e.peerResult.TotalPieceCount, e.peerResult.ContentLength) - } - removePeerFromCurrentTree(e.peer, s) - children := s.sched.ScheduleChildren(e.peer, sets.NewString()) - for _, child := range children { - if err := child.SendSchedulePacket(constructSuccessPeerPacket(child, e.peer, nil)); err != nil { - sendErrorHandler(err, s, child) - } - } -} - -func (e peerDownloadSuccessEvent) hashKey() string { - return e.peer.Task.ID -} - -type peerDownloadFailEvent struct { - peer *supervisor.Peer - peerResult *schedulerRPC.PeerResult -} - -var _ event = peerDownloadFailEvent{} - -func (e peerDownloadFailEvent) apply(s *state) { - e.peer.SetStatus(supervisor.PeerStatusFail) - if e.peer.Task.ContainsBackToSourcePeer(e.peer.ID) && !e.peer.Task.IsSuccess() { - e.peer.Task.SetStatus(supervisor.TaskStatusFail) - handleCDNSeedTaskFail(e.peer.Task) - return - } - removePeerFromCurrentTree(e.peer, s) - e.peer.GetChildren().Range(func(key, value interface{}) bool { - child := (value).(*supervisor.Peer) - parent, candidates, hasParent := s.sched.ScheduleParent(child, sets.NewString(e.peer.ID)) - if !hasParent { - e.peer.Log().Warnf("peerDownloadFailEvent: there is no available parent, reschedule it later") - 
s.waitScheduleParentPeerQueue.AddAfter(&rsPeer{peer: e.peer, blankParents: sets.NewString(e.peer.ID)}, time.Second) - return true - } - if err := child.SendSchedulePacket(constructSuccessPeerPacket(child, parent, candidates)); err != nil { - sendErrorHandler(err, s, child) - } - return true - }) -} - -func (e peerDownloadFailEvent) hashKey() string { - return e.peer.Task.ID -} - -type peerLeaveEvent struct { - ctx context.Context - peer *supervisor.Peer -} - -var _ event = peerLeaveEvent{} - -func (e peerLeaveEvent) apply(s *state) { - e.peer.Leave() - removePeerFromCurrentTree(e.peer, s) - e.peer.GetChildren().Range(func(key, value interface{}) bool { - child := value.(*supervisor.Peer) - parent, candidates, hasParent := s.sched.ScheduleParent(child, sets.NewString(e.peer.ID)) - if !hasParent { - e.peer.Log().Warnf("handlePeerLeave: there is no available parent,reschedule it later") - s.waitScheduleParentPeerQueue.AddAfter(&rsPeer{peer: child, blankParents: sets.NewString(e.peer.ID)}, time.Second) - return true - } - if err := child.SendSchedulePacket(constructSuccessPeerPacket(child, parent, candidates)); err != nil { - sendErrorHandler(err, s, child) - } - return true - }) - s.peerManager.Delete(e.peer.ID) -} - -func (e peerLeaveEvent) hashKey() string { - return e.peer.Task.ID -} - -// constructSuccessPeerPacket construct success peer schedule packet -func constructSuccessPeerPacket(peer *supervisor.Peer, parent *supervisor.Peer, candidates []*supervisor.Peer) *schedulerRPC.PeerPacket { - mainPeer := &schedulerRPC.PeerPacket_DestPeer{ - Ip: parent.Host.IP, - RpcPort: parent.Host.RPCPort, - PeerId: parent.ID, - } - var stealPeers []*schedulerRPC.PeerPacket_DestPeer - for _, candidate := range candidates { - stealPeers = append(stealPeers, &schedulerRPC.PeerPacket_DestPeer{ - Ip: candidate.Host.IP, - RpcPort: candidate.Host.RPCPort, - PeerId: candidate.ID, - }) - } - peerPacket := &schedulerRPC.PeerPacket{ - TaskId: peer.Task.ID, - SrcPid: peer.ID, - ParallelCount: 1, - MainPeer: mainPeer, - StealPeers: stealPeers, - Code: base.Code_Success, - } - logger.Debugf("send peerPacket %#v to peer %s", peerPacket, peer.ID) - return peerPacket -} - -func handleCDNSeedTaskFail(task *supervisor.Task) { - if task.CanBackToSource() { - task.GetPeers().Range(func(item list.Item) bool { - peer, ok := item.(*supervisor.Peer) - if !ok { - return true - } - - if task.CanBackToSource() { - if !task.ContainsBackToSourcePeer(peer.ID) { - if peer.CloseChannelWithError(dferrors.Newf(base.Code_SchedNeedBackSource, "peer %s need back source because cdn seed task failed", peer.ID)) == nil { - task.AddBackToSourcePeer(peer.ID) - } - } - return true - } - - return false - }) - } else { - task.SetStatus(supervisor.TaskStatusFail) - task.GetPeers().Range(func(item list.Item) bool { - peer, ok := item.(*supervisor.Peer) - if !ok { - return true - } - - if err := peer.CloseChannelWithError(dferrors.New(base.Code_SchedTaskStatusError, "schedule task status failed")); err != nil { - peer.Log().Warnf("close peer conn channel failed: %v", err) - } - return true - }) - } -} - -func removePeerFromCurrentTree(peer *supervisor.Peer, s *state) { - parent, ok := peer.GetParent() - peer.ReplaceParent(nil) - - // parent frees up upload resources - if ok { - children := s.sched.ScheduleChildren(parent, sets.NewString(peer.ID)) - for _, child := range children { - if err := child.SendSchedulePacket(constructSuccessPeerPacket(child, parent, nil)); err != nil { - sendErrorHandler(err, s, child) - } - } - } -} - -func 
sendErrorHandler(err error, s *state, p *supervisor.Peer) { - if err == supervisor.ErrChannelBusy { - p.Log().Info("send schedule packet channel busy") - s.waitScheduleParentPeerQueue.AddAfter(&rsPeer{peer: p}, 10*time.Millisecond) - } else { - p.Log().Errorf("send schedule packet failed: %v", err) - } -} diff --git a/scheduler/core/monitor.go b/scheduler/core/monitor.go deleted file mode 100644 index bf7cc432a98..00000000000 --- a/scheduler/core/monitor.go +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package core - -import ( - "bytes" - "fmt" - "sort" - "strconv" - "strings" - "time" - - "github.com/olekukonko/tablewriter" - "go.uber.org/zap" - "k8s.io/client-go/util/workqueue" - - "d7y.io/dragonfly/v2/scheduler/supervisor" -) - -type monitor struct { - downloadMonitorQueue workqueue.DelayingInterface - peerManager supervisor.PeerManager - log *zap.SugaredLogger -} - -func newMonitor(openMonitor bool, peerManager supervisor.PeerManager) *monitor { - if !openMonitor { - return nil - } - config := zap.NewDevelopmentConfig() - logger, _ := config.Build() - return &monitor{ - downloadMonitorQueue: workqueue.NewDelayingQueue(), - peerManager: peerManager, - log: logger.Sugar(), - } -} - -func (m *monitor) start(done <-chan struct{}) { - ticker := time.NewTicker(time.Second * 10) - for { - select { - case <-ticker.C: - m.log.Info(m.printDebugInfo()) - case <-done: - return - } - } -} - -func (m *monitor) printDebugInfo() string { - var peers, roots []*supervisor.Peer - m.peerManager.GetPeers().Range(func(key interface{}, value interface{}) (ok bool) { - ok = true - peer := value.(*supervisor.Peer) - if peer == nil { - m.log.Error("encounter a nil peer") - return - } - - if _, ok := peer.GetParent(); !ok { - roots = append(roots, peer) - } - peers = append(peers, peer) - return - }) - - sort.Slice(peers, func(i, j int) bool { - return peers[i].GetStatus() > peers[j].GetStatus() - }) - buffer := bytes.NewBuffer([]byte{}) - table := tablewriter.NewWriter(buffer) - table.SetHeader([]string{"PeerID", "TaskID", "URL", "Parent", "Status", "start time", "Finished Piece Num", "Finished", "Free Load"}) - - for _, peer := range peers { - parentID := "" - if parent, ok := peer.GetParent(); ok { - parentID = parent.ID - } - - table.Append([]string{peer.ID, peer.Task.ID, peer.Task.URL[len(peer.Task.URL)-15 : len(peer.Task.URL)], parentID, peer.GetStatus().String(), - peer.CreateAt.Load().String(), strconv.Itoa(int(peer.TotalPieceCount.Load())), - strconv.FormatBool(peer.IsSuccess()), strconv.Itoa(int(peer.Host.GetFreeUploadLoad()))}) - } - table.Render() - - var msgs []string - msgs = append(msgs, buffer.String()) - - var printTree func(node *supervisor.Peer, path []string) - printTree = func(node *supervisor.Peer, path []string) { - if node == nil { - return - } - nPath := append(path, fmt.Sprintf("%s(%d)(%s)", node.ID, node.GetTreeNodeCount(), node.GetStatus())) - if len(path) >= 1 { - msgs = append(msgs, 
node.ID+" || "+strings.Join(nPath, "-")) - } - node.GetChildren().Range(func(key, value interface{}) bool { - child := (value).(*supervisor.Peer) - printTree(child, nPath) - return true - }) - } - - for _, root := range roots { - printTree(root, nil) - } - - msg := "============\n" + strings.Join(append(msgs, "peer count: "+strconv.Itoa(table.NumLines())), "\n") + "\n===============" - return msg -} diff --git a/scheduler/core/scheduler/basic/basic_scheduler.go b/scheduler/core/scheduler/basic/basic_scheduler.go deleted file mode 100644 index 848201a03aa..00000000000 --- a/scheduler/core/scheduler/basic/basic_scheduler.go +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package basic - -import ( - "sort" - - "k8s.io/apimachinery/pkg/util/sets" - - logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/scheduler/config" - "d7y.io/dragonfly/v2/scheduler/core/evaluator" - "d7y.io/dragonfly/v2/scheduler/core/scheduler" - "d7y.io/dragonfly/v2/scheduler/supervisor" -) - -const name = "basic" - -func init() { - scheduler.Register(newBasicSchedulerBuilder()) -} - -type basicSchedulerBuilder struct { - name string -} - -func newBasicSchedulerBuilder() scheduler.Builder { - return &basicSchedulerBuilder{ - name: name, - } -} - -func (builder *basicSchedulerBuilder) Build(cfg *config.SchedulerConfig, opts *scheduler.BuildOptions) (scheduler.Scheduler, error) { - logger.Debugf("start create basic scheduler...") - evaluator := evaluator.New(cfg.Algorithm, opts.PluginDir) - sched := &Scheduler{ - evaluator: evaluator, - peerManager: opts.PeerManager, - cfg: cfg, - } - logger.Debugf("create basic scheduler successfully") - return sched, nil -} - -func (builder *basicSchedulerBuilder) Name() string { - return builder.name -} - -type Scheduler struct { - evaluator evaluator.Evaluator - peerManager supervisor.PeerManager - cfg *config.SchedulerConfig -} - -func (s *Scheduler) ScheduleChildren(peer *supervisor.Peer, blankChildren sets.String) (children []*supervisor.Peer) { - if s.evaluator.IsBadNode(peer) { - peer.Log().Debug("terminate schedule children flow because peer is bad node") - return - } - freeUpload := peer.Host.GetFreeUploadLoad() - candidateChildren := s.selectCandidateChildren(peer, int(freeUpload)*2, blankChildren) - if len(candidateChildren) == 0 { - return nil - } - evalResult := make(map[float64][]*supervisor.Peer) - var evalScore []float64 - taskTotalPieceCount := peer.Task.TotalPieceCount.Load() - for _, child := range candidateChildren { - score := s.evaluator.Evaluate(peer, child, taskTotalPieceCount) - evalResult[score] = append(evalResult[score], child) - evalScore = append(evalScore, score) - } - sort.Float64s(evalScore) - for i := range evalScore { - if freeUpload <= 0 { - break - } - peers := evalResult[evalScore[len(evalScore)-i-1]] - for _, child := range peers { - if freeUpload <= 0 { - break - } - if parent, ok := child.GetParent(); ok && parent == peer { - continue - } - 
children = append(children, child) - freeUpload-- - } - } - for _, child := range children { - child.ReplaceParent(peer) - } - peer.Log().Debugf("schedule children result: %v", children) - return -} - -func (s *Scheduler) ScheduleParent(peer *supervisor.Peer, blankParents sets.String) (*supervisor.Peer, []*supervisor.Peer, bool) { - candidateParents := s.selectCandidateParents(peer, s.cfg.CandidateParentCount, blankParents) - if len(candidateParents) == 0 { - return nil, nil, false - } - evalResult := make(map[float64][]*supervisor.Peer) - var evalScore []float64 - taskTotalPieceCount := peer.Task.TotalPieceCount.Load() - for _, parent := range candidateParents { - score := s.evaluator.Evaluate(parent, peer, taskTotalPieceCount) - peer.Log().Debugf("evaluate score candidate %s is %f", parent.ID, score) - evalResult[score] = append(evalResult[score], parent) - evalScore = append(evalScore, score) - } - sort.Float64s(evalScore) - var parents = make([]*supervisor.Peer, 0, len(candidateParents)) - for i := range evalScore { - parents = append(parents, evalResult[evalScore[len(evalScore)-i-1]]...) - } - - if parent, ok := peer.GetParent(); ok && parents[0] != parent { - peer.ReplaceParent(parents[0]) - } - - peer.Log().Debugf("primary parent %s is selected", parents[0].ID) - return parents[0], parents[1:], true -} - -func (s *Scheduler) selectCandidateChildren(peer *supervisor.Peer, limit int, blankChildren sets.String) (candidateChildren []*supervisor.Peer) { - peer.Log().Debug("start schedule children flow") - defer peer.Log().Debugf("finish schedule children flow, select num %d candidate children, "+ - "current task tree node count %d, back source peers: %v", len(candidateChildren), peer.Task.GetPeers().Len(), peer.Task.GetBackToSourcePeers()) - candidateChildren = peer.Task.Pick(limit, func(candidateNode *supervisor.Peer) bool { - if candidateNode == nil { - peer.Log().Debugf("******candidate child peer is not selected because it is nil******") - return false - } - - if blankChildren != nil && blankChildren.Has(candidateNode.ID) { - logger.WithTaskAndPeerID(peer.Task.ID, peer.ID).Debugf("******candidate child peer is not selected because it in blank children set******") - return false - } - - if candidateNode.IsDone() { - peer.Log().Debugf("******candidate child peer %s is not selected because it has done******", candidateNode.ID) - return false - } - - if candidateNode.IsLeave() { - peer.Log().Debugf("******candidate child peer %s is not selected because it has left******", candidateNode.ID) - return false - } - - if candidateNode.IsWaiting() { - peer.Log().Debugf("******candidate child peer %s is not selected because it's status is Waiting******", candidateNode.ID) - return false - } - - if candidateNode == peer { - peer.Log().Debugf("******candidate child peer %s is not selected because it and peer are the same******", candidateNode.ID) - return false - } - - if candidateNode.IsAncestor(peer) { - peer.Log().Debugf("******candidate child peer %s is not selected because peer's ancestor is candidate peer******", candidateNode.ID) - return false - } - - if candidateNode.TotalPieceCount.Load() >= peer.TotalPieceCount.Load() { - peer.Log().Debugf("******candidate child peer %s is not selected because it finished number of download is equal to or greater than peer's"+ - "******", candidateNode.ID) - return false - } - - if candidateNode.Host != nil && candidateNode.Host.IsCDN { - peer.Log().Debugf("******candidate child peer %s is not selected because it is a cdn host******", candidateNode.ID) 
- return false - } - - if !candidateNode.IsConnected() { - peer.Log().Debugf("******candidate child peer %s is not selected because it is not connected******", candidateNode.ID) - return false - } - - if _, ok := candidateNode.GetParent(); !ok { - peer.Log().Debugf("******[selected]candidate child peer %s is selected because it has not parent[selected]******", candidateNode.ID) - return true - } - - if parent, ok := candidateNode.GetParent(); ok && s.evaluator.IsBadNode(parent) { - peer.Log().Debugf("******[selected]candidate child peer %s is selected because parent's status is not health[selected]******", - candidateNode.ID) - return true - } - - peer.Log().Debugf("******[default]candidate child peer %s is selected[default]******", candidateNode.ID) - return true - }) - return -} - -func (s *Scheduler) selectCandidateParents(peer *supervisor.Peer, limit int, blankParents sets.String) (candidateParents []*supervisor.Peer) { - peer.Log().Debug("start schedule parent flow") - defer peer.Log().Debugf("finish schedule parent flow, select num %d candidates parents, "+ - "current task tree node count %d, back source peers: %v", len(candidateParents), peer.Task.GetPeers().Len(), peer.Task.GetBackToSourcePeers()) - if !peer.Task.CanSchedule() { - peer.Log().Debugf("++++++peer can not be scheduled because task cannot be scheduled at this time,waiting task status become seeding. "+ - "it current status is %s++++++", peer.Task.GetStatus()) - return nil - } - candidateParents = peer.Task.PickReverse(limit, func(candidateNode *supervisor.Peer) bool { - if candidateNode == nil { - peer.Log().Debugf("++++++candidate parent peer is not selected because it is nil++++++") - return false - } - if blankParents != nil && blankParents.Has(candidateNode.ID) { - logger.WithTaskAndPeerID(peer.Task.ID, peer.ID).Debugf("++++++candidate parent peer is not selected because it in blank parent set++++++") - return false - } - if s.evaluator.IsBadNode(candidateNode) { - peer.Log().Debugf("++++++candidate parent peer %s is not selected because it is badNode++++++", - candidateNode.ID) - return false - } - if candidateNode.IsLeave() { - peer.Log().Debugf("++++++candidate parent peer %s is not selected because it has already left++++++", - candidateNode.ID) - return false - } - if candidateNode == peer { - peer.Log().Debugf("++++++candidate parent peer %s is not selected because it and peer are the same++++++", - candidateNode.ID) - return false - } - if candidateNode.IsDescendant(peer) { - peer.Log().Debugf("++++++candidate parent peer %s is not selected because it's ancestor is peer++++++", - candidateNode.ID) - return false - } - if candidateNode.Host.GetFreeUploadLoad() <= 0 { - peer.Log().Debugf("++++++candidate parent peer %s is not selected because it's free upload load equal to less than zero++++++", - candidateNode.ID) - return false - } - if candidateNode.IsWaiting() { - peer.Log().Debugf("++++++candidate parent peer %s is not selected because it's status is waiting++++++", - candidateNode.ID) - return false - } - if candidateNode.TotalPieceCount.Load() <= peer.TotalPieceCount.Load() { - peer.Log().Debugf("++++++candidate parent peer %s is not selected because it finished number of download is equal to or smaller than peer's"+ - "++++++", candidateNode.ID) - return false - } - peer.Log().Debugf("++++++[default]candidate parent peer %s is selected[default]", candidateNode.ID) - return true - }) - return -} diff --git a/scheduler/core/scheduler/scheduler.go b/scheduler/core/scheduler/scheduler.go deleted file mode 
100644 index 5abfbb8ee0c..00000000000 --- a/scheduler/core/scheduler/scheduler.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package scheduler - -import ( - "strings" - - "k8s.io/apimachinery/pkg/util/sets" - - "d7y.io/dragonfly/v2/scheduler/config" - "d7y.io/dragonfly/v2/scheduler/supervisor" -) - -type Scheduler interface { - // ScheduleChildren schedule children to a peer - ScheduleChildren(peer *supervisor.Peer, blankChildren sets.String) (children []*supervisor.Peer) - - // ScheduleParent schedule a parent and candidates to a peer - ScheduleParent(peer *supervisor.Peer, blankParents sets.String) (parent *supervisor.Peer, candidateParents []*supervisor.Peer, hasParent bool) -} - -type BuildOptions struct { - TaskManager supervisor.TaskManager - PeerManager supervisor.PeerManager - PluginDir string -} - -var ( - m = make(map[string]Builder) - defaultScheduler = "basic" -) - -func Register(b Builder) { - m[strings.ToLower(b.Name())] = b -} - -func Get(name string) Builder { - if b, ok := m[strings.ToLower(name)]; ok { - return b - } - return nil -} - -func SetDefaultScheduler(scheduler string) { - defaultScheduler = scheduler -} - -func GetDefaultScheduler() string { - return defaultScheduler -} - -type Builder interface { - Build(cfg *config.SchedulerConfig, opts *BuildOptions) (Scheduler, error) - - Name() string -} diff --git a/scheduler/core/service.go b/scheduler/core/service.go deleted file mode 100644 index 3d6d3e57950..00000000000 --- a/scheduler/core/service.go +++ /dev/null @@ -1,366 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package core - -import ( - "context" - "sync" - "time" - - "github.com/pkg/errors" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "go.opentelemetry.io/otel/trace" - "google.golang.org/grpc" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/util/workqueue" - - "d7y.io/dragonfly/v2/internal/dferrors" - logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/gc" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/base/common" - schedulerRPC "d7y.io/dragonfly/v2/pkg/rpc/scheduler" - pkgsync "d7y.io/dragonfly/v2/pkg/sync" - "d7y.io/dragonfly/v2/scheduler/config" - "d7y.io/dragonfly/v2/scheduler/core/scheduler" - "d7y.io/dragonfly/v2/scheduler/metrics" - "d7y.io/dragonfly/v2/scheduler/supervisor" -) - -const maxRescheduleTimes = 8 - -type Options struct { - openTel bool - disableCDN bool -} - -type Option func(options *Options) - -func WithOpenTel(openTel bool) Option { - return func(options *Options) { - options.openTel = openTel - } -} - -func WithDisableCDN(disableCDN bool) Option { - return func(options *Options) { - options.disableCDN = disableCDN - } -} - -type SchedulerService struct { - // CDN manager - CDN supervisor.CDN - // task manager - taskManager supervisor.TaskManager - // host manager - hostManager supervisor.HostManager - // Peer manager - peerManager supervisor.PeerManager - - sched scheduler.Scheduler - worker worker - monitor *monitor - done chan struct{} - wg sync.WaitGroup - kmu *pkgsync.Krwmutex - - config *config.SchedulerConfig - dynconfig config.DynconfigInterface - metricsConfig *config.MetricsConfig -} - -func NewSchedulerService(cfg *config.SchedulerConfig, pluginDir string, metricsConfig *config.MetricsConfig, dynConfig config.DynconfigInterface, gc gc.GC, options ...Option) (*SchedulerService, error) { - ops := &Options{} - for _, op := range options { - op(ops) - } - - hostManager := supervisor.NewHostManager() - - peerManager, err := supervisor.NewPeerManager(cfg.GC, gc, hostManager) - if err != nil { - return nil, err - } - - taskManager, err := supervisor.NewTaskManager(cfg.GC, gc, peerManager) - if err != nil { - return nil, err - } - - sched, err := scheduler.Get(cfg.Scheduler).Build(cfg, &scheduler.BuildOptions{ - PeerManager: peerManager, - PluginDir: pluginDir, - }) - if err != nil { - return nil, errors.Wrapf(err, "build scheduler %v", cfg.Scheduler) - } - - work := newEventLoopGroup(cfg.WorkerNum) - downloadMonitor := newMonitor(cfg.OpenMonitor, peerManager) - s := &SchedulerService{ - taskManager: taskManager, - hostManager: hostManager, - peerManager: peerManager, - worker: work, - monitor: downloadMonitor, - sched: sched, - config: cfg, - metricsConfig: metricsConfig, - dynconfig: dynConfig, - done: make(chan struct{}), - wg: sync.WaitGroup{}, - kmu: pkgsync.NewKrwmutex(), - } - if !ops.disableCDN { - var opts []grpc.DialOption - if ops.openTel { - opts = append(opts, grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor())) - } - client, err := supervisor.NewCDNDynmaicClient(dynConfig, peerManager, hostManager, opts) - if err != nil { - return nil, errors.Wrap(err, "new refreshable cdn client") - } - - cdn := supervisor.NewCDN(client, peerManager, hostManager) - if err != nil { - return nil, errors.Wrap(err, "new cdn manager") - } - s.CDN = cdn - } - return s, nil -} - -func (s *SchedulerService) Serve() { - s.wg.Add(2) - wsdq := workqueue.NewNamedDelayingQueue("wait reSchedule parent") - go 
s.runWorkerLoop(wsdq) - go s.runReScheduleParentLoop(wsdq) - go s.runMonitor() - logger.Debugf("start scheduler service successfully") -} - -func (s *SchedulerService) runWorkerLoop(wsdq workqueue.DelayingInterface) { - defer s.wg.Done() - s.worker.start(newState(s.sched, s.peerManager, s.CDN, wsdq)) -} - -func (s *SchedulerService) runReScheduleParentLoop(wsdq workqueue.DelayingInterface) { - for { - select { - case <-s.done: - wsdq.ShutDown() - return - default: - v, shutdown := wsdq.Get() - if shutdown { - logger.Infof("wait schedule delay queue is shutdown") - break - } - rsPeer := v.(*rsPeer) - peer := rsPeer.peer - wsdq.Done(v) - if rsPeer.times > maxRescheduleTimes { - if peer.CloseChannelWithError(dferrors.Newf(base.Code_SchedNeedBackSource, "reschedule parent for peer %s already reaches max reschedule times", - peer.ID)) == nil { - peer.Task.AddBackToSourcePeer(peer.ID) - } - continue - } - if peer.Task.ContainsBackToSourcePeer(peer.ID) { - logger.WithTaskAndPeerID(peer.Task.ID, peer.ID).Debugf("runReScheduleLoop: peer is back source client, no need to reschedule it") - continue - } - if peer.IsDone() || peer.IsLeave() { - peer.Log().Debugf("runReScheduleLoop: peer has left from waitScheduleParentPeerQueue because peer is done or leave, peer status is %s, "+ - "isLeave %t", peer.GetStatus(), peer.IsLeave()) - continue - } - s.worker.send(reScheduleParentEvent{rsPeer: rsPeer}) - } - } -} - -func (s *SchedulerService) runMonitor() { - defer s.wg.Done() - if s.monitor != nil { - s.monitor.start(s.done) - } -} - -func (s *SchedulerService) Stop() { - close(s.done) - if s.worker != nil { - s.worker.stop() - } - s.wg.Wait() -} - -func (s *SchedulerService) SelectParent(peer *supervisor.Peer) (parent *supervisor.Peer, err error) { - parent, _, hasParent := s.sched.ScheduleParent(peer, sets.NewString()) - if !hasParent || parent == nil { - return nil, errors.Errorf("no parent peer available for peer %s", peer.ID) - } - return parent, nil -} - -func (s *SchedulerService) GetPeer(id string) (*supervisor.Peer, bool) { - return s.peerManager.Get(id) -} - -func (s *SchedulerService) RegisterTask(req *schedulerRPC.PeerTaskRequest, task *supervisor.Task) *supervisor.Peer { - // get or create host - peerHost := req.PeerHost - host, ok := s.hostManager.Get(peerHost.Uuid) - if !ok { - var options []supervisor.HostOption - if clientConfig, ok := s.dynconfig.GetSchedulerClusterClientConfig(); ok { - options = []supervisor.HostOption{ - supervisor.WithTotalUploadLoad(clientConfig.LoadLimit), - } - } - - host = supervisor.NewClientHost(peerHost.Uuid, peerHost.Ip, peerHost.HostName, peerHost.RpcPort, peerHost.DownPort, - peerHost.SecurityDomain, peerHost.Location, peerHost.Idc, options...) 
- s.hostManager.Add(host) - } - // get or creat PeerTask - peer, ok := s.peerManager.Get(req.PeerId) - if ok { - logger.Warnf("peer %s has already registered", peer.ID) - return peer - } - peer = supervisor.NewPeer(req.PeerId, task, host) - s.peerManager.Add(peer) - return peer -} - -func (s *SchedulerService) GetOrAddTask(ctx context.Context, task *supervisor.Task) *supervisor.Task { - span := trace.SpanFromContext(ctx) - - s.kmu.RLock(task.ID) - task, ok := s.taskManager.GetOrAdd(task) - if ok { - span.SetAttributes(config.AttributeTaskStatus.String(task.GetStatus().String())) - span.SetAttributes(config.AttributeLastTriggerTime.String(task.LastTriggerAt.Load().String())) - if task.LastTriggerAt.Load().Add(s.config.AccessWindow).After(time.Now()) || task.IsHealth() { - span.SetAttributes(config.AttributeNeedSeedCDN.Bool(false)) - s.kmu.RUnlock(task.ID) - return task - } - } else { - task.Log().Infof("add new task %s", task.ID) - } - s.kmu.RUnlock(task.ID) - - s.kmu.Lock(task.ID) - defer s.kmu.Unlock(task.ID) - - // do trigger - span.SetAttributes(config.AttributeTaskStatus.String(task.GetStatus().String())) - span.SetAttributes(config.AttributeLastTriggerTime.String(task.LastTriggerAt.Load().String())) - if task.IsHealth() { - span.SetAttributes(config.AttributeNeedSeedCDN.Bool(false)) - return task - } - - task.LastTriggerAt.Store(time.Now()) - task.SetStatus(supervisor.TaskStatusRunning) - if s.CDN == nil { - // client back source - span.SetAttributes(config.AttributeClientBackSource.Bool(true)) - task.BackToSourceWeight.Store(s.config.BackSourceCount) - return task - } - span.SetAttributes(config.AttributeNeedSeedCDN.Bool(true)) - - go func() { - if cdnPeer, err := s.CDN.StartSeedTask(ctx, task); err != nil { - // fall back to client back source - task.Log().Errorf("seed task failed: %v", err) - span.AddEvent(config.EventCDNFailBackClientSource, trace.WithAttributes(config.AttributeTriggerCDNError.String(err.Error()))) - task.BackToSourceWeight.Store(s.config.BackSourceCount) - if ok = s.worker.send(taskSeedFailEvent{task}); !ok { - logger.Error("send taskSeed fail event failed, eventLoop is shutdown") - } - } else { - if ok = s.worker.send(peerDownloadSuccessEvent{cdnPeer, nil}); !ok { - logger.Error("send taskSeed success event failed, eventLoop is shutdown") - } - logger.Infof("successfully obtain seeds from cdn, task: %#v", task) - } - }() - - return task -} - -func (s *SchedulerService) HandlePieceResult(ctx context.Context, peer *supervisor.Peer, pieceResult *schedulerRPC.PieceResult) error { - peer.Touch() - if pieceResult.Success && s.metricsConfig != nil && s.metricsConfig.EnablePeerHost { - // TODO parse PieceStyle - metrics.PeerHostTraffic.WithLabelValues("download", peer.Host.UUID, peer.Host.IP).Add(float64(pieceResult.PieceInfo.RangeSize)) - if p, ok := s.peerManager.Get(pieceResult.DstPid); ok { - metrics.PeerHostTraffic.WithLabelValues("upload", p.Host.UUID, p.Host.IP).Add(float64(pieceResult.PieceInfo.RangeSize)) - } else { - logger.Warnf("dst peer %s not found for pieceResult %#v, pieceInfo %#v", pieceResult.DstPid, pieceResult, pieceResult.PieceInfo) - } - } - if pieceResult.PieceInfo != nil && pieceResult.PieceInfo.PieceNum == common.EndOfPiece { - return nil - } else if pieceResult.PieceInfo != nil && pieceResult.PieceInfo.PieceNum == common.ZeroOfPiece { - s.worker.send(startReportPieceResultEvent{ctx, peer}) - return nil - } else if pieceResult.Success { - s.worker.send(peerDownloadPieceSuccessEvent{ - ctx: ctx, - peer: peer, - pr: pieceResult, - }) - return nil 
- } else if pieceResult.Code != base.Code_Success { - s.worker.send(peerDownloadPieceFailEvent{ - ctx: ctx, - peer: peer, - pr: pieceResult, - }) - return nil - } - return nil -} - -func (s *SchedulerService) HandlePeerResult(ctx context.Context, peer *supervisor.Peer, peerResult *schedulerRPC.PeerResult) error { - peer.Touch() - if peerResult.Success { - if !s.worker.send(peerDownloadSuccessEvent{peer: peer, peerResult: peerResult}) { - logger.Errorf("send peer download success event failed") - } - } else if !s.worker.send(peerDownloadFailEvent{peer: peer, peerResult: peerResult}) { - logger.Errorf("send peer download fail event failed") - } - return nil -} - -func (s *SchedulerService) HandleLeaveTask(ctx context.Context, peer *supervisor.Peer) error { - peer.Touch() - if !s.worker.send(peerLeaveEvent{ - ctx: ctx, - peer: peer, - }) { - logger.Errorf("send peer leave event failed") - } - return nil -} diff --git a/scheduler/core/worker.go b/scheduler/core/worker.go deleted file mode 100644 index 3af042a079e..00000000000 --- a/scheduler/core/worker.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package core - -import ( - "hash/crc32" - - logger "d7y.io/dragonfly/v2/internal/dflog" -) - -type worker interface { - start(*state) - stop() - send(event) bool -} - -type workerGroup struct { - workerNum int - workerList []*baseWorker -} - -var _ worker = (*workerGroup)(nil) - -func newEventLoopGroup(workerNum int) worker { - return &workerGroup{ - workerNum: workerNum, - workerList: make([]*baseWorker, 0, workerNum), - } -} - -func (wg *workerGroup) start(s *state) { - for i := 0; i < wg.workerNum; i++ { - w := newWorker() - go w.start(s) - wg.workerList = append(wg.workerList, w) - } - logger.Infof("start scheduler worker number:%d", wg.workerNum) -} - -func (wg *workerGroup) send(e event) bool { - choiceWorkerID := crc32.ChecksumIEEE([]byte(e.hashKey())) % uint32(wg.workerNum) - return wg.workerList[choiceWorkerID].send(e) -} - -func (wg *workerGroup) stop() { - for _, worker := range wg.workerList { - worker.stop() - } -} - -type baseWorker struct { - events chan event - done chan struct{} -} - -var _ worker = (*baseWorker)(nil) - -func newWorker() *baseWorker { - return &baseWorker{ - events: make(chan event), - done: make(chan struct{}), - } -} - -func (w *baseWorker) start(s *state) { - for { - select { - case e := <-w.events: - e.apply(s) - case <-w.done: - return - } - } -} - -func (w *baseWorker) stop() { - close(w.done) -} - -func (w *baseWorker) send(e event) bool { - select { - case w.events <- e: - return true - case <-w.done: - return false - } -} diff --git a/scheduler/job/job.go b/scheduler/job/job.go index 0116ec1b05b..ddaed98edcf 100644 --- a/scheduler/job/job.go +++ b/scheduler/job/job.go @@ -20,8 +20,6 @@ import ( "context" "github.com/go-playground/validator/v10" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" logger "d7y.io/dragonfly/v2/internal/dflog" 
internaljob "d7y.io/dragonfly/v2/internal/job" @@ -29,13 +27,11 @@ import ( "d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" "d7y.io/dragonfly/v2/scheduler/config" - "d7y.io/dragonfly/v2/scheduler/core" + "d7y.io/dragonfly/v2/scheduler/service" ) -var tracer = otel.Tracer("worker") - type Job interface { - Serve() error + Serve() Stop() } @@ -43,18 +39,17 @@ type job struct { globalJob *internaljob.Job schedulerJob *internaljob.Job localJob *internaljob.Job - ctx context.Context - service *core.SchedulerService - cfg *config.JobConfig + service service.Service + config *config.Config } -func New(ctx context.Context, cfg *config.JobConfig, clusterID uint, hostname string, service *core.SchedulerService) (Job, error) { +func New(cfg *config.Config, service service.Service) (Job, error) { redisConfig := &internaljob.Config{ - Host: cfg.Redis.Host, - Port: cfg.Redis.Port, - Password: cfg.Redis.Password, - BrokerDB: cfg.Redis.BrokerDB, - BackendDB: cfg.Redis.BackendDB, + Host: cfg.Job.Redis.Host, + Port: cfg.Job.Redis.Port, + Password: cfg.Job.Redis.Password, + BrokerDB: cfg.Job.Redis.BrokerDB, + BackendDB: cfg.Job.Redis.BackendDB, } globalJob, err := internaljob.New(redisConfig, internaljob.GlobalQueue) @@ -71,7 +66,7 @@ func New(ctx context.Context, cfg *config.JobConfig, clusterID uint, hostname st } logger.Infof("create scheduler job queue: %v", schedulerJob) - localQueue, err := internaljob.GetSchedulerQueue(clusterID, hostname) + localQueue, err := internaljob.GetSchedulerQueue(cfg.Manager.SchedulerClusterID, cfg.Server.Host) if err != nil { logger.Errorf("get local job queue name error: %v", err) return nil, err @@ -88,9 +83,8 @@ func New(ctx context.Context, cfg *config.JobConfig, clusterID uint, hostname st globalJob: globalJob, schedulerJob: schedulerJob, localJob: localJob, - ctx: ctx, service: service, - cfg: cfg, + config: cfg, } namedJobFuncs := map[string]interface{}{ @@ -105,23 +99,27 @@ func New(ctx context.Context, cfg *config.JobConfig, clusterID uint, hostname st return t, nil } -func (t *job) Serve() error { +func (t *job) Serve() { go func() { - logger.Infof("ready to launch %d worker(s) on global queue", t.cfg.GlobalWorkerNum) - if err := t.globalJob.LaunchWorker("global_worker", int(t.cfg.GlobalWorkerNum)); err != nil { + logger.Infof("ready to launch %d worker(s) on global queue", t.config.Job.GlobalWorkerNum) + if err := t.globalJob.LaunchWorker("global_worker", int(t.config.Job.GlobalWorkerNum)); err != nil { logger.Fatalf("global queue worker error: %v", err) } }() go func() { - logger.Infof("ready to launch %d worker(s) on scheduler queue", t.cfg.SchedulerWorkerNum) - if err := t.schedulerJob.LaunchWorker("scheduler_worker", int(t.cfg.SchedulerWorkerNum)); err != nil { + logger.Infof("ready to launch %d worker(s) on scheduler queue", t.config.Job.SchedulerWorkerNum) + if err := t.schedulerJob.LaunchWorker("scheduler_worker", int(t.config.Job.SchedulerWorkerNum)); err != nil { logger.Fatalf("scheduler queue worker error: %v", err) } }() - logger.Infof("ready to launch %d worker(s) on local queue", t.cfg.LocalWorkerNum) - return t.localJob.LaunchWorker("local_worker", int(t.cfg.LocalWorkerNum)) + go func() { + logger.Infof("ready to launch %d worker(s) on local queue", t.config.Job.LocalWorkerNum) + if err := t.localJob.LaunchWorker("local_worker", int(t.config.Job.LocalWorkerNum)); err != nil { + logger.Fatalf("scheduler queue worker error: %v", err) + } + }() } func (t *job) Stop() { @@ -131,11 +129,6 @@ func (t *job) Stop() { } func (t 
*job) preheat(ctx context.Context, req string) error { - // machinery can't passing context to worker, refer https://github.com/RichardKnop/machinery/issues/175 - var span trace.Span - ctx, span = tracer.Start(ctx, config.SpanPreheat, trace.WithSpanKind(trace.SpanKindConsumer)) - defer span.End() - request := &internaljob.PreheatRequest{} if err := internaljob.UnmarshalRequest(req, request); err != nil { logger.Errorf("unmarshal request err: %v, request body: %s", err, req) @@ -168,7 +161,7 @@ func (t *job) preheat(ctx context.Context, req string) error { // Trigger CDN download seeds plogger := logger.WithTaskIDAndURL(taskID, request.URL) plogger.Info("ready to preheat") - stream, err := t.service.CDN.GetClient().ObtainSeeds(ctx, &cdnsystem.SeedRequest{ + stream, err := t.service.CDN().Client().ObtainSeeds(ctx, &cdnsystem.SeedRequest{ TaskId: taskID, Url: request.URL, UrlMeta: meta, diff --git a/scheduler/metrics/metrics.go b/scheduler/metrics/metrics.go index 6b02fec126e..beef3150315 100644 --- a/scheduler/metrics/metrics.go +++ b/scheduler/metrics/metrics.go @@ -96,8 +96,8 @@ var ( }) ) -func New(cfg *config.MetricsConfig, grpcServer *grpc.Server) *http.Server { - grpc_prometheus.Register(grpcServer) +func New(cfg *config.MetricsConfig, svr *grpc.Server) *http.Server { + grpc_prometheus.Register(svr) mux := http.NewServeMux() mux.Handle("/metrics", promhttp.Handler()) diff --git a/scheduler/resource/cdn.go b/scheduler/resource/cdn.go new file mode 100644 index 00000000000..bce9d16aec5 --- /dev/null +++ b/scheduler/resource/cdn.go @@ -0,0 +1,295 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package resource + +import ( + "context" + "fmt" + "reflect" + + "github.com/pkg/errors" + "google.golang.org/grpc" + + logger "d7y.io/dragonfly/v2/internal/dflog" + "d7y.io/dragonfly/v2/internal/dfnet" + "d7y.io/dragonfly/v2/pkg/idgen" + "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" + cdnclient "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" + rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + "d7y.io/dragonfly/v2/scheduler/config" +) + +type CDN interface { + // TriggerTask start to trigger cdn task + TriggerTask(context.Context, *Task) (*Peer, *rpcscheduler.PeerResult, error) + + // Client is cdn grpc client + Client() CDNClient +} + +type cdn struct { + // client is cdn dynamic client + client CDNClient + // peerManager is peer manager + peerManager PeerManager + // hostManager is host manager + hostManager HostManager +} + +// New cdn interface +func newCDN(peerManager PeerManager, hostManager HostManager, dynconfig config.DynconfigInterface, opts ...grpc.DialOption) (CDN, error) { + client, err := newCDNClient(dynconfig, hostManager, opts...) 
+	if err != nil {
+		return nil, err
+	}
+
+	return &cdn{
+		client:      client,
+		peerManager: peerManager,
+		hostManager: hostManager,
+	}, nil
+}
+
+// TriggerTask starts to trigger a cdn task
+func (c *cdn) TriggerTask(ctx context.Context, task *Task) (*Peer, *rpcscheduler.PeerResult, error) {
+	stream, err := c.client.ObtainSeeds(ctx, &cdnsystem.SeedRequest{
+		TaskId:  task.ID,
+		Url:     task.URL,
+		UrlMeta: task.URLMeta,
+	})
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var (
+		initialized bool
+		peer        *Peer
+	)
+
+	// Receive pieces from cdn
+	for {
+		piece, err := stream.Recv()
+		if err != nil {
+			return nil, nil, err
+		}
+
+		task.Log.Infof("receive piece: %#v %#v", piece, piece.PieceInfo)
+
+		// Init cdn peer
+		if !initialized {
+			initialized = true
+
+			peer, err = c.initPeer(task, piece)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			if err := peer.FSM.Event(PeerEventDownload); err != nil {
+				return nil, nil, err
+			}
+		}
+
+		// Get end piece
+		if piece.Done {
+			peer.Log.Infof("receive end of piece: %#v %#v", piece, piece.PieceInfo)
+
+			// Handle tiny scope size task
+			if piece.ContentLength <= TinyFileSize {
+				peer.Log.Info("peer type is tiny file")
+				data, err := peer.DownloadTinyFile(ctx)
+				if err != nil {
+					return nil, nil, err
+				}
+
+				// A tiny file downloaded directly from CDN whose data length differs from the content length is an exception
+				if len(data) != int(piece.ContentLength) {
+					return nil, nil, errors.Errorf(
+						"piece actual data length is different from content length, content length is %d, data length is %d",
+						piece.ContentLength, len(data),
+					)
+				}
+
+				// Tiny file downloaded successfully
+				task.DirectPiece = data
+			}
+
+			return peer, &rpcscheduler.PeerResult{
+				TotalPieceCount: piece.TotalPieceCount,
+				ContentLength:   piece.ContentLength,
+			}, nil
+		}
+
+		// Update piece info
+		peer.Pieces.Set(uint(piece.PieceInfo.PieceNum))
+		// TODO(244372610) CDN should set piece cost
+		peer.AppendPieceCost(0)
+		task.StorePiece(piece.PieceInfo)
+	}
+}
+
+// Initialize cdn peer
+func (c *cdn) initPeer(task *Task, ps *cdnsystem.PieceSeed) (*Peer, error) {
+	// Load peer from manager
+	peer, ok := c.peerManager.Load(ps.PeerId)
+	if ok {
+		return peer, nil
+	}
+	task.Log.Infof("can not find cdn peer: %s", ps.PeerId)
+
+	// Load host from manager
+	host, ok := c.hostManager.Load(ps.HostUuid)
+	if !ok {
+		task.Log.Errorf("can not find cdn host uuid: %s", ps.HostUuid)
+		return nil, errors.Errorf("can not find host uuid: %s", ps.HostUuid)
+	}
+
+	// New cdn peer
+	peer = NewPeer(ps.PeerId, task, host)
+	peer.Log.Info("new cdn peer successfully")
+
+	// Store cdn peer
+	c.peerManager.Store(peer)
+	peer.Log.Info("cdn peer has been stored")
+
+	if err := peer.FSM.Event(PeerEventRegisterNormal); err != nil {
+		return nil, err
+	}
+
+	return peer, nil
+}
+
+// Client is cdn grpc client
+func (c *cdn) Client() CDNClient {
+	return c.client
+}
+
+type CDNClient interface {
+	// cdnclient is cdn grpc client interface
+	cdnclient.CdnClient
+
+	// Observer is dynconfig observer interface
+	config.Observer
+}
+
+type cdnClient struct {
+	// hostManager is host manager
+	hostManager HostManager
+
+	// cdnClient is cdn grpc client instance
+	cdnclient.CdnClient
+
+	// data is dynconfig data
+	data *config.DynconfigData
+}
+
+// New cdn client interface
+func newCDNClient(dynconfig config.DynconfigInterface, hostManager HostManager, opts ...grpc.DialOption) (CDNClient, error) {
+	config, err := dynconfig.Get()
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize cdn grpc client
+	client, err := cdnclient.GetClientByAddr(cdnsToNetAddrs(config.CDNs), opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize cdn hosts
+	for _, host := range cdnsToHosts(config.CDNs) {
+		hostManager.Store(host)
+	}
+
+	dc := &cdnClient{
+		hostManager: hostManager,
+		CdnClient:   client,
+		data:        config,
+	}
+
+	dynconfig.Register(dc)
+	return dc, nil
+}
+
+// Dynamic config notify function
+func (c *cdnClient) OnNotify(data *config.DynconfigData) {
+	ips := getCDNIPs(data.CDNs)
+	if reflect.DeepEqual(c.data, data) {
+		logger.Infof("cdn addresses deep equal: %v", ips)
+		return
+	}
+
+	// Update dynamic data
+	c.data = data
+
+	// Update host manager
+	for _, host := range cdnsToHosts(data.CDNs) {
+		c.hostManager.Store(host)
+	}
+
+	// Update grpc cdn addresses
+	c.UpdateState(cdnsToNetAddrs(data.CDNs))
+	logger.Infof("cdn addresses have been updated: %v", ips)
+}
+
+// cdnsToHosts converts []*config.CDN to map[string]*Host.
+func cdnsToHosts(cdns []*config.CDN) map[string]*Host {
+	hosts := map[string]*Host{}
+	for _, cdn := range cdns {
+		var netTopology string
+		options := []HostOption{WithIsCDN(true)}
+		if config, ok := cdn.GetCDNClusterConfig(); ok {
+			options = append(options, WithUploadLoadLimit(int32(config.LoadLimit)))
+			netTopology = config.NetTopology
+		}
+
+		id := idgen.CDNHostID(cdn.Hostname, cdn.Port)
+		hosts[id] = NewHost(&rpcscheduler.PeerHost{
+			Uuid:        id,
+			Ip:          cdn.IP,
+			RpcPort:     cdn.Port,
+			DownPort:    cdn.DownloadPort,
+			HostName:    cdn.Hostname,
+			Idc:         cdn.IDC,
+			Location:    cdn.Location,
+			NetTopology: netTopology,
+		}, options...)
+	}
+	return hosts
+}
+
+// cdnsToNetAddrs converts []*config.CDN to []dfnet.NetAddr.
+func cdnsToNetAddrs(cdns []*config.CDN) []dfnet.NetAddr {
+	netAddrs := make([]dfnet.NetAddr, 0, len(cdns))
+	for _, cdn := range cdns {
+		netAddrs = append(netAddrs, dfnet.NetAddr{
+			Type: dfnet.TCP,
+			Addr: fmt.Sprintf("%s:%d", cdn.IP, cdn.Port),
+		})
+	}
+
+	return netAddrs
+}
+
+// getCDNIPs gets ips from []*config.CDN.
+func getCDNIPs(cdns []*config.CDN) []string {
+	ips := []string{}
+	for _, cdn := range cdns {
+		ips = append(ips, cdn.IP)
+	}
+
+	return ips
+}
diff --git a/scheduler/resource/cdn_mock.go b/scheduler/resource/cdn_mock.go
new file mode 100644
index 00000000000..7ffcad67f02
--- /dev/null
+++ b/scheduler/resource/cdn_mock.go
@@ -0,0 +1,173 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: scheduler/resource/cdn.go
+
+// Package resource is a generated GoMock package.
+package resource
+
+import (
+	context "context"
+	reflect "reflect"
+
+	dfnet "d7y.io/dragonfly/v2/internal/dfnet"
+	base "d7y.io/dragonfly/v2/pkg/rpc/base"
+	cdnsystem "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
+	client "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client"
+	scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler"
+	config "d7y.io/dragonfly/v2/scheduler/config"
+	gomock "github.com/golang/mock/gomock"
+	grpc "google.golang.org/grpc"
+)
+
+// MockCDN is a mock of CDN interface.
+type MockCDN struct {
+	ctrl     *gomock.Controller
+	recorder *MockCDNMockRecorder
+}
+
+// MockCDNMockRecorder is the mock recorder for MockCDN.
+type MockCDNMockRecorder struct {
+	mock *MockCDN
+}
+
+// NewMockCDN creates a new mock instance.
+func NewMockCDN(ctrl *gomock.Controller) *MockCDN {
+	mock := &MockCDN{ctrl: ctrl}
+	mock.recorder = &MockCDNMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockCDN) EXPECT() *MockCDNMockRecorder {
+	return m.recorder
+}
+
+// Client mocks base method.
+func (m *MockCDN) Client() CDNClient { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Client") + ret0, _ := ret[0].(CDNClient) + return ret0 +} + +// Client indicates an expected call of Client. +func (mr *MockCDNMockRecorder) Client() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Client", reflect.TypeOf((*MockCDN)(nil).Client)) +} + +// TriggerTask mocks base method. +func (m *MockCDN) TriggerTask(arg0 context.Context, arg1 *Task) (*Peer, *scheduler.PeerResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TriggerTask", arg0, arg1) + ret0, _ := ret[0].(*Peer) + ret1, _ := ret[1].(*scheduler.PeerResult) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// TriggerTask indicates an expected call of TriggerTask. +func (mr *MockCDNMockRecorder) TriggerTask(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TriggerTask", reflect.TypeOf((*MockCDN)(nil).TriggerTask), arg0, arg1) +} + +// MockCDNClient is a mock of CDNClient interface. +type MockCDNClient struct { + ctrl *gomock.Controller + recorder *MockCDNClientMockRecorder +} + +// MockCDNClientMockRecorder is the mock recorder for MockCDNClient. +type MockCDNClientMockRecorder struct { + mock *MockCDNClient +} + +// NewMockCDNClient creates a new mock instance. +func NewMockCDNClient(ctrl *gomock.Controller) *MockCDNClient { + mock := &MockCDNClient{ctrl: ctrl} + mock.recorder = &MockCDNClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCDNClient) EXPECT() *MockCDNClientMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockCDNClient) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockCDNClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockCDNClient)(nil).Close)) +} + +// GetPieceTasks mocks base method. +func (m *MockCDNClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, addr, req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) + ret0, _ := ret[0].(*base.PiecePacket) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPieceTasks indicates an expected call of GetPieceTasks. +func (mr *MockCDNClientMockRecorder) GetPieceTasks(ctx, addr, req interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, addr, req}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockCDNClient)(nil).GetPieceTasks), varargs...) +} + +// ObtainSeeds mocks base method. +func (m *MockCDNClient) ObtainSeeds(ctx context.Context, sr *cdnsystem.SeedRequest, opts ...grpc.CallOption) (*client.PieceSeedStream, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, sr} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ObtainSeeds", varargs...) + ret0, _ := ret[0].(*client.PieceSeedStream) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ObtainSeeds indicates an expected call of ObtainSeeds. 
+func (mr *MockCDNClientMockRecorder) ObtainSeeds(ctx, sr interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, sr}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockCDNClient)(nil).ObtainSeeds), varargs...) +} + +// OnNotify mocks base method. +func (m *MockCDNClient) OnNotify(arg0 *config.DynconfigData) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnNotify", arg0) +} + +// OnNotify indicates an expected call of OnNotify. +func (mr *MockCDNClientMockRecorder) OnNotify(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnNotify", reflect.TypeOf((*MockCDNClient)(nil).OnNotify), arg0) +} + +// UpdateState mocks base method. +func (m *MockCDNClient) UpdateState(addrs []dfnet.NetAddr) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "UpdateState", addrs) +} + +// UpdateState indicates an expected call of UpdateState. +func (mr *MockCDNClientMockRecorder) UpdateState(addrs interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateState", reflect.TypeOf((*MockCDNClient)(nil).UpdateState), addrs) +} diff --git a/scheduler/resource/cdn_test.go b/scheduler/resource/cdn_test.go new file mode 100644 index 00000000000..5965e5213f5 --- /dev/null +++ b/scheduler/resource/cdn_test.go @@ -0,0 +1,404 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package resource + +import ( + "encoding/json" + "errors" + "fmt" + "testing" + + gomock "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/internal/dfnet" + "d7y.io/dragonfly/v2/manager/types" + "d7y.io/dragonfly/v2/scheduler/config" + configmocks "d7y.io/dragonfly/v2/scheduler/config/mocks" +) + +func TestCDN_newCDN(t *testing.T) { + tests := []struct { + name string + mock func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) + expect func(t *testing.T, err error) + }{ + { + name: "new cdn", + mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) { + gomock.InOrder( + dynconfig.Get().Return(&config.DynconfigData{ + CDNs: []*config.CDN{{ID: 1}}, + }, nil).Times(1), + hostManager.Store(gomock.Any()).Return().Times(1), + dynconfig.Register(gomock.Any()).Return().Times(1), + ) + }, + expect: func(t *testing.T, err error) { + assert := assert.New(t) + assert.NoError(err) + }, + }, + { + name: "new cdn failed because of dynconfig get error data", + mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) { + dynconfig.Get().Return(nil, errors.New("foo")).Times(1) + }, + expect: func(t *testing.T, err error) { + assert := assert.New(t) + assert.EqualError(err, "foo") + }, + }, + { + name: "new cdn failed because of cdn list is empty", + mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) { + gomock.InOrder( + dynconfig.Get().Return(&config.DynconfigData{ + CDNs: []*config.CDN{}, + }, nil).Times(1), + ) + }, + expect: func(t *testing.T, err error) { + assert := assert.New(t) + assert.EqualError(err, "address list of cdn is empty") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + dynconfig := configmocks.NewMockDynconfigInterface(ctl) + hostManager := NewMockHostManager(ctl) + peerManager := NewMockPeerManager(ctl) + tc.mock(dynconfig.EXPECT(), hostManager.EXPECT()) + + _, err := newCDN(peerManager, hostManager, dynconfig) + tc.expect(t, err) + }) + } +} + +func TestCDNClient_newCDNClient(t *testing.T) { + tests := []struct { + name string + mock func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) + expect func(t *testing.T, err error) + }{ + { + name: "new cdn client", + mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) { + gomock.InOrder( + dynconfig.Get().Return(&config.DynconfigData{ + CDNs: []*config.CDN{{ID: 1}}, + }, nil).Times(1), + hostManager.Store(gomock.Any()).Return().Times(1), + dynconfig.Register(gomock.Any()).Return().Times(1), + ) + }, + expect: func(t *testing.T, err error) { + assert := assert.New(t) + assert.NoError(err) + }, + }, + { + name: "new cdn client failed because of dynconfig get error data", + mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) { + dynconfig.Get().Return(nil, errors.New("foo")).Times(1) + }, + expect: func(t *testing.T, err error) { + assert := assert.New(t) + assert.EqualError(err, "foo") + }, + }, + { + name: "new cdn client failed because of cdn list is empty", + mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) { + gomock.InOrder( + 
dynconfig.Get().Return(&config.DynconfigData{
+						CDNs: []*config.CDN{},
+					}, nil).Times(1),
+				)
+			},
+			expect: func(t *testing.T, err error) {
+				assert := assert.New(t)
+				assert.EqualError(err, "address list of cdn is empty")
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			ctl := gomock.NewController(t)
+			defer ctl.Finish()
+			dynconfig := configmocks.NewMockDynconfigInterface(ctl)
+			hostManager := NewMockHostManager(ctl)
+			tc.mock(dynconfig.EXPECT(), hostManager.EXPECT())
+
+			_, err := newCDNClient(dynconfig, hostManager)
+			tc.expect(t, err)
+		})
+	}
+}
+
+func TestCDNClient_OnNotify(t *testing.T) {
+	tests := []struct {
+		name string
+		data *config.DynconfigData
+		mock func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder)
+	}{
+		{
+			name: "notify client",
+			data: &config.DynconfigData{
+				CDNs: []*config.CDN{{
+					ID: 1,
+					IP: "0.0.0.0",
+				}},
+			},
+			mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
+				gomock.InOrder(
+					dynconfig.Get().Return(&config.DynconfigData{
+						CDNs: []*config.CDN{{
+							ID: 1,
+							IP: "127.0.0.1",
+						}},
+					}, nil).Times(1),
+					hostManager.Store(gomock.Any()).Return().Times(1),
+					dynconfig.Register(gomock.Any()).Return().Times(1),
+					hostManager.Store(gomock.Any()).Return().Times(1),
+				)
+			},
+		},
+		{
+			name: "cdn list is deep equal",
+			data: &config.DynconfigData{
+				CDNs: []*config.CDN{{
+					ID: 1,
+					IP: "127.0.0.1",
+				}},
+			},
+			mock: func(dynconfig *configmocks.MockDynconfigInterfaceMockRecorder, hostManager *MockHostManagerMockRecorder) {
+				gomock.InOrder(
+					dynconfig.Get().Return(&config.DynconfigData{
+						CDNs: []*config.CDN{{
+							ID: 1,
+							IP: "127.0.0.1",
+						}},
+					}, nil).Times(1),
+					hostManager.Store(gomock.Any()).Return().Times(1),
+					dynconfig.Register(gomock.Any()).Return().Times(1),
+				)
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			ctl := gomock.NewController(t)
+			defer ctl.Finish()
+			dynconfig := configmocks.NewMockDynconfigInterface(ctl)
+			hostManager := NewMockHostManager(ctl)
+			tc.mock(dynconfig.EXPECT(), hostManager.EXPECT())
+
+			client, err := newCDNClient(dynconfig, hostManager)
+			if err != nil {
+				t.Fatal(err)
+			}
+			client.OnNotify(tc.data)
+		})
+	}
+}
+
+func TestCDNClient_cdnsToHosts(t *testing.T) {
+	mockCDNClusterConfig, err := json.Marshal(&types.CDNClusterConfig{
+		LoadLimit:   10,
+		NetTopology: "foo",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tests := []struct {
+		name   string
+		cdns   []*config.CDN
+		expect func(t *testing.T, hosts map[string]*Host)
+	}{
+		{
+			name: "cdns convert to hosts",
+			cdns: []*config.CDN{
+				{
+					ID:           1,
+					Hostname:     mockRawCDNHost.HostName,
+					IP:           mockRawCDNHost.Ip,
+					Port:         mockRawCDNHost.RpcPort,
+					DownloadPort: mockRawCDNHost.DownPort,
+					Location:     mockRawCDNHost.Location,
+					IDC:          mockRawCDNHost.Idc,
+					CDNCluster: &config.CDNCluster{
+						Config: mockCDNClusterConfig,
+					},
+				},
+			},
+			expect: func(t *testing.T, hosts map[string]*Host) {
+				assert := assert.New(t)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].ID, mockRawCDNHost.Uuid)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].IP, mockRawCDNHost.Ip)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].Hostname, mockRawCDNHost.HostName)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].Port, mockRawCDNHost.RpcPort)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].DownloadPort, mockRawCDNHost.DownPort)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].IDC, mockRawCDNHost.Idc)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].NetTopology, "foo")
+				assert.Equal(hosts[mockRawCDNHost.Uuid].Location, mockRawCDNHost.Location)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].UploadLoadLimit.Load(), int32(10))
+				assert.Empty(hosts[mockRawCDNHost.Uuid].Peers)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].IsCDN, true)
+				assert.NotEqual(hosts[mockRawCDNHost.Uuid].CreateAt.Load(), 0)
+				assert.NotEqual(hosts[mockRawCDNHost.Uuid].UpdateAt.Load(), 0)
+				assert.NotNil(hosts[mockRawCDNHost.Uuid].Log)
+			},
+		},
+		{
+			name: "cdns convert to hosts without cluster config",
+			cdns: []*config.CDN{
+				{
+					ID:           1,
+					Hostname:     mockRawCDNHost.HostName,
+					IP:           mockRawCDNHost.Ip,
+					Port:         mockRawCDNHost.RpcPort,
+					DownloadPort: mockRawCDNHost.DownPort,
+					Location:     mockRawCDNHost.Location,
+					IDC:          mockRawCDNHost.Idc,
+				},
+			},
+			expect: func(t *testing.T, hosts map[string]*Host) {
+				assert := assert.New(t)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].ID, mockRawCDNHost.Uuid)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].IP, mockRawCDNHost.Ip)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].Hostname, mockRawCDNHost.HostName)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].Port, mockRawCDNHost.RpcPort)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].DownloadPort, mockRawCDNHost.DownPort)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].IDC, mockRawCDNHost.Idc)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].NetTopology, "")
+				assert.Equal(hosts[mockRawCDNHost.Uuid].Location, mockRawCDNHost.Location)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].UploadLoadLimit.Load(), int32(defaultUploadLoadLimit))
+				assert.Empty(hosts[mockRawCDNHost.Uuid].Peers)
+				assert.Equal(hosts[mockRawCDNHost.Uuid].IsCDN, true)
+				assert.NotEqual(hosts[mockRawCDNHost.Uuid].CreateAt.Load(), 0)
+				assert.NotEqual(hosts[mockRawCDNHost.Uuid].UpdateAt.Load(), 0)
+				assert.NotNil(hosts[mockRawCDNHost.Uuid].Log)
+			},
+		},
+		{
+			name: "cdns is empty",
+			cdns: []*config.CDN{},
+			expect: func(t *testing.T, hosts map[string]*Host) {
+				assert := assert.New(t)
+				assert.Equal(len(hosts), 0)
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			tc.expect(t, cdnsToHosts(tc.cdns))
+		})
+	}
+}
+
+func TestCDNClient_cdnsToNetAddrs(t *testing.T) {
+	tests := []struct {
+		name   string
+		cdns   []*config.CDN
+		expect func(t *testing.T, netAddrs []dfnet.NetAddr)
+	}{
+		{
+			name: "cdns convert to netAddr",
+			cdns: []*config.CDN{
+				{
+					ID:           1,
+					Hostname:     mockRawCDNHost.HostName,
+					IP:           mockRawCDNHost.Ip,
+					Port:         mockRawCDNHost.RpcPort,
+					DownloadPort: mockRawCDNHost.DownPort,
+					Location:     mockRawCDNHost.Location,
+					IDC:          mockRawCDNHost.Idc,
+				},
+			},
+			expect: func(t *testing.T, netAddrs []dfnet.NetAddr) {
+				assert := assert.New(t)
+				assert.Equal(netAddrs[0].Type, dfnet.TCP)
+				assert.Equal(netAddrs[0].Addr, fmt.Sprintf("%s:%d", mockRawCDNHost.Ip, mockRawCDNHost.RpcPort))
+			},
+		},
+		{
+			name: "cdns is empty",
+			cdns: []*config.CDN{},
+			expect: func(t *testing.T, netAddrs []dfnet.NetAddr) {
+				assert := assert.New(t)
+				assert.Equal(len(netAddrs), 0)
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			tc.expect(t, cdnsToNetAddrs(tc.cdns))
+		})
+	}
+}
+
+func TestCDNClient_getCDNIPs(t *testing.T) {
+	tests := []struct {
+		name   string
+		cdns   []*config.CDN
+		expect func(t *testing.T, ips []string)
+	}{
+		{
+			name: "cdns convert to ips",
+			cdns: []*config.CDN{
+				{
+					ID:           1,
+					Hostname:     mockRawCDNHost.HostName,
+					IP:           mockRawCDNHost.Ip,
+					Port:         mockRawCDNHost.RpcPort,
+					DownloadPort: mockRawCDNHost.DownPort,
+					Location:     mockRawCDNHost.Location,
+					IDC:          mockRawCDNHost.Idc,
+				},
+			},
+			expect: func(t *testing.T, ips []string) {
+				
assert := assert.New(t) + assert.Equal(ips[0], mockRawCDNHost.Ip) + }, + }, + { + name: "cdns is empty", + cdns: []*config.CDN{}, + expect: func(t *testing.T, ips []string) { + assert := assert.New(t) + assert.Equal(len(ips), 0) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tc.expect(t, getCDNIPs(tc.cdns)) + }) + } +} diff --git a/scheduler/resource/host.go b/scheduler/resource/host.go new file mode 100644 index 00000000000..6e80c9faef7 --- /dev/null +++ b/scheduler/resource/host.go @@ -0,0 +1,171 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package resource + +import ( + "sync" + "time" + + "go.uber.org/atomic" + + logger "d7y.io/dragonfly/v2/internal/dflog" + "d7y.io/dragonfly/v2/pkg/rpc/scheduler" +) + +const ( + // Host default upload load limit + defaultUploadLoadLimit = 100 +) + +// HostOption is a functional option for configuring the host +type HostOption func(h *Host) *Host + +// WithUploadLoadLimit sets host's UploadLoadLimit +func WithUploadLoadLimit(limit int32) HostOption { + return func(h *Host) *Host { + h.UploadLoadLimit.Store(limit) + return h + } +} + +// WithIsCDN sets host's IsCDN +func WithIsCDN(isCDN bool) HostOption { + return func(h *Host) *Host { + h.IsCDN = isCDN + return h + } +} + +type Host struct { + // ID is host id + ID string + + // IP is host ip + IP string + + // Hostname is host name + Hostname string + + // Port is grpc service port + Port int32 + + // DownloadPort is piece downloading port + DownloadPort int32 + + // SecurityDomain is security domain of host + SecurityDomain string + + // IDC is internet data center of host + IDC string + + // NetTopology is network topology of host + // Example: switch|router|... + NetTopology string + + // Location is location of host + // Example: country|province|... 
+	Location string
+
+	// UploadLoadLimit is upload load limit count
+	UploadLoadLimit *atomic.Int32
+
+	// Peers is the peer sync map
+	Peers *sync.Map
+
+	// IsCDN marks whether the host is a cdn
+	IsCDN bool
+
+	// CreateAt is host create time
+	CreateAt *atomic.Time
+
+	// UpdateAt is host update time
+	UpdateAt *atomic.Time
+
+	// Host log
+	Log *logger.SugaredLoggerOnWith
+}
+
+// NewHost creates a new host instance.
+func NewHost(rawHost *scheduler.PeerHost, options ...HostOption) *Host {
+	h := &Host{
+		ID:              rawHost.Uuid,
+		IP:              rawHost.Ip,
+		Hostname:        rawHost.HostName,
+		Port:            rawHost.RpcPort,
+		DownloadPort:    rawHost.DownPort,
+		SecurityDomain:  rawHost.SecurityDomain,
+		IDC:             rawHost.Idc,
+		NetTopology:     rawHost.NetTopology,
+		Location:        rawHost.Location,
+		UploadLoadLimit: atomic.NewInt32(defaultUploadLoadLimit),
+		Peers:           &sync.Map{},
+		IsCDN:           false,
+		CreateAt:        atomic.NewTime(time.Now()),
+		UpdateAt:        atomic.NewTime(time.Now()),
+		Log:             logger.WithHostID(rawHost.Uuid),
+	}
+
+	for _, opt := range options {
+		opt(h)
+	}
+
+	return h
+}
+
+// LoadPeer returns the peer for a key
+func (h *Host) LoadPeer(key string) (*Peer, bool) {
+	rawPeer, ok := h.Peers.Load(key)
+	if !ok {
+		return nil, false
+	}
+
+	return rawPeer.(*Peer), ok
+}
+
+// StorePeer sets the peer
+func (h *Host) StorePeer(peer *Peer) {
+	h.Peers.Store(peer.ID, peer)
+}
+
+// LoadOrStorePeer returns the peer for the key if present.
+// Otherwise, it stores and returns the given peer.
+// The loaded result is true if the peer was loaded, false if stored.
+func (h *Host) LoadOrStorePeer(peer *Peer) (*Peer, bool) {
+	rawPeer, loaded := h.Peers.LoadOrStore(peer.ID, peer)
+	return rawPeer.(*Peer), loaded
+}
+
+// DeletePeer deletes the peer for a key
+func (h *Host) DeletePeer(key string) {
+	h.Peers.Delete(key)
+}
+
+// LenPeers returns the length of the peers sync map
+func (h *Host) LenPeers() int {
+	var len int
+	h.Peers.Range(func(_, _ interface{}) bool {
+		len++
+		return true
+	})
+
+	return len
+}
+
+// FreeUploadLoad returns the free upload load of the host
+func (h *Host) FreeUploadLoad() int32 {
+	return h.UploadLoadLimit.Load() - int32(h.LenPeers())
+}
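+
+// A minimal construction sketch (values are illustrative, not taken from a
+// real deployment):
+//
+//	host := NewHost(&scheduler.PeerHost{
+//		Uuid:     "cdn-host-id",
+//		Ip:       "127.0.0.1",
+//		RpcPort:  8003,
+//		DownPort: 8001,
+//		HostName: "cdn-0",
+//	}, WithIsCDN(true), WithUploadLoadLimit(200))
+//
+// FreeUploadLoad starts at 200 here and decreases as peers are stored on the
+// host.
diff --git a/scheduler/resource/host_manager.go b/scheduler/resource/host_manager.go
new file mode 100644
index 00000000000..eb4dfa606c2
--- /dev/null
+++ b/scheduler/resource/host_manager.go
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resource
+
+import (
+	"sync"
+)
+
+type HostManager interface {
+	// Load returns the host for a key
+	Load(string) (*Host, bool)
+
+	// Store sets the host
+	Store(*Host)
+
+	// LoadOrStore returns the host for the key if present.
+	// Otherwise, it stores and returns the given host.
+	// The loaded result is true if the host was loaded, false if stored.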
+ LoadOrStore(*Host) (*Host, bool) + + // Delete deletes host for a key + Delete(string) +} + +type hostManager struct { + // Host sync map + *sync.Map +} + +// New host manager interface +func newHostManager() HostManager { + return &hostManager{&sync.Map{}} +} + +func (h *hostManager) Load(key string) (*Host, bool) { + rawHost, ok := h.Map.Load(key) + if !ok { + return nil, false + } + + return rawHost.(*Host), ok +} + +func (h *hostManager) Store(host *Host) { + h.Map.Store(host.ID, host) +} + +func (h *hostManager) LoadOrStore(host *Host) (*Host, bool) { + rawHost, loaded := h.Map.LoadOrStore(host.ID, host) + return rawHost.(*Host), loaded +} + +func (h *hostManager) Delete(key string) { + h.Map.Delete(key) +} diff --git a/scheduler/resource/host_manager_mock.go b/scheduler/resource/host_manager_mock.go new file mode 100644 index 00000000000..d21fbdd7b85 --- /dev/null +++ b/scheduler/resource/host_manager_mock.go @@ -0,0 +1,88 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: scheduler/resource/host_manager.go + +// Package resource is a generated GoMock package. +package resource + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockHostManager is a mock of HostManager interface. +type MockHostManager struct { + ctrl *gomock.Controller + recorder *MockHostManagerMockRecorder +} + +// MockHostManagerMockRecorder is the mock recorder for MockHostManager. +type MockHostManagerMockRecorder struct { + mock *MockHostManager +} + +// NewMockHostManager creates a new mock instance. +func NewMockHostManager(ctrl *gomock.Controller) *MockHostManager { + mock := &MockHostManager{ctrl: ctrl} + mock.recorder = &MockHostManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockHostManager) EXPECT() *MockHostManagerMockRecorder { + return m.recorder +} + +// Delete mocks base method. +func (m *MockHostManager) Delete(arg0 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Delete", arg0) +} + +// Delete indicates an expected call of Delete. +func (mr *MockHostManagerMockRecorder) Delete(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockHostManager)(nil).Delete), arg0) +} + +// Load mocks base method. +func (m *MockHostManager) Load(arg0 string) (*Host, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Load", arg0) + ret0, _ := ret[0].(*Host) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// Load indicates an expected call of Load. +func (mr *MockHostManagerMockRecorder) Load(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockHostManager)(nil).Load), arg0) +} + +// LoadOrStore mocks base method. +func (m *MockHostManager) LoadOrStore(arg0 *Host) (*Host, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadOrStore", arg0) + ret0, _ := ret[0].(*Host) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// LoadOrStore indicates an expected call of LoadOrStore. +func (mr *MockHostManagerMockRecorder) LoadOrStore(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadOrStore", reflect.TypeOf((*MockHostManager)(nil).LoadOrStore), arg0) +} + +// Store mocks base method. +func (m *MockHostManager) Store(arg0 *Host) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Store", arg0) +} + +// Store indicates an expected call of Store. 
+func (mr *MockHostManagerMockRecorder) Store(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockHostManager)(nil).Store), arg0) +} diff --git a/scheduler/resource/host_manager_test.go b/scheduler/resource/host_manager_test.go new file mode 100644 index 00000000000..113a80c9ac0 --- /dev/null +++ b/scheduler/resource/host_manager_test.go @@ -0,0 +1,199 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package resource + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestHostManager_newHostManager(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, hostManager HostManager) + }{ + { + name: "new host manager", + expect: func(t *testing.T, hostManager HostManager) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(hostManager).Elem().Name(), "hostManager") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tc.expect(t, newHostManager()) + }) + } +} + +func TestHostManager_Load(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, hostManager HostManager, mockHost *Host) + }{ + { + name: "load host", + expect: func(t *testing.T, hostManager HostManager, mockHost *Host) { + assert := assert.New(t) + hostManager.Store(mockHost) + host, ok := hostManager.Load(mockHost.ID) + assert.Equal(ok, true) + assert.Equal(host.ID, mockHost.ID) + }, + }, + { + name: "host does not exist", + expect: func(t *testing.T, hostManager HostManager, mockHost *Host) { + assert := assert.New(t) + _, ok := hostManager.Load(mockHost.ID) + assert.Equal(ok, false) + }, + }, + { + name: "load key is empty", + expect: func(t *testing.T, hostManager HostManager, mockHost *Host) { + assert := assert.New(t) + mockHost.ID = "" + hostManager.Store(mockHost) + host, ok := hostManager.Load(mockHost.ID) + assert.Equal(ok, true) + assert.Equal(host.ID, mockHost.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + hostManager := newHostManager() + tc.expect(t, hostManager, mockHost) + }) + } +} + +func TestHostManager_Store(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, hostManager HostManager, mockHost *Host) + }{ + { + name: "store host", + expect: func(t *testing.T, hostManager HostManager, mockHost *Host) { + assert := assert.New(t) + hostManager.Store(mockHost) + host, ok := hostManager.Load(mockHost.ID) + assert.Equal(ok, true) + assert.Equal(host.ID, mockHost.ID) + }, + }, + { + name: "store key is empty", + expect: func(t *testing.T, hostManager HostManager, mockHost *Host) { + assert := assert.New(t) + mockHost.ID = "" + hostManager.Store(mockHost) + host, ok := hostManager.Load(mockHost.ID) + assert.Equal(ok, true) + assert.Equal(host.ID, mockHost.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + 
mockHost := NewHost(mockRawHost) + hostManager := newHostManager() + tc.expect(t, hostManager, mockHost) + }) + } +} + +func TestHostManager_LoadOrStore(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, hostManager HostManager, mockHost *Host) + }{ + { + name: "load host exist", + expect: func(t *testing.T, hostManager HostManager, mockHost *Host) { + assert := assert.New(t) + hostManager.Store(mockHost) + host, ok := hostManager.LoadOrStore(mockHost) + assert.Equal(ok, true) + assert.Equal(host.ID, mockHost.ID) + }, + }, + { + name: "load host does not exist", + expect: func(t *testing.T, hostManager HostManager, mockHost *Host) { + assert := assert.New(t) + host, ok := hostManager.LoadOrStore(mockHost) + assert.Equal(ok, false) + assert.Equal(host.ID, mockHost.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + hostManager := newHostManager() + tc.expect(t, hostManager, mockHost) + }) + } +} + +func TestHostManager_Delete(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, hostManager HostManager, mockHost *Host) + }{ + { + name: "delete host", + expect: func(t *testing.T, hostManager HostManager, mockHost *Host) { + assert := assert.New(t) + hostManager.Store(mockHost) + hostManager.Delete(mockHost.ID) + _, ok := hostManager.Load(mockHost.ID) + assert.Equal(ok, false) + }, + }, + { + name: "delete key does not exist", + expect: func(t *testing.T, hostManager HostManager, mockHost *Host) { + assert := assert.New(t) + mockHost.ID = "" + hostManager.Store(mockHost) + hostManager.Delete(mockHost.ID) + _, ok := hostManager.Load(mockHost.ID) + assert.Equal(ok, false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + hostManager := newHostManager() + tc.expect(t, hostManager, mockHost) + }) + } +} diff --git a/scheduler/resource/host_test.go b/scheduler/resource/host_test.go new file mode 100644 index 00000000000..614ccd094e7 --- /dev/null +++ b/scheduler/resource/host_test.go @@ -0,0 +1,401 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package resource + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/pkg/idgen" + "d7y.io/dragonfly/v2/pkg/rpc/scheduler" +) + +var ( + mockRawHost = &scheduler.PeerHost{ + Uuid: idgen.HostID("hostname", 8003), + Ip: "127.0.0.1", + RpcPort: 8003, + DownPort: 8001, + HostName: "hostname", + SecurityDomain: "security_domain", + Location: "location", + Idc: "idc", + NetTopology: "net_topology", + } + + mockRawCDNHost = &scheduler.PeerHost{ + Uuid: idgen.CDNHostID("hostname", 8003), + Ip: "127.0.0.1", + RpcPort: 8003, + DownPort: 8001, + HostName: "hostname", + SecurityDomain: "security_domain", + Location: "location", + Idc: "idc", + NetTopology: "net_topology", + } +) + +func TestHost_NewHost(t *testing.T) { + tests := []struct { + name string + rawHost *scheduler.PeerHost + options []HostOption + expect func(t *testing.T, host *Host) + }{ + { + name: "new host", + rawHost: mockRawHost, + expect: func(t *testing.T, host *Host) { + assert := assert.New(t) + assert.Equal(host.ID, mockRawHost.Uuid) + assert.Equal(host.IP, mockRawHost.Ip) + assert.Equal(host.Port, mockRawHost.RpcPort) + assert.Equal(host.DownloadPort, mockRawHost.DownPort) + assert.Equal(host.Hostname, mockRawHost.HostName) + assert.Equal(host.SecurityDomain, mockRawHost.SecurityDomain) + assert.Equal(host.Location, mockRawHost.Location) + assert.Equal(host.IDC, mockRawHost.Idc) + assert.Equal(host.NetTopology, mockRawHost.NetTopology) + assert.Equal(host.UploadLoadLimit.Load(), int32(defaultUploadLoadLimit)) + assert.Equal(host.LenPeers(), 0) + assert.Equal(host.IsCDN, false) + assert.NotEqual(host.CreateAt.Load(), 0) + assert.NotEqual(host.UpdateAt.Load(), 0) + assert.NotNil(host.Log) + }, + }, + { + name: "new cdn host", + rawHost: mockRawCDNHost, + options: []HostOption{WithIsCDN(true)}, + expect: func(t *testing.T, host *Host) { + assert := assert.New(t) + assert.Equal(host.ID, mockRawCDNHost.Uuid) + assert.Equal(host.IP, mockRawCDNHost.Ip) + assert.Equal(host.Port, mockRawCDNHost.RpcPort) + assert.Equal(host.DownloadPort, mockRawCDNHost.DownPort) + assert.Equal(host.Hostname, mockRawCDNHost.HostName) + assert.Equal(host.SecurityDomain, mockRawCDNHost.SecurityDomain) + assert.Equal(host.Location, mockRawCDNHost.Location) + assert.Equal(host.IDC, mockRawCDNHost.Idc) + assert.Equal(host.NetTopology, mockRawCDNHost.NetTopology) + assert.Equal(host.UploadLoadLimit.Load(), int32(defaultUploadLoadLimit)) + assert.Equal(host.LenPeers(), 0) + assert.Equal(host.IsCDN, true) + assert.NotEqual(host.CreateAt.Load(), 0) + assert.NotEqual(host.UpdateAt.Load(), 0) + assert.NotNil(host.Log) + }, + }, + { + name: "new host and set upload loadlimit", + rawHost: mockRawHost, + options: []HostOption{WithUploadLoadLimit(200)}, + expect: func(t *testing.T, host *Host) { + assert := assert.New(t) + assert.Equal(host.ID, mockRawHost.Uuid) + assert.Equal(host.IP, mockRawHost.Ip) + assert.Equal(host.Port, mockRawHost.RpcPort) + assert.Equal(host.DownloadPort, mockRawHost.DownPort) + assert.Equal(host.Hostname, mockRawHost.HostName) + assert.Equal(host.SecurityDomain, mockRawHost.SecurityDomain) + assert.Equal(host.Location, mockRawHost.Location) + assert.Equal(host.IDC, mockRawHost.Idc) + assert.Equal(host.NetTopology, mockRawHost.NetTopology) + assert.Equal(host.UploadLoadLimit.Load(), int32(200)) + assert.Equal(host.LenPeers(), 0) + assert.Equal(host.IsCDN, false) + assert.NotEqual(host.CreateAt.Load(), 0) + assert.NotEqual(host.UpdateAt.Load(), 0) + assert.NotNil(host.Log) + }, + }, + } + + for _, tc 
:= range tests { + t.Run(tc.name, func(t *testing.T) { + tc.expect(t, NewHost(tc.rawHost, tc.options...)) + }) + } +} + +func TestHost_LoadPeer(t *testing.T) { + tests := []struct { + name string + rawHost *scheduler.PeerHost + peerID string + options []HostOption + expect func(t *testing.T, peer *Peer, ok bool) + }{ + { + name: "load peer", + rawHost: mockRawHost, + peerID: mockPeerID, + expect: func(t *testing.T, peer *Peer, ok bool) { + assert := assert.New(t) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeerID) + }, + }, + { + name: "peer does not exist", + rawHost: mockRawHost, + peerID: idgen.PeerID("0.0.0.0"), + expect: func(t *testing.T, peer *Peer, ok bool) { + assert := assert.New(t) + assert.Equal(ok, false) + }, + }, + { + name: "load key is empty", + rawHost: mockRawHost, + peerID: "", + expect: func(t *testing.T, peer *Peer, ok bool) { + assert := assert.New(t) + assert.Equal(ok, false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + host := NewHost(tc.rawHost, tc.options...) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, mockTask, host) + + host.StorePeer(mockPeer) + peer, ok := host.LoadPeer(tc.peerID) + tc.expect(t, peer, ok) + }) + } +} + +func TestHost_StorePeer(t *testing.T) { + tests := []struct { + name string + rawHost *scheduler.PeerHost + peerID string + options []HostOption + expect func(t *testing.T, peer *Peer, ok bool) + }{ + { + name: "store peer", + rawHost: mockRawHost, + peerID: mockPeerID, + expect: func(t *testing.T, peer *Peer, ok bool) { + assert := assert.New(t) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeerID) + }, + }, + { + name: "store key is empty", + rawHost: mockRawHost, + peerID: "", + expect: func(t *testing.T, peer *Peer, ok bool) { + assert := assert.New(t) + assert.Equal(ok, true) + assert.Equal(peer.ID, "") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + host := NewHost(tc.rawHost, tc.options...) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(tc.peerID, mockTask, host) + + host.StorePeer(mockPeer) + peer, ok := host.LoadPeer(tc.peerID) + tc.expect(t, peer, ok) + }) + } +} + +func TestHost_LoadOrStorePeer(t *testing.T) { + tests := []struct { + name string + rawHost *scheduler.PeerHost + peerID string + options []HostOption + expect func(t *testing.T, host *Host, mockPeer *Peer) + }{ + { + name: "load peer exist", + rawHost: mockRawHost, + peerID: mockPeerID, + expect: func(t *testing.T, host *Host, mockPeer *Peer) { + assert := assert.New(t) + peer, ok := host.LoadOrStorePeer(mockPeer) + + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeerID) + }, + }, + { + name: "load peer does not exist", + rawHost: mockRawHost, + peerID: mockPeerID, + expect: func(t *testing.T, host *Host, mockPeer *Peer) { + assert := assert.New(t) + mockPeer.ID = idgen.PeerID("0.0.0.0") + peer, ok := host.LoadOrStorePeer(mockPeer) + + assert.Equal(ok, false) + assert.Equal(peer.ID, mockPeer.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + host := NewHost(tc.rawHost, tc.options...) 
+ mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, mockTask, host) + + host.StorePeer(mockPeer) + tc.expect(t, host, mockPeer) + }) + } +} + +func TestHost_DeletePeer(t *testing.T) { + tests := []struct { + name string + rawHost *scheduler.PeerHost + peerID string + options []HostOption + expect func(t *testing.T, host *Host) + }{ + { + name: "delete peer", + rawHost: mockRawHost, + peerID: mockPeerID, + expect: func(t *testing.T, host *Host) { + assert := assert.New(t) + _, ok := host.LoadPeer(mockPeerID) + assert.Equal(ok, false) + }, + }, + { + name: "delete key is empty", + rawHost: mockRawHost, + peerID: "", + expect: func(t *testing.T, host *Host) { + assert := assert.New(t) + peer, ok := host.LoadPeer(mockPeerID) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeerID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + host := NewHost(tc.rawHost, tc.options...) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, mockTask, host) + + host.StorePeer(mockPeer) + host.DeletePeer(tc.peerID) + tc.expect(t, host) + }) + } +} + +func TestHost_LenPeers(t *testing.T) { + tests := []struct { + name string + rawHost *scheduler.PeerHost + options []HostOption + expect func(t *testing.T, host *Host, mockPeer *Peer) + }{ + { + name: "len peers", + rawHost: mockRawHost, + expect: func(t *testing.T, host *Host, mockPeer *Peer) { + assert := assert.New(t) + host.StorePeer(mockPeer) + mockPeer.ID = idgen.PeerID("0.0.0.0") + host.StorePeer(mockPeer) + assert.Equal(host.LenPeers(), 2) + host.StorePeer(mockPeer) + assert.Equal(host.LenPeers(), 2) + }, + }, + { + name: "peer does not exist", + rawHost: mockRawHost, + expect: func(t *testing.T, host *Host, mockPeer *Peer) { + assert := assert.New(t) + assert.Equal(host.LenPeers(), 0) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + host := NewHost(tc.rawHost, tc.options...) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, mockTask, host) + + tc.expect(t, host, mockPeer) + }) + } +} + +func TestHost_FreeUploadLoad(t *testing.T) { + tests := []struct { + name string + rawHost *scheduler.PeerHost + options []HostOption + expect func(t *testing.T, host *Host, mockPeer *Peer) + }{ + { + name: "get free upload load", + rawHost: mockRawHost, + expect: func(t *testing.T, host *Host, mockPeer *Peer) { + assert := assert.New(t) + host.StorePeer(mockPeer) + mockPeer.ID = idgen.PeerID("0.0.0.0") + host.StorePeer(mockPeer) + assert.Equal(host.FreeUploadLoad(), int32(defaultUploadLoadLimit-2)) + }, + }, + { + name: "upload peer does not exist", + rawHost: mockRawHost, + expect: func(t *testing.T, host *Host, mockPeer *Peer) { + assert := assert.New(t) + assert.Equal(host.FreeUploadLoad(), int32(defaultUploadLoadLimit)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + host := NewHost(tc.rawHost, tc.options...) 
+			mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta)
+			mockPeer := NewPeer(mockPeerID, mockTask, host)
+
+			tc.expect(t, host, mockPeer)
+		})
+	}
+}
diff --git a/scheduler/resource/peer.go b/scheduler/resource/peer.go
new file mode 100644
index 00000000000..0997646de77
--- /dev/null
+++ b/scheduler/resource/peer.go
@@ -0,0 +1,421 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resource
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"sync"
+	"time"
+
+	"github.com/bits-and-blooms/bitset"
+	"github.com/looplab/fsm"
+	"go.uber.org/atomic"
+
+	"d7y.io/dragonfly/v2/internal/dferrors"
+	logger "d7y.io/dragonfly/v2/internal/dflog"
+	"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
+)
+
+const (
+	// Peer has been created but has not started running
+	PeerStatePending = "Pending"
+
+	// Peer successfully registered as small scope size
+	PeerStateReceivedSmall = "ReceivedSmall"
+
+	// Peer successfully registered as normal scope size
+	PeerStateReceivedNormal = "ReceivedNormal"
+
+	// Peer is downloading resources from other peers
+	PeerStateRunning = "Running"
+
+	// Peer is downloading resources from back-to-source
+	PeerStateBackToSource = "BackToSource"
+
+	// Peer download succeeded
+	PeerStateSucceeded = "Succeeded"
+
+	// Peer download failed
+	PeerStateFailed = "Failed"
+
+	// Peer has left
+	PeerStateLeave = "Leave"
+)
+
+const (
+	// Peer is downloading
+	PeerEventDownload = "Download"
+
+	// Peer is registered as small scope size
+	PeerEventRegisterSmall = "RegisterSmall"
+
+	// Peer is registered as normal scope size
+	PeerEventRegisterNormal = "RegisterNormal"
+
+	// Peer is downloading from back-to-source
+	PeerEventDownloadFromBackToSource = "DownloadFromBackToSource"
+
+	// Peer downloaded successfully
+	PeerEventDownloadSucceeded = "DownloadSucceeded"
+
+	// Peer download failed
+	PeerEventDownloadFailed = "DownloadFailed"
+
+	// Peer leaves
+	PeerEventLeave = "Leave"
+)
+
+type Peer struct {
+	// ID is peer id
+	ID string
+
+	// Pieces is piece bitset
+	Pieces *bitset.BitSet
+
+	// pieceCosts is the piece download costs
+	pieceCosts []int64
+
+	// Stream is grpc stream instance
+	Stream *atomic.Value
+
+	// StopChannel records the stop code of the report piece grpc interface
+	StopChannel chan *dferrors.DfError
+
+	// Peer state machine
+	FSM *fsm.FSM
+
+	// Task is peer task
+	Task *Task
+
+	// Host is peer host
+	Host *Host
+
+	// Parent is peer parent
+	Parent *atomic.Value
+
+	// Children is peer children
+	Children *sync.Map
+
+	// CreateAt is peer create time
+	CreateAt *atomic.Time
+
+	// UpdateAt is peer update time
+	UpdateAt *atomic.Time
+
+	// Peer mutex
+	mu *sync.RWMutex
+
+	// Peer log
+	Log *logger.SugaredLoggerOnWith
+}
+
+// NewPeer creates a new peer instance.
+func NewPeer(id string, task *Task, host *Host) *Peer {
+	p := &Peer{
+		ID:          id,
+		Pieces:      &bitset.BitSet{},
+		pieceCosts:  []int64{},
+		Stream:      &atomic.Value{},
+		StopChannel: make(chan *dferrors.DfError, 1),
+		Task:        task,
+		Host:        host,
+		Parent:      &atomic.Value{},
+		Children:    &sync.Map{},
+		CreateAt:    atomic.NewTime(time.Now()),
+		UpdateAt:    atomic.NewTime(time.Now()),
+		mu:          &sync.RWMutex{},
+		Log:         logger.WithTaskAndPeerID(task.ID, id),
+	}
+
+	// Initialize state machine
+	p.FSM = fsm.NewFSM(
+		PeerStatePending,
+		fsm.Events{
+			{Name: PeerEventRegisterSmall, Src: []string{PeerStatePending}, Dst: PeerStateReceivedSmall},
+			{Name: PeerEventRegisterNormal, Src: []string{PeerStatePending}, Dst: PeerStateReceivedNormal},
+			{Name: PeerEventDownload, Src: []string{PeerStateReceivedSmall, PeerStateReceivedNormal}, Dst: PeerStateRunning},
+			{Name: PeerEventDownloadFromBackToSource, Src: []string{PeerStateRunning}, Dst: PeerStateBackToSource},
+			{Name: PeerEventDownloadSucceeded, Src: []string{PeerStateRunning, PeerStateBackToSource}, Dst: PeerStateSucceeded},
+			{Name: PeerEventDownloadFailed, Src: []string{
+				PeerStatePending, PeerStateReceivedSmall, PeerStateReceivedNormal,
+				PeerStateRunning, PeerStateBackToSource, PeerStateSucceeded,
+			}, Dst: PeerStateFailed},
+			{Name: PeerEventLeave, Src: []string{PeerStateFailed, PeerStateSucceeded}, Dst: PeerStateLeave},
+		},
+		fsm.Callbacks{
+			PeerEventDownload: func(e *fsm.Event) {
+				p.UpdateAt.Store(time.Now())
+				p.Log.Infof("peer state is %s", e.FSM.Current())
+			},
+			PeerEventRegisterSmall: func(e *fsm.Event) {
+				p.UpdateAt.Store(time.Now())
+				p.Log.Infof("peer state is %s", e.FSM.Current())
+			},
+			PeerEventRegisterNormal: func(e *fsm.Event) {
+				p.UpdateAt.Store(time.Now())
+				p.Log.Infof("peer state is %s", e.FSM.Current())
+			},
+			PeerEventDownloadFromBackToSource: func(e *fsm.Event) {
+				p.Task.BackToSourcePeers.Add(p)
+				p.UpdateAt.Store(time.Now())
+				p.Log.Infof("peer state is %s", e.FSM.Current())
+			},
+			PeerEventDownloadSucceeded: func(e *fsm.Event) {
+				if e.Src == PeerStateBackToSource {
+					p.Task.BackToSourcePeers.Delete(p)
+				}
+
+				p.UpdateAt.Store(time.Now())
+				p.Log.Infof("peer state is %s", e.FSM.Current())
+			},
+			PeerEventDownloadFailed: func(e *fsm.Event) {
+				if e.Src == PeerStateBackToSource {
+					p.Task.BackToSourcePeers.Delete(p)
+				}
+
+				p.UpdateAt.Store(time.Now())
+				p.Log.Infof("peer state is %s", e.FSM.Current())
+			},
+			PeerEventLeave: func(e *fsm.Event) {
+				p.Log.Infof("peer state is %s", e.FSM.Current())
+			},
+		},
+	)
+
+	return p
+}
+
+// LoadChild returns the peer child for a key
+func (p *Peer) LoadChild(key string) (*Peer, bool) {
+	rawChild, ok := p.Children.Load(key)
+	if !ok {
+		return nil, false
+	}
+
+	return rawChild.(*Peer), ok
+}
+
+// StoreChild sets the peer child
+func (p *Peer) StoreChild(child *Peer) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.Children.Store(child.ID, child)
+	p.Host.Peers.Store(child.ID, child)
+	p.Task.Peers.Store(child.ID, child)
+	child.Parent.Store(p)
+}
+
+// DeleteChild deletes the peer child for a key
+func (p *Peer) DeleteChild(key string) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	child, ok := p.LoadChild(key)
+	if !ok {
+		return
+	}
+
+	p.Children.Delete(child.ID)
+	p.Host.DeletePeer(child.ID)
+	p.Task.DeletePeer(child.ID)
+	child.Parent = &atomic.Value{}
+}
+
+// LenChildren returns the length of the children sync map
+func (p *Peer) LenChildren() int {
+	var len int
+	p.Children.Range(func(_, _ interface{}) bool {
+		len++
+		return true
+	})
+
+	return len
+}
+
+// LoadParent returns the peer parent
+func (p *Peer) LoadParent() (*Peer, bool) {
+	rawParent := p.Parent.Load()
+	if rawParent == nil {
+		return nil, false
+	}
+
+	return rawParent.(*Peer), true
+}
+
+// StoreParent sets the peer parent
+func (p *Peer) StoreParent(parent *Peer) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.Parent.Store(parent)
+	parent.Children.Store(p.ID, p)
+	parent.Host.Peers.Store(p.ID, p)
+	parent.Task.Peers.Store(p.ID, p)
+}
+
+// DeleteParent deletes the peer parent
+func (p *Peer) DeleteParent() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	parent, ok := p.LoadParent()
+	if !ok {
+		return
+	}
+
+	p.Parent = &atomic.Value{}
+	parent.Children.Delete(p.ID)
+	parent.Host.Peers.Delete(p.ID)
+	parent.Task.Peers.Delete(p.ID)
+}
+
+// ReplaceParent replaces the peer parent
+func (p *Peer) ReplaceParent(parent *Peer) {
+	p.DeleteParent()
+	p.StoreParent(parent)
+}
+
+// TreeTotalNodeCount returns the tree's total node count
+func (p *Peer) TreeTotalNodeCount() int {
+	count := 1
+	p.Children.Range(func(_, value interface{}) bool {
+		node, ok := value.(*Peer)
+		if !ok {
+			return true
+		}
+
+		count += node.TreeTotalNodeCount()
+		return true
+	})
+
+	return count
+}
+
+// IsDescendant reports whether the peer is a descendant of the given ancestor
+func (p *Peer) IsDescendant(ancestor *Peer) bool {
+	return isDescendant(ancestor, p)
+}
+
+// IsAncestor reports whether the peer is an ancestor of the given descendant
+func (p *Peer) IsAncestor(descendant *Peer) bool {
+	return isDescendant(p, descendant)
+}
+
+// isDescendant reports whether descendant is a descendant of ancestor
+func isDescendant(ancestor, descendant *Peer) bool {
+	node := descendant
+	for node != nil {
+		parent, ok := node.LoadParent()
+		if !ok {
+			return false
+		}
+		if parent.ID == ancestor.ID {
+			return true
+		}
+		node = parent
+	}
+
+	return false
+}
+
+// AppendPieceCost appends a piece cost to the costs slice
+func (p *Peer) AppendPieceCost(cost int64) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.pieceCosts = append(p.pieceCosts, cost)
+}
+
+// PieceCosts returns the piece costs slice
+func (p *Peer) PieceCosts() []int64 {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	return p.pieceCosts
+}
+
+// LoadStream returns the grpc stream
+func (p *Peer) LoadStream() (scheduler.Scheduler_ReportPieceResultServer, bool) {
+	rawStream := p.Stream.Load()
+	if rawStream == nil {
+		return nil, false
+	}
+
+	return rawStream.(scheduler.Scheduler_ReportPieceResultServer), true
+}
+
+// StoreStream sets the grpc stream
+func (p *Peer) StoreStream(stream scheduler.Scheduler_ReportPieceResultServer) {
+	p.Stream.Store(stream)
+}
+
+// DeleteStream deletes the grpc stream
+func (p *Peer) DeleteStream() {
+	p.Stream = &atomic.Value{}
+}
+
+// StopStream stops the grpc stream with an error code
+func (p *Peer) StopStream(dferr *dferrors.DfError) bool {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if _, ok := p.LoadStream(); !ok {
+		p.Log.Error("stop stream failed: can not find peer stream")
+		return false
+	}
+	p.DeleteStream()
+
+	select {
+	case p.StopChannel <- dferr:
+		p.Log.Infof("send stop channel %#v", dferr)
+	default:
+		p.Log.Error("stop channel busy")
+		return false
+	}
+
+	return true
+}
+
+// DownloadTinyFile downloads a tiny file from the peer
+func (p *Peer) DownloadTinyFile(ctx context.Context) ([]byte, error) {
+	// Download url: http://${host}:${port}/download/${taskIndex}/${taskID}?peerId=scheduler
+	url := url.URL{
+		Scheme:   "http",
+		Host:     fmt.Sprintf("%s:%d", p.Host.IP, p.Host.DownloadPort),
+		Path:     fmt.Sprintf("download/%s/%s", p.Task.ID[:3], p.Task.ID),
+		RawQuery: "peerId=scheduler",
+	}
+	p.Log.Infof("download tiny file url: %#v", url)
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return io.ReadAll(resp.Body)
+}
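+
+// The events above form the peer lifecycle. A hypothetical happy path
+// (sketch only, not compiled into this file; task and host are assumed to
+// already exist):
+//
+//	peer := NewPeer("peer-id", task, host)          // PeerStatePending
+//	_ = peer.FSM.Event(PeerEventRegisterNormal)     // -> PeerStateReceivedNormal
+//	_ = peer.FSM.Event(PeerEventDownload)           // -> PeerStateRunning
+//	_ = peer.FSM.Event(PeerEventDownloadSucceeded)  // -> PeerStateSucceeded
+//	_ = peer.FSM.Event(PeerEventLeave)              // -> PeerStateLeave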
diff --git a/scheduler/resource/peer_manager.go b/scheduler/resource/peer_manager.go
new file mode 100644
index 00000000000..ba67a7adde3
--- /dev/null
+++ b/scheduler/resource/peer_manager.go
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resource
+
+import (
+	"sync"
+	"time"
+
+	pkggc "d7y.io/dragonfly/v2/pkg/gc"
+	"d7y.io/dragonfly/v2/scheduler/config"
+)
+
+const (
+	// GC peer id
+	GCPeerID = "peer"
+)
+
+type PeerManager interface {
+	// Load returns the peer for a key
+	Load(string) (*Peer, bool)
+
+	// Store sets the peer
+	Store(*Peer)
+
+	// LoadOrStore returns the peer for the key if present.
+	// Otherwise, it stores and returns the given peer.
+	// The loaded result is true if the peer was loaded, false if stored.
+	LoadOrStore(*Peer) (*Peer, bool)
+
+	// Delete deletes the peer for a key
+	Delete(string)
+
+	// RunGC tries to reclaim peers
+	RunGC() error
+}
+
+type peerManager struct {
+	// Peer sync map
+	*sync.Map
+
+	// Peer time to live
+	ttl time.Duration
+
+	// Peer mutex
+	mu *sync.Mutex
+}
+
+// newPeerManager creates a new peer manager.
+func newPeerManager(cfg *config.GCConfig, gc pkggc.GC) (PeerManager, error) {
+	p := &peerManager{
+		Map: &sync.Map{},
+		ttl: cfg.PeerTTL,
+		mu:  &sync.Mutex{},
+	}
+
+	if err := gc.Add(pkggc.Task{
+		ID:       GCPeerID,
+		Interval: cfg.PeerGCInterval,
+		Timeout:  cfg.PeerGCInterval,
+		Runner:   p,
+	}); err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+func (p *peerManager) Load(key string) (*Peer, bool) {
+	rawPeer, ok := p.Map.Load(key)
+	if !ok {
+		return nil, false
+	}
+
+	return rawPeer.(*Peer), ok
+}
+
+func (p *peerManager) Store(peer *Peer) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.Map.Store(peer.ID, peer)
+	peer.Host.StorePeer(peer)
+	peer.Task.StorePeer(peer)
+}
+
+func (p *peerManager) LoadOrStore(peer *Peer) (*Peer, bool) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	rawPeer, loaded := p.Map.LoadOrStore(peer.ID, peer)
+	if !loaded {
+		peer.Host.StorePeer(peer)
+		peer.Task.StorePeer(peer)
+	}
+
+	return rawPeer.(*Peer), loaded
+}
+
+func (p *peerManager) Delete(key string) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if peer, ok := p.Load(key); ok {
+		p.Map.Delete(key)
+		peer.Host.DeletePeer(key)
+		peer.Task.DeletePeer(key)
+	}
+}
+
+func (p *peerManager) RunGC() error {
+	p.Map.Range(func(_, value interface{}) bool {
+		peer := value.(*Peer)
+		elapsed := time.Since(peer.UpdateAt.Load())
+
+		if elapsed > p.ttl && peer.LenChildren() == 0 {
+			// If the status is PeerStateLeave,
+			// clear the peer information
+			if peer.FSM.Is(PeerStateLeave) {
+				peer.DeleteParent()
+				p.Delete(peer.ID)
+				peer.Log.Info("peer has been reclaimed")
+				return true
+			}
+
+			// If the peer has not left yet,
+			// first change its state to PeerStateLeave
+			if err := peer.FSM.Event(PeerEventLeave); err != nil {
+				peer.Log.Errorf("peer fsm event failed: %v", err)
+			}
+
+			peer.Log.Info("gc causes the peer to leave")
+			return true
+		}
+
+		return true
+	})
+
+	return nil
+}
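+
+// A wiring sketch (illustrative only; the interval and TTL values are
+// assumptions, and gc is an existing pkggc.GC instance):
+//
+//	peerManager, err := newPeerManager(&config.GCConfig{
+//		PeerGCInterval: time.Minute,
+//		PeerTTL:        5 * time.Minute,
+//	}, gc)
+//
+// RunGC then runs every PeerGCInterval and reclaims a peer once it has been
+// idle longer than PeerTTL and has no children: the first pass moves it to
+// PeerStateLeave, the next pass deletes it.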
diff --git a/scheduler/resource/peer_manager_mock.go b/scheduler/resource/peer_manager_mock.go
new file mode 100644
index 00000000000..cec4c9ac787
--- /dev/null
+++ b/scheduler/resource/peer_manager_mock.go
@@ -0,0 +1,102 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: scheduler/resource/peer_manager.go
+
+// Package resource is a generated GoMock package.
+package resource
+
+import (
+	reflect "reflect"
+
+	gomock "github.com/golang/mock/gomock"
+)
+
+// MockPeerManager is a mock of PeerManager interface.
+type MockPeerManager struct {
+	ctrl     *gomock.Controller
+	recorder *MockPeerManagerMockRecorder
+}
+
+// MockPeerManagerMockRecorder is the mock recorder for MockPeerManager.
+type MockPeerManagerMockRecorder struct {
+	mock *MockPeerManager
+}
+
+// NewMockPeerManager creates a new mock instance.
+func NewMockPeerManager(ctrl *gomock.Controller) *MockPeerManager {
+	mock := &MockPeerManager{ctrl: ctrl}
+	mock.recorder = &MockPeerManagerMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockPeerManager) EXPECT() *MockPeerManagerMockRecorder {
+	return m.recorder
+}
+
+// Delete mocks base method.
+func (m *MockPeerManager) Delete(arg0 string) {
+	m.ctrl.T.Helper()
+	m.ctrl.Call(m, "Delete", arg0)
+}
+
+// Delete indicates an expected call of Delete.
+func (mr *MockPeerManagerMockRecorder) Delete(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockPeerManager)(nil).Delete), arg0)
+}
+
+// Load mocks base method.
+func (m *MockPeerManager) Load(arg0 string) (*Peer, bool) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Load", arg0)
+	ret0, _ := ret[0].(*Peer)
+	ret1, _ := ret[1].(bool)
+	return ret0, ret1
+}
+
+// Load indicates an expected call of Load.
+func (mr *MockPeerManagerMockRecorder) Load(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockPeerManager)(nil).Load), arg0)
+}
+
+// LoadOrStore mocks base method.
+func (m *MockPeerManager) LoadOrStore(arg0 *Peer) (*Peer, bool) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "LoadOrStore", arg0)
+	ret0, _ := ret[0].(*Peer)
+	ret1, _ := ret[1].(bool)
+	return ret0, ret1
+}
+
+// LoadOrStore indicates an expected call of LoadOrStore.
+func (mr *MockPeerManagerMockRecorder) LoadOrStore(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadOrStore", reflect.TypeOf((*MockPeerManager)(nil).LoadOrStore), arg0)
+}
+
+// RunGC mocks base method.
+func (m *MockPeerManager) RunGC() error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "RunGC")
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// RunGC indicates an expected call of RunGC.
+func (mr *MockPeerManagerMockRecorder) RunGC() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunGC", reflect.TypeOf((*MockPeerManager)(nil).RunGC))
+}
+
+// Store mocks base method.
+func (m *MockPeerManager) Store(arg0 *Peer) {
+	m.ctrl.T.Helper()
+	m.ctrl.Call(m, "Store", arg0)
+}
+
+// Store indicates an expected call of Store.
+func (mr *MockPeerManagerMockRecorder) Store(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockPeerManager)(nil).Store), arg0) +} diff --git a/scheduler/resource/peer_manager_test.go b/scheduler/resource/peer_manager_test.go new file mode 100644 index 00000000000..871c5898f34 --- /dev/null +++ b/scheduler/resource/peer_manager_test.go @@ -0,0 +1,411 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package resource + +import ( + "errors" + "reflect" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/pkg/gc" + "d7y.io/dragonfly/v2/scheduler/config" +) + +var ( + mockPeerGCConfig = &config.GCConfig{ + PeerGCInterval: 1 * time.Second, + PeerTTL: 1 * time.Microsecond, + } +) + +func TestPeerManager_newPeerManager(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, peerManager PeerManager, err error) + }{ + { + name: "new peer manager", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, err error) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(peerManager).Elem().Name(), "peerManager") + }, + }, + { + name: "new peer manager failed because of gc error", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(errors.New("foo")).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, err error) { + assert := assert.New(t) + assert.EqualError(err, "foo") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + peerManager, err := newPeerManager(mockPeerGCConfig, gc) + tc.expect(t, peerManager, err) + }) + } +} + +func TestPeerManager_Load(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, peerManager PeerManager, mockPeer *Peer) + }{ + { + name: "load peer", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + peerManager.Store(mockPeer) + peer, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeer.ID) + }, + }, + { + name: "peer does not exist", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + _, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, false) + }, + }, + { + name: "load key is empty", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := 
assert.New(t) + mockPeer.ID = "" + peerManager.Store(mockPeer) + peer, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeer.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, mockTask, mockHost) + peerManager, err := newPeerManager(mockPeerGCConfig, gc) + if err != nil { + t.Fatal(err) + } + + tc.expect(t, peerManager, mockPeer) + }) + } +} + +func TestPeerManager_Store(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, peerManager PeerManager, mockPeer *Peer) + }{ + { + name: "store peer", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + peerManager.Store(mockPeer) + peer, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeer.ID) + }, + }, + { + name: "store key is empty", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + mockPeer.ID = "" + peerManager.Store(mockPeer) + peer, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeer.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, mockTask, mockHost) + peerManager, err := newPeerManager(mockPeerGCConfig, gc) + if err != nil { + t.Fatal(err) + } + + tc.expect(t, peerManager, mockPeer) + }) + } +} + +func TestPeerManager_LoadOrStore(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, peerManager PeerManager, mockPeer *Peer) + }{ + { + name: "load peer exist", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + peerManager.Store(mockPeer) + peer, ok := peerManager.LoadOrStore(mockPeer) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeer.ID) + }, + }, + { + name: "load peer does not exist", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + peer, ok := peerManager.LoadOrStore(mockPeer) + assert.Equal(ok, false) + assert.Equal(peer.ID, mockPeer.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, mockTask, mockHost) + peerManager, err := newPeerManager(mockPeerGCConfig, gc) + if err != nil { + t.Fatal(err) + } + + tc.expect(t, peerManager, mockPeer) + }) + } +} + 
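For context, a minimal sketch of how the generated MockPeerManager above can stand in for the PeerManager interface; the test below is hypothetical and not part of this change, but it mirrors the record-then-call gomock pattern used throughout these tests.

package resource

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
)

// TestSketch_MockPeerManager is illustrative: it programs a single Load
// expectation on the generated mock and verifies the programmed miss.
func TestSketch_MockPeerManager(t *testing.T) {
	ctl := gomock.NewController(t)
	defer ctl.Finish()

	peerManager := NewMockPeerManager(ctl)
	peerManager.EXPECT().Load(gomock.Any()).Return(nil, false).Times(1)

	// Code under test would receive peerManager as a PeerManager.
	_, ok := peerManager.Load("unknown-peer-id")
	assert.False(t, ok)
}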
+func TestPeerManager_Delete(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, peerManager PeerManager, mockPeer *Peer) + }{ + { + name: "delete peer", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + peerManager.Store(mockPeer) + peerManager.Delete(mockPeer.ID) + _, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, false) + }, + }, + { + name: "delete key does not exist", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + mockPeer.ID = "" + peerManager.Store(mockPeer) + peerManager.Delete(mockPeer.ID) + _, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, mockTask, mockHost) + peerManager, err := newPeerManager(mockPeerGCConfig, gc) + if err != nil { + t.Fatal(err) + } + + tc.expect(t, peerManager, mockPeer) + }) + } +} + +func TestPeerManager_RunGC(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, peerManager PeerManager, mockPeer *Peer) + }{ + { + name: "peer leave", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + peerManager.Store(mockPeer) + mockPeer.FSM.SetState(PeerStateSucceeded) + err := peerManager.RunGC() + assert.NoError(err) + + peer, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, true) + assert.Equal(peer.FSM.Current(), PeerStateLeave) + }, + }, + { + name: "peer reclaimed", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + peerManager.Store(mockPeer) + mockPeer.FSM.SetState(PeerStateSucceeded) + err := peerManager.RunGC() + assert.NoError(err) + + peer, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, true) + assert.Equal(peer.FSM.Current(), PeerStateLeave) + + err = peerManager.RunGC() + assert.NoError(err) + + _, ok = peerManager.Load(mockPeer.ID) + assert.Equal(ok, false) + }, + }, + { + name: "peer has children", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + peerManager.Store(mockPeer) + mockPeer.FSM.SetState(PeerStateSucceeded) + mockPeer.StoreChild(mockPeer) + err := peerManager.RunGC() + assert.NoError(err) + + peer, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, true) + assert.Equal(peer.FSM.Current(), PeerStateSucceeded) + }, + }, + { + name: "peer state is PeerStatePending", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, peerManager PeerManager, mockPeer *Peer) { + assert := assert.New(t) + peerManager.Store(mockPeer) + mockPeer.FSM.SetState(PeerStatePending) + mockPeer.StoreChild(mockPeer) + 
err := peerManager.RunGC() + assert.NoError(err) + + peer, ok := peerManager.Load(mockPeer.ID) + assert.Equal(ok, true) + assert.Equal(peer.FSM.Current(), PeerStatePending) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, mockTask, mockHost) + peerManager, err := newPeerManager(mockPeerGCConfig, gc) + if err != nil { + t.Fatal(err) + } + + tc.expect(t, peerManager, mockPeer) + }) + } +} diff --git a/scheduler/resource/peer_test.go b/scheduler/resource/peer_test.go new file mode 100644 index 00000000000..0b760b5e344 --- /dev/null +++ b/scheduler/resource/peer_test.go @@ -0,0 +1,1002 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package resource + +import ( + "context" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/internal/dferrors" + "d7y.io/dragonfly/v2/pkg/idgen" + "d7y.io/dragonfly/v2/pkg/rpc/base" + "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" +) + +var ( + mockPeerID = idgen.PeerID("127.0.0.1") +) + +func TestPeer_NewPeer(t *testing.T) { + tests := []struct { + name string + id string + expect func(t *testing.T, peer *Peer, mockTask *Task, mockHost *Host) + }{ + { + name: "new peer", + id: mockPeerID, + expect: func(t *testing.T, peer *Peer, mockTask *Task, mockHost *Host) { + assert := assert.New(t) + assert.Equal(peer.ID, mockPeerID) + assert.Empty(peer.Pieces) + assert.Equal(len(peer.PieceCosts()), 0) + assert.Empty(peer.Stream) + assert.Empty(peer.StopChannel) + assert.Equal(peer.FSM.Current(), PeerStatePending) + assert.EqualValues(peer.Task, mockTask) + assert.EqualValues(peer.Host, mockHost) + assert.Empty(peer.Parent) + assert.Empty(peer.Children) + assert.NotEqual(peer.CreateAt.Load(), 0) + assert.NotEqual(peer.UpdateAt.Load(), 0) + assert.NotNil(peer.Log) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + tc.expect(t, NewPeer(tc.id, mockTask, mockHost), mockTask, mockHost) + }) + } +} + +func TestPeer_LoadChild(t *testing.T) { + tests := []struct { + name string + childID string + expect func(t *testing.T, peer *Peer, childID string) + }{ + { + name: "load child", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, childID string) { + assert := assert.New(t) + child, ok := peer.LoadChild(childID) + assert.Equal(ok, true) + assert.Equal(child.ID, childID) + }, + }, + { + name: "child does not exist", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t 
*testing.T, peer *Peer, childID string) { + assert := assert.New(t) + _, ok := peer.LoadChild(idgen.PeerID("0.0.0.0")) + assert.Equal(ok, false) + }, + }, + { + name: "load key is empty", + childID: "", + expect: func(t *testing.T, peer *Peer, childID string) { + assert := assert.New(t) + child, ok := peer.LoadChild(childID) + assert.Equal(ok, true) + assert.Equal(child.ID, childID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockChildPeer := NewPeer(tc.childID, mockTask, mockHost) + peer := NewPeer(mockPeerID, mockTask, mockHost) + + peer.StoreChild(mockChildPeer) + tc.expect(t, peer, tc.childID) + }) + } +} + +func TestPeer_StoreChild(t *testing.T) { + tests := []struct { + name string + childID string + expect func(t *testing.T, peer *Peer, childID string) + }{ + { + name: "store child", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, childID string) { + assert := assert.New(t) + + var ( + parent *Peer + child *Peer + ok bool + ) + child, ok = peer.LoadChild(childID) + assert.Equal(ok, true) + assert.Equal(child.ID, childID) + child, ok = peer.Host.LoadPeer(childID) + assert.Equal(ok, true) + assert.Equal(child.ID, childID) + child, ok = peer.Task.LoadPeer(childID) + assert.Equal(ok, true) + assert.Equal(child.ID, childID) + parent, ok = child.LoadParent() + assert.Equal(ok, true) + assert.Equal(parent.ID, peer.ID) + }, + }, + { + name: "store key is empty", + childID: "", + expect: func(t *testing.T, peer *Peer, childID string) { + assert := assert.New(t) + + var ( + parent *Peer + child *Peer + ok bool + ) + child, ok = peer.LoadChild(childID) + assert.Equal(ok, true) + assert.Equal(child.ID, childID) + child, ok = peer.Host.LoadPeer(childID) + assert.Equal(ok, true) + assert.Equal(child.ID, childID) + child, ok = peer.Task.LoadPeer(childID) + assert.Equal(ok, true) + assert.Equal(child.ID, childID) + parent, ok = child.LoadParent() + assert.Equal(ok, true) + assert.Equal(parent.ID, peer.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockChildPeer := NewPeer(tc.childID, mockTask, mockHost) + peer := NewPeer(mockPeerID, mockTask, mockHost) + + peer.StoreChild(mockChildPeer) + tc.expect(t, peer, tc.childID) + }) + } +} + +func TestPeer_DeleteChild(t *testing.T) { + tests := []struct { + name string + childID string + expect func(t *testing.T, peer *Peer, mockChildPeer *Peer) + }{ + { + name: "delete child", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) { + peer.DeleteChild(mockChildPeer.ID) + assert := assert.New(t) + + var ok bool + _, ok = peer.LoadChild(mockChildPeer.ID) + assert.Equal(ok, false) + _, ok = peer.Host.LoadPeer(mockChildPeer.ID) + assert.Equal(ok, false) + _, ok = peer.Task.LoadPeer(mockChildPeer.ID) + assert.Equal(ok, false) + _, ok = mockChildPeer.LoadParent() + assert.Equal(ok, false) + }, + }, + { + name: "delete key does not exist", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) { + peer.DeleteChild("") + assert := assert.New(t) + + var ( + parent *Peer + child *Peer + ok bool + ) + child, ok = peer.LoadChild(mockChildPeer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, mockChildPeer.ID) + 
child, ok = peer.Host.LoadPeer(mockChildPeer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, mockChildPeer.ID) + child, ok = peer.Task.LoadPeer(mockChildPeer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, mockChildPeer.ID) + parent, ok = child.LoadParent() + assert.Equal(ok, true) + assert.Equal(parent.ID, peer.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockChildPeer := NewPeer(tc.childID, mockTask, mockHost) + peer := NewPeer(mockPeerID, mockTask, mockHost) + + peer.StoreChild(mockChildPeer) + tc.expect(t, peer, mockChildPeer) + }) + } +} + +func TestPeer_LenChildren(t *testing.T) { + tests := []struct { + name string + childID string + expect func(t *testing.T, peer *Peer, mockChildPeer *Peer) + }{ + { + name: "len children", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) { + assert := assert.New(t) + peer.StoreChild(mockChildPeer) + assert.Equal(peer.LenChildren(), 1) + mockChildPeer.ID = idgen.PeerID("0.0.0.0") + peer.StoreChild(mockChildPeer) + assert.Equal(peer.LenChildren(), 2) + peer.StoreChild(mockChildPeer) + assert.Equal(peer.LenChildren(), 2) + }, + }, + { + name: "child does not exist", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) { + assert := assert.New(t) + assert.Equal(peer.LenChildren(), 0) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockChildPeer := NewPeer(tc.childID, mockTask, mockHost) + peer := NewPeer(mockPeerID, mockTask, mockHost) + + tc.expect(t, peer, mockChildPeer) + }) + } +} + +func TestPeer_LoadParent(t *testing.T) { + tests := []struct { + name string + parentID string + expect func(t *testing.T, peer *Peer, parentID string) + }{ + { + name: "load parent", + parentID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, parentID string) { + assert := assert.New(t) + parent, ok := peer.LoadParent() + assert.Equal(ok, true) + assert.Equal(parent.ID, parentID) + }, + }, + { + name: "parent does not exist", + parentID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, parentID string) { + assert := assert.New(t) + _, ok := peer.LoadChild(idgen.PeerID("0.0.0.0")) + assert.Equal(ok, false) + }, + }, + { + name: "load key is empty", + parentID: "", + expect: func(t *testing.T, peer *Peer, parentID string) { + assert := assert.New(t) + parent, ok := peer.LoadParent() + assert.Equal(ok, true) + assert.Equal(parent.ID, parentID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockParentPeer := NewPeer(tc.parentID, mockTask, mockHost) + peer := NewPeer(mockPeerID, mockTask, mockHost) + + peer.StoreParent(mockParentPeer) + tc.expect(t, peer, tc.parentID) + }) + } +} + +func TestPeer_StoreParent(t *testing.T) { + tests := []struct { + name string + parentID string + expect func(t *testing.T, peer *Peer, parentID string) + }{ + { + name: "store parent", + parentID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, parentID string) { + assert := assert.New(t) + + var ( + parent *Peer + child *Peer + ok bool + ) 
+ parent, ok = peer.LoadParent() + assert.Equal(ok, true) + assert.Equal(parent.ID, parentID) + child, ok = parent.LoadChild(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + child, ok = peer.Task.LoadPeer(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + child, ok = peer.Host.LoadPeer(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + }, + }, + { + name: "store key is empty", + parentID: "", + expect: func(t *testing.T, peer *Peer, parentID string) { + assert := assert.New(t) + + var ( + parent *Peer + child *Peer + ok bool + ) + parent, ok = peer.LoadParent() + assert.Equal(ok, true) + assert.Equal(parent.ID, parentID) + child, ok = parent.LoadChild(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + child, ok = peer.Task.LoadPeer(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + child, ok = peer.Host.LoadPeer(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockParentPeer := NewPeer(tc.parentID, mockTask, mockHost) + peer := NewPeer(mockPeerID, mockTask, mockHost) + + peer.StoreParent(mockParentPeer) + tc.expect(t, peer, tc.parentID) + }) + } +} + +func TestPeer_DeleteParent(t *testing.T) { + tests := []struct { + name string + parentID string + expect func(t *testing.T, peer *Peer, mockParentPeer *Peer) + }{ + { + name: "delete parent", + parentID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockParentPeer *Peer) { + peer.StoreParent(mockParentPeer) + peer.DeleteParent() + assert := assert.New(t) + + var ok bool + _, ok = peer.LoadParent() + assert.Equal(ok, false) + _, ok = mockParentPeer.LoadChild(peer.ID) + assert.Equal(ok, false) + _, ok = mockParentPeer.Task.LoadPeer(peer.ID) + assert.Equal(ok, false) + _, ok = mockParentPeer.Host.LoadPeer(peer.ID) + assert.Equal(ok, false) + }, + }, + { + name: "parent does not exist", + parentID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockParentPeer *Peer) { + peer.DeleteParent() + assert := assert.New(t) + + var ok bool + _, ok = peer.LoadParent() + assert.Equal(ok, false) + _, ok = mockParentPeer.LoadChild(peer.ID) + assert.Equal(ok, false) + _, ok = mockParentPeer.Task.LoadPeer(peer.ID) + assert.Equal(ok, false) + _, ok = mockParentPeer.Host.LoadPeer(peer.ID) + assert.Equal(ok, false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockParentPeer := NewPeer(tc.parentID, mockTask, mockHost) + peer := NewPeer(mockPeerID, mockTask, mockHost) + + tc.expect(t, peer, mockParentPeer) + }) + } +} + +func TestPeer_ReplaceParent(t *testing.T) { + tests := []struct { + name string + oldParentID string + newParentID string + expect func(t *testing.T, peer *Peer, mockOldParentPeer *Peer, mockNewParentPeer *Peer) + }{ + { + name: "replace parent", + oldParentID: idgen.PeerID("127.0.0.1"), + newParentID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockOldParentPeer *Peer, mockNewParentPeer *Peer) { + peer.StoreParent(mockOldParentPeer) + peer.ReplaceParent(mockNewParentPeer) + assert := assert.New(t) + + var ( + parent *Peer + child *Peer + ok bool + ) + parent, ok = peer.LoadParent() + 
assert.Equal(ok, true) + assert.Equal(parent.ID, mockNewParentPeer.ID) + _, ok = mockOldParentPeer.LoadChild(peer.ID) + assert.Equal(ok, false) + child, ok = mockNewParentPeer.LoadChild(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + child, ok = mockNewParentPeer.Task.LoadPeer(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + child, ok = mockNewParentPeer.Host.LoadPeer(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + }, + }, + { + name: "old parent does not exist", + oldParentID: idgen.PeerID("127.0.0.1"), + newParentID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockOldParentPeer *Peer, mockNewParentPeer *Peer) { + peer.ReplaceParent(mockNewParentPeer) + assert := assert.New(t) + + var ( + parent *Peer + child *Peer + ok bool + ) + parent, ok = peer.LoadParent() + assert.Equal(ok, true) + assert.Equal(parent.ID, mockNewParentPeer.ID) + _, ok = mockOldParentPeer.LoadChild(peer.ID) + assert.Equal(ok, false) + child, ok = mockNewParentPeer.LoadChild(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + child, ok = mockNewParentPeer.Task.LoadPeer(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + child, ok = mockNewParentPeer.Host.LoadPeer(peer.ID) + assert.Equal(ok, true) + assert.Equal(child.ID, peer.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockOldParentPeer := NewPeer(tc.oldParentID, mockTask, mockHost) + mockNewParentPeer := NewPeer(tc.newParentID, mockTask, mockHost) + peer := NewPeer(mockPeerID, mockTask, mockHost) + + tc.expect(t, peer, mockOldParentPeer, mockNewParentPeer) + }) + } +} + +func TestPeer_TreeTotalNodeCount(t *testing.T) { + tests := []struct { + name string + childID string + expect func(t *testing.T, peer *Peer, mockChildPeer *Peer) + }{ + { + name: "get tree total node count", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) { + assert := assert.New(t) + peer.StoreChild(mockChildPeer) + assert.Equal(peer.TreeTotalNodeCount(), 2) + mockChildPeer.ID = idgen.PeerID("0.0.0.0") + peer.StoreChild(mockChildPeer) + assert.Equal(peer.TreeTotalNodeCount(), 3) + }, + }, + { + name: "tree is empty", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) { + assert := assert.New(t) + assert.Equal(peer.TreeTotalNodeCount(), 1) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockChildPeer := NewPeer(tc.childID, mockTask, mockHost) + peer := NewPeer(mockPeerID, mockTask, mockHost) + + tc.expect(t, peer, mockChildPeer) + }) + } +} + +func TestPeer_IsDescendant(t *testing.T) { + tests := []struct { + name string + childID string + expect func(t *testing.T, peer *Peer, mockChildPeer *Peer) + }{ + { + name: "child is descendant", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) { + assert := assert.New(t) + peer.StoreChild(mockChildPeer) + assert.Equal(mockChildPeer.IsDescendant(peer), true) + }, + }, + { + name: "child is not descendant", + childID: idgen.PeerID("127.0.0.1"), + expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) { + assert := assert.New(t) + 
peer.StoreChild(mockChildPeer)
+				assert.Equal(mockChildPeer.IsDescendant(mockChildPeer), false)
+			},
+		},
+		{
+			name:    "parent has no children",
+			childID: idgen.PeerID("127.0.0.1"),
+			expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) {
+				assert := assert.New(t)
+				assert.Equal(mockChildPeer.IsDescendant(peer), false)
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			mockHost := NewHost(mockRawHost)
+			mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta)
+			mockChildPeer := NewPeer(tc.childID, mockTask, mockHost)
+			peer := NewPeer(mockPeerID, mockTask, mockHost)
+
+			tc.expect(t, peer, mockChildPeer)
+		})
+	}
+}
+
+func TestPeer_IsAncestor(t *testing.T) {
+	tests := []struct {
+		name    string
+		childID string
+		expect  func(t *testing.T, peer *Peer, mockChildPeer *Peer)
+	}{
+		{
+			name:    "parent is ancestor",
+			childID: idgen.PeerID("127.0.0.1"),
+			expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) {
+				assert := assert.New(t)
+				peer.StoreChild(mockChildPeer)
+				assert.Equal(peer.IsAncestor(mockChildPeer), true)
+			},
+		},
+		{
+			name:    "parent is not ancestor",
+			childID: idgen.PeerID("127.0.0.1"),
+			expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) {
+				assert := assert.New(t)
+				peer.StoreChild(mockChildPeer)
+				assert.Equal(mockChildPeer.IsAncestor(peer), false)
+			},
+		},
+		{
+			name:    "child has no parent",
+			childID: idgen.PeerID("127.0.0.1"),
+			expect: func(t *testing.T, peer *Peer, mockChildPeer *Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.IsAncestor(mockChildPeer), false)
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			mockHost := NewHost(mockRawHost)
+			mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta)
+			mockChildPeer := NewPeer(tc.childID, mockTask, mockHost)
+			peer := NewPeer(mockPeerID, mockTask, mockHost)
+
+			tc.expect(t, peer, mockChildPeer)
+		})
+	}
+}
+
+func TestPeer_AppendPieceCost(t *testing.T) {
+	tests := []struct {
+		name   string
+		expect func(t *testing.T, peer *Peer)
+	}{
+		{
+			name: "append piece cost",
+			expect: func(t *testing.T, peer *Peer) {
+				assert := assert.New(t)
+				peer.AppendPieceCost(1)
+				costs := peer.PieceCosts()
+				assert.Equal(costs[0], int64(1))
+			},
+		},
+		{
+			name: "piece costs slice is empty",
+			expect: func(t *testing.T, peer *Peer) {
+				assert := assert.New(t)
+				costs := peer.PieceCosts()
+				assert.Equal(len(costs), 0)
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			mockHost := NewHost(mockRawHost)
+			mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta)
+			peer := NewPeer(mockPeerID, mockTask, mockHost)
+
+			tc.expect(t, peer)
+		})
+	}
+}
+
+func TestPeer_PieceCosts(t *testing.T) {
+	tests := []struct {
+		name   string
+		expect func(t *testing.T, peer *Peer)
+	}{
+		{
+			name: "piece costs slice is not empty",
+			expect: func(t *testing.T, peer *Peer) {
+				assert := assert.New(t)
+				peer.AppendPieceCost(1)
+				costs := peer.PieceCosts()
+				assert.Equal(costs[0], int64(1))
+			},
+		},
+		{
+			name: "piece costs slice is empty",
+			expect: func(t *testing.T, peer *Peer) {
+				assert := assert.New(t)
+				costs := peer.PieceCosts()
+				assert.Equal(len(costs), 0)
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			mockHost := NewHost(mockRawHost)
+			mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta)
+			peer := NewPeer(mockPeerID, mockTask, mockHost)
+
+			tc.expect(t, peer)
+		})
+	
} +} + +func TestPeer_LoadStream(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) + }{ + { + name: "load stream", + expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + assert := assert.New(t) + peer.StoreStream(stream) + newStream, ok := peer.LoadStream() + assert.Equal(ok, true) + assert.EqualValues(newStream, stream) + }, + }, + { + name: "stream does not exist", + expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + assert := assert.New(t) + _, ok := peer.LoadStream() + assert.Equal(ok, false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) + + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + peer := NewPeer(mockPeerID, mockTask, mockHost) + tc.expect(t, peer, stream) + }) + } +} + +func TestPeer_StoreStream(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) + }{ + { + name: "store stream", + expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + assert := assert.New(t) + peer.StoreStream(stream) + newStream, ok := peer.LoadStream() + assert.Equal(ok, true) + assert.EqualValues(newStream, stream) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) + + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + peer := NewPeer(mockPeerID, mockTask, mockHost) + tc.expect(t, peer, stream) + }) + } +} + +func TestPeer_DeleteStream(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) + }{ + { + name: "delete stream", + expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + assert := assert.New(t) + peer.StoreStream(stream) + peer.DeleteStream() + _, ok := peer.LoadStream() + assert.Equal(ok, false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) + + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + peer := NewPeer(mockPeerID, mockTask, mockHost) + tc.expect(t, peer, stream) + }) + } +} + +func TestPeer_StopStream(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) + }{ + { + name: "stop stream with scheduling error", + expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + assert := assert.New(t) + peer.StoreStream(stream) + ok := peer.StopStream(dferrors.New(base.Code_SchedError, "")) + assert.Equal(ok, true) + _, ok = peer.LoadStream() + assert.Equal(ok, false) + + select { + case dferr := <-peer.StopChannel: + assert.Equal(dferr.Code, base.Code_SchedError) + assert.Equal(dferr.Message, "") + default: + assert.Fail("stop channel can not receive error") + } + }, + 
}, + { + name: "stop stream with empty stream", + expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + assert := assert.New(t) + ok := peer.StopStream(dferrors.New(base.Code_SchedError, "")) + assert.Equal(ok, false) + }, + }, + { + name: "stop stream with channel busy", + expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + assert := assert.New(t) + peer.StoreStream(stream) + peer.StopChannel <- dferrors.New(base.Code_SchedError, "") + ok := peer.StopStream(dferrors.New(base.Code_SchedError, "")) + assert.Equal(ok, false) + _, ok = peer.LoadStream() + assert.Equal(ok, false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) + + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + peer := NewPeer(mockPeerID, mockTask, mockHost) + tc.expect(t, peer, stream) + }) + } +} + +func TestPeer_DownloadTinyFile(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer s.Close() + + tests := []struct { + name string + expect func(t *testing.T, peer *Peer) + }{ + { + name: "download tiny file", + expect: func(t *testing.T, peer *Peer) { + assert := assert.New(t) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + _, err := peer.DownloadTinyFile(ctx) + assert.NoError(err) + }, + }, + { + name: "download tiny file failed because of port error", + expect: func(t *testing.T, peer *Peer) { + assert := assert.New(t) + peer.Host.DownloadPort = 8000 + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + _, err := peer.DownloadTinyFile(ctx) + assert.Error(err) + }, + }, + { + name: "download tiny file failed because of ip error", + expect: func(t *testing.T, peer *Peer) { + assert := assert.New(t) + peer.Host.IP = "127.0.0.2" + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + _, err := peer.DownloadTinyFile(ctx) + assert.Error(err) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + url, err := url.Parse(s.URL) + if err != nil { + t.Fatal(err) + } + + ip, rawPort, err := net.SplitHostPort(url.Host) + if err != nil { + t.Fatal(err) + } + + port, err := strconv.ParseInt(rawPort, 10, 32) + if err != nil { + t.Fatal(err) + } + + mockRawHost.Ip = ip + mockRawHost.DownPort = int32(port) + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + peer := NewPeer(mockPeerID, mockTask, mockHost) + tc.expect(t, peer) + }) + } +} diff --git a/scheduler/resource/resource.go b/scheduler/resource/resource.go new file mode 100644 index 00000000000..474d9fb77a3 --- /dev/null +++ b/scheduler/resource/resource.go @@ -0,0 +1,98 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package resource + +import ( + "google.golang.org/grpc" + + "d7y.io/dragonfly/v2/pkg/gc" + "d7y.io/dragonfly/v2/scheduler/config" +) + +type Resource interface { + // CDN interface + CDN() CDN + + // Host manager interface + HostManager() HostManager + + // Peer manager interface + PeerManager() PeerManager + + // Task manager interface + TaskManager() TaskManager +} + +type resource struct { + // CDN interface + cdn CDN + + // Host manager interface + hostManager HostManager + + // Peer manager interface + peerManager PeerManager + + // Task manager interface + taskManager TaskManager +} + +func New(cfg *config.Config, gc gc.GC, dynconfig config.DynconfigInterface, opts ...grpc.DialOption) (Resource, error) { + // Initialize host manager interface + hostManager := newHostManager() + + // Initialize task manager interface + taskManager, err := newTaskManager(cfg.Scheduler.GC, gc) + if err != nil { + return nil, err + } + + // Initialize peer manager interface + peerManager, err := newPeerManager(cfg.Scheduler.GC, gc) + if err != nil { + return nil, err + } + + // Initialize cdn interface + cdn, err := newCDN(peerManager, hostManager, dynconfig, opts...) + if err != nil { + return nil, err + } + + return &resource{ + cdn: cdn, + hostManager: hostManager, + peerManager: peerManager, + taskManager: taskManager, + }, nil +} + +func (r *resource) CDN() CDN { + return r.cdn +} + +func (r *resource) HostManager() HostManager { + return r.hostManager +} + +func (r *resource) TaskManager() TaskManager { + return r.taskManager +} + +func (r *resource) PeerManager() PeerManager { + return r.peerManager +} diff --git a/scheduler/resource/resource_mock.go b/scheduler/resource/resource_mock.go new file mode 100644 index 00000000000..ce205893b89 --- /dev/null +++ b/scheduler/resource/resource_mock.go @@ -0,0 +1,90 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: scheduler/resource/resource.go + +// Package resource is a generated GoMock package. +package resource + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockResource is a mock of Resource interface. +type MockResource struct { + ctrl *gomock.Controller + recorder *MockResourceMockRecorder +} + +// MockResourceMockRecorder is the mock recorder for MockResource. +type MockResourceMockRecorder struct { + mock *MockResource +} + +// NewMockResource creates a new mock instance. +func NewMockResource(ctrl *gomock.Controller) *MockResource { + mock := &MockResource{ctrl: ctrl} + mock.recorder = &MockResourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockResource) EXPECT() *MockResourceMockRecorder { + return m.recorder +} + +// CDN mocks base method. +func (m *MockResource) CDN() CDN { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CDN") + ret0, _ := ret[0].(CDN) + return ret0 +} + +// CDN indicates an expected call of CDN. 
+func (mr *MockResourceMockRecorder) CDN() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CDN", reflect.TypeOf((*MockResource)(nil).CDN)) +} + +// HostManager mocks base method. +func (m *MockResource) HostManager() HostManager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HostManager") + ret0, _ := ret[0].(HostManager) + return ret0 +} + +// HostManager indicates an expected call of HostManager. +func (mr *MockResourceMockRecorder) HostManager() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HostManager", reflect.TypeOf((*MockResource)(nil).HostManager)) +} + +// PeerManager mocks base method. +func (m *MockResource) PeerManager() PeerManager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PeerManager") + ret0, _ := ret[0].(PeerManager) + return ret0 +} + +// PeerManager indicates an expected call of PeerManager. +func (mr *MockResourceMockRecorder) PeerManager() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerManager", reflect.TypeOf((*MockResource)(nil).PeerManager)) +} + +// TaskManager mocks base method. +func (m *MockResource) TaskManager() TaskManager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TaskManager") + ret0, _ := ret[0].(TaskManager) + return ret0 +} + +// TaskManager indicates an expected call of TaskManager. +func (mr *MockResourceMockRecorder) TaskManager() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TaskManager", reflect.TypeOf((*MockResource)(nil).TaskManager)) +} diff --git a/scheduler/resource/resource_test.go b/scheduler/resource/resource_test.go new file mode 100644 index 00000000000..bb3d1cb419d --- /dev/null +++ b/scheduler/resource/resource_test.go @@ -0,0 +1,122 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package resource
+
+import (
+	"errors"
+	"reflect"
+	"testing"
+
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/assert"
+
+	"d7y.io/dragonfly/v2/pkg/gc"
+	"d7y.io/dragonfly/v2/scheduler/config"
+	configmocks "d7y.io/dragonfly/v2/scheduler/config/mocks"
+)
+
+func TestResource_New(t *testing.T) {
+	tests := []struct {
+		name   string
+		mock   func(gc *gc.MockGCMockRecorder, dynconfig *configmocks.MockDynconfigInterfaceMockRecorder)
+		expect func(t *testing.T, resource Resource, err error)
+	}{
+		{
+			name: "new resource",
+			mock: func(gc *gc.MockGCMockRecorder, dynconfig *configmocks.MockDynconfigInterfaceMockRecorder) {
+				gomock.InOrder(
+					gc.Add(gomock.Any()).Return(nil).Times(2),
+					dynconfig.Get().Return(&config.DynconfigData{
+						CDNs: []*config.CDN{{ID: 1}},
+					}, nil).Times(1),
+					dynconfig.Register(gomock.Any()).Return().Times(1),
+				)
+			},
+			expect: func(t *testing.T, resource Resource, err error) {
+				assert := assert.New(t)
+				assert.Equal(reflect.TypeOf(resource).Elem().Name(), "resource")
+				assert.NoError(err)
+			},
+		},
+		{
+			name: "new resource failed because of task manager error",
+			mock: func(gc *gc.MockGCMockRecorder, dynconfig *configmocks.MockDynconfigInterfaceMockRecorder) {
+				gomock.InOrder(
+					gc.Add(gomock.Any()).Return(errors.New("foo")).Times(1),
+				)
+			},
+			expect: func(t *testing.T, resource Resource, err error) {
+				assert := assert.New(t)
+				assert.EqualError(err, "foo")
+			},
+		},
+		{
+			name: "new resource failed because of peer manager error",
+			mock: func(gc *gc.MockGCMockRecorder, dynconfig *configmocks.MockDynconfigInterfaceMockRecorder) {
+				gomock.InOrder(
+					gc.Add(gomock.Any()).Return(nil).Times(1),
+					gc.Add(gomock.Any()).Return(errors.New("foo")).Times(1),
+				)
+			},
+			expect: func(t *testing.T, resource Resource, err error) {
+				assert := assert.New(t)
+				assert.EqualError(err, "foo")
+			},
+		},
+		{
+			name: "new resource failed because of dynconfig get error",
+			mock: func(gc *gc.MockGCMockRecorder, dynconfig *configmocks.MockDynconfigInterfaceMockRecorder) {
+				gomock.InOrder(
+					gc.Add(gomock.Any()).Return(nil).Times(2),
+					dynconfig.Get().Return(nil, errors.New("foo")).Times(1),
+				)
+			},
+			expect: func(t *testing.T, resource Resource, err error) {
+				assert := assert.New(t)
+				assert.EqualError(err, "foo")
+			},
+		},
+		{
+			name: "new resource failed because of cdn list is empty",
+			mock: func(gc *gc.MockGCMockRecorder, dynconfig *configmocks.MockDynconfigInterfaceMockRecorder) {
+				gomock.InOrder(
+					gc.Add(gomock.Any()).Return(nil).Times(2),
+					dynconfig.Get().Return(&config.DynconfigData{
+						CDNs: []*config.CDN{},
+					}, nil).Times(1),
+				)
+			},
+			expect: func(t *testing.T, resource Resource, err error) {
+				assert := assert.New(t)
+				assert.EqualError(err, "address list of cdn is empty")
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			ctl := gomock.NewController(t)
+			defer ctl.Finish()
+			gc := gc.NewMockGC(ctl)
+			dynconfig := configmocks.NewMockDynconfigInterface(ctl)
+			tc.mock(gc.EXPECT(), dynconfig.EXPECT())
+
+			resource, err := New(config.New(), gc, dynconfig)
+			tc.expect(t, resource, err)
+		})
+	}
+}
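For context, a minimal sketch of how a caller might consume the Resource facade exercised by TestResource_New above; the registerPeer helper is hypothetical and not part of this change.

package resource

// registerPeer is a hypothetical helper: managers are reached only through
// the Resource interface, and LoadOrStore deduplicates by ID so concurrent
// registrations of the same task or peer converge on a single instance.
func registerPeer(resource Resource, task *Task, peer *Peer) *Peer {
	task, _ = resource.TaskManager().LoadOrStore(task)
	peer, _ = resource.PeerManager().LoadOrStore(peer)
	task.StorePeer(peer)
	return peer
}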
diff --git a/scheduler/resource/task.go b/scheduler/resource/task.go
new file mode 100644
index 00000000000..4c0e4f8bdb1
--- /dev/null
+++ b/scheduler/resource/task.go
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resource
+
+import (
+	"sync"
+	"time"
+
+	"github.com/looplab/fsm"
+	"go.uber.org/atomic"
+
+	logger "d7y.io/dragonfly/v2/internal/dflog"
+	"d7y.io/dragonfly/v2/pkg/container/set"
+	"d7y.io/dragonfly/v2/pkg/rpc/base"
+)
+
+const (
+	// Tiny file size is 128 bytes
+	TinyFileSize = 128
+)
+
+const (
+	// Task has been created but has not started running
+	TaskStatePending = "Pending"
+
+	// Task is downloading resources from CDN or back-to-source
+	TaskStateRunning = "Running"
+
+	// Task has been downloaded successfully
+	TaskStateSucceeded = "Succeeded"
+
+	// Task failed to download
+	TaskStateFailed = "Failed"
+)
+
+const (
+	// Task is downloading
+	TaskEventDownload = "Download"
+
+	// Task downloaded successfully
+	TaskEventDownloadSucceeded = "DownloadSucceeded"
+
+	// Task download failed
+	TaskEventDownloadFailed = "DownloadFailed"
+)
+
+type Task struct {
+	// ID is task id
+	ID string
+
+	// URL is task download url
+	URL string
+
+	// URLMeta is task download url meta
+	URLMeta *base.UrlMeta
+
+	// DirectPiece is tiny piece data
+	DirectPiece []byte
+
+	// ContentLength is task total content length
+	ContentLength *atomic.Int64
+
+	// TotalPieceCount is total piece count
+	TotalPieceCount *atomic.Int32
+
+	// BackToSourceLimit is back-to-source limit
+	BackToSourceLimit *atomic.Int32
+
+	// BackToSourcePeers is back-to-source sync map
+	BackToSourcePeers set.SafeSet
+
+	// Task state machine
+	FSM *fsm.FSM
+
+	// Piece sync map
+	Pieces *sync.Map
+
+	// Peer sync map
+	Peers *sync.Map
+
+	// CreateAt is task create time
+	CreateAt *atomic.Time
+
+	// UpdateAt is task update time
+	UpdateAt *atomic.Time
+
+	// Task log
+	Log *logger.SugaredLoggerOnWith
+}
+
+// NewTask creates a new task instance
+func NewTask(id, url string, backToSourceLimit int, meta *base.UrlMeta) *Task {
+	t := &Task{
+		ID:                id,
+		URL:               url,
+		URLMeta:           meta,
+		ContentLength:     atomic.NewInt64(0),
+		TotalPieceCount:   atomic.NewInt32(0),
+		BackToSourceLimit: atomic.NewInt32(int32(backToSourceLimit)),
+		BackToSourcePeers: set.NewSafeSet(),
+		Pieces:            &sync.Map{},
+		Peers:             &sync.Map{},
+		CreateAt:          atomic.NewTime(time.Now()),
+		UpdateAt:          atomic.NewTime(time.Now()),
+		Log:               logger.WithTaskIDAndURL(id, url),
+	}
+
+	// Initialize state machine
+	t.FSM = fsm.NewFSM(
+		TaskStatePending,
+		fsm.Events{
+			{Name: TaskEventDownload, Src: []string{TaskStatePending, TaskStateFailed}, Dst: TaskStateRunning},
+			{Name: TaskEventDownloadSucceeded, Src: []string{TaskStateRunning, TaskStateFailed}, Dst: TaskStateSucceeded},
+			{Name: TaskEventDownloadFailed, Src: []string{TaskStateRunning}, Dst: TaskStateFailed},
+		},
+		fsm.Callbacks{
+			TaskEventDownload: func(e *fsm.Event) {
+				t.UpdateAt.Store(time.Now())
+				t.Log.Infof("task state is %s", e.FSM.Current())
+			},
+			TaskEventDownloadSucceeded: func(e *fsm.Event) {
+				t.UpdateAt.Store(time.Now())
+				t.Log.Infof("task state is %s", e.FSM.Current())
+			},
+			TaskEventDownloadFailed: func(e *fsm.Event) {
+				t.UpdateAt.Store(time.Now())
+				t.Log.Infof("task state is %s", e.FSM.Current())
+			},
+		},
+	)
+
+	return t
+}
+
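For context, a small sketch of driving the state machine that NewTask builds; taskFSMSketch is hypothetical and not part of this change, and it assumes the looplab/fsm version vendored here, where Event takes the event name and returns an error for transitions not defined from the current state.

package resource

import "d7y.io/dragonfly/v2/pkg/rpc/base"

// taskFSMSketch walks the transitions declared in NewTask. The constants
// are real; the function itself is illustrative.
func taskFSMSketch() error {
	t := NewTask("task-id", "https://example.com/data", 10, &base.UrlMeta{})

	_ = t.FSM.Event(TaskEventDownload)          // Pending -> Running
	_ = t.FSM.Event(TaskEventDownloadFailed)    // Running -> Failed
	_ = t.FSM.Event(TaskEventDownload)          // Failed  -> Running (retry is allowed)
	_ = t.FSM.Event(TaskEventDownloadSucceeded) // Running -> Succeeded

	// No Download transition is defined out of Succeeded, so this call
	// returns a non-nil error and the state stays Succeeded.
	return t.FSM.Event(TaskEventDownload)
}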
+// LoadPeer returns the peer for a key
+func (t *Task) LoadPeer(key string) (*Peer, bool) {
+	rawPeer, ok := t.Peers.Load(key)
+	if !ok {
+		return nil, false
+	}
+
+	return rawPeer.(*Peer), ok
+}
+
+// StorePeer sets the peer
+func (t *Task) StorePeer(peer *Peer) {
+	t.Peers.Store(peer.ID, peer)
+}
+
+// LoadOrStorePeer returns the peer for the key if present.
+// Otherwise, it stores and returns the given peer.
+// The loaded result is true if the peer was loaded, false if stored.
+func (t *Task) LoadOrStorePeer(peer *Peer) (*Peer, bool) {
+	rawPeer, loaded := t.Peers.LoadOrStore(peer.ID, peer)
+	return rawPeer.(*Peer), loaded
+}
+
+// DeletePeer deletes the peer for a key
+func (t *Task) DeletePeer(key string) {
+	t.Peers.Delete(key)
+}
+
+// LenPeers returns the length of the peers sync map
+func (t *Task) LenPeers() int {
+	var count int
+	t.Peers.Range(func(_, _ interface{}) bool {
+		count++
+		return true
+	})
+
+	return count
+}
+
+// LoadPiece returns the piece for a key
+func (t *Task) LoadPiece(key int32) (*base.PieceInfo, bool) {
+	rawPiece, ok := t.Pieces.Load(key)
+	if !ok {
+		return nil, false
+	}
+
+	return rawPiece.(*base.PieceInfo), ok
+}
+
+// StorePiece sets the piece
+func (t *Task) StorePiece(piece *base.PieceInfo) {
+	t.Pieces.Store(piece.PieceNum, piece)
+}
+
+// LoadOrStorePiece returns the piece for the key if present.
+// Otherwise, it stores and returns the given piece.
+// The loaded result is true if the piece was loaded, false if stored.
+func (t *Task) LoadOrStorePiece(piece *base.PieceInfo) (*base.PieceInfo, bool) {
+	rawPiece, loaded := t.Pieces.LoadOrStore(piece.PieceNum, piece)
+	return rawPiece.(*base.PieceInfo), loaded
+}
+
+// DeletePiece deletes the piece for a key
+func (t *Task) DeletePiece(key int32) {
+	t.Pieces.Delete(key)
+}
+
+// SizeScope returns the task size scope type
+func (t *Task) SizeScope() base.SizeScope {
+	if t.ContentLength.Load() <= TinyFileSize {
+		return base.SizeScope_TINY
+	}
+
+	if t.TotalPieceCount.Load() == 1 {
+		return base.SizeScope_SMALL
+	}
+
+	return base.SizeScope_NORMAL
+}
+
+// CanBackToSource reports whether the task can still accept back-to-source peers
+func (t *Task) CanBackToSource() bool {
+	return int32(t.BackToSourcePeers.Len()) < t.BackToSourceLimit.Load()
+}
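For context, a brief sketch of how SizeScope classifies a task from its counters, following the constants and logic above; the sizeScopeSketch helper is hypothetical and not part of this change.

package resource

import "d7y.io/dragonfly/v2/pkg/rpc/base"

// sizeScopeSketch only illustrates the thresholds; the values are arbitrary.
func sizeScopeSketch(t *Task) base.SizeScope {
	t.ContentLength.Store(100)
	_ = t.SizeScope() // base.SizeScope_TINY: content fits in a 128-byte DirectPiece

	t.ContentLength.Store(4 * 1024 * 1024)
	t.TotalPieceCount.Store(1)
	_ = t.SizeScope() // base.SizeScope_SMALL: exactly one piece

	t.TotalPieceCount.Store(8)
	return t.SizeScope() // base.SizeScope_NORMAL: multi-piece download
}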
diff --git a/scheduler/resource/task_manager.go b/scheduler/resource/task_manager.go
new file mode 100644
index 00000000000..b0ecf51a9df
--- /dev/null
+++ b/scheduler/resource/task_manager.go
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resource
+
+import (
+	"sync"
+	"time"
+
+	pkggc "d7y.io/dragonfly/v2/pkg/gc"
+	"d7y.io/dragonfly/v2/scheduler/config"
+)
+
+const (
+	// GC task id
+	GCTaskID = "task"
+)
+
+type TaskManager interface {
+	// Load returns the task for a key
+	Load(string) (*Task, bool)
+
+	// Store sets the task
+	Store(*Task)
+
+	// LoadOrStore returns the task for the key if present.
+	// Otherwise, it stores and returns the given task.
+	// The loaded result is true if the task was loaded, false if stored.
+	LoadOrStore(*Task) (*Task, bool)
+
+	// Delete deletes the task for a key
+	Delete(string)
+
+	// RunGC tries to reclaim tasks
+	RunGC() error
+}
+
+type taskManager struct {
+	// Task sync map
+	*sync.Map
+
+	// Task time to live
+	ttl time.Duration
+}
+
+// newTaskManager creates a new task manager interface
+func newTaskManager(cfg *config.GCConfig, gc pkggc.GC) (TaskManager, error) {
+	t := &taskManager{
+		Map: &sync.Map{},
+		ttl: cfg.TaskTTL,
+	}
+
+	if err := gc.Add(pkggc.Task{
+		ID:       GCTaskID,
+		Interval: cfg.TaskGCInterval,
+		Timeout:  cfg.TaskGCInterval,
+		Runner:   t,
+	}); err != nil {
+		return nil, err
+	}
+
+	return t, nil
+}
+
+func (t *taskManager) Load(key string) (*Task, bool) {
+	rawTask, ok := t.Map.Load(key)
+	if !ok {
+		return nil, false
+	}
+
+	return rawTask.(*Task), ok
+}
+
+func (t *taskManager) Store(task *Task) {
+	t.Map.Store(task.ID, task)
+}
+
+func (t *taskManager) LoadOrStore(task *Task) (*Task, bool) {
+	rawTask, loaded := t.Map.LoadOrStore(task.ID, task)
+	return rawTask.(*Task), loaded
+}
+
+func (t *taskManager) Delete(key string) {
+	t.Map.Delete(key)
+}
+
+func (t *taskManager) RunGC() error {
+	t.Map.Range(func(_, value interface{}) bool {
+		task := value.(*Task)
+		elapsed := time.Since(task.UpdateAt.Load())
+
+		if elapsed > t.ttl && task.LenPeers() == 0 && !task.FSM.Is(TaskStateRunning) {
+			task.Log.Info("task has been reclaimed")
+			t.Delete(task.ID)
+		}
+
+		return true
+	})
+
+	return nil
+}
diff --git a/scheduler/resource/task_manager_mock.go b/scheduler/resource/task_manager_mock.go
new file mode 100644
index 00000000000..8ffacb56c8b
--- /dev/null
+++ b/scheduler/resource/task_manager_mock.go
@@ -0,0 +1,102 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: scheduler/resource/task_manager.go
+
+// Package resource is a generated GoMock package.
+package resource
+
+import (
+	reflect "reflect"
+
+	gomock "github.com/golang/mock/gomock"
+)
+
+// MockTaskManager is a mock of TaskManager interface.
+type MockTaskManager struct {
+	ctrl     *gomock.Controller
+	recorder *MockTaskManagerMockRecorder
+}
+
+// MockTaskManagerMockRecorder is the mock recorder for MockTaskManager.
+type MockTaskManagerMockRecorder struct {
+	mock *MockTaskManager
+}
+
+// NewMockTaskManager creates a new mock instance.
+func NewMockTaskManager(ctrl *gomock.Controller) *MockTaskManager {
+	mock := &MockTaskManager{ctrl: ctrl}
+	mock.recorder = &MockTaskManagerMockRecorder{mock}
+	return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockTaskManager) EXPECT() *MockTaskManagerMockRecorder {
+	return m.recorder
+}
+
+// Delete mocks base method.
+func (m *MockTaskManager) Delete(arg0 string) {
+	m.ctrl.T.Helper()
+	m.ctrl.Call(m, "Delete", arg0)
+}
+
+// Delete indicates an expected call of Delete.
+func (mr *MockTaskManagerMockRecorder) Delete(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockTaskManager)(nil).Delete), arg0)
+}
+
+// Load mocks base method.
+func (m *MockTaskManager) Load(arg0 string) (*Task, bool) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Load", arg0)
+	ret0, _ := ret[0].(*Task)
+	ret1, _ := ret[1].(bool)
+	return ret0, ret1
+}
+
+// Load indicates an expected call of Load.
+func (mr *MockTaskManagerMockRecorder) Load(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockTaskManager)(nil).Load), arg0)
+}
+
+// LoadOrStore mocks base method.
+func (m *MockTaskManager) LoadOrStore(arg0 *Task) (*Task, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadOrStore", arg0) + ret0, _ := ret[0].(*Task) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// LoadOrStore indicates an expected call of LoadOrStore. +func (mr *MockTaskManagerMockRecorder) LoadOrStore(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadOrStore", reflect.TypeOf((*MockTaskManager)(nil).LoadOrStore), arg0) +} + +// RunGC mocks base method. +func (m *MockTaskManager) RunGC() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RunGC") + ret0, _ := ret[0].(error) + return ret0 +} + +// RunGC indicates an expected call of RunGC. +func (mr *MockTaskManagerMockRecorder) RunGC() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunGC", reflect.TypeOf((*MockTaskManager)(nil).RunGC)) +} + +// Store mocks base method. +func (m *MockTaskManager) Store(arg0 *Task) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Store", arg0) +} + +// Store indicates an expected call of Store. +func (mr *MockTaskManagerMockRecorder) Store(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Store", reflect.TypeOf((*MockTaskManager)(nil).Store), arg0) +} diff --git a/scheduler/resource/task_manager_test.go b/scheduler/resource/task_manager_test.go new file mode 100644 index 00000000000..49e65141200 --- /dev/null +++ b/scheduler/resource/task_manager_test.go @@ -0,0 +1,376 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package resource + +import ( + "errors" + "reflect" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/pkg/gc" + "d7y.io/dragonfly/v2/scheduler/config" +) + +var ( + mockTaskGCConfig = &config.GCConfig{ + TaskGCInterval: 1 * time.Second, + TaskTTL: 1 * time.Microsecond, + } +) + +func TestTaskManager_newTaskManager(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, taskManager TaskManager, err error) + }{ + { + name: "new task manager", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, err error) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(taskManager).Elem().Name(), "taskManager") + }, + }, + { + name: "new task manager failed because of gc error", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(errors.New("foo")).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, err error) { + assert := assert.New(t) + assert.EqualError(err, "foo") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + taskManager, err := newTaskManager(mockTaskGCConfig, gc) + tc.expect(t, taskManager, err) + }) + } +} + +func TestTaskManager_Load(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, taskManager TaskManager, mockTask *Task) + }{ + { + name: "load task", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task) { + assert := assert.New(t) + taskManager.Store(mockTask) + task, ok := taskManager.Load(mockTask.ID) + assert.Equal(ok, true) + assert.Equal(task.ID, mockTask.ID) + }, + }, + { + name: "task does not exist", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task) { + assert := assert.New(t) + _, ok := taskManager.Load(mockTask.ID) + assert.Equal(ok, false) + }, + }, + { + name: "load key is empty", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task) { + assert := assert.New(t) + mockTask.ID = "" + taskManager.Store(mockTask) + task, ok := taskManager.Load(mockTask.ID) + assert.Equal(ok, true) + assert.Equal(task.ID, mockTask.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + taskManager, err := newTaskManager(mockTaskGCConfig, gc) + if err != nil { + t.Fatal(err) + } + + tc.expect(t, taskManager, mockTask) + }) + } +} + +func TestTaskManager_Store(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, taskManager TaskManager, mockTask *Task) + }{ + { + name: "store task", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task) { + assert := assert.New(t) + taskManager.Store(mockTask) + task, ok := 
taskManager.Load(mockTask.ID) + assert.Equal(ok, true) + assert.Equal(task.ID, mockTask.ID) + }, + }, + { + name: "store key is empty", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task) { + assert := assert.New(t) + mockTask.ID = "" + taskManager.Store(mockTask) + task, ok := taskManager.Load(mockTask.ID) + assert.Equal(ok, true) + assert.Equal(task.ID, mockTask.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + taskManager, err := newTaskManager(mockTaskGCConfig, gc) + if err != nil { + t.Fatal(err) + } + + tc.expect(t, taskManager, mockTask) + }) + } +} + +func TestTaskManager_LoadOrStore(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, taskManager TaskManager, mockTask *Task) + }{ + { + name: "load task exist", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task) { + assert := assert.New(t) + taskManager.Store(mockTask) + task, ok := taskManager.LoadOrStore(mockTask) + assert.Equal(ok, true) + assert.Equal(task.ID, mockTask.ID) + }, + }, + { + name: "load task does not exist", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task) { + assert := assert.New(t) + task, ok := taskManager.LoadOrStore(mockTask) + assert.Equal(ok, false) + assert.Equal(task.ID, mockTask.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + taskManager, err := newTaskManager(mockTaskGCConfig, gc) + if err != nil { + t.Fatal(err) + } + + tc.expect(t, taskManager, mockTask) + }) + } +} + +func TestTaskManager_Delete(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, taskManager TaskManager, mockTask *Task) + }{ + { + name: "delete task", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task) { + assert := assert.New(t) + taskManager.Store(mockTask) + taskManager.Delete(mockTask.ID) + _, ok := taskManager.Load(mockTask.ID) + assert.Equal(ok, false) + }, + }, + { + name: "delete key does not exist", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task) { + assert := assert.New(t) + mockTask.ID = "" + taskManager.Store(mockTask) + taskManager.Delete(mockTask.ID) + _, ok := taskManager.Load(mockTask.ID) + assert.Equal(ok, false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + taskManager, err := newTaskManager(mockTaskGCConfig, gc) + if err != nil { + t.Fatal(err) + } + + 
tc.expect(t, taskManager, mockTask) + }) + } +} + +func TestTaskManager_RunGC(t *testing.T) { + tests := []struct { + name string + mock func(m *gc.MockGCMockRecorder) + expect func(t *testing.T, taskManager TaskManager, mockTask *Task, mockPeer *Peer) + }{ + { + name: "task reclaimed", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task, mockPeer *Peer) { + assert := assert.New(t) + taskManager.Store(mockTask) + err := taskManager.RunGC() + assert.NoError(err) + + _, ok := taskManager.Load(mockTask.ID) + assert.Equal(ok, false) + }, + }, + { + name: "task has peers", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task, mockPeer *Peer) { + assert := assert.New(t) + taskManager.Store(mockTask) + mockTask.StorePeer(mockPeer) + err := taskManager.RunGC() + assert.NoError(err) + + task, ok := taskManager.Load(mockTask.ID) + assert.Equal(ok, true) + assert.Equal(task.ID, mockTask.ID) + }, + }, + { + name: "task state is TaskStateRunning", + mock: func(m *gc.MockGCMockRecorder) { + m.Add(gomock.Any()).Return(nil).Times(1) + }, + expect: func(t *testing.T, taskManager TaskManager, mockTask *Task, mockPeer *Peer) { + assert := assert.New(t) + taskManager.Store(mockTask) + mockTask.FSM.SetState(TaskStateRunning) + err := taskManager.RunGC() + assert.NoError(err) + + task, ok := taskManager.Load(mockTask.ID) + assert.Equal(ok, true) + assert.Equal(task.ID, mockTask.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + gc := gc.NewMockGC(ctl) + tc.mock(gc.EXPECT()) + + mockHost := NewHost(mockRawHost) + mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, mockTask, mockHost) + taskManager, err := newTaskManager(mockTaskGCConfig, gc) + if err != nil { + t.Fatal(err) + } + + tc.expect(t, taskManager, mockTask, mockPeer) + }) + } +} diff --git a/scheduler/resource/task_test.go b/scheduler/resource/task_test.go new file mode 100644 index 00000000000..8fef6a30cb2 --- /dev/null +++ b/scheduler/resource/task_test.go @@ -0,0 +1,688 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package resource + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/pkg/idgen" + "d7y.io/dragonfly/v2/pkg/rpc/base" +) + +var ( + mockTaskURLMeta = &base.UrlMeta{ + Digest: "digest", + Tag: "tag", + Range: "range", + Filter: "filter", + Header: map[string]string{ + "content-length": "100", + }, + } + mockTaskURL = "http://example.com/foo" + mockTaskBackToSourceLimit = 200 + mockTaskID = idgen.TaskID(mockTaskURL, mockTaskURLMeta) + mockPieceInfo = &base.PieceInfo{ + PieceNum: 1, + RangeStart: 0, + RangeSize: 100, + PieceMd5: "ad83a945518a4ef007d8b2db2ef165b3", + PieceOffset: 10, + } +) + +func TestTask_NewTask(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + expect func(t *testing.T, task *Task) + }{ + { + name: "new task", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + expect: func(t *testing.T, task *Task) { + assert := assert.New(t) + assert.Equal(task.ID, mockTaskID) + assert.Equal(task.URL, mockTaskURL) + assert.EqualValues(task.URLMeta, mockTaskURLMeta) + assert.Empty(task.DirectPiece) + assert.Equal(task.ContentLength.Load(), int64(0)) + assert.Equal(task.TotalPieceCount.Load(), int32(0)) + assert.Equal(task.BackToSourceLimit.Load(), int32(200)) + assert.Equal(task.BackToSourcePeers.Len(), uint(0)) + assert.Equal(task.FSM.Current(), TaskStatePending) + assert.Empty(task.Pieces) + assert.Empty(task.Peers) + assert.NotEqual(task.CreateAt.Load(), 0) + assert.NotEqual(task.UpdateAt.Load(), 0) + assert.NotNil(task.Log) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tc.expect(t, NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta)) + }) + } +} + +func TestTask_LoadPeer(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + peerID string + expect func(t *testing.T, peer *Peer, ok bool) + }{ + { + name: "load peer", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + peerID: mockPeerID, + expect: func(t *testing.T, peer *Peer, ok bool) { + assert := assert.New(t) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeerID) + }, + }, + { + name: "peer does not exist", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + peerID: idgen.PeerID("0.0.0.0"), + expect: func(t *testing.T, peer *Peer, ok bool) { + assert := assert.New(t) + assert.Equal(ok, false) + }, + }, + { + name: "load key is empty", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + peerID: "", + expect: func(t *testing.T, peer *Peer, ok bool) { + assert := assert.New(t) + assert.Equal(ok, false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + task := NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta) + mockPeer := NewPeer(mockPeerID, task, mockHost) + + task.StorePeer(mockPeer) + peer, ok := task.LoadPeer(tc.peerID) + tc.expect(t, peer, ok) + }) + } +} + +func TestTask_StorePeer(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + peerID string + expect func(t *testing.T, peer *Peer, ok bool) + }{ + { + name: "store peer", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, 
+ backToSourceLimit: mockTaskBackToSourceLimit, + peerID: mockPeerID, + expect: func(t *testing.T, peer *Peer, ok bool) { + assert := assert.New(t) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeerID) + }, + }, + { + name: "store key is empty", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + peerID: "", + expect: func(t *testing.T, peer *Peer, ok bool) { + assert := assert.New(t) + assert.Equal(ok, true) + assert.Equal(peer.ID, "") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + task := NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta) + mockPeer := NewPeer(tc.peerID, task, mockHost) + + task.StorePeer(mockPeer) + peer, ok := task.LoadPeer(tc.peerID) + tc.expect(t, peer, ok) + }) + } +} + +func TestTask_LoadOrStorePeer(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + peerID string + expect func(t *testing.T, task *Task, mockPeer *Peer) + }{ + { + name: "load peer exist", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + peerID: mockPeerID, + expect: func(t *testing.T, task *Task, mockPeer *Peer) { + assert := assert.New(t) + peer, ok := task.LoadOrStorePeer(mockPeer) + + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeerID) + }, + }, + { + name: "load peer does not exist", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + peerID: mockPeerID, + expect: func(t *testing.T, task *Task, mockPeer *Peer) { + assert := assert.New(t) + mockPeer.ID = idgen.PeerID("0.0.0.0") + peer, ok := task.LoadOrStorePeer(mockPeer) + + assert.Equal(ok, false) + assert.Equal(peer.ID, mockPeer.ID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + task := NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta) + mockPeer := NewPeer(mockPeerID, task, mockHost) + + task.StorePeer(mockPeer) + tc.expect(t, task, mockPeer) + }) + } +} + +func TestTask_DeletePeer(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + peerID string + expect func(t *testing.T, task *Task) + }{ + { + name: "delete peer", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + peerID: mockPeerID, + expect: func(t *testing.T, task *Task) { + assert := assert.New(t) + _, ok := task.LoadPeer(mockPeerID) + assert.Equal(ok, false) + }, + }, + { + name: "delete key is empty", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + peerID: "", + expect: func(t *testing.T, task *Task) { + assert := assert.New(t) + peer, ok := task.LoadPeer(mockPeerID) + assert.Equal(ok, true) + assert.Equal(peer.ID, mockPeerID) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + task := NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta) + mockPeer := NewPeer(mockPeerID, task, mockHost) + + task.StorePeer(mockPeer) + task.DeletePeer(tc.peerID) + tc.expect(t, task) + }) + } +} + +func TestTask_LenPeers(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + expect func(t *testing.T, task *Task, mockPeer 
*Peer) + }{ + { + name: "len peers", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + expect: func(t *testing.T, task *Task, mockPeer *Peer) { + assert := assert.New(t) + task.StorePeer(mockPeer) + mockPeer.ID = idgen.PeerID("0.0.0.0") + task.StorePeer(mockPeer) + assert.Equal(task.LenPeers(), 2) + task.StorePeer(mockPeer) + assert.Equal(task.LenPeers(), 2) + }, + }, + { + name: "peer does not exist", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + expect: func(t *testing.T, task *Task, mockPeer *Peer) { + assert := assert.New(t) + assert.Equal(task.LenPeers(), 0) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := NewHost(mockRawHost) + task := NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := NewPeer(mockPeerID, task, mockHost) + + tc.expect(t, task, mockPeer) + }) + } +} + +func TestTask_LoadPiece(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + pieceInfo *base.PieceInfo + pieceNum int32 + expect func(t *testing.T, piece *base.PieceInfo, ok bool) + }{ + { + name: "load piece", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + pieceInfo: mockPieceInfo, + pieceNum: mockPieceInfo.PieceNum, + expect: func(t *testing.T, piece *base.PieceInfo, ok bool) { + assert := assert.New(t) + assert.Equal(ok, true) + assert.Equal(piece.PieceNum, mockPieceInfo.PieceNum) + }, + }, + { + name: "piece does not exist", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + pieceInfo: mockPieceInfo, + pieceNum: 2, + expect: func(t *testing.T, piece *base.PieceInfo, ok bool) { + assert := assert.New(t) + assert.Equal(ok, false) + }, + }, + { + name: "load key is zero", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + pieceInfo: mockPieceInfo, + pieceNum: 0, + expect: func(t *testing.T, piece *base.PieceInfo, ok bool) { + assert := assert.New(t) + assert.Equal(ok, false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + task := NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta) + + task.StorePiece(tc.pieceInfo) + piece, ok := task.LoadPiece(tc.pieceNum) + tc.expect(t, piece, ok) + }) + } +} + +func TestTask_StorePiece(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + pieceInfo *base.PieceInfo + pieceNum int32 + expect func(t *testing.T, piece *base.PieceInfo, ok bool) + }{ + { + name: "store piece", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + pieceInfo: mockPieceInfo, + pieceNum: mockPieceInfo.PieceNum, + expect: func(t *testing.T, piece *base.PieceInfo, ok bool) { + assert := assert.New(t) + assert.Equal(ok, true) + assert.Equal(piece.PieceNum, mockPieceInfo.PieceNum) + }, + }, + { + name: "store key is empty", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + pieceInfo: mockPieceInfo, + pieceNum: 0, + expect: func(t *testing.T, piece *base.PieceInfo, ok bool) { + assert := assert.New(t) + assert.Equal(ok, true) + assert.Equal(piece.PieceNum, int32(0)) + }, + }, + } + + for 
_, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + task := NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta) + + tc.pieceInfo.PieceNum = tc.pieceNum + task.StorePiece(tc.pieceInfo) + piece, ok := task.LoadPiece(tc.pieceNum) + tc.expect(t, piece, ok) + }) + } +} + +func TestTask_LoadOrStorePiece(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + pieceInfo *base.PieceInfo + pieceNum int32 + expect func(t *testing.T, task *Task, mockPiece *base.PieceInfo) + }{ + { + name: "load piece exist", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + pieceInfo: mockPieceInfo, + pieceNum: mockPieceInfo.PieceNum, + expect: func(t *testing.T, task *Task, mockPiece *base.PieceInfo) { + assert := assert.New(t) + peer, ok := task.LoadOrStorePiece(mockPiece) + + assert.Equal(ok, true) + assert.Equal(peer.PieceNum, mockPiece.PieceNum) + }, + }, + { + name: "load piece does not exist", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + pieceInfo: mockPieceInfo, + pieceNum: mockPieceInfo.PieceNum, + expect: func(t *testing.T, task *Task, mockPiece *base.PieceInfo) { + assert := assert.New(t) + mockPiece.PieceNum = 2 + peer, ok := task.LoadOrStorePiece(mockPiece) + + assert.Equal(ok, false) + assert.Equal(peer.PieceNum, mockPiece.PieceNum) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + task := NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta) + + task.StorePiece(tc.pieceInfo) + tc.expect(t, task, tc.pieceInfo) + }) + } +} + +func TestTask_DeletePiece(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + pieceInfo *base.PieceInfo + pieceNum int32 + expect func(t *testing.T, task *Task) + }{ + { + name: "delete piece", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + pieceInfo: mockPieceInfo, + pieceNum: mockPieceInfo.PieceNum, + expect: func(t *testing.T, task *Task) { + assert := assert.New(t) + _, ok := task.LoadPiece(mockPieceInfo.PieceNum) + assert.Equal(ok, false) + }, + }, + { + name: "delete key does not exist", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + pieceInfo: mockPieceInfo, + pieceNum: 0, + expect: func(t *testing.T, task *Task) { + assert := assert.New(t) + piece, ok := task.LoadPiece(mockPieceInfo.PieceNum) + assert.Equal(ok, true) + assert.Equal(piece.PieceNum, mockPieceInfo.PieceNum) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + task := NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta) + + task.StorePiece(tc.pieceInfo) + task.DeletePiece(tc.pieceNum) + tc.expect(t, task) + }) + } +} + +func TestTask_SizeScope(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + contentLength int64 + totalPieceCount int32 + expect func(t *testing.T, task *Task) + }{ + { + name: "scope size is tiny", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + contentLength: TinyFileSize, + totalPieceCount: 1, + expect: func(t *testing.T, task *Task) { + assert := assert.New(t) + assert.Equal(task.SizeScope(), base.SizeScope_TINY) + }, + }, + { + name: "scope size is 
small", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + contentLength: TinyFileSize + 1, + totalPieceCount: 1, + expect: func(t *testing.T, task *Task) { + assert := assert.New(t) + assert.Equal(task.SizeScope(), base.SizeScope_SMALL) + }, + }, + { + name: "scope size is normal", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: mockTaskBackToSourceLimit, + contentLength: TinyFileSize + 1, + totalPieceCount: 2, + expect: func(t *testing.T, task *Task) { + assert := assert.New(t) + assert.Equal(task.SizeScope(), base.SizeScope_NORMAL) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + task := NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta) + task.ContentLength.Store(tc.contentLength) + task.TotalPieceCount.Store(tc.totalPieceCount) + tc.expect(t, task) + }) + } +} + +func TestTask_CanBackToSource(t *testing.T) { + tests := []struct { + name string + id string + urlMeta *base.UrlMeta + url string + backToSourceLimit int + expect func(t *testing.T, task *Task) + }{ + { + name: "task can back-to-source", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: 1, + expect: func(t *testing.T, task *Task) { + assert := assert.New(t) + assert.Equal(task.CanBackToSource(), true) + }, + }, + { + name: "task can not base-to-source", + id: mockTaskID, + urlMeta: mockTaskURLMeta, + url: mockTaskURL, + backToSourceLimit: 0, + expect: func(t *testing.T, task *Task) { + assert := assert.New(t) + assert.Equal(task.CanBackToSource(), false) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + task := NewTask(tc.id, tc.url, tc.backToSourceLimit, tc.urlMeta) + tc.expect(t, task) + }) + } +} diff --git a/scheduler/rpcserver/rpcserver.go b/scheduler/rpcserver/rpcserver.go index 54c8968ac35..1b23adab475 100644 --- a/scheduler/rpcserver/rpcserver.go +++ b/scheduler/rpcserver/rpcserver.go @@ -21,102 +21,99 @@ import ( "fmt" "io" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" "google.golang.org/grpc" "d7y.io/dragonfly/v2/internal/dferrors" logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/idgen" + "d7y.io/dragonfly/v2/pkg/container/set" "d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/rpc/scheduler" schedulerserver "d7y.io/dragonfly/v2/pkg/rpc/scheduler/server" - "d7y.io/dragonfly/v2/scheduler/config" - "d7y.io/dragonfly/v2/scheduler/core" - "d7y.io/dragonfly/v2/scheduler/supervisor" + "d7y.io/dragonfly/v2/scheduler/resource" + "d7y.io/dragonfly/v2/scheduler/service" ) -var tracer = otel.Tracer("scheduler-server") - -type server struct { +type Server struct { *grpc.Server - service *core.SchedulerService + service service.Service } // New returns a new transparent scheduler server from the given options -func New(service *core.SchedulerService, opts ...grpc.ServerOption) (*grpc.Server, error) { - svr := &server{ +func New(service service.Service, opts ...grpc.ServerOption) *Server { + svr := &Server{ service: service, } svr.Server = schedulerserver.New(svr, opts...) 
- return svr.Server, nil + return svr +} -func (s *server) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskRequest) (*scheduler.RegisterResult, error) { - taskID := idgen.TaskID(req.Url, req.UrlMeta) - log := logger.WithTaskAndPeerID(taskID, req.PeerId) - log.Infof("register peer task, req: %#v", req) - - var span trace.Span - ctx, span = tracer.Start(ctx, config.SpanPeerRegister, trace.WithSpanKind(trace.SpanKindServer)) - defer span.End() - span.SetAttributes(config.AttributePeerRegisterRequest.String(req.String())) - span.SetAttributes(config.AttributeTaskID.String(taskID)) - +func (s *Server) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskRequest) (*scheduler.RegisterResult, error) { // Get task or add new task - task := s.service.GetOrAddTask(ctx, supervisor.NewTask(taskID, req.Url, req.UrlMeta)) - if task.IsFail() { - dferr := dferrors.New(base.Code_SchedTaskStatusError, "task status is fail") - log.Error(dferr.Message) - span.RecordError(dferr) + task, err := s.service.RegisterTask(ctx, req) + if err != nil { + dferr := dferrors.New(base.Code_SchedTaskStatusError, "register task failed") + logger.Errorf("peer %s register failed: %v", req.PeerId, err) return nil, dferr } + log := logger.WithTaskAndPeerID(task.ID, req.PeerId) + log.Infof("register peer task request: %#v", req) + // Task has been successful - if task.IsSuccess() { + if task.FSM.Is(resource.TaskStateSucceeded) { log.Info("task has been successful") - sizeScope := task.GetSizeScope() - span.SetAttributes(config.AttributeTaskSizeScope.String(sizeScope.String())) + sizeScope := task.SizeScope() switch sizeScope { case base.SizeScope_TINY: - // when task.DirectPiece length is 0, data is downloaded by common peers, is not cdn + log.Info("task size scope is tiny and return piece content directly") + // When task.DirectPiece length does not match ContentLength, the data was downloaded by common peers and the piece content can not be returned directly if int64(len(task.DirectPiece)) == task.ContentLength.Load() { - log.Info("task size scope is tiny and return piece content directly") return &scheduler.RegisterResult{ - TaskId: taskID, + TaskId: task.ID, SizeScope: sizeScope, DirectPiece: &scheduler.RegisterResult_PieceContent{ PieceContent: task.DirectPiece, }, }, nil } - // fallback to base.SizeScope_SMALL - log.Warnf("task size scope is tiny, but task.DirectPiece length is %d, not %d", + + // Fallback to base.SizeScope_SMALL + log.Warnf("task size scope is tiny, but task.DirectPiece length is %d, not %d. fall through to size scope small", + len(task.DirectPiece), task.ContentLength.Load()) + fallthrough case base.SizeScope_SMALL: log.Info("task size scope is small") - peer := s.service.RegisterTask(req, task) - parent, err := s.service.SelectParent(peer) - if err != nil { + host, _ := s.service.LoadOrStoreHost(ctx, req) + peer, _ := s.service.LoadOrStorePeer(ctx, req, task, host) + + // If the file is registered as a small type, + // there is no need to build a tree, just find the parent and return + parent, ok := s.service.Scheduler().FindParent(ctx, peer, set.NewSafeSet()) + if !ok { log.Warn("task size scope is small and it can not select parent") - span.AddEvent(config.EventSmallTaskSelectParentFail) return &scheduler.RegisterResult{ - TaskId: taskID, + TaskId: task.ID, SizeScope: sizeScope, }, nil } - firstPiece, ok := task.GetPiece(0) + firstPiece, ok := task.LoadPiece(0) if !ok { log.Warn("task size scope is small and it can not get first piece") return &scheduler.RegisterResult{ - TaskId: taskID, + TaskId: task.ID, SizeScope: sizeScope, }, nil } + peer.ReplaceParent(parent) + if err := peer.FSM.Event(resource.PeerEventRegisterSmall); err != nil { + dferr := dferrors.New(base.Code_SchedError, err.Error()) + log.Errorf("peer %s register failed: %v", req.PeerId, err) + return nil, dferr + } + singlePiece := &scheduler.SinglePiece{ DstPid: parent.ID, DstAddr: fmt.Sprintf("%s:%d", parent.Host.IP, parent.Host.DownloadPort), @@ -129,10 +126,10 @@ func (s *server) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskRe PieceStyle: firstPiece.PieceStyle, }, } - log.Infof("task size scope is small and return single piece %#v", sizeScope) - span.SetAttributes(config.AttributeSinglePiece.String(singlePiece.String())) + + log.Infof("task size scope is small and return single piece: %#v %#v", singlePiece, singlePiece.PieceInfo) return &scheduler.RegisterResult{ - TaskId: taskID, + TaskId: task.ID, SizeScope: sizeScope, DirectPiece: &scheduler.RegisterResult_SinglePiece{ SinglePiece: singlePiece, @@ -140,9 +137,16 @@ func (s *server) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskRe }, nil default: log.Info("task size scope is normal and needs to be register") - s.service.RegisterTask(req, task) + host, _ := s.service.LoadOrStoreHost(ctx, req) + peer, _ := s.service.LoadOrStorePeer(ctx, req, task, host) + if err := peer.FSM.Event(resource.PeerEventRegisterNormal); err != nil { + dferr := dferrors.New(base.Code_SchedError, err.Error()) + log.Errorf("peer %s register failed: %v", req.PeerId, err) + return nil, dferr + } + return &scheduler.RegisterResult{ - TaskId: taskID, + TaskId: task.ID, SizeScope: sizeScope, }, nil } @@ -150,101 +154,91 @@ func (s *server) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskRe // Task is unsuccessful log.Info("task is unsuccessful and needs to be register") - s.service.RegisterTask(req, task) + host, _ := s.service.LoadOrStoreHost(ctx, req) + peer, _ := s.service.LoadOrStorePeer(ctx, req, task, host) + if err := peer.FSM.Event(resource.PeerEventRegisterNormal); err != nil { + dferr := dferrors.New(base.Code_SchedError, err.Error()) + log.Errorf("peer %s register failed: %v", req.PeerId, err) + return nil, dferr + } + return &scheduler.RegisterResult{ - TaskId: taskID, + TaskId: task.ID, SizeScope: base.SizeScope_NORMAL, }, nil } -func (s *server) ReportPieceResult(stream scheduler.Scheduler_ReportPieceResultServer) error { - var span trace.Span - ctx, span := tracer.Start(stream.Context(), config.SpanReportPieceResult,
trace.WithSpanKind(trace.SpanKindServer)) - defer span.End() - pieceResult, err := stream.Recv() +func (s *Server) ReportPieceResult(stream scheduler.Scheduler_ReportPieceResultServer) error { + ctx := stream.Context() + + // Receive begin of piece + beginOfPiece, err := stream.Recv() if err != nil { if err == io.EOF { return nil } - err = dferrors.Newf(base.Code_SchedPeerPieceResultReportFail, "receive an error from peer stream: %v", err) - span.RecordError(err) + logger.Errorf("receive error: %v", err) return err } - logger.Debugf("peer %s start report piece result", pieceResult.SrcPid) - peer, ok := s.service.GetPeer(pieceResult.SrcPid) + // Get peer from peer manager + peer, ok := s.service.LoadPeer(beginOfPiece.SrcPid) if !ok { - err = dferrors.Newf(base.Code_SchedPeerNotFound, "peer %s not found", pieceResult.SrcPid) - span.RecordError(err) - return err + dferr := dferrors.Newf(base.Code_SchedPeerNotFound, "peer %s not found", beginOfPiece.SrcPid) + logger.Errorf("peer %s not found", beginOfPiece.SrcPid) + return dferr } - if peer.Task.IsFail() { - err = dferrors.Newf(base.Code_SchedTaskStatusError, "peer's task status is fail, task status %s", peer.Task.GetStatus()) - span.RecordError(err) - return err - } + // Store peer stream + peer.StoreStream(stream) + + // Handle begin of piece + s.service.HandlePiece(ctx, peer, beginOfPiece) - conn, ok := peer.BindNewConn(stream) - if !ok { - err = dferrors.Newf(base.Code_SchedPeerPieceResultReportFail, "peer can not bind conn") - span.RecordError(err) - return err - } - logger.Infof("peer %s is connected", peer.ID) - - defer func() { - logger.Infof("peer %s is disconnect: %v", peer.ID, conn.Error()) - span.RecordError(conn.Error()) - }() - if err := s.service.HandlePieceResult(ctx, peer, pieceResult); err != nil { - logger.Errorf("peer %s handle piece result %v fail: %v", peer.ID, pieceResult, err) - } for { select { - case <-conn.Done(): - return conn.Error() - case piece := <-conn.Receiver(): - if piece == nil { - logger.Infof("peer %s channel has been closed", peer.ID) - continue - } - if err := s.service.HandlePieceResult(ctx, peer, piece); err != nil { - logger.Errorf("peer %s handle piece result %v fail: %v", peer.ID, piece, err) + case <-ctx.Done(): + peer.Log.Infof("context is done") + return ctx.Err() + case dferr := <-peer.StopChannel: + peer.Log.Errorf("stream stopped with dferror: %v", dferr) + return dferr + default: + } + + piece, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil } + peer.Log.Errorf("receive error: %v", err) + return err } + + s.service.HandlePiece(ctx, peer, piece) } } -func (s *server) ReportPeerResult(ctx context.Context, result *scheduler.PeerResult) (err error) { - logger.Debugf("report peer result %v", result) - var span trace.Span - ctx, span = tracer.Start(ctx, config.SpanReportPeerResult, trace.WithSpanKind(trace.SpanKindServer)) - defer span.End() - span.SetAttributes(config.AttributeReportPeerID.String(result.PeerId)) - span.SetAttributes(config.AttributePeerDownloadSuccess.Bool(result.Success)) - span.SetAttributes(config.AttributePeerDownloadResult.String(result.String())) - peer, ok := s.service.GetPeer(result.PeerId) +func (s *Server) ReportPeerResult(ctx context.Context, req *scheduler.PeerResult) (err error) { + peer, ok := s.service.LoadPeer(req.PeerId) if !ok { - logger.Warnf("report peer result: peer %s is not exists", result.PeerId) - err = dferrors.Newf(base.Code_SchedPeerNotFound, "peer %s not found", result.PeerId) - span.RecordError(err) - return err + logger.Errorf("report peer result: peer %s does not exist", req.PeerId) + return dferrors.Newf(base.Code_SchedPeerNotFound, "peer %s not found", req.PeerId) } - return s.service.HandlePeerResult(ctx, peer, result) + + peer.Log.Infof("report peer result request: %#v", req) + s.service.HandlePeer(ctx, peer, req) + return nil } -func (s *server) LeaveTask(ctx context.Context, target *scheduler.PeerTarget) (err error) { - logger.Debugf("leave task %v", target) - var span trace.Span - ctx, span = tracer.Start(ctx, config.SpanPeerLeave, trace.WithSpanKind(trace.SpanKindServer)) - defer span.End() - span.SetAttributes(config.AttributeLeavePeerID.String(target.PeerId)) - span.SetAttributes(config.AttributeLeaveTaskID.String(target.TaskId)) - peer, ok := s.service.GetPeer(target.PeerId) +func (s *Server) LeaveTask(ctx context.Context, req *scheduler.PeerTarget) (err error) { + peer, ok := s.service.LoadPeer(req.PeerId) if !ok { - logger.Warnf("leave task: peer %s is not exists", target.PeerId) - return + logger.Errorf("leave task: peer %s does not exist", req.PeerId) + return dferrors.Newf(base.Code_SchedPeerNotFound, "peer %s not found", req.PeerId) } - return s.service.HandleLeaveTask(ctx, peer) + + peer.Log.Infof("leave task request: %#v", req) + s.service.HandlePeerLeave(ctx, peer) + return nil } diff --git a/scheduler/rpcserver/rpcserver_test.go b/scheduler/rpcserver/rpcserver_test.go new file mode 100644 index 00000000000..97952a589a2 --- /dev/null +++ b/scheduler/rpcserver/rpcserver_test.go @@ -0,0 +1,622 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package rpcserver + +import ( + "context" + "errors" + "fmt" + "io" + "reflect" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/internal/dferrors" + "d7y.io/dragonfly/v2/pkg/idgen" + "d7y.io/dragonfly/v2/pkg/rpc/base" + rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + rpcschedulermocks "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" + "d7y.io/dragonfly/v2/scheduler/resource" + "d7y.io/dragonfly/v2/scheduler/scheduler" + schedulermocks "d7y.io/dragonfly/v2/scheduler/scheduler/mocks" + "d7y.io/dragonfly/v2/scheduler/service/mocks" +) + +var ( + mockRawHost = &rpcscheduler.PeerHost{ + Uuid: idgen.HostID("hostname", 8003), + Ip: "127.0.0.1", + RpcPort: 8003, + DownPort: 8001, + HostName: "hostname", + SecurityDomain: "security_domain", + Location: "location", + Idc: "idc", + NetTopology: "net_topology", + } + mockTaskURLMeta = &base.UrlMeta{ + Digest: "digest", + Tag: "tag", + Range: "range", + Filter: "filter", + Header: map[string]string{ + "content-length": "100", + }, + } + mockTaskURL = "http://example.com/foo" + mockTaskBackToSourceLimit = 200 + mockTaskID = idgen.TaskID(mockTaskURL, mockTaskURLMeta) + mockPeerID = idgen.PeerID("127.0.0.1") +) + +func TestRPCServer_New(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, s interface{}) + }{ + { + name: "new server", + expect: func(t *testing.T, s interface{}) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(s).Elem().Name(), "Server") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + svc := mocks.NewMockService(ctl) + svr := New(svc) + tc.expect(t, svr) + }) + } +} + +func TestRPCServer_RegisterPeerTask(t *testing.T) { + tests := []struct { + name string + req *rpcscheduler.PeerTaskRequest + mock func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) + expect func(t *testing.T, result *rpcscheduler.RegisterResult, err error) + }{ + { + name: "service register failed", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + ms.RegisterTask(context.Background(), req).Return(nil, errors.New("foo")) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + dferr, ok := err.(*dferrors.DfError) + assert.True(ok) + assert.Equal(dferr.Code, base.Code_SchedTaskStatusError) + }, + }, + { + name: "task state is TaskStatePending", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStatePending) + + gomock.InOrder( + ms.RegisterTask(context.Background(), req).Return(mockTask, nil).Times(1), + ms.LoadOrStoreHost(context.Background(), req).Return(mockHost, true).Times(1), + ms.LoadOrStorePeer(context.Background(), req, gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ) + }, + expect: func(t *testing.T, result 
*rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + assert.Equal(result.TaskId, mockTaskID) + assert.Equal(result.SizeScope, base.SizeScope_NORMAL) + }, + }, + { + name: "task state is TaskStatePending and peer state is PeerStateFailed", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStatePending) + mockPeer.FSM.SetState(resource.PeerStateFailed) + + gomock.InOrder( + ms.RegisterTask(context.Background(), req).Return(mockTask, nil).Times(1), + ms.LoadOrStoreHost(context.Background(), req).Return(mockHost, true).Times(1), + ms.LoadOrStorePeer(context.Background(), req, gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + dferr, ok := err.(*dferrors.DfError) + assert.True(ok) + assert.Equal(dferr.Code, base.Code_SchedError) + }, + }, + { + name: "task state is TaskStateRunning", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateRunning) + + gomock.InOrder( + ms.RegisterTask(context.Background(), req).Return(mockTask, nil).Times(1), + ms.LoadOrStoreHost(context.Background(), req).Return(mockHost, true).Times(1), + ms.LoadOrStorePeer(context.Background(), req, gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + assert.Equal(result.TaskId, mockTaskID) + assert.Equal(result.SizeScope, base.SizeScope_NORMAL) + }, + }, + { + name: "task state is TaskStateRunning and peer state is PeerStateFailed", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateRunning) + mockPeer.FSM.SetState(resource.PeerStateFailed) + + gomock.InOrder( + ms.RegisterTask(context.Background(), req).Return(mockTask, nil).Times(1), + ms.LoadOrStoreHost(context.Background(), req).Return(mockHost, true).Times(1), + ms.LoadOrStorePeer(context.Background(), req, gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + dferr, ok := err.(*dferrors.DfError) + assert.True(ok) + assert.Equal(dferr.Code, base.Code_SchedError) + }, + }, + { + name: "task state is TaskStateSucceeded and sizeScope is SizeScope_TINY", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateSucceeded) + mockTask.ContentLength.Store(1) + mockTask.DirectPiece = []byte{1} + ms.RegisterTask(context.Background(), 
req).Return(mockTask, nil).Times(1) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + assert.Equal(result.TaskId, mockTaskID) + assert.Equal(result.SizeScope, base.SizeScope_TINY) + assert.Equal(result.DirectPiece, &rpcscheduler.RegisterResult_PieceContent{ + PieceContent: []byte{1}, + }) + }, + }, + { + name: "task state is TaskStateSucceeded and sizeScope is SizeScope_TINY, but piece data error and find parent failed", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateSucceeded) + mockTask.ContentLength.Store(2) + gomock.InOrder( + ms.RegisterTask(context.Background(), req).Return(mockTask, nil).Times(1), + ms.LoadOrStoreHost(context.Background(), req).Return(mockHost, true).Times(1), + ms.LoadOrStorePeer(context.Background(), req, gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ms.Scheduler().Return(scheduler).Times(1), + msched.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, false).Times(1), + ) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + assert.Equal(result.TaskId, mockTaskID) + assert.Equal(result.SizeScope, base.SizeScope_TINY) + }, + }, + { + name: "task state is TaskStateSucceeded and sizeScope is SizeScope_SMALL, but find parent failed", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateSucceeded) + mockTask.ContentLength.Store(resource.TinyFileSize + 1) + mockTask.TotalPieceCount.Store(1) + gomock.InOrder( + ms.RegisterTask(context.Background(), req).Return(mockTask, nil).Times(1), + ms.LoadOrStoreHost(context.Background(), req).Return(mockHost, true).Times(1), + ms.LoadOrStorePeer(context.Background(), req, gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ms.Scheduler().Return(scheduler).Times(1), + msched.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, false).Times(1), + ) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + assert.Equal(result.TaskId, mockTaskID) + assert.Equal(result.SizeScope, base.SizeScope_SMALL) + }, + }, + { + name: "task state is TaskStateSucceeded and sizeScope is SizeScope_SMALL, but can not find piece info", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateSucceeded) + mockTask.ContentLength.Store(resource.TinyFileSize + 1) + mockTask.TotalPieceCount.Store(1) + + gomock.InOrder( + ms.RegisterTask(context.Background(), req).Return(mockTask, nil).Times(1), + ms.LoadOrStoreHost(context.Background(), req).Return(mockHost, true).Times(1), + ms.LoadOrStorePeer(context.Background(), req, gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ms.Scheduler().Return(scheduler).Times(1), + 
msched.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + assert.Equal(result.TaskId, mockTaskID) + assert.Equal(result.SizeScope, base.SizeScope_SMALL) + }, + }, + { + name: "task state is TaskStateSucceeded and sizeScope is SizeScope_SMALL", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateSucceeded) + mockTask.ContentLength.Store(resource.TinyFileSize + 1) + mockTask.TotalPieceCount.Store(1) + mockTask.StorePiece(&base.PieceInfo{ + PieceNum: 0, + }) + + gomock.InOrder( + ms.RegisterTask(context.Background(), req).Return(mockTask, nil).Times(1), + ms.LoadOrStoreHost(context.Background(), req).Return(mockHost, true).Times(1), + ms.LoadOrStorePeer(context.Background(), req, gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ms.Scheduler().Return(scheduler).Times(1), + msched.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + assert.Equal(result.TaskId, mockTaskID) + assert.Equal(result.SizeScope, base.SizeScope_SMALL) + assert.EqualValues(result.DirectPiece, &rpcscheduler.RegisterResult_SinglePiece{ + SinglePiece: &rpcscheduler.SinglePiece{ + DstPid: mockPeerID, + DstAddr: fmt.Sprintf("%s:%d", mockRawHost.Ip, mockRawHost.DownPort), + PieceInfo: &base.PieceInfo{ + PieceNum: 0, + }, + }, + }) + }, + }, + { + name: "task state is TaskStateSucceeded and sizeScope is SizeScope_NORMAL", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateSucceeded) + mockTask.ContentLength.Store(resource.TinyFileSize + 1) + mockTask.TotalPieceCount.Store(2) + + gomock.InOrder( + ms.RegisterTask(context.Background(), req).Return(mockTask, nil).Times(1), + ms.LoadOrStoreHost(context.Background(), req).Return(mockHost, true).Times(1), + ms.LoadOrStorePeer(context.Background(), req, gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + assert.Equal(result.TaskId, mockTaskID) + assert.Equal(result.SizeScope, base.SizeScope_NORMAL) + }, + }, + { + name: "task state is TaskStateSucceeded and sizeScope is SizeScope_NORMAL, but peer state is PeerStateFailed", + req: &rpcscheduler.PeerTaskRequest{}, + mock: func(req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, scheduler scheduler.Scheduler, ms *mocks.MockServiceMockRecorder, msched *schedulermocks.MockSchedulerMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateSucceeded) + mockTask.ContentLength.Store(resource.TinyFileSize + 1) + mockTask.TotalPieceCount.Store(2) + mockPeer.FSM.SetState(resource.PeerStateFailed) + + gomock.InOrder( + ms.RegisterTask(context.Background(), req).Return(mockTask, nil).Times(1), + 
ms.LoadOrStoreHost(context.Background(), req).Return(mockHost, true).Times(1), + ms.LoadOrStorePeer(context.Background(), req, gomock.Any(), gomock.Any()).Return(mockPeer, true).Times(1), + ) + }, + expect: func(t *testing.T, result *rpcscheduler.RegisterResult, err error) { + assert := assert.New(t) + dferr, ok := err.(*dferrors.DfError) + assert.True(ok) + assert.Equal(dferr.Code, base.Code_SchedError) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + svc := mocks.NewMockService(ctl) + scheduler := schedulermocks.NewMockScheduler(ctl) + + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) + tc.mock(tc.req, mockPeer, mockHost, mockTask, scheduler, svc.EXPECT(), scheduler.EXPECT()) + svr := New(svc) + result, err := svr.RegisterPeerTask(context.Background(), tc.req) + tc.expect(t, result, err) + }) + } +} + +func TestRPCServer_ReportPieceResult(t *testing.T) { + tests := []struct { + name string + mock func(mockPeer *resource.Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockServiceMockRecorder, mstream *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) + expect func(t *testing.T, mockPeer *resource.Peer, err error) + }{ + { + name: "receive begin of piece failed", + mock: func(mockPeer *resource.Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockServiceMockRecorder, mstream *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + gomock.InOrder( + mstream.Context().Return(context.Background()).Times(1), + mstream.Recv().Return(nil, errors.New("foo")).Times(1), + ) + }, + expect: func(t *testing.T, mockPeer *resource.Peer, err error) { + assert := assert.New(t) + assert.EqualError(err, "foo") + }, + }, + { + name: "receive begin of piece failed because of io EOF", + mock: func(mockPeer *resource.Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockServiceMockRecorder, mstream *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + gomock.InOrder( + mstream.Context().Return(context.Background()).Times(1), + mstream.Recv().Return(nil, io.EOF).Times(1), + ) + }, + expect: func(t *testing.T, mockPeer *resource.Peer, err error) { + assert := assert.New(t) + assert.NoError(err) + }, + }, + { + name: "peer not found", + mock: func(mockPeer *resource.Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockServiceMockRecorder, mstream *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + gomock.InOrder( + mstream.Context().Return(context.Background()).Times(1), + mstream.Recv().Return(&rpcscheduler.PieceResult{ + SrcPid: mockPeerID, + }, nil).Times(1), + ms.LoadPeer(gomock.Eq(mockPeerID)).Return(nil, false).Times(1), + ) + }, + expect: func(t *testing.T, mockPeer *resource.Peer, err error) { + assert := assert.New(t) + dferr, ok := err.(*dferrors.DfError) + assert.True(ok) + assert.Equal(dferr.Code, base.Code_SchedPeerNotFound) + }, + }, + { + name: "context canceled", + mock: func(mockPeer *resource.Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockServiceMockRecorder, mstream *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + gomock.InOrder( + 
mstream.Context().Return(ctx).Times(1), + mstream.Recv().Return(&rpcscheduler.PieceResult{ + SrcPid: mockPeerID, + }, nil).Times(1), + ms.LoadPeer(gomock.Eq(mockPeerID)).Return(mockPeer, true).Times(1), + ms.HandlePiece(gomock.Any(), gomock.Any(), gomock.Any()).Return().Times(1), + ) + }, + expect: func(t *testing.T, mockPeer *resource.Peer, err error) { + assert := assert.New(t) + assert.EqualError(err, "context canceled") + }, + }, + { + name: "stream stop with dferr", + mock: func(mockPeer *resource.Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockServiceMockRecorder, mstream *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + gomock.InOrder( + mstream.Context().Return(context.Background()).Times(1), + mstream.Recv().Return(&rpcscheduler.PieceResult{ + SrcPid: mockPeerID, + }, nil).Times(1), + ms.LoadPeer(gomock.Eq(mockPeerID)).Return(mockPeer, true).Times(1), + ms.HandlePiece(gomock.Any(), gomock.Any(), gomock.Any()).Return().Times(1), + ) + mockPeer.StopStream(dferrors.New(base.Code_SchedError, "")) + }, + expect: func(t *testing.T, mockPeer *resource.Peer, err error) { + assert := assert.New(t) + dferr, ok := err.(*dferrors.DfError) + assert.True(ok) + assert.Equal(dferr.Code, base.Code_SchedError) + }, + }, + { + name: "receive piece failed", + mock: func(mockPeer *resource.Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockServiceMockRecorder, mstream *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + gomock.InOrder( + mstream.Context().Return(context.Background()).Times(1), + mstream.Recv().Return(&rpcscheduler.PieceResult{ + SrcPid: mockPeerID, + }, nil).Times(1), + ms.LoadPeer(gomock.Eq(mockPeerID)).Return(mockPeer, true).Times(1), + ms.HandlePiece(gomock.Any(), gomock.Any(), gomock.Any()).Return().Times(1), + mstream.Recv().Return(nil, errors.New("foo")).Times(1), + ) + }, + expect: func(t *testing.T, mockPeer *resource.Peer, err error) { + assert := assert.New(t) + assert.EqualError(err, "foo") + }, + }, + { + name: "receive piece failed because of io EOF", + mock: func(mockPeer *resource.Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockServiceMockRecorder, mstream *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + gomock.InOrder( + mstream.Context().Return(context.Background()).Times(1), + mstream.Recv().Return(&rpcscheduler.PieceResult{ + SrcPid: mockPeerID, + }, nil).Times(1), + ms.LoadPeer(gomock.Eq(mockPeerID)).Return(mockPeer, true).Times(1), + ms.HandlePiece(gomock.Any(), gomock.Any(), gomock.Any()).Return().Times(1), + mstream.Recv().Return(nil, io.EOF).Times(1), + ) + }, + expect: func(t *testing.T, mockPeer *resource.Peer, err error) { + assert := assert.New(t) + assert.NoError(err) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + svc := mocks.NewMockService(ctl) + stream := rpcschedulermocks.NewMockScheduler_ReportPieceResultServer(ctl) + + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) + mockPeer.StoreStream(stream) + tc.mock(mockPeer, stream, svc.EXPECT(), stream.EXPECT()) + svr := New(svc) + tc.expect(t, mockPeer, svr.ReportPieceResult(stream)) + }) + } +} + +func TestRPCServer_ReportPeerResult(t *testing.T) { + tests := []struct { + name string + req *rpcscheduler.PeerResult + 
mock func(mockPeer *resource.Peer, ms *mocks.MockServiceMockRecorder) + expect func(t *testing.T, err error) + }{ + { + name: "peer not found", + req: &rpcscheduler.PeerResult{ + PeerId: mockPeerID, + }, + mock: func(mockPeer *resource.Peer, ms *mocks.MockServiceMockRecorder) { + ms.LoadPeer(gomock.Eq(mockPeerID)).Return(nil, false).Times(1) + }, + expect: func(t *testing.T, err error) { + assert := assert.New(t) + dferr, ok := err.(*dferrors.DfError) + assert.True(ok) + assert.Equal(dferr.Code, base.Code_SchedPeerNotFound) + }, + }, + { + name: "report peer success", + req: &rpcscheduler.PeerResult{ + PeerId: mockPeerID, + }, + mock: func(mockPeer *resource.Peer, ms *mocks.MockServiceMockRecorder) { + gomock.InOrder( + ms.LoadPeer(gomock.Eq(mockPeerID)).Return(mockPeer, true).Times(1), + ms.HandlePeer(gomock.Any(), gomock.Eq(mockPeer), gomock.Any()).Return().Times(1), + ) + }, + expect: func(t *testing.T, err error) { + assert := assert.New(t) + assert.NoError(err) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + svc := mocks.NewMockService(ctl) + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) + + tc.mock(mockPeer, svc.EXPECT()) + svr := New(svc) + tc.expect(t, svr.ReportPeerResult(context.Background(), tc.req)) + }) + } +} + +func TestRPCServer_LeaveTask(t *testing.T) { + tests := []struct { + name string + req *rpcscheduler.PeerTarget + mock func(mockPeer *resource.Peer, ms *mocks.MockServiceMockRecorder) + expect func(t *testing.T, err error) + }{ + { + name: "peer not found", + req: &rpcscheduler.PeerTarget{ + PeerId: mockPeerID, + }, + mock: func(mockPeer *resource.Peer, ms *mocks.MockServiceMockRecorder) { + ms.LoadPeer(gomock.Eq(mockPeerID)).Return(nil, false).Times(1) + }, + expect: func(t *testing.T, err error) { + assert := assert.New(t) + dferr, ok := err.(*dferrors.DfError) + assert.True(ok) + assert.Equal(dferr.Code, base.Code_SchedPeerNotFound) + }, + }, + { + name: "peer leave", + req: &rpcscheduler.PeerTarget{ + PeerId: mockPeerID, + }, + mock: func(mockPeer *resource.Peer, ms *mocks.MockServiceMockRecorder) { + gomock.InOrder( + ms.LoadPeer(gomock.Eq(mockPeerID)).Return(mockPeer, true).Times(1), + ms.HandlePeerLeave(gomock.Any(), gomock.Eq(mockPeer)).Return().Times(1), + ) + }, + expect: func(t *testing.T, err error) { + assert := assert.New(t) + assert.NoError(err) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + svc := mocks.NewMockService(ctl) + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) + + tc.mock(mockPeer, svc.EXPECT()) + svr := New(svc) + tc.expect(t, svr.LeaveTask(context.Background(), tc.req)) + }) + } +} diff --git a/scheduler/scheduler.go b/scheduler/scheduler.go index fa8af51f70d..cedf1399b98 100644 --- a/scheduler/scheduler.go +++ b/scheduler/scheduler.go @@ -18,25 +18,27 @@ package scheduler import ( "context" + "fmt" + "net" "net/http" "time" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "golang.org/x/net/netutil" "google.golang.org/grpc" - "d7y.io/dragonfly/v2/cmd/dependency" logger "d7y.io/dragonfly/v2/internal/dflog" - 
"d7y.io/dragonfly/v2/internal/dynconfig" "d7y.io/dragonfly/v2/pkg/dfpath" "d7y.io/dragonfly/v2/pkg/gc" - "d7y.io/dragonfly/v2/pkg/rpc" - "d7y.io/dragonfly/v2/pkg/rpc/manager" + rpcmanager "d7y.io/dragonfly/v2/pkg/rpc/manager" managerclient "d7y.io/dragonfly/v2/pkg/rpc/manager/client" "d7y.io/dragonfly/v2/scheduler/config" - "d7y.io/dragonfly/v2/scheduler/core" "d7y.io/dragonfly/v2/scheduler/job" "d7y.io/dragonfly/v2/scheduler/metrics" + "d7y.io/dragonfly/v2/scheduler/resource" "d7y.io/dragonfly/v2/scheduler/rpcserver" + "d7y.io/dragonfly/v2/scheduler/scheduler" + "d7y.io/dragonfly/v2/scheduler/service" ) const ( @@ -53,9 +55,6 @@ type Server struct { // Metrics server metricsServer *http.Server - // Scheduler service - service *core.SchedulerService - // Manager client managerClient managerclient.Client @@ -69,11 +68,11 @@ type Server struct { gc gc.GC } -func New(cfg *config.Config, d dfpath.Dfpath) (*Server, error) { +func New(ctx context.Context, cfg *config.Config, d dfpath.Dfpath) (*Server, error) { s := &Server{config: cfg} // Initialize manager client - if cfg.Manager.Addr != "" { + if cfg.Manager.Enable { managerClient, err := managerclient.New(cfg.Manager.Addr) if err != nil { return nil, err @@ -81,8 +80,8 @@ func New(cfg *config.Config, d dfpath.Dfpath) (*Server, error) { s.managerClient = managerClient // Register to manager - if _, err := s.managerClient.UpdateScheduler(&manager.UpdateSchedulerRequest{ - SourceType: manager.SourceType_SCHEDULER_SOURCE, + if _, err := s.managerClient.UpdateScheduler(&rpcmanager.UpdateSchedulerRequest{ + SourceType: rpcmanager.SourceType_SCHEDULER_SOURCE, HostName: s.config.Server.Host, Ip: s.config.Server.IP, Port: int32(s.config.Server.Port), @@ -90,62 +89,69 @@ func New(cfg *config.Config, d dfpath.Dfpath) (*Server, error) { Location: s.config.Host.Location, SchedulerClusterId: uint64(s.config.Manager.SchedulerClusterID), }); err != nil { - return nil, err + logger.Fatalf("register to manager failed %v", err) } } // Initialize dynconfig client - options := []dynconfig.Option{dynconfig.WithLocalConfigPath(dependency.GetConfigPath("scheduler"))} - if s.managerClient != nil && cfg.DynConfig.Type == dynconfig.ManagerSourceType { - options = append(options, - dynconfig.WithManagerClient(config.NewManagerClient(s.managerClient, cfg)), - dynconfig.WithExpireTime(cfg.DynConfig.ExpireTime), - ) - } - dynConfig, err := config.NewDynconfig(cfg.DynConfig.Type, d.CacheDir(), cfg.DynConfig.CDNDirPath, options...) 
+ dynConfig, err := config.NewDynconfig(s.managerClient, d.CacheDir(), cfg) if err != nil { return nil, err } s.dynconfig = dynConfig // Initialize GC - s.gc = gc.New(gc.WithLogger(logger.MetaGCLogger)) + s.gc = gc.New(gc.WithLogger(logger.GCLogger)) - // Initialize scheduler service - var openTel bool - if cfg.Options.Telemetry.Jaeger != "" { - openTel = true - } - service, err := core.NewSchedulerService(cfg.Scheduler, d.PluginDir(), cfg.Metrics, dynConfig, s.gc, core.WithDisableCDN(cfg.DisableCDN), core.WithOpenTel(openTel)) - if err != nil { - return nil, err - } - s.service = service + // Initialize grpc options + var ( + serverOptions []grpc.ServerOption + dialOptions []grpc.DialOption + ) - // Initialize grpc service - var opts []grpc.ServerOption if s.config.Options.Telemetry.Jaeger != "" { - opts = append(opts, grpc.ChainUnaryInterceptor(otelgrpc.UnaryServerInterceptor()), grpc.ChainStreamInterceptor(otelgrpc.StreamServerInterceptor())) + serverOptions = append( + serverOptions, + grpc.ChainUnaryInterceptor(otelgrpc.UnaryServerInterceptor()), + grpc.ChainStreamInterceptor(otelgrpc.StreamServerInterceptor()), + ) + + dialOptions = append( + dialOptions, + grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), + grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor()), + ) } - grpcServer, err := rpcserver.New(s.service, opts...) + + // Initialize resource + resource, err := resource.New(cfg, s.gc, dynConfig, dialOptions...) if err != nil { return nil, err } - s.grpcServer = grpcServer - // Initialize prometheus - if cfg.Metrics != nil { - s.metricsServer = metrics.New(cfg.Metrics, grpcServer) - } + // Initialize scheduler + scheduler := scheduler.New(cfg.Scheduler, d.PluginDir()) + + // Initialize scheduler service + service := service.New(cfg, resource, scheduler, dynConfig) + + // Initialize grpc service + svr := rpcserver.New(service, serverOptions...) 
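(A minimal, self-contained sketch of the interceptor wiring above, for readers skimming the hunk: serverOptions instruments the scheduler's inbound gRPC service, while dialOptions is threaded into resource.New so outbound client connections carry the same OpenTelemetry tracing. The address and insecure transport below are illustrative assumptions, not part of this patch.)

package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
)

func main() {
	// Inbound: chain otelgrpc interceptors onto the gRPC server,
	// as serverOptions does above.
	srv := grpc.NewServer(
		grpc.ChainUnaryInterceptor(otelgrpc.UnaryServerInterceptor()),
		grpc.ChainStreamInterceptor(otelgrpc.StreamServerInterceptor()),
	)
	defer srv.Stop()

	// Outbound: the client-side counterparts, as dialOptions does above.
	conn, err := grpc.Dial(
		"127.0.0.1:8002", // hypothetical address, illustration only
		grpc.WithInsecure(),
		grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),
		grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}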
+ s.grpcServer = svr.Server + // Initialize job service - if cfg.Job.Redis.Host != "" { - s.job, err = job.New(context.Background(), cfg.Job, cfg.Manager.SchedulerClusterID, cfg.Server.Host, s.service) + if cfg.Job.Enable { + s.job, err = job.New(cfg, service) if err != nil { return nil, err } } + // Initialize metrics + if cfg.Metrics.Enable { + s.metricsServer = metrics.New(cfg.Metrics, s.grpcServer) + } + return s, nil } @@ -162,20 +168,10 @@ func (s *Server) Serve() error { s.gc.Serve() logger.Info("gc start successfully") - // Serve service - go func() { - s.service.Serve() - logger.Info("scheduler service start successfully") - }() - // Serve Job if s.job != nil { - go func() { - if err := s.job.Serve(); err != nil { - logger.Fatalf("job start failed %v", err) - } - logger.Info("job start successfully") - }() + s.job.Serve() + logger.Info("job start successfully") } // Started metrics server @@ -191,28 +187,29 @@ }() } - // Serve Keepalive if s.managerClient != nil { + // scheduler keepalive with manager go func() { logger.Info("start keepalive to manager") - s.managerClient.KeepAlive(s.config.Manager.KeepAlive.Interval, &manager.KeepAliveRequest{ + s.managerClient.KeepAlive(s.config.Manager.KeepAlive.Interval, &rpcmanager.KeepAliveRequest{ HostName: s.config.Server.Host, - SourceType: manager.SourceType_SCHEDULER_SOURCE, + SourceType: rpcmanager.SourceType_SCHEDULER_SOURCE, ClusterId: uint64(s.config.Manager.SchedulerClusterID), }) }() } - // Generate GRPC listener - lis, _, err := rpc.ListenWithPortRange(s.config.Server.IP, s.config.Server.Port, s.config.Server.Port) + // Generate GRPC limit listener + listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", s.config.Server.IP, s.config.Server.Port)) if err != nil { logger.Fatalf("net listener failed to start: %v", err) } - defer lis.Close() + defer listener.Close() + limitListener := netutil.LimitListener(listener, s.config.Server.ListenLimit) // Started GRPC server - logger.Infof("started grpc server at %s://%s", lis.Addr().Network(), lis.Addr().String()) - if err := s.grpcServer.Serve(lis); err != nil { + logger.Infof("started grpc server at %s://%s with max connections %d", limitListener.Addr().Network(), limitListener.Addr().String(), s.config.Server.ListenLimit) + if err := s.grpcServer.Serve(limitListener); err != nil { logger.Errorf("stopped grpc server: %v", err) return err } @@ -239,10 +236,6 @@ func (s *Server) Stop() { s.gc.Stop() logger.Info("gc closed") - // Stop scheduler service - s.service.Stop() - logger.Info("scheduler service closed") - // Stop metrics server if s.metricsServer != nil { if err := s.metricsServer.Shutdown(context.Background()); err != nil { diff --git a/scheduler/core/evaluator/evaluator.go b/scheduler/scheduler/evaluator/evaluator.go similarity index 82% rename from scheduler/core/evaluator/evaluator.go rename to scheduler/scheduler/evaluator/evaluator.go index 2ae8e2bb43c..3ee66282cdc 100644 --- a/scheduler/core/evaluator/evaluator.go +++ b/scheduler/scheduler/evaluator/evaluator.go @@ -17,7 +17,7 @@ package evaluator import ( - "d7y.io/dragonfly/v2/scheduler/supervisor" + "d7y.io/dragonfly/v2/scheduler/resource" ) const ( @@ -33,13 +33,10 @@ type Evaluator interface { // Evaluate todo Normalization - Evaluate(parent *supervisor.Peer, child *supervisor.Peer, taskPieceCount int32) float64 - - // NeedAdjustParent determine whether the peer needs a new parent node - NeedAdjustParent(peer *supervisor.Peer) bool + Evaluate(parent *resource.Peer, child
*resource.Peer, taskPieceCount int32) float64 // IsBadNode determine if peer is a failed node - IsBadNode(peer *supervisor.Peer) bool + IsBadNode(peer *resource.Peer) bool } func New(algorithm string, pluginDir string) Evaluator { diff --git a/scheduler/core/evaluator/evaluator_base.go b/scheduler/scheduler/evaluator/evaluator_base.go similarity index 54% rename from scheduler/core/evaluator/evaluator_base.go rename to scheduler/scheduler/evaluator/evaluator_base.go index 66356ec966c..604284b19c8 100644 --- a/scheduler/core/evaluator/evaluator_base.go +++ b/scheduler/scheduler/evaluator/evaluator_base.go @@ -24,15 +24,18 @@ import ( logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/util/mathutils" - "d7y.io/dragonfly/v2/scheduler/supervisor" + "d7y.io/dragonfly/v2/scheduler/resource" ) const ( // Finished piece weight - finishedPieceWeight float64 = 0.4 + finishedPieceWeight float64 = 0.3 // Free load weight - freeLoadWeight = 0.3 + freeLoadWeight = 0.25 + + // host type affinity weight + hostTypeAffinityWeight = 0.15 // IDC affinity weight idcAffinityWeight = 0.15 @@ -72,48 +75,60 @@ func NewEvaluatorBase() Evaluator { } // The larger the value after evaluation, the higher the priority -func (eb *evaluatorBase) Evaluate(parent *supervisor.Peer, child *supervisor.Peer, taskPieceCount int32) float64 { +func (eb *evaluatorBase) Evaluate(parent *resource.Peer, child *resource.Peer, totalPieceCount int32) float64 { // If the SecurityDomain of hosts exists but is not equal, // it cannot be scheduled as a parent if parent.Host.SecurityDomain != "" && child.Host.SecurityDomain != "" && - strings.Compare(parent.Host.SecurityDomain, child.Host.SecurityDomain) != 0 { + parent.Host.SecurityDomain != child.Host.SecurityDomain { return minScore } - return finishedPieceWeight*calculatePieceScore(parent, child, taskPieceCount) + + return finishedPieceWeight*calculatePieceScore(parent, child, totalPieceCount) + freeLoadWeight*calculateFreeLoadScore(parent.Host) + + hostTypeAffinityWeight*calculateHostTypeAffinityScore(parent.Host) + idcAffinityWeight*calculateIDCAffinityScore(parent.Host, child.Host) + netTopologyAffinityWeight*calculateMultiElementAffinityScore(parent.Host.NetTopology, child.Host.NetTopology) + locationAffinityWeight*calculateMultiElementAffinityScore(parent.Host.Location, child.Host.Location) } // calculatePieceScore 0.0~unlimited larger and better -func calculatePieceScore(parent *supervisor.Peer, child *supervisor.Peer, taskPieceCount int32) float64 { +func calculatePieceScore(parent *resource.Peer, child *resource.Peer, totalPieceCount int32) float64 { // If the total piece is determined, normalize the number of // pieces downloaded by the parent node - if taskPieceCount > 0 { - finishedPieceCount := parent.TotalPieceCount.Load() - return float64(finishedPieceCount) / float64(taskPieceCount) + if totalPieceCount > 0 { + finishedPieceCount := parent.Pieces.Count() + return float64(finishedPieceCount) / float64(totalPieceCount) } // Use the difference between the parent node and the child node to // download the piece to roughly represent the piece score - parentFinishedPieceCount := parent.TotalPieceCount.Load() - childFinishedPieceCount := child.TotalPieceCount.Load() - return float64(parentFinishedPieceCount - childFinishedPieceCount) + parentFinishedPieceCount := parent.Pieces.Count() + childFinishedPieceCount := child.Pieces.Count() + return float64(parentFinishedPieceCount) - float64(childFinishedPieceCount) } // calculateFreeLoadScore 0.0~1.0 larger and better 
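(A worked example of the scoring arithmetic in this file, mirroring the unit tests added later in this patch; it assumes the github.com/montanaflynn/stats package that this file already imports as stats, and the numbers are illustrative only.)

package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	// Piece score: with totalPieceCount = 10 and a parent that finished
	// pieces 0 and 1, calculatePieceScore returns 2/10 = 0.2, so its
	// weighted contribution to Evaluate is finishedPieceWeight * 0.2,
	// i.e. 0.3 * 0.2 ≈ 0.06.
	fmt.Println(0.3 * (float64(2) / float64(10)))

	// Bad-node check (IsBadNode below): ten normal costs plus one slow
	// last cost. The mean of the first ten is 9.5, their population
	// standard deviation is about 2.87, so the three-sigma bound is
	// roughly 18.12; a last cost of 19 exceeds it and marks a bad node.
	costs := []float64{10, 11, 12, 13, 14, 9, 8, 7, 6, 5, 19}
	mean, _ := stats.Mean(costs[:len(costs)-1])
	stdev, _ := stats.StandardDeviation(costs[:len(costs)-1])
	fmt.Println(costs[len(costs)-1] > mean+3*stdev) // true
}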
-func calculateFreeLoadScore(host *supervisor.Host) float64 { - load := host.CurrentUploadLoad.Load() - totalLoad := host.TotalUploadLoad - return float64(totalLoad-load) / float64(totalLoad) +func calculateFreeLoadScore(host *resource.Host) float64 { + load := host.LenPeers() + totalLoad := host.UploadLoadLimit.Load() + return float64(totalLoad-int32(load)) / float64(totalLoad) +} + +// calculateHostTypeAffinityScore 0.0~1.0 larger and better +func calculateHostTypeAffinityScore(host *resource.Host) float64 { + // CDN hosts are given the lowest priority, + // because CDN upload capacity is reserved as much as possible for the first download of a task + if host.IsCDN { + return minScore + } + + return maxScore } // calculateIDCAffinityScore 0.0~1.0 larger and better -func calculateIDCAffinityScore(dst, src *supervisor.Host) float64 { - if dst.IDC != "" && src.IDC != "" && strings.Compare(dst.IDC, src.IDC) == 0 { +func calculateIDCAffinityScore(dst, src *resource.Host) float64 { + if dst.IDC != "" && src.IDC != "" && dst.IDC == src.IDC { return maxScore } @@ -126,7 +141,7 @@ func calculateMultiElementAffinityScore(dst, src string) float64 { return minScore } - if strings.Compare(dst, src) == 0 { + if dst == src { return maxScore } @@ -142,7 +157,7 @@ } for i := 0; i < elementLen; i++ { - if strings.Compare(dstElements[i], srcElements[i]) != 0 { + if dstElements[i] != srcElements[i] { break } score++ @@ -151,32 +166,18 @@ return float64(score) / float64(maxElementLen) } -func (eb *evaluatorBase) NeedAdjustParent(peer *supervisor.Peer) bool { - // CDN is the root node - if peer.Host.IsCDN { - return false - } - - parent, ok := peer.GetParent() - // Peer has no parent and is not completed - if !ok && !peer.IsDone() { - logger.Infof("peer %s need adjust parent because it has not parent and status is %s", peer.ID, peer.GetStatus()) +func (eb *evaluatorBase) IsBadNode(peer *resource.Peer) bool { + if peer.FSM.Is(resource.PeerStateFailed) || peer.FSM.Is(resource.PeerStateLeave) || peer.FSM.Is(resource.PeerStatePending) { + peer.Log.Infof("peer is a bad node because its status is %s", peer.FSM.Current()) return true } - // Peer has parent but parent can't be scheduled. - if ok && (parent.IsLeave() || eb.IsBadNode(parent)) { - logger.Infof("peer %s need adjust parent because parent can't be scheduled", peer.ID) - return true - } - - // Determine whether to adjust parent based on piece download costs - rawCosts := peer.GetPieceCosts() - costs := stats.LoadRawData(rawCosts) + // Determine whether the peer is a bad node based on piece download costs + costs := stats.LoadRawData(peer.PieceCosts()) len := len(costs) // Peer has not finished downloading enough pieces if len < minAvailableCostLen { - logger.Infof("peer %s has not finished downloading enough piece, it can't be adjusted parent", peer.ID) + logger.Infof("peer %s has not finished downloading enough pieces, it can't be a bad node", peer.ID) return false } @@ -184,50 +185,19 @@ mean, _ := stats.Mean(costs[:len-1]) // nolint: errcheck // Download costs do not meet the normal distribution, - // if the last cost is five times more than mean, it need to be adjusted parent. + // if the last cost is more than twenty times the mean, the peer is a bad node.
if len < normalDistributionLen { - isNeedAdjustParent := big.NewFloat(lastCost).Cmp(big.NewFloat(mean*5)) > 0 - logger.Infof("peer %s does not meet the normal distribution and mean is %.2f, peer need adjust parent: %t", peer.ID, mean, isNeedAdjustParent) - return isNeedAdjustParent + isBadNode := big.NewFloat(lastCost).Cmp(big.NewFloat(mean*20)) > 0 + logger.Infof("peer %s mean is %.2f and it is a bad node: %t", peer.ID, mean, isBadNode) + return isBadNode } // Download costs satisfy the normal distribution; // a last cost falling outside the three-sigma range marks the peer as a bad node, // refer to https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule - stdev, _ := stats.StandardDeviation(costs[:len-2]) // nolint: errcheck - isNeedAdjustParent := big.NewFloat(lastCost).Cmp(big.NewFloat(mean+3*stdev)) > 0 - logger.Infof("peer %s meet the normal distribution, costs mean is %.2f and standard deviation is %.2f, peer need adjust parent: %t", - peer.ID, mean, stdev, isNeedAdjustParent) - return isNeedAdjustParent -} - -func (eb *evaluatorBase) IsBadNode(peer *supervisor.Peer) bool { - if peer.IsBad() { - logger.Infof("peer %s is bad because it's status is %s", peer.ID, peer.GetStatus()) - return true - } - - if peer.Host.IsCDN { - logger.Infof("peer %s is cdn can't be bad node", peer.ID) - return false - } - - // Determine whether to bad node based on piece download costs - rawCosts := peer.GetPieceCosts() - costs := stats.LoadRawData(rawCosts) - len := len(costs) - // Peer has not finished downloading enough piece - if len < minAvailableCostLen { - logger.Infof("peer %s has not finished downloading enough piece, it can't be bad node", peer.ID) - return false - } - - lastCost := costs[len-1] - mean, _ := stats.Mean(costs[:len-1]) // nolint: errcheck - - // Download costs does not meet the normal distribution, - // if the last cost is forty times more than mean, it is bad node. - isBadNode := big.NewFloat(lastCost).Cmp(big.NewFloat(mean*40)) > 0 - logger.Infof("peer %s mean is %.2f and it is bad node: %t", peer.ID, mean, isBadNode) + stdev, _ := stats.StandardDeviation(costs[:len-1]) // nolint: errcheck + isBadNode := big.NewFloat(lastCost).Cmp(big.NewFloat(mean+3*stdev)) > 0 + logger.Infof("peer %s meets the normal distribution, costs mean is %.2f and standard deviation is %.2f, peer is a bad node: %t", + peer.ID, mean, stdev, isBadNode) return isBadNode } diff --git a/scheduler/scheduler/evaluator/evaluator_base_test.go b/scheduler/scheduler/evaluator/evaluator_base_test.go new file mode 100644 index 00000000000..01e8c943fe3 --- /dev/null +++ b/scheduler/scheduler/evaluator/evaluator_base_test.go @@ -0,0 +1,700 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package evaluator + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/pkg/idgen" + "d7y.io/dragonfly/v2/pkg/rpc/base" + "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + "d7y.io/dragonfly/v2/scheduler/resource" +) + +var ( + mockRawHost = &scheduler.PeerHost{ + Uuid: idgen.HostID("hostname", 8003), + Ip: "127.0.0.1", + RpcPort: 8003, + DownPort: 8001, + HostName: "hostname", + SecurityDomain: "security_domain", + Location: "location", + Idc: "idc", + NetTopology: "net_topology", + } + mockTaskURLMeta = &base.UrlMeta{ + Digest: "digest", + Tag: "tag", + Range: "range", + Filter: "filter", + Header: map[string]string{ + "content-length": "100", + }, + } + mockTaskURL = "http://example.com/foo" + mockTaskBackToSourceLimit = 200 + mockTaskID = idgen.TaskID(mockTaskURL, mockTaskURLMeta) + mockPeerID = idgen.PeerID("127.0.0.1") +) + +func TestEvaluatorBase_NewEvaluatorBase(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, e interface{}) + }{ + { + name: "new evaluator base", + expect: func(t *testing.T, e interface{}) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(e).Elem().Name(), "evaluatorBase") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tc.expect(t, NewEvaluatorBase()) + }) + } +} + +func TestEvaluatorBase_Evaluate(t *testing.T) { + parentMockHost := resource.NewHost(mockRawHost) + parentMockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + childMockHost := resource.NewHost(mockRawHost) + childMockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + + tests := []struct { + name string + parent *resource.Peer + child *resource.Peer + totalPieceCount int32 + mock func(parent *resource.Peer, child *resource.Peer) + expect func(t *testing.T, score float64) + }{ + { + name: "security domain is not the same", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), parentMockTask, parentMockHost), + child: resource.NewPeer(idgen.PeerID("127.0.0.1"), childMockTask, childMockHost), + totalPieceCount: 1, + mock: func(parent *resource.Peer, child *resource.Peer) { + parent.Host.SecurityDomain = "foo" + child.Host.SecurityDomain = "bar" + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "security domain is same", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), parentMockTask, parentMockHost), + child: resource.NewPeer(idgen.PeerID("127.0.0.1"), childMockTask, childMockHost), + totalPieceCount: 1, + mock: func(parent *resource.Peer, child *resource.Peer) { + parent.Host.SecurityDomain = "bac" + child.Host.SecurityDomain = "bac" + parent.Pieces.Set(0) + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + { + name: "parent security domain is empty", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), parentMockTask, parentMockHost), + child: resource.NewPeer(idgen.PeerID("127.0.0.1"), childMockTask, childMockHost), + totalPieceCount: 1, + mock: func(parent *resource.Peer, child *resource.Peer) { + parent.Host.SecurityDomain = "" + child.Host.SecurityDomain = "baz" + parent.Pieces.Set(0) + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + { + name: "child security domain is empty", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), parentMockTask, 
parentMockHost), + child: resource.NewPeer(idgen.PeerID("127.0.0.1"), childMockTask, childMockHost), + totalPieceCount: 1, + mock: func(parent *resource.Peer, child *resource.Peer) { + parent.Host.SecurityDomain = "baz" + child.Host.SecurityDomain = "" + parent.Pieces.Set(0) + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + eb := NewEvaluatorBase() + tc.mock(tc.parent, tc.child) + tc.expect(t, eb.Evaluate(tc.parent, tc.child, tc.totalPieceCount)) + }) + } +} + +func TestEvaluatorBase_calculatePieceScore(t *testing.T) { + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + + tests := []struct { + name string + parent *resource.Peer + child *resource.Peer + totalPieceCount int32 + mock func(parent *resource.Peer, child *resource.Peer) + expect func(t *testing.T, score float64) + }{ + { + name: "total piece count is zero and child pieces are empty", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + child: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + totalPieceCount: 0, + mock: func(parent *resource.Peer, child *resource.Peer) { + parent.Pieces.Set(0) + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + { + name: "total piece count is zero and parent pieces are empty", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + child: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + totalPieceCount: 0, + mock: func(parent *resource.Peer, child *resource.Peer) { + child.Pieces.Set(0) + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(-1)) + }, + }, + { + name: "total piece count is zero and child pieces of length greater than parent pieces", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + child: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + totalPieceCount: 0, + mock: func(parent *resource.Peer, child *resource.Peer) { + parent.Pieces.Set(0) + child.Pieces.Set(0) + child.Pieces.Set(1) + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(-1)) + }, + }, + { + name: "total piece count is zero and child pieces of length equal than parent pieces", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + child: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + totalPieceCount: 0, + mock: func(parent *resource.Peer, child *resource.Peer) { + parent.Pieces.Set(0) + child.Pieces.Set(0) + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "total piece count is zero and parent pieces of length greater than child pieces", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + child: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + totalPieceCount: 0, + mock: func(parent *resource.Peer, child *resource.Peer) { + parent.Pieces.Set(0) + parent.Pieces.Set(1) + child.Pieces.Set(0) + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + { + name: "parent pieces are empty", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + 
child: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + totalPieceCount: 10, + mock: func(parent *resource.Peer, child *resource.Peer) {}, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "parent pieces of length greater than zero", + parent: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + child: resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost), + totalPieceCount: 10, + mock: func(parent *resource.Peer, child *resource.Peer) { + parent.Pieces.Set(0) + parent.Pieces.Set(1) + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0.2)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tc.mock(tc.parent, tc.child) + tc.expect(t, calculatePieceScore(tc.parent, tc.child, tc.totalPieceCount)) + }) + } +} + +func TestEvaluatorBase_calculateFreeLoadScore(t *testing.T) { + tests := []struct { + name string + mock func(host *resource.Host, mockPeer *resource.Peer) + expect func(t *testing.T, score float64) + }{ + { + name: "host peers is not empty", + mock: func(host *resource.Host, mockPeer *resource.Peer) { + host.StorePeer(mockPeer) + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0.99)) + }, + }, + { + name: "host peers is empty", + mock: func(host *resource.Host, mockPeer *resource.Peer) {}, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + host := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := resource.NewPeer(mockPeerID, mockTask, host) + tc.mock(host, mockPeer) + tc.expect(t, calculateFreeLoadScore(host)) + }) + } +} + +func TestEvaluatorBase_calculateHostTypeAffinityScore(t *testing.T) { + tests := []struct { + name string + mock func(host *resource.Host) + expect func(t *testing.T, score float64) + }{ + { + name: "host is normal peer", + mock: func(host *resource.Host) {}, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + { + name: "host is cdn", + mock: func(host *resource.Host) { + host.IsCDN = true + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + host := resource.NewHost(mockRawHost) + tc.mock(host) + tc.expect(t, calculateHostTypeAffinityScore(host)) + }) + } +} + +func TestEvaluatorBase_calculateIDCAffinityScore(t *testing.T) { + tests := []struct { + name string + mock func(dstHost *resource.Host, srcHost *resource.Host) + expect func(t *testing.T, score float64) + }{ + { + name: "idc is empty", + mock: func(dstHost *resource.Host, srcHost *resource.Host) { + dstHost.IDC = "" + srcHost.IDC = "" + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "dst host idc is empty", + mock: func(dstHost *resource.Host, srcHost *resource.Host) { + dstHost.IDC = "" + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "src host idc is empty", + mock: func(dstHost *resource.Host, srcHost *resource.Host) { + srcHost.IDC = "" + }, 
+ expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "idc is not the same", + mock: func(dstHost *resource.Host, srcHost *resource.Host) { + dstHost.IDC = "foo" + srcHost.IDC = "bar" + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "idc is the same", + mock: func(dstHost *resource.Host, srcHost *resource.Host) { + dstHost.IDC = "example" + srcHost.IDC = "example" + }, + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + dstHost := resource.NewHost(mockRawHost) + srcHost := resource.NewHost(mockRawHost) + tc.mock(dstHost, srcHost) + tc.expect(t, calculateIDCAffinityScore(dstHost, srcHost)) + }) + } +} + +func TestEvaluatorBase_calculateMultiElementAffinityScore(t *testing.T) { + tests := []struct { + name string + dst string + src string + expect func(t *testing.T, score float64) + }{ + { + name: "dst is empty and src is empty", + dst: "", + src: "", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "dst is empty", + dst: "", + src: "baz", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "src is empty", + dst: "bar", + src: "", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "has only one element and matches", + dst: "foo", + src: "foo", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + { + name: "has only one element and does not match", + dst: "foo", + src: "bar", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "has multi element and match", + dst: "foo|bar", + src: "foo|bar", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + { + name: "has multi element and does not match", + dst: "foo|bar", + src: "bar|foo", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0)) + }, + }, + { + name: "dst length is greater than src", + dst: "foo|bar|baz", + src: "foo|bar", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0.4)) + }, + }, + { + name: "src length is greater than dst", + dst: "foo|bar", + src: "foo|bar|baz", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0.4)) + }, + }, + { + name: "dst exceeds maximum length", + dst: "foo|bar|baz|bac|bae|baf", + src: "foo|bar|baz", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0.6)) + }, + }, + { + name: "src exceeds maximum length", + dst: "foo|bar|baz", + src: "foo|bar|baz|bac|bae|baf", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(0.6)) + }, + }, + { + name: "dst and src both exceeds maximum length", + dst: "foo|bar|baz|bac|bae|baf", + src: "foo|bar|baz|bac|bae|baf", + expect: func(t *testing.T, score float64) { + assert := assert.New(t) + assert.Equal(score, float64(1)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { 
+ tc.expect(t, calculateMultiElementAffinityScore(tc.dst, tc.src)) + }) + } +} + +func TestEvaluatorBase_IsBadNode(t *testing.T) { + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + + tests := []struct { + name string + peer *resource.Peer + totalPieceCount int32 + mock func(peer *resource.Peer) + expect func(t *testing.T, isBadNode bool) + }{ + { + name: "peer state is PeerStateFailed", + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + totalPieceCount: 1, + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateFailed) + }, + expect: func(t *testing.T, isBadNode bool) { + assert := assert.New(t) + assert.True(isBadNode) + }, + }, + { + name: "peer state is PeerStateLeave", + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + totalPieceCount: 1, + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateLeave) + }, + expect: func(t *testing.T, isBadNode bool) { + assert := assert.New(t) + assert.True(isBadNode) + }, + }, + { + name: "peer state is PeerStatePending", + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + totalPieceCount: 1, + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStatePending) + }, + expect: func(t *testing.T, isBadNode bool) { + assert := assert.New(t) + assert.True(isBadNode) + }, + }, + { + name: "download costs does not meet the normal distribution and last cost is twenty times more than mean", + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + totalPieceCount: 1, + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.AppendPieceCost(10) + peer.AppendPieceCost(201) + }, + expect: func(t *testing.T, isBadNode bool) { + assert := assert.New(t) + assert.True(isBadNode) + }, + }, + { + name: "download costs does not meet the normal distribution and last cost is twenty times lower than mean", + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + totalPieceCount: 1, + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.AppendPieceCost(10) + peer.AppendPieceCost(200) + }, + expect: func(t *testing.T, isBadNode bool) { + assert := assert.New(t) + assert.False(isBadNode) + }, + }, + { + name: "download costs meet the normal distribution and last cost is too long", + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + totalPieceCount: 1, + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.AppendPieceCost(10) + peer.AppendPieceCost(11) + peer.AppendPieceCost(12) + peer.AppendPieceCost(13) + peer.AppendPieceCost(14) + peer.AppendPieceCost(9) + peer.AppendPieceCost(8) + peer.AppendPieceCost(7) + peer.AppendPieceCost(6) + peer.AppendPieceCost(5) + peer.AppendPieceCost(19) + }, + expect: func(t *testing.T, isBadNode bool) { + assert := assert.New(t) + assert.True(isBadNode) + }, + }, + { + name: "download costs meet the normal distribution and last cost is normal", + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + totalPieceCount: 1, + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.AppendPieceCost(10) + peer.AppendPieceCost(11) + peer.AppendPieceCost(12) + peer.AppendPieceCost(13) + peer.AppendPieceCost(14) + peer.AppendPieceCost(9) + peer.AppendPieceCost(8) + peer.AppendPieceCost(7) + peer.AppendPieceCost(6) + peer.AppendPieceCost(5) + peer.AppendPieceCost(18) + }, + expect: func(t *testing.T, isBadNode bool) { + assert := 
assert.New(t) + assert.False(isBadNode) + }, + }, + { + name: "download costs meet the normal distribution and last cost is too short", + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + totalPieceCount: 1, + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.AppendPieceCost(10) + peer.AppendPieceCost(11) + peer.AppendPieceCost(12) + peer.AppendPieceCost(13) + peer.AppendPieceCost(14) + peer.AppendPieceCost(9) + peer.AppendPieceCost(8) + peer.AppendPieceCost(7) + peer.AppendPieceCost(6) + peer.AppendPieceCost(5) + peer.AppendPieceCost(0) + }, + expect: func(t *testing.T, isBadNode bool) { + assert := assert.New(t) + assert.False(isBadNode) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + eb := NewEvaluatorBase() + tc.mock(tc.peer) + tc.expect(t, eb.IsBadNode(tc.peer)) + }) + } +} diff --git a/scheduler/core/evaluator/evaluator_test.go b/scheduler/scheduler/evaluator/evaluator_test.go similarity index 97% rename from scheduler/core/evaluator/evaluator_test.go rename to scheduler/scheduler/evaluator/evaluator_test.go index f5fc0ebd1ab..6c435108209 100644 --- a/scheduler/core/evaluator/evaluator_test.go +++ b/scheduler/scheduler/evaluator/evaluator_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestEvaluatorNew(t *testing.T) { +func TestEvaluator_New(t *testing.T) { pluginDir := "." tests := []struct { name string diff --git a/scheduler/core/evaluator/plugin.go b/scheduler/scheduler/evaluator/plugin.go similarity index 100% rename from scheduler/core/evaluator/plugin.go rename to scheduler/scheduler/evaluator/plugin.go diff --git a/scheduler/core/evaluator/plugin_test.go b/scheduler/scheduler/evaluator/plugin_test.go similarity index 97% rename from scheduler/core/evaluator/plugin_test.go rename to scheduler/scheduler/evaluator/plugin_test.go index aa6d9526a27..5e1b002b22d 100644 --- a/scheduler/core/evaluator/plugin_test.go +++ b/scheduler/scheduler/evaluator/plugin_test.go @@ -25,7 +25,7 @@ import ( testifyassert "github.com/stretchr/testify/assert" ) -func TestLoadPlugin(t *testing.T) { +func TestPlugin_Load(t *testing.T) { assert := testifyassert.New(t) defer func() { os.Remove("./testdata/d7y-scheduler-plugin-evaluator.so") diff --git a/scheduler/core/evaluator/testdata/main.go b/scheduler/scheduler/evaluator/testdata/main.go similarity index 71% rename from scheduler/core/evaluator/testdata/main.go rename to scheduler/scheduler/evaluator/testdata/main.go index de78aca5f43..647ead79091 100644 --- a/scheduler/core/evaluator/testdata/main.go +++ b/scheduler/scheduler/evaluator/testdata/main.go @@ -20,8 +20,8 @@ import ( "fmt" "os" - "d7y.io/dragonfly/v2/scheduler/core/evaluator" - "d7y.io/dragonfly/v2/scheduler/supervisor" + "d7y.io/dragonfly/v2/scheduler/resource" + "d7y.io/dragonfly/v2/scheduler/scheduler/evaluator" ) func main() { @@ -31,17 +31,12 @@ func main() { os.Exit(1) } - if score := e.Evaluate(&supervisor.Peer{}, &supervisor.Peer{}, int32(0)); score != float64(1) { + if score := e.Evaluate(&resource.Peer{}, &resource.Peer{}, int32(0)); score != float64(1) { fmt.Println("Evaluate failed") os.Exit(1) } - if ok := e.NeedAdjustParent(&supervisor.Peer{}); !ok { - fmt.Println("NeedAdjustParent failed") - os.Exit(1) - } - - if ok := e.IsBadNode(&supervisor.Peer{}); !ok { + if ok := e.IsBadNode(&resource.Peer{}); !ok { fmt.Println("IsBadNode failed") os.Exit(1) } diff --git a/scheduler/core/evaluator/testdata/plugin/evaluator.go 
b/scheduler/scheduler/evaluator/testdata/plugin/evaluator.go similarity index 74% rename from scheduler/core/evaluator/testdata/plugin/evaluator.go rename to scheduler/scheduler/evaluator/testdata/plugin/evaluator.go index e8e35f82ed4..974ae0d3956 100644 --- a/scheduler/core/evaluator/testdata/plugin/evaluator.go +++ b/scheduler/scheduler/evaluator/testdata/plugin/evaluator.go @@ -16,19 +16,15 @@ package main -import "d7y.io/dragonfly/v2/scheduler/supervisor" +import "d7y.io/dragonfly/v2/scheduler/resource" type evaluator struct{} -func (e *evaluator) Evaluate(parent *supervisor.Peer, child *supervisor.Peer, taskPieceCount int32) float64 { +func (e *evaluator) Evaluate(parent *resource.Peer, child *resource.Peer, taskPieceCount int32) float64 { return float64(1) } -func (e *evaluator) NeedAdjustParent(peer *supervisor.Peer) bool { - return true -} - -func (e *evaluator) IsBadNode(peer *supervisor.Peer) bool { +func (e *evaluator) IsBadNode(peer *resource.Peer) bool { return true } diff --git a/scheduler/scheduler/mocks/scheduler_mock.go b/scheduler/scheduler/mocks/scheduler_mock.go new file mode 100644 index 00000000000..bc32d65fcf9 --- /dev/null +++ b/scheduler/scheduler/mocks/scheduler_mock.go @@ -0,0 +1,67 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: scheduler/scheduler/scheduler.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + set "d7y.io/dragonfly/v2/pkg/container/set" + resource "d7y.io/dragonfly/v2/scheduler/resource" + gomock "github.com/golang/mock/gomock" +) + +// MockScheduler is a mock of Scheduler interface. +type MockScheduler struct { + ctrl *gomock.Controller + recorder *MockSchedulerMockRecorder +} + +// MockSchedulerMockRecorder is the mock recorder for MockScheduler. +type MockSchedulerMockRecorder struct { + mock *MockScheduler +} + +// NewMockScheduler creates a new mock instance. +func NewMockScheduler(ctrl *gomock.Controller) *MockScheduler { + mock := &MockScheduler{ctrl: ctrl} + mock.recorder = &MockSchedulerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScheduler) EXPECT() *MockSchedulerMockRecorder { + return m.recorder +} + +// FindParent mocks base method. +func (m *MockScheduler) FindParent(arg0 context.Context, arg1 *resource.Peer, arg2 set.SafeSet) (*resource.Peer, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindParent", arg0, arg1, arg2) + ret0, _ := ret[0].(*resource.Peer) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// FindParent indicates an expected call of FindParent. +func (mr *MockSchedulerMockRecorder) FindParent(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindParent", reflect.TypeOf((*MockScheduler)(nil).FindParent), arg0, arg1, arg2) +} + +// ScheduleParent mocks base method. +func (m *MockScheduler) ScheduleParent(arg0 context.Context, arg1 *resource.Peer, arg2 set.SafeSet) ([]*resource.Peer, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ScheduleParent", arg0, arg1, arg2) + ret0, _ := ret[0].([]*resource.Peer) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// ScheduleParent indicates an expected call of ScheduleParent. 
+func (mr *MockSchedulerMockRecorder) ScheduleParent(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScheduleParent", reflect.TypeOf((*MockScheduler)(nil).ScheduleParent), arg0, arg1, arg2) +} diff --git a/scheduler/scheduler/scheduler.go b/scheduler/scheduler/scheduler.go new file mode 100644 index 00000000000..a8c81b3f130 --- /dev/null +++ b/scheduler/scheduler/scheduler.go @@ -0,0 +1,177 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package scheduler + +import ( + "context" + "sort" + + "d7y.io/dragonfly/v2/pkg/container/set" + "d7y.io/dragonfly/v2/pkg/rpc/base" + rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + "d7y.io/dragonfly/v2/scheduler/config" + "d7y.io/dragonfly/v2/scheduler/resource" + "d7y.io/dragonfly/v2/scheduler/scheduler/evaluator" +) + +type Scheduler interface { + // ScheduleParent schedules a parent and candidate parents for a peer + ScheduleParent(context.Context, *resource.Peer, set.SafeSet) ([]*resource.Peer, bool) + + // FindParent finds the parent that best matches the evaluation + FindParent(context.Context, *resource.Peer, set.SafeSet) (*resource.Peer, bool) +} + +type scheduler struct { + evaluator evaluator.Evaluator +} + +func New(cfg *config.SchedulerConfig, pluginDir string) Scheduler { + return &scheduler{ + evaluator: evaluator.New(cfg.Algorithm, pluginDir), + } +} + +func (s *scheduler) ScheduleParent(ctx context.Context, peer *resource.Peer, blocklist set.SafeSet) ([]*resource.Peer, bool) { + // Only PeerStateRunning peers need to be rescheduled, + // and other states including the PeerStateBackToSource indicate that + // they have been scheduled + if !peer.FSM.Is(resource.PeerStateRunning) { + peer.Log.Infof("peer state is %s, cannot schedule parent", peer.FSM.Current()) + return []*resource.Peer{}, false + } + + // Find the parents that can be scheduled + parents := s.filterParents(peer, blocklist) + if len(parents) == 0 { + peer.Log.Info("cannot find parents") + return []*resource.Peer{}, false + } + + // Sort parents by evaluation score + taskTotalPieceCount := peer.Task.TotalPieceCount.Load() + sort.Slice( + parents, + func(i, j int) bool { + return s.evaluator.Evaluate(peer, parents[i], taskTotalPieceCount) > s.evaluator.Evaluate(peer, parents[j], taskTotalPieceCount) + }, + ) + + // Send scheduling success message + stream, ok := peer.LoadStream() + if !ok { + peer.Log.Error("load peer stream failed") + return []*resource.Peer{}, false + } + + if err := stream.Send(constructSuccessPeerPacket(peer, parents[0], parents[1:])); err != nil { + peer.Log.Error(err) + return []*resource.Peer{}, false + } + + peer.ReplaceParent(parents[0]) + peer.Log.Infof("schedule parent succeeded, replaced parent with %s", parents[0].ID) + return parents, true +} + +func (s *scheduler) FindParent(ctx context.Context, peer *resource.Peer, blocklist set.SafeSet) (*resource.Peer, bool) { + // Filter the parents that can be scheduled + parents :=
s.filterParents(peer, blocklist) + if len(parents) == 0 { + peer.Log.Info("cannot find parents") + return nil, false + } + + // Sort parents by evaluation score + taskTotalPieceCount := peer.Task.TotalPieceCount.Load() + sort.Slice( + parents, + func(i, j int) bool { + return s.evaluator.Evaluate(peer, parents[i], taskTotalPieceCount) > s.evaluator.Evaluate(peer, parents[j], taskTotalPieceCount) + }, + ) + + peer.Log.Infof("find parent %s succeeded", parents[0].ID) + return parents[0], true +} + +// Filter the parents that can be scheduled +func (s *scheduler) filterParents(peer *resource.Peer, blocklist set.SafeSet) []*resource.Peer { + var parents []*resource.Peer + peer.Task.Peers.Range(func(_, value interface{}) bool { + parent, ok := value.(*resource.Peer) + if !ok { + return true + } + + if blocklist.Contains(parent.ID) { + peer.Log.Infof("parent %s is not selected because it is in blocklist", parent.ID) + return true + } + + if parent == peer { + peer.Log.Info("parent is not selected because it is the peer itself") + return true + } + + if s.evaluator.IsBadNode(parent) { + peer.Log.Infof("parent %s is not selected because it is a bad node", parent.ID) + return true + } + + if parent.IsDescendant(peer) { + peer.Log.Infof("parent %s is not selected because it is a descendant of the peer", parent.ID) + return true + } + + if parent.Host.FreeUploadLoad() <= 0 { + peer.Log.Infof("parent %s is not selected because it has no free upload load", parent.ID) + return true + } + + parents = append(parents, parent) + peer.Log.Infof("parent %s is selected", parent.ID) + return true + }) + + return parents +} + +func constructSuccessPeerPacket(peer *resource.Peer, parent *resource.Peer, candidateParents []*resource.Peer) *rpcscheduler.PeerPacket { + var stealPeers []*rpcscheduler.PeerPacket_DestPeer + for _, candidateParent := range candidateParents { + stealPeers = append(stealPeers, &rpcscheduler.PeerPacket_DestPeer{ + Ip: candidateParent.Host.IP, + RpcPort: candidateParent.Host.Port, + PeerId: candidateParent.ID, + }) + } + + return &rpcscheduler.PeerPacket{ + TaskId: peer.Task.ID, + SrcPid: peer.ID, + // TODO(gaius-qi) Configure ParallelCount parameter in manager service + ParallelCount: 1, + MainPeer: &rpcscheduler.PeerPacket_DestPeer{ + Ip: parent.Host.IP, + RpcPort: parent.Host.Port, + PeerId: parent.ID, + }, + StealPeers: stealPeers, + Code: base.Code_Success, + } +} diff --git a/scheduler/scheduler/scheduler_test.go b/scheduler/scheduler/scheduler_test.go new file mode 100644 index 00000000000..04d9351545a --- /dev/null +++ b/scheduler/scheduler/scheduler_test.go @@ -0,0 +1,418 @@ +/* + * Copyright 2020 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package scheduler + +import ( + "context" + "errors" + "reflect" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/pkg/container/set" + "d7y.io/dragonfly/v2/pkg/idgen" + "d7y.io/dragonfly/v2/pkg/rpc/base" + rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" + "d7y.io/dragonfly/v2/scheduler/config" + "d7y.io/dragonfly/v2/scheduler/resource" + "d7y.io/dragonfly/v2/scheduler/scheduler/evaluator" +) + +var ( + mockPluginDir = "plugin_dir" + mockSchedulerConfig = &config.SchedulerConfig{ + Algorithm: evaluator.DefaultAlgorithm, + } + mockRawHost = &rpcscheduler.PeerHost{ + Uuid: idgen.HostID("hostname", 8003), + Ip: "127.0.0.1", + RpcPort: 8003, + DownPort: 8001, + HostName: "hostname", + SecurityDomain: "security_domain", + Location: "location", + Idc: "idc", + NetTopology: "net_topology", + } + mockTaskURLMeta = &base.UrlMeta{ + Digest: "digest", + Tag: "tag", + Range: "range", + Filter: "filter", + Header: map[string]string{ + "content-length": "100", + }, + } + mockTaskURL = "http://example.com/foo" + mockTaskBackToSourceLimit = 200 + mockTaskID = idgen.TaskID(mockTaskURL, mockTaskURLMeta) + mockPeerID = idgen.PeerID("127.0.0.1") +) + +func TestScheduler_New(t *testing.T) { + tests := []struct { + name string + pluginDir string + expect func(t *testing.T, s interface{}) + }{ + { + name: "new scheduler", + pluginDir: "bar", + expect: func(t *testing.T, s interface{}) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(s).Elem().Name(), "scheduler") + }, + }, + { + name: "new scheduler with empty pluginDir", + pluginDir: "", + expect: func(t *testing.T, s interface{}) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(s).Elem().Name(), "scheduler") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + tc.expect(t, New(mockSchedulerConfig, tc.pluginDir)) + }) + } +} + +func TestScheduler_ScheduleParent(t *testing.T) { + tests := []struct { + name string + mock func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) + expect func(t *testing.T, parents []*resource.Peer, ok bool) + }{ + { + name: "peer state is PeerStatePending", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStatePending) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer state is PeerStateReceivedSmall", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateReceivedSmall) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer state is PeerStateReceivedNormal", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateReceivedNormal) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := 
assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer state is PeerStateBackToSource", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateBackToSource) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer state is PeerStateSucceeded", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateSucceeded) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer state is PeerStateFailed", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateFailed) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer state is PeerStateLeave", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateLeave) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "task peers is empty", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "task contains only one peer and peer is itself", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(peer) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer is in blocklist", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(mockPeer) + blocklist.Add(mockPeer.ID) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer is bad node", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.FSM.SetState(resource.PeerStateFailed) + peer.Task.StorePeer(mockPeer) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { 
+ name: "parent is peer's descendant", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + mockPeer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(mockPeer) + peer.StoreChild(mockPeer) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "parent free upload load is zero", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + mockPeer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(mockPeer) + mockPeer.Host.UploadLoadLimit.Store(0) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer stream is empty", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + mockPeer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(mockPeer) + mockPeer.Pieces.Set(0) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer stream send failed", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + mockPeer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(mockPeer) + mockPeer.Pieces.Set(0) + peer.StoreStream(stream) + ms.Send(gomock.Eq(constructSuccessPeerPacket(peer, mockPeer, []*resource.Peer{}))).Return(errors.New("foo")).Times(1) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "schedule parent", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + mockPeer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(mockPeer) + mockPeer.Pieces.Set(0) + peer.StoreStream(stream) + ms.Send(gomock.Eq(constructSuccessPeerPacket(peer, mockPeer, []*resource.Peer{}))).Return(nil).Times(1) + }, + expect: func(t *testing.T, parents []*resource.Peer, ok bool) { + assert := assert.New(t) + assert.Equal(len(parents), 1) + assert.True(ok) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + peer := resource.NewPeer(mockPeerID, mockTask, mockHost) + mockPeer := resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost) + blocklist := set.NewSafeSet() + + tc.mock(peer, mockPeer, blocklist, stream, stream.EXPECT()) + scheduler := New(mockSchedulerConfig, mockPluginDir) + 
parents, ok := scheduler.ScheduleParent(context.Background(), peer, blocklist) + tc.expect(t, parents, ok) + }) + } +} + +func TestScheduler_FindParent(t *testing.T) { + tests := []struct { + name string + mock func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet) + expect func(t *testing.T, parent *resource.Peer, ok bool) + }{ + { + name: "task peers is empty", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet) { + peer.FSM.SetState(resource.PeerStateRunning) + }, + expect: func(t *testing.T, parent *resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "task contains only one peer and peer is itself", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(peer) + }, + expect: func(t *testing.T, parent *resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer is in blocklist", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(mockPeer) + blocklist.Add(mockPeer.ID) + }, + expect: func(t *testing.T, parent *resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "peer is bad node", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.FSM.SetState(resource.PeerStateFailed) + peer.Task.StorePeer(mockPeer) + }, + expect: func(t *testing.T, parent *resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "parent is peer's descendant", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet) { + peer.FSM.SetState(resource.PeerStateRunning) + mockPeer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(mockPeer) + peer.StoreChild(mockPeer) + }, + expect: func(t *testing.T, parent *resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "parent free upload load is zero", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet) { + peer.FSM.SetState(resource.PeerStateRunning) + mockPeer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(mockPeer) + mockPeer.Host.UploadLoadLimit.Store(0) + }, + expect: func(t *testing.T, parent *resource.Peer, ok bool) { + assert := assert.New(t) + assert.False(ok) + }, + }, + { + name: "find parent", + mock: func(peer *resource.Peer, mockPeer *resource.Peer, blocklist set.SafeSet) { + peer.FSM.SetState(resource.PeerStateRunning) + mockPeer.FSM.SetState(resource.PeerStateRunning) + peer.Task.StorePeer(mockPeer) + mockPeer.Pieces.Set(0) + }, + expect: func(t *testing.T, parent *resource.Peer, ok bool) { + assert := assert.New(t) + assert.True(ok) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + peer := resource.NewPeer(mockPeerID, mockTask, mockHost) + mockPeer := resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost) + blocklist := set.NewSafeSet() + + tc.mock(peer, mockPeer, blocklist) + scheduler := New(mockSchedulerConfig, mockPluginDir) + parent, ok := scheduler.FindParent(context.Background(), peer, blocklist) + tc.expect(t, parent, ok) + }) + } +} diff --git 
a/scheduler/service/callback.go b/scheduler/service/callback.go
new file mode 100644
index 00000000000..baa48907c6a
--- /dev/null
+++ b/scheduler/service/callback.go
@@ -0,0 +1,293 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package service
+
+import (
+	"context"
+	"time"
+
+	"d7y.io/dragonfly/v2/internal/dferrors"
+	"d7y.io/dragonfly/v2/pkg/container/set"
+	"d7y.io/dragonfly/v2/pkg/rpc/base"
+	rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler"
+	"d7y.io/dragonfly/v2/scheduler/config"
+	"d7y.io/dragonfly/v2/scheduler/resource"
+	"d7y.io/dragonfly/v2/scheduler/scheduler"
+)
+
+type Callback interface {
+	ScheduleParent(context.Context, *resource.Peer, set.SafeSet)
+	BeginOfPiece(context.Context, *resource.Peer)
+	EndOfPiece(context.Context, *resource.Peer)
+	PieceSuccess(context.Context, *resource.Peer, *rpcscheduler.PieceResult)
+	PieceFail(context.Context, *resource.Peer, *rpcscheduler.PieceResult)
+	PeerSuccess(context.Context, *resource.Peer)
+	PeerFail(context.Context, *resource.Peer)
+	PeerLeave(context.Context, *resource.Peer)
+	TaskSuccess(context.Context, *resource.Task, *rpcscheduler.PeerResult)
+	TaskFail(context.Context, *resource.Task)
+}
+
+type callback struct {
+	// Resource interface
+	resource resource.Resource
+
+	// Scheduler interface
+	scheduler scheduler.Scheduler
+
+	// Scheduler service config
+	config *config.Config
+}
+
+func newCallback(cfg *config.Config, resource resource.Resource, scheduler scheduler.Scheduler) Callback {
+	return &callback{
+		config:    cfg,
+		resource:  resource,
+		scheduler: scheduler,
+	}
+}
+
+// ScheduleParent repeatedly schedules a parent for the peer
+// until it succeeds or the retry limit is exceeded
+func (c *callback) ScheduleParent(ctx context.Context, peer *resource.Peer, blocklist set.SafeSet) {
+	var n int
+	for {
+		select {
+		case <-ctx.Done():
+			peer.Log.Infof("context was done")
+			return
+		default:
+		}
+
+		// Peer scheduling exceeds the retry limit
+		if n >= c.config.Scheduler.RetryLimit {
+			if peer.Task.CanBackToSource() {
+				if ok := peer.StopStream(dferrors.Newf(base.Code_SchedNeedBackSource, "peer scheduling exceeds the limit %d times", c.config.Scheduler.RetryLimit)); !ok {
+					return
+				}
+
+				if err := peer.FSM.Event(resource.PeerEventDownloadFromBackToSource); err != nil {
+					peer.Log.Errorf("peer fsm event failed: %v", err)
+					return
+				}
+
+				// If the task state is TaskStateFailed, the peer downloads
+				// back-to-source and the task state resets to TaskStateRunning
+				if peer.Task.FSM.Is(resource.TaskStateFailed) {
+					if err := peer.Task.FSM.Event(resource.TaskEventDownload); err != nil {
+						peer.Task.Log.Errorf("task fsm event failed: %v", err)
+						return
+					}
+				}
+
+				// If the peer downloads back-to-source, its parent needs to be deleted
+				peer.DeleteParent()
+				peer.Task.Log.Info("peer back to source successfully")
+				return
+			}
+
+			// Handle the failed peer
+			if ok := peer.StopStream(dferrors.Newf(base.Code_SchedTaskStatusError, "peer scheduling exceeds the limit %d times", c.config.Scheduler.RetryLimit)); !ok {
+				peer.Log.Error("stop stream failed")
+			}
+			return
+		}
+
+		if _, ok := c.scheduler.ScheduleParent(ctx, peer, blocklist); !ok {
+			n++
+			peer.Log.Infof("reschedule parent %d times failed", n)
+
+			// Sleep to avoid hot looping
+			time.Sleep(c.config.Scheduler.RetryInterval)
+			continue
+		}
+
+		peer.Log.Infof("reschedule parent %d times successfully", n+1)
+		return
+	}
+}
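+
+// For example, callers exclude known-bad peers through the blocklist before
+// rescheduling, as PieceFail does below:
+//
+//	blocklist := set.NewSafeSet()
+//	blocklist.Add(parent.ID)
+//	c.ScheduleParent(ctx, peer, blocklist)
+//
+// With the configuration used in the tests (RetryLimit: 3,
+// RetryInterval: 10*time.Millisecond), such a call makes up to three
+// scheduling attempts before the peer falls back to source or is stopped
+// with Code_SchedTaskStatusError.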
+
+func (c *callback) BeginOfPiece(ctx context.Context, peer *resource.Peer) {
+	switch peer.FSM.Current() {
+	case resource.PeerStateBackToSource:
+		// In the back-to-source download process, the peer returns directly
+		peer.Log.Info("peer back to source")
+		return
+	case resource.PeerStateReceivedSmall:
+		// When the task is small,
+		// the parent was already returned to the peer at registration
+		if err := peer.FSM.Event(resource.PeerEventDownload); err != nil {
+			peer.Log.Errorf("peer fsm event failed: %v", err)
+			return
+		}
+	case resource.PeerStateReceivedNormal:
+		// Neither a back-to-source nor a small-task download,
+		// so help the peer schedule a parent node
+		if err := peer.FSM.Event(resource.PeerEventDownload); err != nil {
+			peer.Log.Errorf("peer fsm event failed: %v", err)
+			return
+		}
+		blocklist := set.NewSafeSet()
+		blocklist.Add(peer.ID)
+		c.ScheduleParent(ctx, peer, blocklist)
+	default:
+		peer.Log.Warnf("peer state is %s when receiving the begin of piece", peer.FSM.Current())
+	}
+}
+
+func (c *callback) EndOfPiece(ctx context.Context, peer *resource.Peer) {}
+
+func (c *callback) PieceSuccess(ctx context.Context, peer *resource.Peer, piece *rpcscheduler.PieceResult) {
+	// Update peer piece info
+	peer.Pieces.Set(uint(piece.PieceInfo.PieceNum))
+	peer.AppendPieceCost(int64(piece.EndTime - piece.BeginTime))
+
+	// When the peer downloads back-to-source,
+	// a successful piece download also updates the task piece info
+	if peer.FSM.Is(resource.PeerStateBackToSource) {
+		peer.Task.StorePiece(piece.PieceInfo)
+	}
+}
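+
+// For example, a piece reported with Unix-second timestamps one second apart
+// records a piece cost of one second, mirroring the fixtures in
+// callback_test.go (now is any time.Time value):
+//
+//	piece := &rpcscheduler.PieceResult{
+//		PieceInfo: &base.PieceInfo{PieceNum: 0},
+//		BeginTime: uint64(now.Unix()),
+//		EndTime:   uint64(now.Add(time.Second).Unix()),
+//	}
+//	// After PieceSuccess, peer.Pieces.Count() is 1 and
+//	// peer.PieceCosts() yields []int64{1}.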
+
+func (c *callback) PieceFail(ctx context.Context, peer *resource.Peer, piece *rpcscheduler.PieceResult) {
+	// Failed to download a piece back-to-source
+	if peer.FSM.Is(resource.PeerStateBackToSource) {
+		peer.Log.Error("peer back to source finished with fail piece")
+		return
+	}
+
+	// Not a back-to-source download failure,
+	// so help the peer reschedule the parent node
+	switch piece.Code {
+	case base.Code_ClientWaitPieceReady:
+		peer.Log.Info("receive code Code_ClientWaitPieceReady")
+		return
+	case base.Code_ClientPieceDownloadFail, base.Code_PeerTaskNotFound, base.Code_CDNTaskNotFound, base.Code_CDNError, base.Code_CDNTaskDownloadFail:
+		peer.Log.Errorf("receive error code: %v", piece.Code)
+		if parent, ok := c.resource.PeerManager().Load(piece.DstPid); ok && parent.FSM.Can(resource.PeerEventDownloadFailed) {
+			if err := parent.FSM.Event(resource.PeerEventDownloadFailed); err != nil {
+				peer.Log.Errorf("peer fsm event failed: %v", err)
+				break
+			}
+		}
+	case base.Code_ClientPieceRequestFail:
+		peer.Log.Error("receive error code Code_ClientPieceRequestFail")
+	default:
+		peer.Log.Warnf("unknown report code: %v", piece.Code)
+	}
+
+	// Only a peer in PeerStateRunning can be rescheduled
+	if !peer.FSM.Is(resource.PeerStateRunning) {
+		peer.Log.Infof("peer can not be rescheduled because peer state is %s", peer.FSM.Current())
+		return
+	}
+
+	blocklist := set.NewSafeSet()
+	if parent, ok := c.resource.PeerManager().Load(piece.DstPid); ok {
+		blocklist.Add(parent.ID)
+	}
+
+	c.ScheduleParent(ctx, peer, blocklist)
+}
+
+func (c *callback) PeerSuccess(ctx context.Context, peer *resource.Peer) {
+	// If the peer type is tiny and back-to-source,
+	// it needs to download the tiny file directly and store the data in the task DirectPiece
+	if peer.FSM.Is(resource.PeerStateBackToSource) && peer.Task.SizeScope() == base.SizeScope_TINY {
+		peer.Log.Info("peer state is PeerStateBackToSource and type is tiny file")
+		data, err := peer.DownloadTinyFile(ctx)
+		if err == nil && len(data) == int(peer.Task.ContentLength.Load()) {
+			// Tiny file downloaded successfully
+			peer.Task.DirectPiece = data
+		} else {
+			peer.Log.Warnf("downloaded tiny file length is %d, task content length is %d, download failed: %v", len(data), peer.Task.ContentLength.Load(), err)
+		}
+	}
+
+	if err := peer.FSM.Event(resource.PeerEventDownloadSucceeded); err != nil {
+		peer.Log.Errorf("peer fsm event failed: %v", err)
+		return
+	}
+}
+
+func (c *callback) PeerFail(ctx context.Context, peer *resource.Peer) {
+	if err := peer.FSM.Event(resource.PeerEventDownloadFailed); err != nil {
+		peer.Log.Errorf("peer fsm event failed: %v", err)
+		return
+	}
+
+	// Reschedule a new parent for the peer's children to exclude the current failed peer
+	blocklist := set.NewSafeSet()
+	blocklist.Add(peer.ID)
+
+	peer.Children.Range(func(_, value interface{}) bool {
+		child, ok := value.(*resource.Peer)
+		if !ok {
+			return true
+		}
+
+		c.ScheduleParent(ctx, child, blocklist)
+		return true
+	})
+}
+
+func (c *callback) PeerLeave(ctx context.Context, peer *resource.Peer) {
+	if err := peer.FSM.Event(resource.PeerEventLeave); err != nil {
+		peer.Log.Errorf("peer fsm event failed: %v", err)
+		return
+	}
+
+	peer.Children.Range(func(_, value interface{}) bool {
+		child, ok := value.(*resource.Peer)
+		if !ok {
+			return true
+		}
+
+		// Reschedule a new parent for the peer's children to exclude the current leaving peer
+		blocklist := set.NewSafeSet()
+		blocklist.Add(peer.ID)
+
+		c.ScheduleParent(ctx, child, blocklist)
+		return true
+	})
+
+	peer.DeleteParent()
+	c.resource.PeerManager().Delete(peer.ID)
+}
+
+// Conditions for the task to switch to TaskStateSucceeded:
+// 1. CDN downloads the resource successfully
+// 2. Dfdaemon downloads back-to-source successfully
+func (c *callback) TaskSuccess(ctx context.Context, task *resource.Task, result *rpcscheduler.PeerResult) {
+	if err := task.FSM.Event(resource.TaskEventDownloadSucceeded); err != nil {
+		task.Log.Errorf("task fsm event failed: %v", err)
+		return
+	}
+
+	// Update the task's total piece count and content length
+	task.TotalPieceCount.Store(result.TotalPieceCount)
+	task.ContentLength.Store(result.ContentLength)
+}
+
+// Conditions for the task to switch to TaskStateFailed:
+// 1. CDN fails to download the resource
+// 2. Dfdaemon fails to download back-to-source
+func (c *callback) TaskFail(ctx context.Context, task *resource.Task) {
+	if err := task.FSM.Event(resource.TaskEventDownloadFailed); err != nil {
+		task.Log.Errorf("task fsm event failed: %v", err)
+		return
+	}
+}
diff --git a/scheduler/service/callback_test.go b/scheduler/service/callback_test.go
new file mode 100644
index 00000000000..aa31a6a360f
--- /dev/null
+++ b/scheduler/service/callback_test.go
@@ -0,0 +1,1102 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package service + +import ( + "context" + "net" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strconv" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/pkg/container/set" + "d7y.io/dragonfly/v2/pkg/idgen" + "d7y.io/dragonfly/v2/pkg/rpc/base" + rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + rpcschedulermocks "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" + "d7y.io/dragonfly/v2/scheduler/config" + "d7y.io/dragonfly/v2/scheduler/resource" + "d7y.io/dragonfly/v2/scheduler/scheduler/mocks" +) + +var ( + mockSchedulerConfig = &config.SchedulerConfig{ + RetryLimit: 3, + RetryInterval: 10 * time.Millisecond, + BackSourceCount: mockTaskBackToSourceLimit, + } + mockRawHost = &rpcscheduler.PeerHost{ + Uuid: idgen.HostID("hostname", 8003), + Ip: "127.0.0.1", + RpcPort: 8003, + DownPort: 8001, + HostName: "hostname", + SecurityDomain: "security_domain", + Location: "location", + Idc: "idc", + NetTopology: "net_topology", + } + mockTaskURLMeta = &base.UrlMeta{ + Digest: "digest", + Tag: "tag", + Range: "range", + Filter: "filter", + Header: map[string]string{ + "content-length": "100", + }, + } + mockTaskURL = "http://example.com/foo" + mockTaskBackToSourceLimit = 200 + mockTaskID = idgen.TaskID(mockTaskURL, mockTaskURLMeta) + mockPeerID = idgen.PeerID("127.0.0.1") + mockCDNPeerID = idgen.CDNPeerID("127.0.0.1") +) + +func TestCallback_newCallback(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, c interface{}) + }{ + { + name: "new callback", + expect: func(t *testing.T, c interface{}) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(c).Elem().Name(), "callback") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + resource := resource.NewMockResource(ctl) + + tc.expect(t, newCallback(&config.Config{Scheduler: mockSchedulerConfig}, resource, scheduler)) + }) + } +} + +func TestCallback_ScheduleParent(t *testing.T) { + tests := []struct { + name string + mock func(cancel context.CancelFunc, peer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, ms *mocks.MockSchedulerMockRecorder) + expect func(t *testing.T, peer *resource.Peer) + }{ + { + name: "context was done", + mock: func(cancel context.CancelFunc, peer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, ms *mocks.MockSchedulerMockRecorder) { + cancel() + }, + expect: func(t *testing.T, peer *resource.Peer) {}, + }, + { + name: "schedule parent", + mock: func(cancel context.CancelFunc, peer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, ms *mocks.MockSchedulerMockRecorder) { + ms.ScheduleParent(gomock.Any(), 
gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, true).Times(1) + }, + expect: func(t *testing.T, peer *resource.Peer) {}, + }, + { + name: "reschedule parent", + mock: func(cancel context.CancelFunc, peer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, ms *mocks.MockSchedulerMockRecorder) { + gomock.InOrder( + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, false).Times(1), + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, true).Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer) {}, + }, + { + name: "schedule parent failed and return error to client", + mock: func(cancel context.CancelFunc, peer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, ms *mocks.MockSchedulerMockRecorder) { + peer.Task.BackToSourceLimit.Store(0) + peer.StoreStream(stream) + gomock.InOrder( + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, false).Times(3), + ) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + + dferr := <-peer.StopChannel + assert.Equal(dferr.Code, base.Code_SchedTaskStatusError) + }, + }, + { + name: "schedule parent failed and load peer stream error", + mock: func(cancel context.CancelFunc, peer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, ms *mocks.MockSchedulerMockRecorder) { + peer.Task.BackToSourceLimit.Store(0) + gomock.InOrder( + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, false).Times(3), + ) + }, + expect: func(t *testing.T, peer *resource.Peer) {}, + }, + { + name: "schedule parent failed and peer back-to-source", + mock: func(cancel context.CancelFunc, peer *resource.Peer, blocklist set.SafeSet, stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, ms *mocks.MockSchedulerMockRecorder) { + peer.Task.BackToSourceLimit.Store(1) + peer.FSM.SetState(resource.PeerStateRunning) + peer.Task.FSM.SetState(resource.TaskStateFailed) + peer.StoreStream(stream) + gomock.InOrder( + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, false).Times(3), + ) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + + dferr := <-peer.StopChannel + assert.Equal(dferr.Code, base.Code_SchedNeedBackSource) + assert.True(peer.FSM.Is(resource.PeerStateBackToSource)) + assert.True(peer.Task.FSM.Is(resource.TaskStateRunning)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + stream := rpcschedulermocks.NewMockScheduler_ReportPieceResultServer(ctl) + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + ctx, cancel := context.WithCancel(context.Background()) + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + peer := resource.NewPeer(mockPeerID, mockTask, mockHost) + callback := newCallback(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler) + blocklist := set.NewSafeSet() + + tc.mock(cancel, peer, blocklist, stream, 
stream.EXPECT(), scheduler.EXPECT()) + callback.ScheduleParent(ctx, peer, blocklist) + tc.expect(t, peer) + }) + } +} + +func TestCallback_BeginOfPiece(t *testing.T) { + tests := []struct { + name string + mock func(peer *resource.Peer, scheduler *mocks.MockSchedulerMockRecorder) + expect func(t *testing.T, peer *resource.Peer) + }{ + { + name: "peer state is PeerStateBackToSource", + mock: func(peer *resource.Peer, scheduler *mocks.MockSchedulerMockRecorder) { + peer.FSM.SetState(resource.PeerStateBackToSource) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateBackToSource)) + }, + }, + { + name: "peer state is PeerStateReceivedSmall", + mock: func(peer *resource.Peer, scheduler *mocks.MockSchedulerMockRecorder) { + peer.FSM.SetState(resource.PeerStateReceivedSmall) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + }, + }, + { + name: "peer state is PeerStateReceivedNormal", + mock: func(peer *resource.Peer, scheduler *mocks.MockSchedulerMockRecorder) { + peer.FSM.SetState(resource.PeerStateReceivedNormal) + blocklist := set.NewSafeSet() + blocklist.Add(peer.ID) + scheduler.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, true).Times(1) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + }, + }, + { + name: "peer state is PeerStateSucceeded", + mock: func(peer *resource.Peer, scheduler *mocks.MockSchedulerMockRecorder) { + peer.FSM.SetState(resource.PeerStateSucceeded) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateSucceeded)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + peer := resource.NewPeer(mockPeerID, mockTask, mockHost) + callback := newCallback(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler) + + tc.mock(peer, scheduler.EXPECT()) + callback.BeginOfPiece(context.Background(), peer) + tc.expect(t, peer) + }) + } +} + +func TestCallback_PieceSuccess(t *testing.T) { + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + + tests := []struct { + name string + piece *rpcscheduler.PieceResult + peer *resource.Peer + mock func(peer *resource.Peer) + expect func(t *testing.T, peer *resource.Peer) + }{ + { + name: "piece success", + piece: &rpcscheduler.PieceResult{ + PieceInfo: &base.PieceInfo{ + PieceNum: 0, + PieceMd5: "ac32345ef819f03710e2105c81106fdd", + }, + BeginTime: uint64(time.Now().Unix()), + EndTime: uint64(time.Now().Add(1 * time.Second).Unix()), + }, + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateRunning) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.Equal(peer.Pieces.Count(), uint(1)) + assert.Equal(peer.PieceCosts(), []int64{1}) + }, + }, + { + name: "piece state is PeerStateBackToSource", + piece: &rpcscheduler.PieceResult{ + PieceInfo: &base.PieceInfo{ + PieceNum: 0, + 
PieceMd5: "ac32345ef819f03710e2105c81106fdd", + }, + BeginTime: uint64(time.Now().Unix()), + EndTime: uint64(time.Now().Add(1 * time.Second).Unix()), + }, + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateBackToSource) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.Equal(peer.Pieces.Count(), uint(1)) + assert.Equal(peer.PieceCosts(), []int64{1}) + piece, ok := peer.Task.LoadPiece(0) + assert.True(ok) + assert.EqualValues(piece, &base.PieceInfo{ + PieceNum: 0, + PieceMd5: "ac32345ef819f03710e2105c81106fdd", + }) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + callback := newCallback(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler) + + tc.mock(tc.peer) + callback.PieceSuccess(context.Background(), tc.peer, tc.piece) + tc.expect(t, tc.peer) + }) + } +} + +func TestCallback_PieceFail(t *testing.T) { + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + + tests := []struct { + name string + piece *rpcscheduler.PieceResult + peer *resource.Peer + parent *resource.Peer + mock func(peer *resource.Peer, parent *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) + expect func(t *testing.T, peer *resource.Peer, parent *resource.Peer) + }{ + { + name: "peer state is PeerStateBackToSource", + piece: &rpcscheduler.PieceResult{}, + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + parent: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, parent *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateBackToSource) + }, + expect: func(t *testing.T, peer *resource.Peer, parent *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateBackToSource)) + }, + }, + { + name: "piece result code is Code_ClientWaitPieceReady", + piece: &rpcscheduler.PieceResult{ + Code: base.Code_ClientWaitPieceReady, + }, + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + parent: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, parent *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + }, + expect: func(t *testing.T, peer *resource.Peer, parent *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + }, + }, + { + name: "piece result code is Code_ClientPieceDownloadFail and parent state set PeerEventDownloadFailed", + piece: &rpcscheduler.PieceResult{ + Code: base.Code_ClientPieceDownloadFail, + DstPid: mockCDNPeerID, + }, + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + parent: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, parent *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp 
*resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + parent.FSM.SetState(resource.PeerStateRunning) + blocklist := set.NewSafeSet() + blocklist.Add(parent.ID) + gomock.InOrder( + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, true).Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer, parent *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + assert.True(parent.FSM.Is(resource.PeerStateFailed)) + }, + }, + { + name: "piece result code is Code_PeerTaskNotFound and parent state set PeerEventDownloadFailed", + piece: &rpcscheduler.PieceResult{ + Code: base.Code_PeerTaskNotFound, + DstPid: mockCDNPeerID, + }, + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + parent: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, parent *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + parent.FSM.SetState(resource.PeerStateRunning) + blocklist := set.NewSafeSet() + blocklist.Add(parent.ID) + gomock.InOrder( + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, true).Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer, parent *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + assert.True(parent.FSM.Is(resource.PeerStateFailed)) + }, + }, + { + name: "piece result code is Code_CDNTaskNotFound and parent state set PeerEventDownloadFailed", + piece: &rpcscheduler.PieceResult{ + Code: base.Code_CDNTaskNotFound, + DstPid: mockCDNPeerID, + }, + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + parent: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, parent *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + parent.FSM.SetState(resource.PeerStateRunning) + blocklist := set.NewSafeSet() + blocklist.Add(parent.ID) + gomock.InOrder( + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, true).Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer, parent *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + assert.True(parent.FSM.Is(resource.PeerStateFailed)) + }, + }, + { + name: "piece result code is Code_CDNError and parent state set PeerEventDownloadFailed", + piece: &rpcscheduler.PieceResult{ + Code: base.Code_CDNError, + DstPid: mockCDNPeerID, + }, + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + parent: 
resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, parent *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + parent.FSM.SetState(resource.PeerStateRunning) + blocklist := set.NewSafeSet() + blocklist.Add(parent.ID) + gomock.InOrder( + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, true).Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer, parent *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + assert.True(parent.FSM.Is(resource.PeerStateFailed)) + }, + }, + { + name: "piece result code is Code_CDNTaskDownloadFail and parent state set PeerEventDownloadFailed", + piece: &rpcscheduler.PieceResult{ + Code: base.Code_CDNTaskDownloadFail, + DstPid: mockCDNPeerID, + }, + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + parent: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, parent *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + parent.FSM.SetState(resource.PeerStateRunning) + blocklist := set.NewSafeSet() + blocklist.Add(parent.ID) + gomock.InOrder( + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, true).Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer, parent *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + assert.True(parent.FSM.Is(resource.PeerStateFailed)) + }, + }, + { + name: "piece result code is Code_ClientPieceRequestFail", + piece: &rpcscheduler.PieceResult{ + Code: base.Code_ClientPieceRequestFail, + DstPid: mockCDNPeerID, + }, + peer: resource.NewPeer(mockPeerID, mockTask, mockHost), + parent: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, parent *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + parent.FSM.SetState(resource.PeerStateRunning) + blocklist := set.NewSafeSet() + blocklist.Add(parent.ID) + gomock.InOrder( + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, true).Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer, parent *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + assert.True(parent.FSM.Is(resource.PeerStateRunning)) + }, + }, + { + name: "piece result code is unknow", + piece: &rpcscheduler.PieceResult{ + Code: base.Code_ClientPieceRequestFail, + DstPid: mockCDNPeerID, + }, + peer: 
resource.NewPeer(mockPeerID, mockTask, mockHost), + parent: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, parent *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + parent.FSM.SetState(resource.PeerStateRunning) + blocklist := set.NewSafeSet() + blocklist.Add(parent.ID) + gomock.InOrder( + mr.PeerManager().Return(peerManager).Times(1), + mp.Load(gomock.Eq(parent.ID)).Return(parent, true).Times(1), + ms.ScheduleParent(gomock.Any(), gomock.Eq(peer), gomock.Eq(blocklist)).Return(nil, true).Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer, parent *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + assert.True(parent.FSM.Is(resource.PeerStateRunning)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + peerManager := resource.NewMockPeerManager(ctl) + callback := newCallback(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler) + + tc.mock(tc.peer, tc.parent, peerManager, scheduler.EXPECT(), res.EXPECT(), peerManager.EXPECT()) + callback.PieceFail(context.Background(), tc.peer, tc.piece) + tc.expect(t, tc.peer, tc.parent) + }) + } +} + +func TestCallback_PeerSuccess(t *testing.T) { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if _, err := w.Write([]byte{1}); err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + })) + defer s.Close() + + tests := []struct { + name string + mock func(peer *resource.Peer) + expect func(t *testing.T, peer *resource.Peer) + }{ + { + name: "peer is tiny type and download piece success", + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateBackToSource) + peer.Task.ContentLength.Store(1) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.Equal(peer.Task.DirectPiece, []byte{1}) + assert.True(peer.FSM.Is(resource.PeerStateSucceeded)) + }, + }, + { + name: "peer is tiny type and download piece failed", + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateBackToSource) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.Empty(peer.Task.DirectPiece) + assert.True(peer.FSM.Is(resource.PeerStateSucceeded)) + }, + }, + { + name: "peer is small and state is PeerStateBackToSource", + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateBackToSource) + peer.Task.ContentLength.Store(resource.TinyFileSize + 1) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.Empty(peer.Task.DirectPiece) + assert.True(peer.FSM.Is(resource.PeerStateSucceeded)) + }, + }, + { + name: "peer is small and state is PeerStateRunning", + mock: func(peer *resource.Peer) { + peer.FSM.SetState(resource.PeerStateRunning) + peer.Task.ContentLength.Store(resource.TinyFileSize + 1) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.Empty(peer.Task.DirectPiece) + assert.True(peer.FSM.Is(resource.PeerStateSucceeded)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer 
ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + + url, err := url.Parse(s.URL) + if err != nil { + t.Fatal(err) + } + + ip, rawPort, err := net.SplitHostPort(url.Host) + if err != nil { + t.Fatal(err) + } + + port, err := strconv.ParseInt(rawPort, 10, 32) + if err != nil { + t.Fatal(err) + } + + mockRawHost.Ip = ip + mockRawHost.DownPort = int32(port) + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + peer := resource.NewPeer(mockPeerID, mockTask, mockHost) + callback := newCallback(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler) + + tc.mock(peer) + callback.PeerSuccess(context.Background(), peer) + tc.expect(t, peer) + }) + } +} + +func TestCallback_PeerFail(t *testing.T) { + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + + tests := []struct { + name string + peer *resource.Peer + child *resource.Peer + mock func(peer *resource.Peer, child *resource.Peer, ms *mocks.MockSchedulerMockRecorder) + expect func(t *testing.T, peer *resource.Peer, child *resource.Peer) + }{ + { + name: "peer state is PeerStateFailed", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, ms *mocks.MockSchedulerMockRecorder) { + peer.FSM.SetState(resource.PeerStateFailed) + }, + expect: func(t *testing.T, peer *resource.Peer, child *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateFailed)) + }, + }, + { + name: "peer state is PeerStateLeave", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, ms *mocks.MockSchedulerMockRecorder) { + peer.FSM.SetState(resource.PeerStateLeave) + }, + expect: func(t *testing.T, peer *resource.Peer, child *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateLeave)) + }, + }, + { + name: "peer state is PeerStateRunning and children need to be scheduled", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, ms *mocks.MockSchedulerMockRecorder) { + peer.StoreChild(child) + peer.FSM.SetState(resource.PeerStateRunning) + child.FSM.SetState(resource.PeerStateRunning) + + blocklist := set.NewSafeSet() + blocklist.Add(peer.ID) + ms.ScheduleParent(gomock.Any(), gomock.Eq(child), gomock.Eq(blocklist)).Return(nil, true).Times(1) + }, + expect: func(t *testing.T, peer *resource.Peer, child *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateFailed)) + }, + }, + { + name: "peer state is PeerStateRunning and it has no children", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, ms *mocks.MockSchedulerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + }, + expect: func(t *testing.T, peer *resource.Peer, child *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateFailed)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + 
scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + callback := newCallback(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler) + + tc.mock(tc.peer, tc.child, scheduler.EXPECT()) + callback.PeerFail(context.Background(), tc.peer) + tc.expect(t, tc.peer, tc.child) + }) + } +} + +func TestCallback_PeerLeave(t *testing.T) { + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + + tests := []struct { + name string + peer *resource.Peer + child *resource.Peer + mock func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) + expect func(t *testing.T, peer *resource.Peer) + }{ + { + name: "peer state is PeerStatePending", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStatePending) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStatePending)) + }, + }, + { + name: "peer state is PeerStateReceivedSmall", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateReceivedSmall) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateReceivedSmall)) + }, + }, + { + name: "peer state is PeerStateReceivedNormal", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateReceivedNormal) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateReceivedNormal)) + }, + }, + { + name: "peer state is PeerStateRunning", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateRunning) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateRunning)) + }, + }, + { + name: "peer state is PeerStateBackToSource", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + 
peer.FSM.SetState(resource.PeerStateBackToSource) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateBackToSource)) + }, + }, + { + name: "peer state is PeerStateLeave", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateLeave) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.True(peer.FSM.Is(resource.PeerStateLeave)) + }, + }, + { + name: "peer state is PeerStateSucceeded and children need to be scheduled", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.StoreChild(child) + peer.FSM.SetState(resource.PeerStateSucceeded) + child.FSM.SetState(resource.PeerStateRunning) + + blocklist := set.NewSafeSet() + blocklist.Add(peer.ID) + gomock.InOrder( + ms.ScheduleParent(gomock.Any(), gomock.Eq(child), gomock.Eq(blocklist)).Return(nil, true).Times(1), + mr.PeerManager().Return(peerManager).Times(1), + mp.Delete(gomock.Eq(peer.ID)).Return().Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + _, ok := peer.LoadParent() + assert.False(ok) + assert.True(peer.FSM.Is(resource.PeerStateLeave)) + }, + }, + { + name: "peer state is PeerStateSucceeded and it has no children", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateSucceeded) + + blocklist := set.NewSafeSet() + blocklist.Add(peer.ID) + gomock.InOrder( + mr.PeerManager().Return(peerManager).Times(1), + mp.Delete(gomock.Eq(peer.ID)).Return().Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + _, ok := peer.LoadParent() + assert.False(ok) + assert.True(peer.FSM.Is(resource.PeerStateLeave)) + }, + }, + { + name: "peer state is PeerStateFailed and children need to be scheduled", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.StoreChild(child) + peer.FSM.SetState(resource.PeerStateFailed) + child.FSM.SetState(resource.PeerStateRunning) + + blocklist := set.NewSafeSet() + blocklist.Add(peer.ID) + gomock.InOrder( + ms.ScheduleParent(gomock.Any(), gomock.Eq(child), gomock.Eq(blocklist)).Return(nil, true).Times(1), + mr.PeerManager().Return(peerManager).Times(1), + mp.Delete(gomock.Eq(peer.ID)).Return().Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + _, ok := peer.LoadParent() + assert.False(ok) + 
assert.True(peer.FSM.Is(resource.PeerStateLeave)) + }, + }, + { + name: "peer state is PeerStateFailed and it has no children", + peer: resource.NewPeer(mockCDNPeerID, mockTask, mockHost), + child: resource.NewPeer(mockPeerID, mockTask, mockHost), + mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + peer.FSM.SetState(resource.PeerStateFailed) + + blocklist := set.NewSafeSet() + blocklist.Add(peer.ID) + gomock.InOrder( + mr.PeerManager().Return(peerManager).Times(1), + mp.Delete(gomock.Eq(peer.ID)).Return().Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + _, ok := peer.LoadParent() + assert.False(ok) + assert.True(peer.FSM.Is(resource.PeerStateLeave)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + peerManager := resource.NewMockPeerManager(ctl) + callback := newCallback(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler) + + tc.mock(tc.peer, tc.child, peerManager, scheduler.EXPECT(), res.EXPECT(), peerManager.EXPECT()) + callback.PeerLeave(context.Background(), tc.peer) + tc.expect(t, tc.peer) + }) + } +} + +func TestCallback_TaskSuccess(t *testing.T) { + tests := []struct { + name string + result *rpcscheduler.PeerResult + mock func(task *resource.Task) + expect func(t *testing.T, task *resource.Task) + }{ + { + name: "task state is TaskStatePending", + result: &rpcscheduler.PeerResult{}, + mock: func(task *resource.Task) { + task.FSM.SetState(resource.TaskStatePending) + }, + expect: func(t *testing.T, task *resource.Task) { + assert := assert.New(t) + assert.True(task.FSM.Is(resource.TaskStatePending)) + }, + }, + { + name: "task state is TaskStateSucceeded", + result: &rpcscheduler.PeerResult{}, + mock: func(task *resource.Task) { + task.FSM.SetState(resource.TaskStateSucceeded) + }, + expect: func(t *testing.T, task *resource.Task) { + assert := assert.New(t) + assert.True(task.FSM.Is(resource.TaskStateSucceeded)) + }, + }, + { + name: "task state is TaskStateRunning", + result: &rpcscheduler.PeerResult{ + TotalPieceCount: 1, + ContentLength: 1, + }, + mock: func(task *resource.Task) { + task.FSM.SetState(resource.TaskStateRunning) + }, + expect: func(t *testing.T, task *resource.Task) { + assert := assert.New(t) + assert.True(task.FSM.Is(resource.TaskStateSucceeded)) + assert.Equal(task.TotalPieceCount.Load(), int32(1)) + assert.Equal(task.ContentLength.Load(), int64(1)) + }, + }, + { + name: "task state is TaskStateFailed", + result: &rpcscheduler.PeerResult{ + TotalPieceCount: 1, + ContentLength: 1, + }, + mock: func(task *resource.Task) { + task.FSM.SetState(resource.TaskStateFailed) + }, + expect: func(t *testing.T, task *resource.Task) { + assert := assert.New(t) + assert.True(task.FSM.Is(resource.TaskStateSucceeded)) + assert.Equal(task.TotalPieceCount.Load(), int32(1)) + assert.Equal(task.ContentLength.Load(), int64(1)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + callback := newCallback(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler) + task := resource.NewTask(mockTaskID, mockTaskURL, 
mockTaskBackToSourceLimit, mockTaskURLMeta) + + tc.mock(task) + callback.TaskSuccess(context.Background(), task, tc.result) + tc.expect(t, task) + }) + } +} + +func TestCallback_TaskFail(t *testing.T) { + tests := []struct { + name string + mock func(task *resource.Task) + expect func(t *testing.T, task *resource.Task) + }{ + { + name: "task state is TaskStatePending", + mock: func(task *resource.Task) { + task.FSM.SetState(resource.TaskStatePending) + }, + expect: func(t *testing.T, task *resource.Task) { + assert := assert.New(t) + assert.True(task.FSM.Is(resource.TaskStatePending)) + }, + }, + { + name: "task state is TaskStateSucceeded", + mock: func(task *resource.Task) { + task.FSM.SetState(resource.TaskStateSucceeded) + }, + expect: func(t *testing.T, task *resource.Task) { + assert := assert.New(t) + assert.True(task.FSM.Is(resource.TaskStateSucceeded)) + }, + }, + { + name: "task state is TaskStateRunning", + mock: func(task *resource.Task) { + task.FSM.SetState(resource.TaskStateRunning) + }, + expect: func(t *testing.T, task *resource.Task) { + assert := assert.New(t) + assert.True(task.FSM.Is(resource.TaskStateFailed)) + }, + }, + { + name: "task state is TaskStateFailed", + mock: func(task *resource.Task) { + task.FSM.SetState(resource.TaskStateFailed) + }, + expect: func(t *testing.T, task *resource.Task) { + assert := assert.New(t) + assert.True(task.FSM.Is(resource.TaskStateFailed)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + callback := newCallback(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler) + task := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + + tc.mock(task) + callback.TaskFail(context.Background(), task) + tc.expect(t, task) + }) + } +} diff --git a/scheduler/service/mocks/callback_mock.go b/scheduler/service/mocks/callback_mock.go new file mode 100644 index 00000000000..13547c36d2a --- /dev/null +++ b/scheduler/service/mocks/callback_mock.go @@ -0,0 +1,158 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: scheduler/service/callback.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + set "d7y.io/dragonfly/v2/pkg/container/set" + scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + resource "d7y.io/dragonfly/v2/scheduler/resource" + gomock "github.com/golang/mock/gomock" +) + +// MockCallback is a mock of Callback interface. +type MockCallback struct { + ctrl *gomock.Controller + recorder *MockCallbackMockRecorder +} + +// MockCallbackMockRecorder is the mock recorder for MockCallback. +type MockCallbackMockRecorder struct { + mock *MockCallback +} + +// NewMockCallback creates a new mock instance. +func NewMockCallback(ctrl *gomock.Controller) *MockCallback { + mock := &MockCallback{ctrl: ctrl} + mock.recorder = &MockCallbackMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCallback) EXPECT() *MockCallbackMockRecorder { + return m.recorder +} + +// BeginOfPiece mocks base method. +func (m *MockCallback) BeginOfPiece(arg0 context.Context, arg1 *resource.Peer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "BeginOfPiece", arg0, arg1) +} + +// BeginOfPiece indicates an expected call of BeginOfPiece. 
+func (mr *MockCallbackMockRecorder) BeginOfPiece(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginOfPiece", reflect.TypeOf((*MockCallback)(nil).BeginOfPiece), arg0, arg1) +} + +// EndOfPiece mocks base method. +func (m *MockCallback) EndOfPiece(arg0 context.Context, arg1 *resource.Peer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "EndOfPiece", arg0, arg1) +} + +// EndOfPiece indicates an expected call of EndOfPiece. +func (mr *MockCallbackMockRecorder) EndOfPiece(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EndOfPiece", reflect.TypeOf((*MockCallback)(nil).EndOfPiece), arg0, arg1) +} + +// PeerFail mocks base method. +func (m *MockCallback) PeerFail(arg0 context.Context, arg1 *resource.Peer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PeerFail", arg0, arg1) +} + +// PeerFail indicates an expected call of PeerFail. +func (mr *MockCallbackMockRecorder) PeerFail(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerFail", reflect.TypeOf((*MockCallback)(nil).PeerFail), arg0, arg1) +} + +// PeerLeave mocks base method. +func (m *MockCallback) PeerLeave(arg0 context.Context, arg1 *resource.Peer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PeerLeave", arg0, arg1) +} + +// PeerLeave indicates an expected call of PeerLeave. +func (mr *MockCallbackMockRecorder) PeerLeave(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerLeave", reflect.TypeOf((*MockCallback)(nil).PeerLeave), arg0, arg1) +} + +// PeerSuccess mocks base method. +func (m *MockCallback) PeerSuccess(arg0 context.Context, arg1 *resource.Peer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PeerSuccess", arg0, arg1) +} + +// PeerSuccess indicates an expected call of PeerSuccess. +func (mr *MockCallbackMockRecorder) PeerSuccess(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerSuccess", reflect.TypeOf((*MockCallback)(nil).PeerSuccess), arg0, arg1) +} + +// PieceFail mocks base method. +func (m *MockCallback) PieceFail(arg0 context.Context, arg1 *resource.Peer, arg2 *scheduler.PieceResult) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PieceFail", arg0, arg1, arg2) +} + +// PieceFail indicates an expected call of PieceFail. +func (mr *MockCallbackMockRecorder) PieceFail(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PieceFail", reflect.TypeOf((*MockCallback)(nil).PieceFail), arg0, arg1, arg2) +} + +// PieceSuccess mocks base method. +func (m *MockCallback) PieceSuccess(arg0 context.Context, arg1 *resource.Peer, arg2 *scheduler.PieceResult) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PieceSuccess", arg0, arg1, arg2) +} + +// PieceSuccess indicates an expected call of PieceSuccess. +func (mr *MockCallbackMockRecorder) PieceSuccess(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PieceSuccess", reflect.TypeOf((*MockCallback)(nil).PieceSuccess), arg0, arg1, arg2) +} + +// ScheduleParent mocks base method. +func (m *MockCallback) ScheduleParent(arg0 context.Context, arg1 *resource.Peer, arg2 set.SafeSet) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "ScheduleParent", arg0, arg1, arg2) +} + +// ScheduleParent indicates an expected call of ScheduleParent. 
+func (mr *MockCallbackMockRecorder) ScheduleParent(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScheduleParent", reflect.TypeOf((*MockCallback)(nil).ScheduleParent), arg0, arg1, arg2) +} + +// TaskFail mocks base method. +func (m *MockCallback) TaskFail(arg0 context.Context, arg1 *resource.Task) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "TaskFail", arg0, arg1) +} + +// TaskFail indicates an expected call of TaskFail. +func (mr *MockCallbackMockRecorder) TaskFail(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TaskFail", reflect.TypeOf((*MockCallback)(nil).TaskFail), arg0, arg1) +} + +// TaskSuccess mocks base method. +func (m *MockCallback) TaskSuccess(arg0 context.Context, arg1 *resource.Task, arg2 *scheduler.PeerResult) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "TaskSuccess", arg0, arg1, arg2) +} + +// TaskSuccess indicates an expected call of TaskSuccess. +func (mr *MockCallbackMockRecorder) TaskSuccess(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TaskSuccess", reflect.TypeOf((*MockCallback)(nil).TaskSuccess), arg0, arg1, arg2) +} diff --git a/scheduler/service/mocks/service_mock.go b/scheduler/service/mocks/service_mock.go new file mode 100644 index 00000000000..e686fa25db0 --- /dev/null +++ b/scheduler/service/mocks/service_mock.go @@ -0,0 +1,162 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: scheduler/service/service.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + resource "d7y.io/dragonfly/v2/scheduler/resource" + scheduler0 "d7y.io/dragonfly/v2/scheduler/scheduler" + gomock "github.com/golang/mock/gomock" +) + +// MockService is a mock of Service interface. +type MockService struct { + ctrl *gomock.Controller + recorder *MockServiceMockRecorder +} + +// MockServiceMockRecorder is the mock recorder for MockService. +type MockServiceMockRecorder struct { + mock *MockService +} + +// NewMockService creates a new mock instance. +func NewMockService(ctrl *gomock.Controller) *MockService { + mock := &MockService{ctrl: ctrl} + mock.recorder = &MockServiceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockService) EXPECT() *MockServiceMockRecorder { + return m.recorder +} + +// CDN mocks base method. +func (m *MockService) CDN() resource.CDN { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CDN") + ret0, _ := ret[0].(resource.CDN) + return ret0 +} + +// CDN indicates an expected call of CDN. +func (mr *MockServiceMockRecorder) CDN() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CDN", reflect.TypeOf((*MockService)(nil).CDN)) +} + +// HandlePeer mocks base method. +func (m *MockService) HandlePeer(arg0 context.Context, arg1 *resource.Peer, arg2 *scheduler.PeerResult) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "HandlePeer", arg0, arg1, arg2) +} + +// HandlePeer indicates an expected call of HandlePeer. +func (mr *MockServiceMockRecorder) HandlePeer(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandlePeer", reflect.TypeOf((*MockService)(nil).HandlePeer), arg0, arg1, arg2) +} + +// HandlePeerLeave mocks base method. 
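+// In the concrete service implementation this delegates to the callback's
+// PeerLeave handler; the mock merely records the call for verification.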
+func (m *MockService) HandlePeerLeave(ctx context.Context, peer *resource.Peer) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "HandlePeerLeave", ctx, peer) +} + +// HandlePeerLeave indicates an expected call of HandlePeerLeave. +func (mr *MockServiceMockRecorder) HandlePeerLeave(ctx, peer interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandlePeerLeave", reflect.TypeOf((*MockService)(nil).HandlePeerLeave), ctx, peer) +} + +// HandlePiece mocks base method. +func (m *MockService) HandlePiece(arg0 context.Context, arg1 *resource.Peer, arg2 *scheduler.PieceResult) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "HandlePiece", arg0, arg1, arg2) +} + +// HandlePiece indicates an expected call of HandlePiece. +func (mr *MockServiceMockRecorder) HandlePiece(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandlePiece", reflect.TypeOf((*MockService)(nil).HandlePiece), arg0, arg1, arg2) +} + +// LoadOrStoreHost mocks base method. +func (m *MockService) LoadOrStoreHost(arg0 context.Context, arg1 *scheduler.PeerTaskRequest) (*resource.Host, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadOrStoreHost", arg0, arg1) + ret0, _ := ret[0].(*resource.Host) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// LoadOrStoreHost indicates an expected call of LoadOrStoreHost. +func (mr *MockServiceMockRecorder) LoadOrStoreHost(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadOrStoreHost", reflect.TypeOf((*MockService)(nil).LoadOrStoreHost), arg0, arg1) +} + +// LoadOrStorePeer mocks base method. +func (m *MockService) LoadOrStorePeer(arg0 context.Context, arg1 *scheduler.PeerTaskRequest, arg2 *resource.Task, arg3 *resource.Host) (*resource.Peer, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadOrStorePeer", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*resource.Peer) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// LoadOrStorePeer indicates an expected call of LoadOrStorePeer. +func (mr *MockServiceMockRecorder) LoadOrStorePeer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadOrStorePeer", reflect.TypeOf((*MockService)(nil).LoadOrStorePeer), arg0, arg1, arg2, arg3) +} + +// LoadPeer mocks base method. +func (m *MockService) LoadPeer(arg0 string) (*resource.Peer, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadPeer", arg0) + ret0, _ := ret[0].(*resource.Peer) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// LoadPeer indicates an expected call of LoadPeer. +func (mr *MockServiceMockRecorder) LoadPeer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadPeer", reflect.TypeOf((*MockService)(nil).LoadPeer), arg0) +} + +// RegisterTask mocks base method. +func (m *MockService) RegisterTask(arg0 context.Context, arg1 *scheduler.PeerTaskRequest) (*resource.Task, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterTask", arg0, arg1) + ret0, _ := ret[0].(*resource.Task) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RegisterTask indicates an expected call of RegisterTask. 
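+// For example, a test stubbing registration might record (mockService and
+// mockTask are illustrative names created in the test):
+//
+//	mockService.EXPECT().RegisterTask(gomock.Any(), gomock.Any()).Return(mockTask, nil)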
+func (mr *MockServiceMockRecorder) RegisterTask(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterTask", reflect.TypeOf((*MockService)(nil).RegisterTask), arg0, arg1)
+}
+
+// Scheduler mocks base method.
+func (m *MockService) Scheduler() scheduler0.Scheduler {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Scheduler")
+	ret0, _ := ret[0].(scheduler0.Scheduler)
+	return ret0
+}
+
+// Scheduler indicates an expected call of Scheduler.
+func (mr *MockServiceMockRecorder) Scheduler() *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scheduler", reflect.TypeOf((*MockService)(nil).Scheduler))
+}
diff --git a/scheduler/service/service.go b/scheduler/service/service.go
new file mode 100644
index 00000000000..d9d35559888
--- /dev/null
+++ b/scheduler/service/service.go
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package service
+
+import (
+	"context"
+	"time"
+
+	"d7y.io/dragonfly/v2/pkg/idgen"
+	"d7y.io/dragonfly/v2/pkg/rpc/base"
+	"d7y.io/dragonfly/v2/pkg/rpc/base/common"
+	rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler"
+	pkgsync "d7y.io/dragonfly/v2/pkg/sync"
+	"d7y.io/dragonfly/v2/scheduler/config"
+	"d7y.io/dragonfly/v2/scheduler/metrics"
+	"d7y.io/dragonfly/v2/scheduler/resource"
+	"d7y.io/dragonfly/v2/scheduler/scheduler"
+)
+
+type Service interface {
+	Scheduler() scheduler.Scheduler
+	CDN() resource.CDN
+	RegisterTask(context.Context, *rpcscheduler.PeerTaskRequest) (*resource.Task, error)
+	LoadOrStoreHost(context.Context, *rpcscheduler.PeerTaskRequest) (*resource.Host, bool)
+	LoadOrStorePeer(context.Context, *rpcscheduler.PeerTaskRequest, *resource.Task, *resource.Host) (*resource.Peer, bool)
+	LoadPeer(string) (*resource.Peer, bool)
+	HandlePiece(context.Context, *resource.Peer, *rpcscheduler.PieceResult)
+	HandlePeer(context.Context, *resource.Peer, *rpcscheduler.PeerResult)
+	HandlePeerLeave(ctx context.Context, peer *resource.Peer)
+}
+
+type service struct {
+	// Resource interface
+	resource resource.Resource
+
+	// Scheduler interface
+	scheduler scheduler.Scheduler
+
+	// callback holds event handlers for actions such as task failure and piece success
+	callback Callback
+
+	// Scheduler service config
+	config *config.Config
+
+	// Dynamic config
+	dynconfig config.DynconfigInterface
+
+	// Key map mutex
+	kmu *pkgsync.Krwmutex
+}
+
+func New(
+	cfg *config.Config,
+	resource resource.Resource,
+	scheduler scheduler.Scheduler,
+	dynconfig config.DynconfigInterface,
+) Service {
+	// Initialize callback
+	callback := newCallback(cfg, resource, scheduler)
+
+	return &service{
+		resource:  resource,
+		scheduler: scheduler,
+		callback:  callback,
+		config:    cfg,
+		dynconfig: dynconfig,
+		kmu:       pkgsync.NewKrwmutex(),
+	}
+}
+
+func (s *service) Scheduler() scheduler.Scheduler {
+	return s.scheduler
+}
+
+func (s *service) CDN() resource.CDN {
+	return s.resource.CDN()
+}
+
+func (s *service) RegisterTask(ctx context.Context, req *rpcscheduler.PeerTaskRequest) (*resource.Task, error) {
+	task := resource.NewTask(idgen.TaskID(req.Url, req.UrlMeta), req.Url, s.config.Scheduler.BackSourceCount, req.UrlMeta)
+
+	s.kmu.Lock(task.ID)
+	defer s.kmu.Unlock(task.ID)
+	task, ok := s.resource.TaskManager().LoadOrStore(task)
+	if ok && (task.FSM.Is(resource.TaskStateRunning) || task.FSM.Is(resource.TaskStateSucceeded)) {
+		// Task is healthy and can be reused
+		task.UpdateAt.Store(time.Now())
+		task.Log.Infof("reuse task and status is %s", task.FSM.Current())
+		return task, nil
+	}
+
+	// Trigger task
+	if err := task.FSM.Event(resource.TaskEventDownload); err != nil {
+		return nil, err
+	}
+
+	// Start seed cdn task
+	go func() {
+		task.Log.Info("cdn start seed task")
+		peer, endOfPiece, err := s.resource.CDN().TriggerTask(context.Background(), task)
+		if err != nil {
+			task.Log.Errorf("trigger task failed: %v", err)
+
+			// Update the peer status first to help the task return the error code to the downloading peer.
+			// If CDN initialization failed, peer is nil
+			s.callback.TaskFail(ctx, task)
+			if peer != nil {
+				s.callback.PeerFail(ctx, peer)
+			}
+			return
+		}
+
+		// Update the task status first to help peer scheduling evaluation and scoring
+		s.callback.TaskSuccess(ctx, task, endOfPiece)
+		s.callback.PeerSuccess(ctx, peer)
+	}()
+
+	return task, nil
+}
+
+func (s *service) LoadOrStoreHost(ctx context.Context, req *rpcscheduler.PeerTaskRequest) (*resource.Host, bool) {
+	rawHost := req.PeerHost
+	host, ok := s.resource.HostManager().Load(rawHost.Uuid)
+	if !ok {
+		// Get scheduler cluster client config from the manager
+		var options []resource.HostOption
+		if clientConfig, ok := s.dynconfig.GetSchedulerClusterClientConfig(); ok {
+			options = append(options, resource.WithUploadLoadLimit(int32(clientConfig.LoadLimit)))
+		}
+
+		host = resource.NewHost(rawHost, options...)
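+		// Store the newly constructed host so that subsequent requests carrying
+		// the same host UUID reuse it, together with any dynconfig-derived
+		// upload load limit applied above.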
+		s.resource.HostManager().Store(host)
+		host.Log.Info("create host")
+		return host, false
+	}
+
+	host.Log.Info("host already exists")
+	return host, true
+}
+
+func (s *service) LoadOrStorePeer(ctx context.Context, req *rpcscheduler.PeerTaskRequest, task *resource.Task, host *resource.Host) (*resource.Peer, bool) {
+	peer := resource.NewPeer(req.PeerId, task, host)
+	return s.resource.PeerManager().LoadOrStore(peer)
+}
+
+func (s *service) LoadPeer(id string) (*resource.Peer, bool) {
+	return s.resource.PeerManager().Load(id)
+}
+
+func (s *service) HandlePiece(ctx context.Context, peer *resource.Peer, piece *rpcscheduler.PieceResult) {
+	// Handle successful piece download
+	if piece.Success {
+		peer.Log.Infof("receive successful piece: %#v %#v", piece, piece.PieceInfo)
+		s.callback.PieceSuccess(ctx, peer, piece)
+
+		// Collect peer host traffic metrics
+		if s.config.Metrics != nil && s.config.Metrics.EnablePeerHost {
+			metrics.PeerHostTraffic.WithLabelValues("download", peer.Host.ID, peer.Host.IP).Add(float64(piece.PieceInfo.RangeSize))
+			if p, ok := s.resource.PeerManager().Load(piece.DstPid); ok {
+				metrics.PeerHostTraffic.WithLabelValues("upload", p.Host.ID, p.Host.IP).Add(float64(piece.PieceInfo.RangeSize))
+			} else {
+				peer.Log.Warnf("dst peer %s not found for piece %#v %#v", piece.DstPid, piece, piece.PieceInfo)
+			}
+		}
+		return
+	}
+
+	// Handle begin of piece and end of piece
+	if piece.PieceInfo != nil {
+		if piece.PieceInfo.PieceNum == common.BeginOfPiece {
+			peer.Log.Infof("receive begin of piece: %#v %#v", piece, piece.PieceInfo)
+			s.callback.BeginOfPiece(ctx, peer)
+			return
+		}
+
+		if piece.PieceInfo.PieceNum == common.EndOfPiece {
+			peer.Log.Infof("receive end of piece: %#v %#v", piece, piece.PieceInfo)
+			s.callback.EndOfPiece(ctx, peer)
+			return
+		}
+	}
+
+	// Handle failed piece download
+	if piece.Code != base.Code_Success {
+		// The client is still waiting for the piece to be ready;
+		// skip logging Code_ClientWaitPieceReady to prevent redundant logs
+		if piece.Code != base.Code_ClientWaitPieceReady {
+			peer.Log.Errorf("receive failed piece: %#v %#v", piece, piece.PieceInfo)
+		}
+
+		s.callback.PieceFail(ctx, peer, piece)
+		return
+	}
+
+	// Handle unknown piece
+	peer.Log.Warnf("receive unknown piece: %#v", piece)
+}
+
+func (s *service) HandlePeer(ctx context.Context, peer *resource.Peer, req *rpcscheduler.PeerResult) {
+	if !req.Success {
+		if peer.Task.BackToSourcePeers.Contains(peer) {
+			s.callback.TaskFail(ctx, peer.Task)
+		}
+		s.callback.PeerFail(ctx, peer)
+		return
+	}
+
+	if peer.Task.BackToSourcePeers.Contains(peer) {
+		s.callback.TaskSuccess(ctx, peer.Task, req)
+	}
+	s.callback.PeerSuccess(ctx, peer)
+}
+
+func (s *service) HandlePeerLeave(ctx context.Context, peer *resource.Peer) {
+	s.callback.PeerLeave(ctx, peer)
+}
diff --git a/scheduler/service/service_test.go b/scheduler/service/service_test.go
new file mode 100644
index 00000000000..5b9bd40ff8d
--- /dev/null
+++ b/scheduler/service/service_test.go
@@ -0,0 +1,773 @@
+/*
+ * Copyright 2020 The Dragonfly Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package service + +import ( + "context" + "errors" + "reflect" + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + + "d7y.io/dragonfly/v2/manager/types" + "d7y.io/dragonfly/v2/pkg/rpc/base" + "d7y.io/dragonfly/v2/pkg/rpc/base/common" + rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + "d7y.io/dragonfly/v2/scheduler/config" + configmocks "d7y.io/dragonfly/v2/scheduler/config/mocks" + "d7y.io/dragonfly/v2/scheduler/resource" + "d7y.io/dragonfly/v2/scheduler/scheduler" + "d7y.io/dragonfly/v2/scheduler/scheduler/mocks" +) + +func TestService_New(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, s interface{}) + }{ + { + name: "new service", + expect: func(t *testing.T, s interface{}) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(s).Elem().Name(), "service") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + resource := resource.NewMockResource(ctl) + dynconfig := configmocks.NewMockDynconfigInterface(ctl) + tc.expect(t, New(&config.Config{Scheduler: mockSchedulerConfig}, resource, scheduler, dynconfig)) + }) + } +} + +func TestService_Scheduler(t *testing.T) { + tests := []struct { + name string + expect func(t *testing.T, s scheduler.Scheduler) + }{ + { + name: "get scheduler interface", + expect: func(t *testing.T, s scheduler.Scheduler) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(s).Elem().Name(), "MockScheduler") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + dynconfig := configmocks.NewMockDynconfigInterface(ctl) + svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig) + tc.expect(t, svc.Scheduler()) + }) + } +} + +func TestService_CDN(t *testing.T) { + tests := []struct { + name string + mock func(cdn resource.CDN, mr *resource.MockResourceMockRecorder) + expect func(t *testing.T, c resource.CDN) + }{ + { + name: "get cdn interface", + mock: func(cdn resource.CDN, mr *resource.MockResourceMockRecorder) { + mr.CDN().Return(cdn).Times(1) + }, + expect: func(t *testing.T, c resource.CDN) { + assert := assert.New(t) + assert.Equal(reflect.TypeOf(c).Elem().Name(), "MockCDN") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + dynconfig := configmocks.NewMockDynconfigInterface(ctl) + cdn := resource.NewMockCDN(ctl) + svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig) + tc.mock(cdn, res.EXPECT()) + tc.expect(t, svc.CDN()) + }) + } +} + +func TestService_RegisterTask(t *testing.T) { + tests := []struct { + name string + req *rpcscheduler.PeerTaskRequest + run func(svc Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, cdn resource.CDN, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mc *resource.MockCDNMockRecorder) + }{ + { + name: "task already exists and state is TaskStateRunning", + req: &rpcscheduler.PeerTaskRequest{ + Url: 
mockTaskURL, + UrlMeta: mockTaskURLMeta, + }, + run: func(svc Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, cdn resource.CDN, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mc *resource.MockCDNMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateRunning) + gomock.InOrder( + mr.TaskManager().Return(taskManager).Times(1), + mt.LoadOrStore(gomock.Any()).Return(mockTask, true).Times(1), + ) + + task, err := svc.RegisterTask(context.Background(), req) + assert := assert.New(t) + assert.NoError(err) + assert.EqualValues(mockTask, task) + }, + }, + { + name: "task already exists and state is TaskStateSucceeded", + req: &rpcscheduler.PeerTaskRequest{ + Url: mockTaskURL, + UrlMeta: mockTaskURLMeta, + }, + run: func(svc Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, cdn resource.CDN, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mc *resource.MockCDNMockRecorder) { + mockTask.FSM.SetState(resource.TaskStateSucceeded) + gomock.InOrder( + mr.TaskManager().Return(taskManager).Times(1), + mt.LoadOrStore(gomock.Any()).Return(mockTask, true).Times(1), + ) + + task, err := svc.RegisterTask(context.Background(), req) + assert := assert.New(t) + assert.NoError(err) + assert.EqualValues(mockTask, task) + }, + }, + { + name: "task state is TaskStatePending", + req: &rpcscheduler.PeerTaskRequest{ + Url: mockTaskURL, + UrlMeta: mockTaskURLMeta, + }, + run: func(svc Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, cdn resource.CDN, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mc *resource.MockCDNMockRecorder) { + var wg sync.WaitGroup + wg.Add(2) + defer wg.Wait() + + mockTask.FSM.SetState(resource.TaskStatePending) + gomock.InOrder( + mr.TaskManager().Return(taskManager).Times(1), + mt.LoadOrStore(gomock.Any()).Return(mockTask, true).Times(1), + mr.CDN().Do(func() { wg.Done() }).Return(cdn).Times(1), + mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &rpcscheduler.PeerResult{}, nil).Times(1), + ) + + task, err := svc.RegisterTask(context.Background(), req) + assert := assert.New(t) + assert.NoError(err) + assert.EqualValues(mockTask, task) + }, + }, + { + name: "task state is TaskStateFailed", + req: &rpcscheduler.PeerTaskRequest{ + Url: mockTaskURL, + UrlMeta: mockTaskURLMeta, + }, + run: func(svc Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, cdn resource.CDN, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mc *resource.MockCDNMockRecorder) { + var wg sync.WaitGroup + wg.Add(2) + defer wg.Wait() + + mockTask.FSM.SetState(resource.TaskStateFailed) + gomock.InOrder( + mr.TaskManager().Return(taskManager).Times(1), + mt.LoadOrStore(gomock.Any()).Return(mockTask, true).Times(1), + mr.CDN().Do(func() { wg.Done() }).Return(cdn).Times(1), + mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &rpcscheduler.PeerResult{}, nil).Times(1), + ) + + task, err := svc.RegisterTask(context.Background(), req) + assert := assert.New(t) + assert.NoError(err) + assert.EqualValues(mockTask, task) + }, + }, + { + name: "task state 
is TaskStatePending, but trigger cdn failed", + req: &rpcscheduler.PeerTaskRequest{ + Url: mockTaskURL, + UrlMeta: mockTaskURLMeta, + }, + run: func(svc Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, cdn resource.CDN, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mc *resource.MockCDNMockRecorder) { + var wg sync.WaitGroup + wg.Add(2) + defer wg.Wait() + + mockTask.FSM.SetState(resource.TaskStatePending) + gomock.InOrder( + mr.TaskManager().Return(taskManager).Times(1), + mt.LoadOrStore(gomock.Any()).Return(mockTask, true).Times(1), + mr.CDN().Do(func() { wg.Done() }).Return(cdn).Times(1), + mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &rpcscheduler.PeerResult{}, errors.New("foo")).Times(1), + ) + + task, err := svc.RegisterTask(context.Background(), req) + assert := assert.New(t) + assert.NoError(err) + assert.EqualValues(mockTask, task) + }, + }, + { + name: "task state is TaskStateFailed, but trigger cdn failed", + req: &rpcscheduler.PeerTaskRequest{ + Url: mockTaskURL, + UrlMeta: mockTaskURLMeta, + }, + run: func(svc Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, cdn resource.CDN, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mc *resource.MockCDNMockRecorder) { + var wg sync.WaitGroup + wg.Add(2) + defer wg.Wait() + + mockTask.FSM.SetState(resource.TaskStateFailed) + gomock.InOrder( + mr.TaskManager().Return(taskManager).Times(1), + mt.LoadOrStore(gomock.Any()).Return(mockTask, true).Times(1), + mr.CDN().Do(func() { wg.Done() }).Return(cdn).Times(1), + mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &rpcscheduler.PeerResult{}, errors.New("foo")).Times(1), + ) + + task, err := svc.RegisterTask(context.Background(), req) + assert := assert.New(t) + assert.NoError(err) + assert.EqualValues(mockTask, task) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + dynconfig := configmocks.NewMockDynconfigInterface(ctl) + svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig) + taskManager := resource.NewMockTaskManager(ctl) + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) + cdn := resource.NewMockCDN(ctl) + tc.run(svc, tc.req, mockTask, mockPeer, taskManager, cdn, res.EXPECT(), taskManager.EXPECT(), cdn.EXPECT()) + }) + } +} + +func TestService_LoadOrStoreHost(t *testing.T) { + tests := []struct { + name string + req *rpcscheduler.PeerTaskRequest + mock func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) + expect func(t *testing.T, host *resource.Host, loaded bool) + }{ + { + name: "host already exists", + req: &rpcscheduler.PeerTaskRequest{ + Url: mockTaskURL, + UrlMeta: mockTaskURLMeta, + PeerHost: mockRawHost, + }, + mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr 
*resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + gomock.InOrder( + mr.HostManager().Return(hostManager).Times(1), + mh.Load(gomock.Eq(mockRawHost.Uuid)).Return(mockHost, true).Times(1), + ) + }, + expect: func(t *testing.T, host *resource.Host, loaded bool) { + assert := assert.New(t) + assert.Equal(host.ID, mockRawHost.Uuid) + assert.True(loaded) + }, + }, + { + name: "host does not exist", + req: &rpcscheduler.PeerTaskRequest{ + Url: mockTaskURL, + UrlMeta: mockTaskURLMeta, + PeerHost: mockRawHost, + }, + mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + gomock.InOrder( + mr.HostManager().Return(hostManager).Times(1), + mh.Load(gomock.Eq(mockRawHost.Uuid)).Return(nil, false).Times(1), + md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{LoadLimit: 10}, true).Times(1), + mr.HostManager().Return(hostManager).Times(1), + mh.Store(gomock.Any()).Return().Times(1), + ) + }, + expect: func(t *testing.T, host *resource.Host, loaded bool) { + assert := assert.New(t) + assert.Equal(host.ID, mockRawHost.Uuid) + assert.Equal(host.UploadLoadLimit.Load(), int32(10)) + assert.False(loaded) + }, + }, + { + name: "host does not exist and dynconfig get cluster client config failed", + req: &rpcscheduler.PeerTaskRequest{ + Url: mockTaskURL, + UrlMeta: mockTaskURLMeta, + PeerHost: mockRawHost, + }, + mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + gomock.InOrder( + mr.HostManager().Return(hostManager).Times(1), + mh.Load(gomock.Eq(mockRawHost.Uuid)).Return(nil, false).Times(1), + md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{}, false).Times(1), + mr.HostManager().Return(hostManager).Times(1), + mh.Store(gomock.Any()).Return().Times(1), + ) + }, + expect: func(t *testing.T, host *resource.Host, loaded bool) { + assert := assert.New(t) + assert.Equal(host.ID, mockRawHost.Uuid) + assert.False(loaded) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + dynconfig := configmocks.NewMockDynconfigInterface(ctl) + svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig) + hostManager := resource.NewMockHostManager(ctl) + mockHost := resource.NewHost(mockRawHost) + + tc.mock(mockHost, hostManager, res.EXPECT(), hostManager.EXPECT(), dynconfig.EXPECT()) + host, loaded := svc.LoadOrStoreHost(context.Background(), tc.req) + tc.expect(t, host, loaded) + }) + } +} + +func TestService_LoadOrStorePeer(t *testing.T) { + tests := []struct { + name string + req *rpcscheduler.PeerTaskRequest + mock func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) + expect func(t *testing.T, peer *resource.Peer, loaded bool) + }{ + { + name: "peer already exists", + req: &rpcscheduler.PeerTaskRequest{ + PeerId: mockPeerID, + }, + mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + gomock.InOrder( + 
mr.PeerManager().Return(peerManager).Times(1),
+					mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
+				)
+			},
+			expect: func(t *testing.T, peer *resource.Peer, loaded bool) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.True(loaded)
+			},
+		},
+		{
+			name: "peer does not exist",
+			req: &rpcscheduler.PeerTaskRequest{
+				PeerId: mockPeerID,
+			},
+			mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
+				gomock.InOrder(
+					mr.PeerManager().Return(peerManager).Times(1),
+					mp.LoadOrStore(gomock.Any()).Return(mockPeer, false).Times(1),
+				)
+			},
+			expect: func(t *testing.T, peer *resource.Peer, loaded bool) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.False(loaded)
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			ctl := gomock.NewController(t)
+			defer ctl.Finish()
+			scheduler := mocks.NewMockScheduler(ctl)
+			res := resource.NewMockResource(ctl)
+			dynconfig := configmocks.NewMockDynconfigInterface(ctl)
+			svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig)
+			peerManager := resource.NewMockPeerManager(ctl)
+			mockHost := resource.NewHost(mockRawHost)
+			mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta)
+			mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost)
+
+			tc.mock(mockPeer, peerManager, res.EXPECT(), peerManager.EXPECT())
+			peer, loaded := svc.LoadOrStorePeer(context.Background(), tc.req, mockTask, mockHost)
+			tc.expect(t, peer, loaded)
+		})
+	}
+}
+
+func TestService_LoadPeer(t *testing.T) {
+	tests := []struct {
+		name   string
+		req    *rpcscheduler.PeerTaskRequest
+		mock   func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder)
+		expect func(t *testing.T, peer *resource.Peer, ok bool)
+	}{
+		{
+			name: "peer already exists",
+			req: &rpcscheduler.PeerTaskRequest{
+				PeerId: mockPeerID,
+			},
+			mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
+				gomock.InOrder(
+					mr.PeerManager().Return(peerManager).Times(1),
+					mp.Load(gomock.Eq(mockPeerID)).Return(mockPeer, true).Times(1),
+				)
+			},
+			expect: func(t *testing.T, peer *resource.Peer, ok bool) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.True(ok)
+			},
+		},
+		{
+			name: "peer does not exist",
+			req: &rpcscheduler.PeerTaskRequest{
+				PeerId: mockPeerID,
+			},
+			mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
+				gomock.InOrder(
+					mr.PeerManager().Return(peerManager).Times(1),
+					mp.Load(gomock.Eq(mockPeerID)).Return(nil, false).Times(1),
+				)
+			},
+			expect: func(t *testing.T, peer *resource.Peer, ok bool) {
+				assert := assert.New(t)
+				assert.False(ok)
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			ctl := gomock.NewController(t)
+			defer ctl.Finish()
+			scheduler := mocks.NewMockScheduler(ctl)
+			res := resource.NewMockResource(ctl)
+			dynconfig := configmocks.NewMockDynconfigInterface(ctl)
+			svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig)
+			peerManager := resource.NewMockPeerManager(ctl)
+			mockHost := resource.NewHost(mockRawHost)
+			mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta)
+			mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost)
+
+			tc.mock(mockPeer, peerManager, res.EXPECT(), peerManager.EXPECT())
+			peer, ok := svc.LoadPeer(mockPeerID)
+			tc.expect(t, peer, ok)
+		})
+	}
+}
+
+func TestService_HandlePiece(t *testing.T) {
+	tests := []struct {
+		name   string
+		piece  *rpcscheduler.PieceResult
+		mock   func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder)
+		expect func(t *testing.T, peer *resource.Peer)
+	}{
+		{
+			name: "piece success",
+			piece: &rpcscheduler.PieceResult{
+				DstPid: mockCDNPeerID,
+				PieceInfo: &base.PieceInfo{
+					PieceNum: 0,
+				},
+				BeginTime: uint64(time.Now().Unix()),
+				EndTime:   uint64(time.Now().Add(1 * time.Second).Unix()),
+				Success:   true,
+			},
+			mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
+				gomock.InOrder(
+					mr.PeerManager().Return(peerManager).Times(1),
+					mp.Load(gomock.Eq(mockCDNPeerID)).Return(mockPeer, true).Times(1),
+				)
+			},
+			expect: func(t *testing.T, peer *resource.Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.Equal(peer.Pieces.Count(), uint(1))
+				assert.Equal(peer.PieceCosts(), []int64{1})
+			},
+		},
+		{
+			name: "piece success and load peer failed",
+			piece: &rpcscheduler.PieceResult{
+				DstPid: mockCDNPeerID,
+				PieceInfo: &base.PieceInfo{
+					PieceNum: 0,
+				},
+				BeginTime: uint64(time.Now().Unix()),
+				EndTime:   uint64(time.Now().Add(1 * time.Second).Unix()),
+				Success:   true,
+			},
+			mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
+				gomock.InOrder(
+					mr.PeerManager().Return(peerManager).Times(1),
+					mp.Load(gomock.Eq(mockCDNPeerID)).Return(nil, false).Times(1),
+				)
+			},
+			expect: func(t *testing.T, peer *resource.Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.Equal(peer.Pieces.Count(), uint(1))
+				assert.Equal(peer.PieceCosts(), []int64{1})
+			},
+		},
+		{
+			name: "receive begin of piece",
+			piece: &rpcscheduler.PieceResult{
+				PieceInfo: &base.PieceInfo{
+					PieceNum: common.BeginOfPiece,
+				},
+			},
+			mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
+				mockPeer.FSM.SetState(resource.PeerStateBackToSource)
+			},
+			expect: func(t *testing.T, peer *resource.Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.True(peer.FSM.Is(resource.PeerStateBackToSource))
+			},
+		},
+		{
+			name: "receive end of piece",
+			piece: &rpcscheduler.PieceResult{
+				PieceInfo: &base.PieceInfo{
+					PieceNum: common.EndOfPiece,
+				},
+			},
+			mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
+			},
+			expect: func(t *testing.T, peer *resource.Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.True(peer.FSM.Is(resource.PeerStatePending))
+			},
+		},
+		{
+			name: "receive failed piece",
+			piece: &rpcscheduler.PieceResult{
+				Code: base.Code_CDNError,
+			},
+			mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
+				mockPeer.FSM.SetState(resource.PeerStateBackToSource)
+			},
+			expect: func(t *testing.T, peer *resource.Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.True(peer.FSM.Is(resource.PeerStateBackToSource))
+			},
+		},
+		{
+			name: "receive unknown piece",
+			piece: &rpcscheduler.PieceResult{
+				Code: base.Code_Success,
+			},
+			mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
+			},
+			expect: func(t *testing.T, peer *resource.Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.True(peer.FSM.Is(resource.PeerStatePending))
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			ctl := gomock.NewController(t)
+			defer ctl.Finish()
+			scheduler := mocks.NewMockScheduler(ctl)
+			res := resource.NewMockResource(ctl)
+			dynconfig := configmocks.NewMockDynconfigInterface(ctl)
+			svc := New(&config.Config{Scheduler: mockSchedulerConfig, Metrics: &config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig)
+			peerManager := resource.NewMockPeerManager(ctl)
+			mockHost := resource.NewHost(mockRawHost)
+			mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta)
+			mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost)
+
+			tc.mock(mockPeer, peerManager, res.EXPECT(), peerManager.EXPECT())
+			svc.HandlePiece(context.Background(), mockPeer, tc.piece)
+			tc.expect(t, mockPeer)
+		})
+	}
+}
+
+func TestService_HandlePeer(t *testing.T) {
+	tests := []struct {
+		name   string
+		result *rpcscheduler.PeerResult
+		mock   func(mockPeer *resource.Peer)
+		expect func(t *testing.T, peer *resource.Peer)
+	}{
+		{
+			name: "peer failed",
+			result: &rpcscheduler.PeerResult{
+				Success: false,
+			},
+			mock: func(mockPeer *resource.Peer) {
+				mockPeer.FSM.SetState(resource.PeerStateRunning)
+			},
+			expect: func(t *testing.T, peer *resource.Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.True(peer.FSM.Is(resource.PeerStateFailed))
+			},
+		},
+		{
+			name: "peer back-to-source failed",
+			result: &rpcscheduler.PeerResult{
+				Success: false,
+			},
+			mock: func(mockPeer *resource.Peer) {
+				mockPeer.FSM.SetState(resource.PeerStateRunning)
+				mockPeer.Task.FSM.SetState(resource.TaskStateRunning)
+				mockPeer.Task.BackToSourcePeers.Add(mockPeer)
+			},
+			expect: func(t *testing.T, peer *resource.Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.True(peer.FSM.Is(resource.PeerStateFailed))
+				assert.True(peer.Task.FSM.Is(resource.TaskStateFailed))
+			},
+		},
+		{
+			name: "peer success",
+			result: &rpcscheduler.PeerResult{
+				Success: true,
+			},
+			mock: func(mockPeer *resource.Peer) {
+				mockPeer.FSM.SetState(resource.PeerStateRunning)
+			},
+			expect: func(t *testing.T, peer *resource.Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.True(peer.FSM.Is(resource.PeerStateSucceeded))
+			},
+		},
+		{
+			name: "peer back-to-source success",
+			result: &rpcscheduler.PeerResult{
+				Success: true,
+			},
+			mock: func(mockPeer *resource.Peer) {
+				mockPeer.FSM.SetState(resource.PeerStateRunning)
+				mockPeer.Task.FSM.SetState(resource.TaskStateRunning)
+				mockPeer.Task.BackToSourcePeers.Add(mockPeer)
+			},
+			expect: func(t *testing.T, peer *resource.Peer) {
+				assert := assert.New(t)
+				assert.Equal(peer.ID, mockPeerID)
+				assert.True(peer.FSM.Is(resource.PeerStateSucceeded))
+				assert.True(peer.Task.FSM.Is(resource.TaskStateSucceeded))
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			ctl := gomock.NewController(t)
+			defer
ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + dynconfig := configmocks.NewMockDynconfigInterface(ctl) + svc := New(&config.Config{Scheduler: mockSchedulerConfig, Metrics: &config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig) + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) + + tc.mock(mockPeer) + svc.HandlePeer(context.Background(), mockPeer, tc.result) + tc.expect(t, mockPeer) + }) + } +} + +func TestService_HandlePeerLeave(t *testing.T) { + tests := []struct { + name string + mock func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) + expect func(t *testing.T, peer *resource.Peer) + }{ + { + name: "peer leave", + mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { + mockPeer.FSM.SetState(resource.PeerStateSucceeded) + gomock.InOrder( + mr.PeerManager().Return(peerManager).Times(1), + mp.Delete(gomock.Eq(mockPeerID)).Return().Times(1), + ) + }, + expect: func(t *testing.T, peer *resource.Peer) { + assert := assert.New(t) + assert.Equal(peer.ID, mockPeerID) + assert.True(peer.FSM.Is(resource.PeerStateLeave)) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctl := gomock.NewController(t) + defer ctl.Finish() + scheduler := mocks.NewMockScheduler(ctl) + res := resource.NewMockResource(ctl) + dynconfig := configmocks.NewMockDynconfigInterface(ctl) + svc := New(&config.Config{Scheduler: mockSchedulerConfig, Metrics: &config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig) + peerManager := resource.NewMockPeerManager(ctl) + mockHost := resource.NewHost(mockRawHost) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskBackToSourceLimit, mockTaskURLMeta) + mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) + + tc.mock(mockPeer, peerManager, res.EXPECT(), peerManager.EXPECT()) + svc.HandlePeerLeave(context.Background(), mockPeer) + tc.expect(t, mockPeer) + }) + } +} diff --git a/scheduler/supervisor/cdn.go b/scheduler/supervisor/cdn.go deleted file mode 100644 index b3abb281726..00000000000 --- a/scheduler/supervisor/cdn.go +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -//go:generate mockgen -destination ./mocks/cdn_mock.go -package mocks d7y.io/dragonfly/v2/scheduler/supervisor CDNDynmaicClient - -package supervisor - -import ( - "context" - "fmt" - "io" - "net/http" - "reflect" - "sync" - - "github.com/pkg/errors" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" - "google.golang.org/grpc" - - "d7y.io/dragonfly/v2/internal/dferrors" - logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/internal/dfnet" - "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" - cdnclient "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" - "d7y.io/dragonfly/v2/scheduler/config" -) - -var ( - ErrCDNClientUninitialized = errors.New("cdn client is not initialized") - ErrCDNRegisterFail = errors.New("cdn task register failed") - ErrCDNDownloadFail = errors.New("cdn task download failed") - ErrCDNUnknown = errors.New("cdn obtain seed encounter unknown err") - ErrCDNInvokeFail = errors.New("invoke cdn interface failed") - ErrInitCDNPeerFail = errors.New("init cdn peer failed") -) - -var tracer = otel.Tracer("scheduler-cdn") - -type CDN interface { - // CetClient get cdn grpc client - GetClient() CDNDynmaicClient - - // StartSeedTask start seed cdn task - StartSeedTask(context.Context, *Task) (*Peer, error) -} - -type cdn struct { - // Client is cdn dynamic client - client CDNDynmaicClient - // peerManager is peer manager - peerManager PeerManager - // hostManager is host manager - hostManager HostManager -} - -func NewCDN(client CDNDynmaicClient, peerManager PeerManager, hostManager HostManager) CDN { - return &cdn{ - client: client, - peerManager: peerManager, - hostManager: hostManager, - } -} - -func (c *cdn) GetClient() CDNDynmaicClient { - return c.client -} - -func (c *cdn) StartSeedTask(ctx context.Context, task *Task) (*Peer, error) { - logger.Infof("start seed task %s", task.ID) - defer func() { - logger.Infof("finish seed task %s, task status is %s", task.ID, task.GetStatus()) - }() - var seedSpan trace.Span - ctx, seedSpan = tracer.Start(ctx, config.SpanTriggerCDNSeed) - defer seedSpan.End() - seedRequest := &cdnsystem.SeedRequest{ - TaskId: task.ID, - Url: task.URL, - UrlMeta: task.URLMeta, - } - seedSpan.SetAttributes(config.AttributeCDNSeedRequest.String(seedRequest.String())) - - if c.client == nil { - err := ErrCDNClientUninitialized - seedSpan.RecordError(err) - seedSpan.SetAttributes(config.AttributePeerDownloadSuccess.Bool(false)) - return nil, err - } - - stream, err := c.client.ObtainSeeds(trace.ContextWithSpan(context.Background(), seedSpan), seedRequest) - if err != nil { - seedSpan.RecordError(err) - seedSpan.SetAttributes(config.AttributePeerDownloadSuccess.Bool(false)) - if cdnErr, ok := err.(*dferrors.DfError); ok { - logger.Errorf("failed to obtain cdn seed: %v", cdnErr) - switch cdnErr.Code { - case base.Code_CDNTaskRegistryFail: - return nil, errors.Wrap(ErrCDNRegisterFail, "obtain seeds") - case base.Code_CDNTaskDownloadFail: - return nil, errors.Wrapf(ErrCDNDownloadFail, "obtain seeds") - default: - return nil, errors.Wrapf(ErrCDNUnknown, "obtain seeds") - } - } - return nil, errors.Wrapf(ErrCDNInvokeFail, "obtain seeds from cdn: %v", err) - } - - return c.receivePiece(ctx, task, stream) -} - -func (c *cdn) receivePiece(ctx context.Context, task *Task, stream *cdnclient.PieceSeedStream) (*Peer, error) { - span := trace.SpanFromContext(ctx) - var initialized bool - var cdnPeer *Peer - for { - piece, err := stream.Recv() - if err != nil { - if err == io.EOF { - 
logger.Infof("task %s connection closed", task.ID) - if cdnPeer != nil && task.GetStatus() == TaskStatusSuccess { - span.SetAttributes(config.AttributePeerDownloadSuccess.Bool(true)) - return cdnPeer, nil - } - return nil, errors.Errorf("cdn stream receive EOF but task status is %s", task.GetStatus()) - } - - span.RecordError(err) - span.SetAttributes(config.AttributePeerDownloadSuccess.Bool(false)) - logger.Errorf("task %s add piece err %v", task.ID, err) - if recvErr, ok := err.(*dferrors.DfError); ok { - switch recvErr.Code { - case base.Code_CDNTaskRegistryFail: - return nil, errors.Wrapf(ErrCDNRegisterFail, "receive piece") - case base.Code_CDNTaskDownloadFail: - return nil, errors.Wrapf(ErrCDNDownloadFail, "receive piece") - default: - return nil, errors.Wrapf(ErrCDNUnknown, "recive piece") - } - } - return nil, errors.Wrapf(ErrCDNInvokeFail, "receive piece from cdn: %v", err) - } - - if piece != nil { - logger.Infof("task %s add piece %v", task.ID, piece) - if !initialized { - cdnPeer, err = c.initCDNPeer(ctx, task, piece) - if err != nil { - return nil, err - } - - logger.Infof("task %s init cdn peer %v", task.ID, cdnPeer) - if !task.CanSchedule() { - task.SetStatus(TaskStatusSeeding) - } - initialized = true - } - - span.AddEvent(config.EventCDNPieceReceived, trace.WithAttributes(config.AttributePieceReceived.String(piece.String()))) - cdnPeer.Touch() - if piece.Done { - logger.Infof("task %s receive pieces finish", task.ID) - task.TotalPieceCount.Store(piece.TotalPieceCount) - task.ContentLength.Store(piece.ContentLength) - task.SetStatus(TaskStatusSuccess) - cdnPeer.SetStatus(PeerStatusSuccess) - if task.ContentLength.Load() <= TinyFileSize { - data, err := downloadTinyFile(ctx, task, cdnPeer.Host) - if err == nil && len(data) == int(task.ContentLength.Load()) { - task.DirectPiece = data - } - } - span.SetAttributes(config.AttributePeerDownloadSuccess.Bool(true)) - span.SetAttributes(config.AttributeContentLength.Int64(task.ContentLength.Load())) - return cdnPeer, nil - } - - cdnPeer.UpdateProgress(piece.PieceInfo.PieceNum+1, 0) - task.GetOrAddPiece(piece.PieceInfo) - } - } -} - -func (c *cdn) initCDNPeer(ctx context.Context, task *Task, ps *cdnsystem.PieceSeed) (*Peer, error) { - span := trace.SpanFromContext(ctx) - span.AddEvent(config.EventCreateCDNPeer) - - var host *Host - var ok bool - peer, ok := c.peerManager.Get(ps.PeerId) - if !ok { - if host, ok = c.hostManager.Get(ps.HostUuid); !ok { - if host, ok = c.client.GetHost(ps.HostUuid); !ok { - logger.Errorf("cannot find cdn host %s", ps.HostUuid) - return nil, errors.Wrapf(ErrInitCDNPeerFail, "cannot find host %s", ps.HostUuid) - } - c.hostManager.Add(host) - } - peer = NewPeer(ps.PeerId, task, host) - } - - peer.SetStatus(PeerStatusRunning) - c.peerManager.Add(peer) - peer.Task.Log().Debugf("cdn peer %s has been added", peer.ID) - return peer, nil -} - -func downloadTinyFile(ctx context.Context, task *Task, cdnHost *Host) ([]byte, error) { - // download url: http://${host}:${port}/download/${taskIndex}/${taskID}?peerId=scheduler; - // taskIndex is the first three characters of taskID - url := fmt.Sprintf("http://%s:%d/download/%s/%s?peerId=scheduler", - cdnHost.IP, cdnHost.DownloadPort, task.ID[:3], task.ID) - - span := trace.SpanFromContext(ctx) - span.AddEvent(config.EventDownloadTinyFile, trace.WithAttributes(config.AttributeDownloadFileURL.String(url))) - - resp, err := http.Get(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - data, err := io.ReadAll(resp.Body) - if err != nil { - return nil, 
err - } - - return data, nil -} - -type CDNDynmaicClient interface { - // cdnclient is cdn grpc client - cdnclient.CdnClient - // Observer is dynconfig observer - config.Observer - // Get cdn host - GetHost(hostID string) (*Host, bool) -} - -type cdnDynmaicClient struct { - cdnclient.CdnClient - data *config.DynconfigData - hosts map[string]*Host - lock sync.RWMutex - peerManager PeerManager - hostManager HostManager -} - -func NewCDNDynmaicClient(dynConfig config.DynconfigInterface, peerManager PeerManager, hostManager HostManager, opts []grpc.DialOption) (CDNDynmaicClient, error) { - config, err := dynConfig.Get() - if err != nil { - return nil, err - } - - client, err := cdnclient.GetClientByAddr(cdnsToNetAddrs(config.CDNs), opts...) - if err != nil { - return nil, err - } - - dc := &cdnDynmaicClient{ - CdnClient: client, - data: config, - hosts: cdnsToHosts(config.CDNs), - peerManager: peerManager, - hostManager: hostManager, - } - - dynConfig.Register(dc) - return dc, nil -} - -func (dc *cdnDynmaicClient) GetHost(id string) (*Host, bool) { - dc.lock.RLock() - defer dc.lock.RUnlock() - - host, ok := dc.hosts[id] - if !ok { - return nil, false - } - - return host, true -} - -func (dc *cdnDynmaicClient) OnNotify(data *config.DynconfigData) { - if reflect.DeepEqual(dc.data, data) { - return - } - - dc.lock.Lock() - defer dc.lock.Unlock() - - // If cdn is deleted, clear cdn related information - hosts := cdnsToHosts(data.CDNs) - logger.Infof("cdn hosts %#v update to %#v", dc.hosts, hosts) - - for _, v := range dc.hosts { - id := idgen.CDNHostID(v.HostName, v.RPCPort) - for _, host := range hosts { - if v.HostName != host.HostName { - continue - } - - if v.RPCPort != host.RPCPort { - continue - } - - if v.IP == host.IP { - continue - } - - v.Log().Info("host has been deleted") - if host, ok := dc.hostManager.Get(id); ok { - host.GetPeers().Range(func(_, value interface{}) bool { - if peer, ok := value.(*Peer); ok { - peer.Log().Info("cdn peer left because cdn host was deleted") - peer.Leave() - } - - return true - }) - v.Log().Info("delete cdn host from host manager because cdn host was deleted") - dc.hostManager.Delete(id) - } else { - v.Log().Warn("can not found host from host manager") - } - } - } - - dc.data = data - dc.hosts = hosts - dc.UpdateState(cdnsToNetAddrs(data.CDNs)) -} - -// cdnsToHosts coverts []*config.CDN to map[string]*Host. -func cdnsToHosts(cdns []*config.CDN) map[string]*Host { - hosts := map[string]*Host{} - for _, cdn := range cdns { - var options []HostOption - if config, ok := cdn.GetCDNClusterConfig(); ok { - options = []HostOption{ - WithNetTopology(config.NetTopology), - WithTotalUploadLoad(config.LoadLimit), - } - } - - id := idgen.CDNHostID(cdn.HostName, cdn.Port) - hosts[id] = NewCDNHost(id, cdn.IP, cdn.HostName, cdn.Port, cdn.DownloadPort, cdn.SecurityGroup, cdn.Location, cdn.IDC, options...) - } - - return hosts -} - -// cdnsToNetAddrs coverts []*config.CDN to []dfnet.NetAddr. 
-func cdnsToNetAddrs(cdns []*config.CDN) []dfnet.NetAddr { - netAddrs := make([]dfnet.NetAddr, 0, len(cdns)) - for _, cdn := range cdns { - netAddrs = append(netAddrs, dfnet.NetAddr{ - Type: dfnet.TCP, - Addr: fmt.Sprintf("%s:%d", cdn.IP, cdn.Port), - }) - } - - return netAddrs -} diff --git a/scheduler/supervisor/cdn_test.go b/scheduler/supervisor/cdn_test.go deleted file mode 100644 index 85b59048f69..00000000000 --- a/scheduler/supervisor/cdn_test.go +++ /dev/null @@ -1,521 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package supervisor_test - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "reflect" - "testing" - - "github.com/agiledragon/gomonkey/v2" - "github.com/golang/mock/gomock" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - - "d7y.io/dragonfly/v2/internal/dferrors" - logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" - "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" - "d7y.io/dragonfly/v2/scheduler/supervisor" - "d7y.io/dragonfly/v2/scheduler/supervisor/mocks" -) - -var ( - mockPieceSeedStream = &client.PieceSeedStream{} - mockPieceSeed = &cdnsystem.PieceSeed{} - mockHost = &supervisor.Host{} - mockTask = &supervisor.Task{} - mockPeer = &supervisor.Peer{} - mockLogger = &logger.SugaredLoggerOnWith{} -) - -func TestCDN_Nil(t *testing.T) { - tests := []struct { - name string - status supervisor.TaskStatus - mock func(t *testing.T) (supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) - expect func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) - }{ - { - name: "nil client", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockTask.ID = "mocktask" - - patch := &gomonkey.Patches{} - return mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.NotNil(cdn) - assert.Nil(cdn.GetClient()) - assert.Nil(peer) - assert.Equal(supervisor.ErrCDNClientUninitialized, err) - - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - mockPeerManager, mockHostManager, patch := tc.mock(t) - cdn := supervisor.NewCDN(nil, mockPeerManager, mockHostManager) - mockTask.SetStatus(tc.status) - peer, err := cdn.StartSeedTask(context.Background(), mockTask) - tc.expect(t, cdn, peer, err) - patch.Reset() - }) - } -} - -func TestCDN_Initial(t *testing.T) { - tests := []struct { - name string - task *supervisor.Task - status supervisor.TaskStatus - mock func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) - expect func(t *testing.T, cdn supervisor.CDN, peer 
*supervisor.Peer, err error) - }{ - { - name: "ObtainSeeds cause CDNTaskRegistryFail", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - err := dferrors.New(base.Code_CDNTaskRegistryFail, "mockError") - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(nil, err).AnyTimes() - - patch := &gomonkey.Patches{} - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Nil(peer) - assert.Error(supervisor.ErrCDNRegisterFail, errors.Cause(err)) - }, - }, - { - name: "ObtainSeeds cause CDNTaskDownloadFail", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - err := dferrors.New(base.Code_CDNTaskDownloadFail, "mockError") - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(nil, err).AnyTimes() - - patch := &gomonkey.Patches{} - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Nil(peer) - assert.Error(supervisor.ErrCDNDownloadFail, errors.Cause(err)) - }, - }, - { - name: "ObtainSeeds cause other errors", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - err := dferrors.New(114514, "mockError") - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(nil, err).AnyTimes() - - patch := &gomonkey.Patches{} - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Nil(peer) - assert.Error(supervisor.ErrCDNUnknown, errors.Cause(err)) - }, - }, - { - name: "ObtainSeeds cause invoke client failed", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - err := fmt.Errorf("invoke error") - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(nil, err).AnyTimes() - - patch := &gomonkey.Patches{} - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - 
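// errors.Cause unwraps the error chain built with errors.Wrapf in receivePiece
// (see cdn.go above), so the wrapped sentinel, here supervisor.ErrCDNInvokeFail,
// can be compared directly.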
assert.Nil(peer) - assert.Equal(supervisor.ErrCDNInvokeFail, errors.Cause(err)) - }, - }, - { - name: "failed for EOF and TaskStatusWaiting", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(mockPieceSeedStream, nil).AnyTimes() - - streamRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{nil, io.EOF}}, - } - patch := gomonkey.ApplyMethodSeq(reflect.TypeOf(mockPieceSeedStream), "Recv", streamRet) - - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Nil(peer) - assert.Error(err) - }, - }, - { - name: "success for EOF and TaskStatusSuccess", - status: supervisor.TaskStatusSuccess, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(mockPieceSeedStream, nil).AnyTimes() - mockPeerManager.EXPECT().Get(gomock.Any()).Return(nil, false).AnyTimes() - mockPeerManager.EXPECT().Add(gomock.Any()).Return().AnyTimes() - mockHostManager.EXPECT().Get(gomock.Any()).Return(mockHost, true).AnyTimes() - - patch := gomonkey.ApplyMethodSeq(reflect.TypeOf(mockLogger), "Debugf", - []gomonkey.OutputCell{{Values: gomonkey.Params{}}}) - - patch.ApplyMethodSeq(reflect.TypeOf(mockTask), "GetOrAddPiece", - []gomonkey.OutputCell{{Values: gomonkey.Params{nil, true}}}) - patch.ApplyMethodSeq(reflect.TypeOf(mockTask), "Log", - []gomonkey.OutputCell{{Values: gomonkey.Params{mockLogger}}}) - - patch.ApplyMethodSeq(reflect.TypeOf(mockPeer), "Touch", - []gomonkey.OutputCell{{Values: gomonkey.Params{}}}) - patch.ApplyMethodSeq(reflect.TypeOf(mockPeer), "UpdateProgress", - []gomonkey.OutputCell{{Values: gomonkey.Params{}}}) - - newPeerRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{mockPeer}}, - } - patch.ApplyFuncSeq(supervisor.NewPeer, newPeerRet) - - streamRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{mockPieceSeed, nil}, Times: 1}, - {Values: gomonkey.Params{nil, io.EOF}, Times: 1}, - } - patch.ApplyMethodSeq(reflect.TypeOf(mockPieceSeedStream), "Recv", streamRet) - - mockPieceSeed.PieceInfo = &base.PieceInfo{PieceNum: 0} - mockPeer.Task = mockTask - mockPeer.ID = "114514" - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Equal(mockPeer, peer) - assert.Nil(err) - }, - }, - { - name: "receivePiece cause CDNTaskRegistryFail", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := 
mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(mockPieceSeedStream, nil).AnyTimes() - - err := dferrors.New(base.Code_CDNTaskRegistryFail, "mockError") - streamRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{nil, err}}, - } - patch := gomonkey.ApplyMethodSeq(reflect.TypeOf(mockPieceSeedStream), "Recv", streamRet) - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Nil(peer) - assert.Error(supervisor.ErrCDNRegisterFail, errors.Cause(err)) - }, - }, - { - name: "receivePiece cause CDNTaskDownloadFail", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(mockPieceSeedStream, nil).AnyTimes() - - err := dferrors.New(base.Code_CDNTaskDownloadFail, "mockError") - streamRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{nil, err}}, - } - patch := gomonkey.ApplyMethodSeq(reflect.TypeOf(mockPieceSeedStream), "Recv", streamRet) - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Nil(peer) - assert.Error(supervisor.ErrCDNDownloadFail, errors.Cause(err)) - }, - }, - { - name: "receivePiece cause other errors", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(mockPieceSeedStream, nil).AnyTimes() - - err := dferrors.New(114514, "mockError") - streamRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{nil, err}}, - } - patch := gomonkey.ApplyMethodSeq(reflect.TypeOf(mockPieceSeedStream), "Recv", streamRet) - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Nil(peer) - assert.Error(supervisor.ErrCDNUnknown, errors.Cause(err)) - }, - }, - { - name: "receivePiece cause invoke client failed", - - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(mockPieceSeedStream, nil).AnyTimes() - - err := fmt.Errorf("invoke error") - streamRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{nil, err}}, - } - patch := 
gomonkey.ApplyMethodSeq(reflect.TypeOf(mockPieceSeedStream), "Recv", streamRet) - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Nil(peer) - assert.Equal(supervisor.ErrCDNInvokeFail, errors.Cause(err)) - }, - }, - { - name: "initCDNPeer peer is nil", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(mockPieceSeedStream, nil).AnyTimes() - mockCDNDynmaicClient.EXPECT().GetHost(gomock.Any()).Return(nil, false).AnyTimes() - mockPeerManager.EXPECT().Get(gomock.Any()).Return(nil, false).AnyTimes() - mockHostManager.EXPECT().Get(gomock.Any()).Return(nil, false).AnyTimes() - - streamRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{mockPieceSeed, nil}}, - } - patch := gomonkey.ApplyMethodSeq(reflect.TypeOf(mockPieceSeedStream), "Recv", streamRet) - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Nil(peer) - assert.Equal(supervisor.ErrInitCDNPeerFail, errors.Cause(err)) - }, - }, - { - name: "downloadTinyFile http.Get error (restore host from hostManager)", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(mockPieceSeedStream, nil).AnyTimes() - mockCDNDynmaicClient.EXPECT().GetHost(gomock.Any()).Return(nil, false).AnyTimes() - mockPeerManager.EXPECT().Get(gomock.Any()).Return(nil, false).AnyTimes() - mockPeerManager.EXPECT().Add(gomock.Any()).Return().AnyTimes() - mockHostManager.EXPECT().Get(gomock.Any()).Return(mockHost, true).AnyTimes() - - patch := gomonkey.ApplyMethodSeq(reflect.TypeOf(mockLogger), "Debugf", - []gomonkey.OutputCell{{Values: gomonkey.Params{}}}) - - patch.ApplyMethodSeq(reflect.TypeOf(mockTask), "Log", - []gomonkey.OutputCell{{Values: gomonkey.Params{mockLogger}}}) - - err := fmt.Errorf("http error") - httpRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{nil, err}}, - } - - patch.ApplyFuncSeq(http.Get, httpRet) - - patch.ApplyMethodSeq(reflect.TypeOf(mockPeer), "Touch", - []gomonkey.OutputCell{{Values: gomonkey.Params{}}}) - - newPeerRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{mockPeer}}, - } - patch.ApplyFuncSeq(supervisor.NewPeer, newPeerRet) - - streamRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{mockPieceSeed, nil}}, - } - patch.ApplyMethodSeq(reflect.TypeOf(mockPieceSeedStream), "Recv", streamRet) - - mockPieceSeed.Done = true - mockHost.IP = "0.0.0.0" - mockHost.DownloadPort = 1919 - mockTask.ID = "1919810" - mockPeer.Host = mockHost - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t 
*testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Equal(mockPeer, peer) - assert.Nil(err) - }, - }, - { - name: "downloadTinyFile success (restore host from client)", - status: supervisor.TaskStatusWaiting, - mock: func(t *testing.T) (supervisor.CDNDynmaicClient, supervisor.PeerManager, supervisor.HostManager, *gomonkey.Patches) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - mockCDNDynmaicClient := mocks.NewMockCDNDynmaicClient(ctl) - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockHostManager := mocks.NewMockHostManager(ctl) - mockCDNDynmaicClient.EXPECT().ObtainSeeds(gomock.Any(), gomock.Any()).Return(mockPieceSeedStream, nil).AnyTimes() - mockCDNDynmaicClient.EXPECT().GetHost(gomock.Any()).Return(mockHost, true).AnyTimes() - mockPeerManager.EXPECT().Get(gomock.Any()).Return(nil, false).AnyTimes() - mockPeerManager.EXPECT().Add(gomock.Any()).Return().AnyTimes() - mockHostManager.EXPECT().Get(gomock.Any()).Return(nil, false).AnyTimes() - mockHostManager.EXPECT().Add(gomock.Any()).Return().AnyTimes() - - patch := gomonkey.ApplyMethodSeq(reflect.TypeOf(mockLogger), "Debugf", - []gomonkey.OutputCell{{Values: gomonkey.Params{}}}) - - patch.ApplyMethodSeq(reflect.TypeOf(mockTask), "Log", - []gomonkey.OutputCell{{Values: gomonkey.Params{mockLogger}}}) - - const testwords string = "dragonfly-scheduler-test" - res := &http.Response{ - Body: io.NopCloser( - bytes.NewBuffer([]byte(testwords))), - } - httpRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{res, nil}}, - } - patch.ApplyFuncSeq(http.Get, httpRet) - - patch.ApplyMethodSeq(reflect.TypeOf(mockPeer), "Touch", - []gomonkey.OutputCell{{Values: gomonkey.Params{}}}) - - newPeerRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{mockPeer}}, - } - patch.ApplyFuncSeq(supervisor.NewPeer, newPeerRet) - - streamRet := []gomonkey.OutputCell{ - {Values: gomonkey.Params{mockPieceSeed, nil}}, - } - patch.ApplyMethodSeq(reflect.TypeOf(mockPieceSeedStream), "Recv", streamRet) - - mockPieceSeed.Done = true - mockPieceSeed.ContentLength = int64(len(testwords)) - mockHost.IP = "0.0.0.0" - mockHost.DownloadPort = 1919 - mockTask.ID = "1919810" - mockPeer.Host = mockHost - return mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch - }, - expect: func(t *testing.T, cdn supervisor.CDN, peer *supervisor.Peer, err error) { - assert := assert.New(t) - assert.Equal(mockPeer, peer) - assert.Nil(err) - assert.Equal([]byte("dragonfly-scheduler-test"), mockTask.DirectPiece) - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - mockCDNDynmaicClient, mockPeerManager, mockHostManager, patch := tc.mock(t) - cdn := supervisor.NewCDN(mockCDNDynmaicClient, mockPeerManager, mockHostManager) - mockTask.SetStatus(tc.status) - peer, err := cdn.StartSeedTask(context.Background(), mockTask) - tc.expect(t, cdn, peer, err) - patch.Reset() - }) - } -} diff --git a/scheduler/supervisor/host.go b/scheduler/supervisor/host.go deleted file mode 100644 index 51d63c9ef29..00000000000 --- a/scheduler/supervisor/host.go +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//go:generate mockgen -destination ./mocks/host_mock.go -package mocks d7y.io/dragonfly/v2/scheduler/supervisor HostManager - -package supervisor - -import ( - "sync" - - "go.uber.org/atomic" - - logger "d7y.io/dragonfly/v2/internal/dflog" -) - -const ( - // When using the manager configuration parameter, the maximum load number is limited to 5000 - HostMaxLoad = 5 * 1000 -) - -type HostManager interface { - // Add host - Add(*Host) - // Get host - Get(string) (*Host, bool) - // Delete host - Delete(string) -} - -type hostManager struct { - // host map - *sync.Map -} - -func NewHostManager() HostManager { - return &hostManager{&sync.Map{}} -} - -func (m *hostManager) Get(key string) (*Host, bool) { - host, ok := m.Load(key) - if !ok { - return nil, false - } - - return host.(*Host), ok -} - -func (m *hostManager) Add(host *Host) { - m.Store(host.UUID, host) -} - -func (m *hostManager) Delete(key string) { - m.Map.Delete(key) -} - -type HostOption func(rt *Host) *Host - -func WithTotalUploadLoad(load uint32) HostOption { - return func(h *Host) *Host { - h.TotalUploadLoad = load - return h - } -} - -func WithNetTopology(n string) HostOption { - return func(h *Host) *Host { - h.NetTopology = n - return h - } -} - -type Host struct { - // UUID of the host; the daemon generates a different uuid each time it starts - UUID string - // IP peer host ip - IP string - // HostName peer host name - HostName string - // RPCPort rpc service port for peer - RPCPort int32 - // DownloadPort piece downloading port for peer - DownloadPort int32 - // IsCDN indicates whether the host type is cdn - IsCDN bool - // SecurityDomain security isolation domain for network - SecurityDomain string - // Location location path: area|country|province|city|... - Location string - // IDC idc where the peer host is located - IDC string - // NetTopology network device path - // according to the user's own network topology definition, coverage ranges from large to small, separated by the | symbol, - // Example: switch|router|... - NetTopology string - // TODO TotalUploadLoad and CurrentUploadLoad should be decided by real-time host info reported by the client - TotalUploadLoad uint32 - // CurrentUploadLoad is current upload load number - CurrentUploadLoad atomic.Uint32 - // peers info map - peers *sync.Map - // host logger - logger *logger.SugaredLoggerOnWith -} - -func NewClientHost(uuid, ip, hostname string, rpcPort, downloadPort int32, securityDomain, location, idc string, options ...HostOption) *Host { - return newHost(uuid, ip, hostname, rpcPort, downloadPort, false, securityDomain, location, idc, options...) -} -
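The HostOption helpers above follow the functional options pattern; cdnsToHosts in cdn.go applies them when a CDN cluster config is present. A minimal sketch of constructing a CDN host with both options (all argument values here are illustrative, using the NewCDNHost constructor defined just below):

host := NewCDNHost(
	"cdn-host-uuid", "192.168.1.10", "cdn-0", 8003, 8001, "", "", "",
	WithNetTopology("switch|router"),
	WithTotalUploadLoad(200),
)
fmt.Println(host.GetFreeUploadLoad()) // 200 until peers start uploading

-func NewCDNHost(uuid, ip, hostname string, rpcPort, downloadPort int32, securityDomain, location, idc string, options ...HostOption) *Host { - return newHost(uuid, ip, hostname, rpcPort, downloadPort, true, securityDomain, location, idc, options...)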
-} - -func newHost(uuid, ip, hostname string, rpcPort, downloadPort int32, isCDN bool, securityDomain, location, idc string, options ...HostOption) *Host { - host := &Host{ - UUID: uuid, - IP: ip, - HostName: hostname, - RPCPort: rpcPort, - DownloadPort: downloadPort, - IsCDN: isCDN, - SecurityDomain: securityDomain, - Location: location, - IDC: idc, - NetTopology: "", - TotalUploadLoad: 100, - peers: &sync.Map{}, - logger: logger.With("hostUUID", uuid), - } - - for _, opt := range options { - opt(host) - } - - return host -} - -func (h *Host) AddPeer(peer *Peer) { - h.peers.Store(peer.ID, peer) -} - -func (h *Host) DeletePeer(id string) { - h.peers.Delete(id) -} - -func (h *Host) GetPeer(id string) (*Peer, bool) { - peer, ok := h.peers.Load(id) - if !ok { - return nil, false - } - - return peer.(*Peer), ok -} - -func (h *Host) GetPeers() *sync.Map { - return h.peers -} - -func (h *Host) GetPeersLen() int { - length := 0 - h.peers.Range(func(_, _ interface{}) bool { - length++ - return true - }) - - return length -} - -func (h *Host) GetFreeUploadLoad() int32 { - return int32(h.TotalUploadLoad - h.CurrentUploadLoad.Load()) -} - -func (h *Host) Log() *logger.SugaredLoggerOnWith { - return h.logger -} diff --git a/scheduler/supervisor/host_test.go b/scheduler/supervisor/host_test.go deleted file mode 100644 index 8d56fdd922c..00000000000 --- a/scheduler/supervisor/host_test.go +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package supervisor_test - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - - "d7y.io/dragonfly/v2/scheduler/supervisor" -) - -func TestHost_New(t *testing.T) { - tests := []struct { - name string - host *supervisor.Host - expect func(t *testing.T, host *supervisor.Host) - }{ - { - name: "create by normal config", - host: supervisor.NewClientHost("main", "127.0.0.1", "Client", 8080, 8081, "", "", ""), - expect: func(t *testing.T, host *supervisor.Host) { - assert := assert.New(t) - assert.Equal("main", host.UUID) - }, - }, - { - name: "create CDN by normal config", - host: supervisor.NewCDNHost("main", "127.0.0.1", "Client", 8080, 8081, "", "", ""), - expect: func(t *testing.T, host *supervisor.Host) { - assert := assert.New(t) - assert.Equal("main", host.UUID) - }, - }, - { - name: "create by special symbols", - host: supervisor.NewClientHost("⁂⁎♜♝♞⁑(๑ `▽´๑)", "127.0.0.1", "Client", 8080, 8081, "", "", ""), - expect: func(t *testing.T, host *supervisor.Host) { - assert := assert.New(t) - assert.Equal("⁂⁎♜♝♞⁑(๑ `▽´๑)", host.UUID) - }, - }, - { - name: "create by error address", - host: supervisor.NewClientHost("host", "0.0.0.0", "Client", 8080, 8080, "", "", ""), - expect: func(t *testing.T, host *supervisor.Host) { - assert := assert.New(t) - assert.Equal("host", host.UUID) - }, - }, - { - name: "create with geography information", - host: supervisor.NewClientHost("host", "127.0.0.1", "Client", 8080, 8081, "goagle", "microsaft", "facebaok"), - expect: func(t *testing.T, host *supervisor.Host) { - assert := assert.New(t) - assert.Equal("host", host.UUID) - assert.Equal("goagle", host.SecurityDomain) - assert.Equal("microsaft", host.Location) - assert.Equal("facebaok", host.IDC) - }, - }, - { - name: "create by error address", - host: supervisor.NewClientHost("host", "-1.257.w.-0", "Client", -100, 29000, "", "", ""), - expect: func(t *testing.T, host *supervisor.Host) { - assert := assert.New(t) - assert.Equal("host", host.UUID) - }, - }, - { - name: "create by normal config", - host: supervisor.NewClientHost("host", "127.0.0.1", "Client", 8080, 8081, "", "", ""), - expect: func(t *testing.T, host *supervisor.Host) { - assert := assert.New(t) - assert.Equal("host", host.UUID) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - tc.expect(t, tc.host) - }) - } -} - -func TestHostManager_New(t *testing.T) { - tests := []struct { - name string - expect func(t *testing.T, hostManager supervisor.HostManager) - }{ - { - name: "simple create", - expect: func(t *testing.T, hostManager supervisor.HostManager) { - assert := assert.New(t) - assert.NotNil(hostManager) - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - hostManager := supervisor.NewHostManager() - tc.expect(t, hostManager) - }) - } -} - -func TestHostManager_Get(t *testing.T) { - tests := []struct { - name string - number int - fetch int - expect func(t *testing.T, host *supervisor.Host, success bool) - }{ - { - name: "fetch first host", - number: 3, - fetch: 0, - expect: func(t *testing.T, host *supervisor.Host, success bool) { - assert := assert.New(t) - assert.Equal("0", host.UUID) - assert.True(success) - }, - }, - { - name: "fetch last host", - number: 3, - fetch: 2, - expect: func(t *testing.T, host *supervisor.Host, success bool) { - assert := assert.New(t) - assert.Equal("2", host.UUID) - assert.True(success) - }, - }, - { - name: "fetch not exist host", - number: 3, - fetch: -1, - expect: func(t *testing.T, host *supervisor.Host, 
success bool) { - assert := assert.New(t) - assert.Equal((*supervisor.Host)(nil), host) - assert.False(success) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - hostManager := supervisor.NewHostManager() - for i := 0; i < tc.number; i++ { - index := strconv.Itoa(i) - host := mockAHost(index) - hostManager.Add(host) - } - host, success := hostManager.Get(strconv.Itoa(tc.fetch)) - tc.expect(t, host, success) - }) - } -} - -func TestHostManager_Delete(t *testing.T) { - tests := []struct { - name string - number int - delete int - expect func(t *testing.T, host *supervisor.Host, success bool) - }{ - { - name: "delete exist host", - number: 1, - delete: 0, - expect: func(t *testing.T, host *supervisor.Host, success bool) { - assert := assert.New(t) - assert.Nil(host) - assert.False(success) - }, - }, - { - name: "delete not exist host", - number: 1, - delete: 100, - expect: func(t *testing.T, host *supervisor.Host, success bool) { - assert := assert.New(t) - assert.Nil(host) - assert.False(success) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - hostManager := supervisor.NewHostManager() - for i := 0; i < tc.number; i++ { - index := strconv.Itoa(i) - host := mockAHost(index) - hostManager.Add(host) - } - hostManager.Delete(strconv.Itoa(tc.delete)) - host, success := hostManager.Get(strconv.Itoa(tc.delete)) - - tc.expect(t, host, success) - }) - } -} - -func mockAHost(UUID string) *supervisor.Host { - host := supervisor.NewClientHost(UUID, "127.0.0.1", "Client", 8080, 8081, "", "", "") - return host -} diff --git a/scheduler/supervisor/mocks/cdn_mock.go b/scheduler/supervisor/mocks/cdn_mock.go deleted file mode 100644 index 510447605e9..00000000000 --- a/scheduler/supervisor/mocks/cdn_mock.go +++ /dev/null @@ -1,135 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: d7y.io/dragonfly/v2/scheduler/supervisor (interfaces: CDNDynmaicClient) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - dfnet "d7y.io/dragonfly/v2/internal/dfnet" - base "d7y.io/dragonfly/v2/pkg/rpc/base" - cdnsystem "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" - client "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" - config "d7y.io/dragonfly/v2/scheduler/config" - supervisor "d7y.io/dragonfly/v2/scheduler/supervisor" - gomock "github.com/golang/mock/gomock" - grpc "google.golang.org/grpc" -) - -// MockCDNDynmaicClient is a mock of CDNDynmaicClient interface. -type MockCDNDynmaicClient struct { - ctrl *gomock.Controller - recorder *MockCDNDynmaicClientMockRecorder -} - -// MockCDNDynmaicClientMockRecorder is the mock recorder for MockCDNDynmaicClient. -type MockCDNDynmaicClientMockRecorder struct { - mock *MockCDNDynmaicClient -} - -// NewMockCDNDynmaicClient creates a new mock instance. -func NewMockCDNDynmaicClient(ctrl *gomock.Controller) *MockCDNDynmaicClient { - mock := &MockCDNDynmaicClient{ctrl: ctrl} - mock.recorder = &MockCDNDynmaicClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCDNDynmaicClient) EXPECT() *MockCDNDynmaicClientMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockCDNDynmaicClient) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. 
-func (mr *MockCDNDynmaicClientMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockCDNDynmaicClient)(nil).Close)) -} - -// GetHost mocks base method. -func (m *MockCDNDynmaicClient) GetHost(arg0 string) (*supervisor.Host, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHost", arg0) - ret0, _ := ret[0].(*supervisor.Host) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetHost indicates an expected call of GetHost. -func (mr *MockCDNDynmaicClientMockRecorder) GetHost(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHost", reflect.TypeOf((*MockCDNDynmaicClient)(nil).GetHost), arg0) -} - -// GetPieceTasks mocks base method. -func (m *MockCDNDynmaicClient) GetPieceTasks(arg0 context.Context, arg1 dfnet.NetAddr, arg2 *base.PieceTaskRequest, arg3 ...grpc.CallOption) (*base.PiecePacket, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) - ret0, _ := ret[0].(*base.PiecePacket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPieceTasks indicates an expected call of GetPieceTasks. -func (mr *MockCDNDynmaicClientMockRecorder) GetPieceTasks(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockCDNDynmaicClient)(nil).GetPieceTasks), varargs...) -} - -// ObtainSeeds mocks base method. -func (m *MockCDNDynmaicClient) ObtainSeeds(arg0 context.Context, arg1 *cdnsystem.SeedRequest, arg2 ...grpc.CallOption) (*client.PieceSeedStream, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ObtainSeeds", varargs...) - ret0, _ := ret[0].(*client.PieceSeedStream) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ObtainSeeds indicates an expected call of ObtainSeeds. -func (mr *MockCDNDynmaicClientMockRecorder) ObtainSeeds(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockCDNDynmaicClient)(nil).ObtainSeeds), varargs...) -} - -// OnNotify mocks base method. -func (m *MockCDNDynmaicClient) OnNotify(arg0 *config.DynconfigData) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "OnNotify", arg0) -} - -// OnNotify indicates an expected call of OnNotify. -func (mr *MockCDNDynmaicClientMockRecorder) OnNotify(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnNotify", reflect.TypeOf((*MockCDNDynmaicClient)(nil).OnNotify), arg0) -} - -// UpdateState mocks base method. -func (m *MockCDNDynmaicClient) UpdateState(arg0 []dfnet.NetAddr) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "UpdateState", arg0) -} - -// UpdateState indicates an expected call of UpdateState. 
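// In the real client, UpdateState comes from the embedded CdnClient and is the
// hook OnNotify (scheduler/supervisor/cdn.go above) invokes with the refreshed
// CDN addresses, so the underlying gRPC connections follow dynconfig changes;
// the mock simply records the call.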
-func (mr *MockCDNDynmaicClientMockRecorder) UpdateState(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateState", reflect.TypeOf((*MockCDNDynmaicClient)(nil).UpdateState), arg0) -} diff --git a/scheduler/supervisor/mocks/host_mock.go b/scheduler/supervisor/mocks/host_mock.go deleted file mode 100644 index 833ac9dbf5e..00000000000 --- a/scheduler/supervisor/mocks/host_mock.go +++ /dev/null @@ -1,74 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: d7y.io/dragonfly/v2/scheduler/supervisor (interfaces: HostManager) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - - supervisor "d7y.io/dragonfly/v2/scheduler/supervisor" - gomock "github.com/golang/mock/gomock" -) - -// MockHostManager is a mock of HostManager interface. -type MockHostManager struct { - ctrl *gomock.Controller - recorder *MockHostManagerMockRecorder -} - -// MockHostManagerMockRecorder is the mock recorder for MockHostManager. -type MockHostManagerMockRecorder struct { - mock *MockHostManager -} - -// NewMockHostManager creates a new mock instance. -func NewMockHostManager(ctrl *gomock.Controller) *MockHostManager { - mock := &MockHostManager{ctrl: ctrl} - mock.recorder = &MockHostManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockHostManager) EXPECT() *MockHostManagerMockRecorder { - return m.recorder -} - -// Add mocks base method. -func (m *MockHostManager) Add(arg0 *supervisor.Host) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Add", arg0) -} - -// Add indicates an expected call of Add. -func (mr *MockHostManagerMockRecorder) Add(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockHostManager)(nil).Add), arg0) -} - -// Delete mocks base method. -func (m *MockHostManager) Delete(arg0 string) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Delete", arg0) -} - -// Delete indicates an expected call of Delete. -func (mr *MockHostManagerMockRecorder) Delete(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockHostManager)(nil).Delete), arg0) -} - -// Get mocks base method. -func (m *MockHostManager) Get(arg0 string) (*supervisor.Host, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0) - ret0, _ := ret[0].(*supervisor.Host) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockHostManagerMockRecorder) Get(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockHostManager)(nil).Get), arg0) -} diff --git a/scheduler/supervisor/mocks/peer_mock.go b/scheduler/supervisor/mocks/peer_mock.go deleted file mode 100644 index 33f075eec4e..00000000000 --- a/scheduler/supervisor/mocks/peer_mock.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: d7y.io/dragonfly/v2/scheduler/supervisor (interfaces: PeerManager) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - sync "sync" - - supervisor "d7y.io/dragonfly/v2/scheduler/supervisor" - gomock "github.com/golang/mock/gomock" -) - -// MockPeerManager is a mock of PeerManager interface. 
-type MockPeerManager struct { - ctrl *gomock.Controller - recorder *MockPeerManagerMockRecorder -} - -// MockPeerManagerMockRecorder is the mock recorder for MockPeerManager. -type MockPeerManagerMockRecorder struct { - mock *MockPeerManager -} - -// NewMockPeerManager creates a new mock instance. -func NewMockPeerManager(ctrl *gomock.Controller) *MockPeerManager { - mock := &MockPeerManager{ctrl: ctrl} - mock.recorder = &MockPeerManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPeerManager) EXPECT() *MockPeerManagerMockRecorder { - return m.recorder -} - -// Add mocks base method. -func (m *MockPeerManager) Add(arg0 *supervisor.Peer) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Add", arg0) -} - -// Add indicates an expected call of Add. -func (mr *MockPeerManagerMockRecorder) Add(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockPeerManager)(nil).Add), arg0) -} - -// Delete mocks base method. -func (m *MockPeerManager) Delete(arg0 string) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Delete", arg0) -} - -// Delete indicates an expected call of Delete. -func (mr *MockPeerManagerMockRecorder) Delete(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockPeerManager)(nil).Delete), arg0) -} - -// Get mocks base method. -func (m *MockPeerManager) Get(arg0 string) (*supervisor.Peer, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0) - ret0, _ := ret[0].(*supervisor.Peer) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockPeerManagerMockRecorder) Get(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockPeerManager)(nil).Get), arg0) -} - -// GetPeers mocks base method. -func (m *MockPeerManager) GetPeers() *sync.Map { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPeers") - ret0, _ := ret[0].(*sync.Map) - return ret0 -} - -// GetPeers indicates an expected call of GetPeers. -func (mr *MockPeerManagerMockRecorder) GetPeers() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeers", reflect.TypeOf((*MockPeerManager)(nil).GetPeers)) -} - -// GetPeersByTask mocks base method. -func (m *MockPeerManager) GetPeersByTask(arg0 string) []*supervisor.Peer { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPeersByTask", arg0) - ret0, _ := ret[0].([]*supervisor.Peer) - return ret0 -} - -// GetPeersByTask indicates an expected call of GetPeersByTask. -func (mr *MockPeerManagerMockRecorder) GetPeersByTask(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeersByTask", reflect.TypeOf((*MockPeerManager)(nil).GetPeersByTask), arg0) -} diff --git a/scheduler/supervisor/mocks/task_mock.go b/scheduler/supervisor/mocks/task_mock.go deleted file mode 100644 index 91dca661c83..00000000000 --- a/scheduler/supervisor/mocks/task_mock.go +++ /dev/null @@ -1,89 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: d7y.io/dragonfly/v2/scheduler/supervisor (interfaces: TaskManager) - -// Package mocks is a generated GoMock package. 
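The generated mock below covers the TaskManager interface. The patch does not show the directive that produced it, but judging from the //go:generate lines visible in host.go and peer.go it would presumably be:

//go:generate mockgen -destination ./mocks/task_mock.go -package mocks d7y.io/dragonfly/v2/scheduler/supervisor TaskManager

A minimal sketch of wiring this mock into a test with gomock (the task ID and the expectation are illustrative):

func TestTaskManager_GetMiss(t *testing.T) {
	ctl := gomock.NewController(t)
	defer ctl.Finish()

	// Expect exactly one lookup for an unknown task and report a miss.
	taskManager := mocks.NewMockTaskManager(ctl)
	taskManager.EXPECT().Get("unknown-task").Return(nil, false)

	if _, ok := taskManager.Get("unknown-task"); ok {
		t.Fatal("expected task lookup to miss")
	}
}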
-package mocks - -import ( - reflect "reflect" - - supervisor "d7y.io/dragonfly/v2/scheduler/supervisor" - gomock "github.com/golang/mock/gomock" -) - -// MockTaskManager is a mock of TaskManager interface. -type MockTaskManager struct { - ctrl *gomock.Controller - recorder *MockTaskManagerMockRecorder -} - -// MockTaskManagerMockRecorder is the mock recorder for MockTaskManager. -type MockTaskManagerMockRecorder struct { - mock *MockTaskManager -} - -// NewMockTaskManager creates a new mock instance. -func NewMockTaskManager(ctrl *gomock.Controller) *MockTaskManager { - mock := &MockTaskManager{ctrl: ctrl} - mock.recorder = &MockTaskManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockTaskManager) EXPECT() *MockTaskManagerMockRecorder { - return m.recorder -} - -// Add mocks base method. -func (m *MockTaskManager) Add(arg0 *supervisor.Task) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Add", arg0) -} - -// Add indicates an expected call of Add. -func (mr *MockTaskManagerMockRecorder) Add(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockTaskManager)(nil).Add), arg0) -} - -// Delete mocks base method. -func (m *MockTaskManager) Delete(arg0 string) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Delete", arg0) -} - -// Delete indicates an expected call of Delete. -func (mr *MockTaskManagerMockRecorder) Delete(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockTaskManager)(nil).Delete), arg0) -} - -// Get mocks base method. -func (m *MockTaskManager) Get(arg0 string) (*supervisor.Task, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0) - ret0, _ := ret[0].(*supervisor.Task) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockTaskManagerMockRecorder) Get(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockTaskManager)(nil).Get), arg0) -} - -// GetOrAdd mocks base method. -func (m *MockTaskManager) GetOrAdd(arg0 *supervisor.Task) (*supervisor.Task, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetOrAdd", arg0) - ret0, _ := ret[0].(*supervisor.Task) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetOrAdd indicates an expected call of GetOrAdd. -func (mr *MockTaskManagerMockRecorder) GetOrAdd(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOrAdd", reflect.TypeOf((*MockTaskManager)(nil).GetOrAdd), arg0) -} diff --git a/scheduler/supervisor/peer.go b/scheduler/supervisor/peer.go deleted file mode 100644 index e873289e336..00000000000 --- a/scheduler/supervisor/peer.go +++ /dev/null @@ -1,631 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -//go:generate mockgen -destination ./mocks/peer_mock.go -package mocks d7y.io/dragonfly/v2/scheduler/supervisor PeerManager - -package supervisor - -import ( - "io" - "sync" - "time" - - "github.com/pkg/errors" - "go.uber.org/atomic" - - logger "d7y.io/dragonfly/v2/internal/dflog" - gc "d7y.io/dragonfly/v2/pkg/gc" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" - "d7y.io/dragonfly/v2/scheduler/config" -) - -const ( - PeerGCID = "peer" -) - -var ErrChannelBusy = errors.New("channel busy") - -type PeerManager interface { - // Add peer - Add(*Peer) - // Get peer - Get(string) (*Peer, bool) - // Delete peer - Delete(string) - // Get peers by task id - GetPeersByTask(string) []*Peer - // Get peers - GetPeers() *sync.Map -} - -type peerManager struct { - // hostManager is host manager - hostManager HostManager - // peerTTL is peer TTL - peerTTL time.Duration - // peerTTI is peer TTI - peerTTI time.Duration - // peers is peer map - peers *sync.Map - // peerManager lock - lock sync.RWMutex -} - -func NewPeerManager(cfg *config.GCConfig, gcManager gc.GC, hostManager HostManager) (PeerManager, error) { - m := &peerManager{ - hostManager: hostManager, - peerTTL: cfg.PeerTTL, - peerTTI: cfg.PeerTTI, - peers: &sync.Map{}, - } - - // Add GC task - if err := gcManager.Add(gc.Task{ - ID: PeerGCID, - Interval: cfg.PeerGCInterval, - Timeout: cfg.PeerGCInterval, - Runner: m, - }); err != nil { - return nil, err - } - - return m, nil -} - -func (m *peerManager) Add(peer *Peer) { - m.lock.Lock() - defer m.lock.Unlock() - - peer.Host.AddPeer(peer) - peer.Task.AddPeer(peer) - m.peers.Store(peer.ID, peer) -} - -func (m *peerManager) Get(id string) (*Peer, bool) { - peer, ok := m.peers.Load(id) - if !ok { - return nil, false - } - - return peer.(*Peer), ok -} - -func (m *peerManager) Delete(id string) { - m.lock.Lock() - defer m.lock.Unlock() - - if peer, ok := m.Get(id); ok { - peer.Host.DeletePeer(id) - peer.Task.DeletePeer(peer) - peer.ReplaceParent(nil) - m.peers.Delete(id) - } -} - -func (m *peerManager) GetPeersByTask(taskID string) []*Peer { - var peers []*Peer - m.peers.Range(func(_, value interface{}) bool { - peer := value.(*Peer) - if peer.Task.ID == taskID { - peers = append(peers, peer) - } - return true - }) - return peers -} - -func (m *peerManager) GetPeers() *sync.Map { - return m.peers -} - -func (m *peerManager) RunGC() error { - m.peers.Range(func(key, value interface{}) bool { - id := key.(string) - peer := value.(*Peer) - elapsed := time.Since(peer.lastAccessAt.Load()) - - if elapsed > m.peerTTI && !peer.IsDone() && !peer.Host.IsCDN { - if !peer.IsConnected() { - peer.Log().Infof("peer is not connected") - peer.Leave() - } - if peer.GetStatus() != PeerStatusZombie { - peer.Log().Infof("more than %s has passed since the peer's last access, its status changes from %s to zombie", m.peerTTI, peer.GetStatus().String()) - peer.SetStatus(PeerStatusZombie) - } - } - - if peer.IsLeave() || peer.IsFail() || elapsed > m.peerTTL { - if elapsed > m.peerTTL { - peer.Log().Infof("delete peer because %s has passed since last access", m.peerTTL) - } - m.Delete(id) - if peer.Host.GetPeersLen() == 0 { - m.hostManager.Delete(peer.Host.UUID) - } - if peer.Task.GetPeers().Len() == 0 { - peer.Task.Log().Info("peers is empty, task status becomes waiting") - peer.Task.SetStatus(TaskStatusWaiting) - } - } - - return true - }) - - return nil -} - -type PeerStatus uint8 - -func (status PeerStatus) String() string { - switch status { - case PeerStatusWaiting: - return "Waiting" - case PeerStatusRunning: - return "Running" - case PeerStatusZombie: - return "Zombie" - case PeerStatusFail: - return "Fail" - case PeerStatusSuccess: - return "Success" - default: - return "unknown" - } -} - -const ( - PeerStatusWaiting PeerStatus = iota - PeerStatusRunning - // TODO add Seeding status - PeerStatusZombie - PeerStatusFail - PeerStatusSuccess -) - -type Peer struct { - // ID is ID of peer - ID string - // Task is peer task - Task *Task - // Host is peer host - Host *Host - // TotalPieceCount is the number of pieces that have finished downloading - TotalPieceCount atomic.Int32 - // CreateAt is peer create time - CreateAt *atomic.Time - // lastAccessAt is peer last access time - lastAccessAt *atomic.Time - // parent is peer parent and type is *Peer - parent atomic.Value - // children is peer children map - children *sync.Map - // status is peer status and type is PeerStatus - status atomic.Value - // pieceCosts is piece historical download time - pieceCosts []int - // conn is channel instance and type is *Channel - conn atomic.Value - // leave is whether the peer leaves - leave atomic.Bool - // peer logger - logger *logger.SugaredLoggerOnWith - // peer lock - lock sync.RWMutex -} - -func NewPeer(id string, task *Task, host *Host) *Peer { - peer := &Peer{ - ID: id, - Task: task, - Host: host, - CreateAt: atomic.NewTime(time.Now()), - lastAccessAt: atomic.NewTime(time.Now()), - children: &sync.Map{}, - logger: logger.WithTaskAndPeerID(task.ID, id), - } - - peer.status.Store(PeerStatusWaiting) - return peer -} - -func (peer *Peer) GetTreeNodeCount() int { - count := 1 - peer.children.Range(func(key, value interface{}) bool { - node := value.(*Peer) - count += node.GetTreeNodeCount() - return true - }) - - return count -} - -func (peer *Peer) GetTreeDepth() int { - var deep int - node := peer - for node != nil { - deep++ - parent, ok := node.GetParent() - if !ok || node.Host.IsCDN { - break - } - node = parent - } - return deep -} - -func (peer *Peer) GetRoot() *Peer { - node := peer - for node != nil { - parent, ok := node.GetParent() - if !ok || node.Host.IsCDN { - break - } - node = parent - } - - return node -} - -// IsDescendant returns whether peer is a descendant of ancestor -func (peer *Peer) IsDescendant(ancestor *Peer) bool { - return isDescendant(ancestor, peer) -} - -func isDescendant(ancestor, offspring *Peer) bool { - if ancestor == nil || offspring == nil { - return false - } - // TODO avoid cycles - node := offspring - for node != nil { - parent, ok := node.GetParent() - if !ok { - return false - } else if parent.ID == ancestor.ID { - return true - } - node = parent - } - return false -} - -// IsAncestor returns whether peer is an ancestor of offspring -func (peer *Peer) IsAncestor(offspring *Peer) bool { - return isDescendant(peer, offspring) -} - -func (peer *Peer) insertChild(child *Peer) { - peer.children.Store(child.ID, child) - peer.Host.CurrentUploadLoad.Inc() - peer.Task.UpdatePeer(peer) -} - -func (peer *Peer) deleteChild(child *Peer) { - peer.children.Delete(child.ID) - peer.Host.CurrentUploadLoad.Dec() - peer.Task.UpdatePeer(peer) -} - -func (peer *Peer) ReplaceParent(parent *Peer) { - oldParent, ok := peer.GetParent() - if ok { - oldParent.deleteChild(peer) - } - - peer.SetParent(parent) - if parent != nil { - parent.insertChild(peer) - } -} - -func (peer *Peer) GetChildren() *sync.Map { - return peer.children -} - -func (peer *Peer) SetParent(parent *Peer) { - peer.parent.Store(parent) -} - -func (peer *Peer) GetParent() (*Peer, bool) { - parent := peer.parent.Load() - if parent == nil { - return nil, false - } - - p, ok := parent.(*Peer) - if
p == nil || !ok { - return nil, false - } - - return p, true -} - -func (peer *Peer) Touch() { - peer.lock.Lock() - defer peer.lock.Unlock() - - peer.lastAccessAt.Store(time.Now()) - if peer.GetStatus() == PeerStatusZombie && !peer.leave.Load() { - peer.SetStatus(PeerStatusRunning) - } - peer.Task.Touch() -} - -func (peer *Peer) GetPieceCosts() []int { - peer.lock.RLock() - defer peer.lock.RUnlock() - - return peer.pieceCosts -} - -func (peer *Peer) SetPieceCosts(costs ...int) { - peer.lock.Lock() - defer peer.lock.Unlock() - - peer.pieceCosts = append(peer.pieceCosts, costs...) -} - -func (peer *Peer) UpdateProgress(finishedCount int32, cost int) { - if finishedCount > peer.TotalPieceCount.Load() { - peer.TotalPieceCount.Store(finishedCount) - peer.SetPieceCosts(cost) - peer.Task.UpdatePeer(peer) - return - } -} - -func (peer *Peer) SortedValue() int { - peer.lock.RLock() - defer peer.lock.RUnlock() - - pieceCount := peer.TotalPieceCount.Load() - freeLoad := peer.getFreeLoad() - if peer.Host.IsCDN { - // if peer's host is CDN, peer has the lowest priority among all peers with the same number of pieces - return int(pieceCount * HostMaxLoad) - } - return int(pieceCount*HostMaxLoad + freeLoad) -} - -func (peer *Peer) getFreeLoad() int32 { - if peer.Host == nil { - return 0 - } - return peer.Host.GetFreeUploadLoad() -} - -func (peer *Peer) SetStatus(status PeerStatus) { - peer.status.Store(status) -} - -func (peer *Peer) GetStatus() PeerStatus { - return peer.status.Load().(PeerStatus) -} - -func (peer *Peer) Leave() { - peer.leave.Store(true) -} - -func (peer *Peer) IsLeave() bool { - return peer.leave.Load() -} - -func (peer *Peer) IsRunning() bool { - return peer.GetStatus() == PeerStatusRunning -} - -func (peer *Peer) IsWaiting() bool { - return peer.GetStatus() == PeerStatusWaiting -} - -func (peer *Peer) IsSuccess() bool { - return peer.GetStatus() == PeerStatusSuccess -} - -func (peer *Peer) IsDone() bool { - return peer.GetStatus() == PeerStatusSuccess || peer.GetStatus() == PeerStatusFail -} - -func (peer *Peer) IsBad() bool { - return peer.GetStatus() == PeerStatusFail || peer.GetStatus() == PeerStatusZombie -} - -func (peer *Peer) IsFail() bool { - return peer.GetStatus() == PeerStatusFail -} - -func (peer *Peer) BindNewConn(stream scheduler.Scheduler_ReportPieceResultServer) (*Channel, bool) { - peer.lock.Lock() - defer peer.lock.Unlock() - - if peer.GetStatus() == PeerStatusWaiting { - peer.SetStatus(PeerStatusRunning) - } - peer.setConn(newChannel(stream)) - return peer.getConn() -} - -func (peer *Peer) setConn(conn *Channel) { - peer.conn.Store(conn) -} - -func (peer *Peer) getConn() (*Channel, bool) { - conn := peer.conn.Load() - if conn == nil { - return nil, false - } - - c, ok := conn.(*Channel) - if c == nil || !ok { - return nil, false - } - - return c, true -} - -func (peer *Peer) IsConnected() bool { - conn, ok := peer.getConn() - if !ok { - return false - } - - return !conn.IsClosed() -} - -func (peer *Peer) SendSchedulePacket(packet *scheduler.PeerPacket) error { - conn, ok := peer.getConn() - if !ok { - return errors.New("client peer is not connected") - } - - return conn.Send(packet) -} - -func (peer *Peer) CloseChannelWithError(err error) error { - conn, ok := peer.getConn() - if !ok { - return errors.New("client peer is not connected") - } - - conn.err = err - conn.Close() - return nil -} - -func (peer *Peer) Log() *logger.SugaredLoggerOnWith { - return peer.logger -} - -type Channel struct { - sender chan *scheduler.PeerPacket - receiver chan 
*scheduler.PieceResult - stream scheduler.Scheduler_ReportPieceResultServer - closed *atomic.Bool - done chan struct{} - wg sync.WaitGroup - err error -} - -func newChannel(stream scheduler.Scheduler_ReportPieceResultServer) *Channel { - c := &Channel{ - sender: make(chan *scheduler.PeerPacket), - receiver: make(chan *scheduler.PieceResult), - stream: stream, - closed: atomic.NewBool(false), - done: make(chan struct{}), - } - - c.wg.Add(2) - c.start() - return c -} - -func (c *Channel) start() { - startWG := &sync.WaitGroup{} - startWG.Add(2) - - go c.receiveLoop(startWG) - go c.sendLoop(startWG) - startWG.Wait() -} - -func (c *Channel) Send(packet *scheduler.PeerPacket) error { - select { - case <-c.done: - return errors.New("conn has closed") - case c.sender <- packet: - return nil - default: - return ErrChannelBusy - } -} - -func (c *Channel) Receiver() <-chan *scheduler.PieceResult { - return c.receiver -} - -func (c *Channel) Close() { - if !c.closed.CAS(false, true) { - return - } - go func() { - close(c.done) - c.wg.Wait() - }() -} - -func (c *Channel) Error() error { - return c.err -} - -func (c *Channel) Done() <-chan struct{} { - if c.done == nil { - c.done = make(chan struct{}) - } - return c.done -} - -func (c *Channel) IsClosed() bool { - return c.closed.Load() -} - -func (c *Channel) receiveLoop(startWG *sync.WaitGroup) { - defer func() { - close(c.receiver) - c.wg.Done() - c.Close() - }() - - startWG.Done() - - for { - select { - case <-c.done: - return - default: - pieceResult, err := c.stream.Recv() - if err == io.EOF { - return - } - if err != nil { - c.err = err - return - } - c.receiver <- pieceResult - } - } -} - -func (c *Channel) sendLoop(startWG *sync.WaitGroup) { - defer func() { - c.wg.Done() - c.Close() - }() - - startWG.Done() - - for { - select { - case <-c.done: - return - case packet := <-c.sender: - if err := c.stream.Send(packet); err != nil { - c.err = err - return - } - } - } -} diff --git a/scheduler/supervisor/peer_test.go b/scheduler/supervisor/peer_test.go deleted file mode 100644 index 1dda59c78f1..00000000000 --- a/scheduler/supervisor/peer_test.go +++ /dev/null @@ -1,613 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
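Channel.Send above never blocks the scheduler: the select falls through to default and reports ErrChannelBusy when the sender goroutine is not ready to forward the packet. A minimal sketch of that fail-fast select pattern (the names and the string payload are stand-ins, not the deleted API):

package main

import (
    "errors"
    "fmt"
)

var errChannelBusy = errors.New("channel is busy")

// trySend mirrors the deleted Channel.Send: a closed conn wins first,
// a ready receiver wins next, otherwise fail fast instead of blocking.
func trySend(done chan struct{}, sender chan string, packet string) error {
    select {
    case <-done:
        return errors.New("conn is closed")
    case sender <- packet:
        return nil
    default:
        return errChannelBusy
    }
}

func main() {
    done := make(chan struct{})

    busy := make(chan string) // unbuffered and unread: send reports busy
    fmt.Println(trySend(done, busy, "packet"))

    ready := make(chan string, 1) // room in the buffer: send succeeds
    fmt.Println(trySend(done, ready, "packet"))
}

The cost of this design is that callers must be prepared to retry or drop a packet; the benefit is that one slow peer stream cannot stall the scheduling loop.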
- */ - -package supervisor_test - -import ( - "strconv" - "testing" - - "github.com/golang/mock/gomock" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - - "d7y.io/dragonfly/v2/scheduler/config" - "d7y.io/dragonfly/v2/scheduler/supervisor" - "d7y.io/dragonfly/v2/scheduler/supervisor/mocks" -) - -const ( - HostMaxLoad = 5 * 1000 -) - -func TestPeer_New(t *testing.T) { - tests := []struct { - name string - id string - expect func(t *testing.T, peer *supervisor.Peer) - }{ - { - name: "create by normal config", - id: "normal", - expect: func(t *testing.T, peer *supervisor.Peer) { - assert := assert.New(t) - assert.Equal("normal", peer.ID) - }, - }, - { - name: "create by special symbols", - id: "#@+:\b\t\\\"☹ ☺ ☻ (✿◠‿◠)", - expect: func(t *testing.T, peer *supervisor.Peer) { - assert := assert.New(t) - assert.Equal("#@+:\b\t\\\"☹ ☺ ☻ (✿◠‿◠)", peer.ID) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - task := mockATask("task") - host := mockAHost("host") - peer := supervisor.NewPeer(tc.id, task, host) - tc.expect(t, peer) - }) - } -} - -func TestPeer_Tree(t *testing.T) { - tests := []struct { - name string - number int - tree map[int]int - answer []int - expect func(t *testing.T, peers []*supervisor.Peer, number int, answer []int) - }{ - { - name: "test ID of tree structure", - number: 6, - tree: map[int]int{1: 0, 2: 0, 3: 1, 4: 1, 5: 2}, - answer: []int{0, 1, 2, 3, 4, 5}, - expect: func(t *testing.T, peers []*supervisor.Peer, number int, answer []int) { - assert := assert.New(t) - for i := 0; i < number; i++ { - assert.Equal(strconv.Itoa(answer[i]), peers[i].ID) - } - }, - }, - { - name: "test TreeNodeCount of tree structure", - number: 6, - tree: map[int]int{1: 0, 2: 0, 3: 1, 4: 1, 5: 2}, - answer: []int{6, 3, 2, 1, 1, 1}, - expect: func(t *testing.T, peers []*supervisor.Peer, number int, answer []int) { - assert := assert.New(t) - for i := 0; i < number; i++ { - assert.Equal(answer[i], peers[i].GetTreeNodeCount()) - } - }, - }, - { - name: "test TreeDepth of tree structure", - number: 6, - tree: map[int]int{1: 0, 2: 0, 3: 1, 4: 1, 5: 2}, - answer: []int{1, 2, 2, 3, 3, 3}, - expect: func(t *testing.T, peers []*supervisor.Peer, number int, answer []int) { - assert := assert.New(t) - for i := 0; i < number; i++ { - assert.Equal(answer[i], peers[i].GetTreeDepth()) - } - }, - }, - { - name: "test Root of tree structure", - number: 6, - tree: map[int]int{1: 0, 2: 0, 3: 1, 4: 1, 5: 2}, - answer: []int{0, 0, 0, 0, 0, 0}, - expect: func(t *testing.T, peers []*supervisor.Peer, number int, answer []int) { - assert := assert.New(t) - for i := 0; i < number; i++ { - assert.Equal(strconv.Itoa(answer[i]), peers[i].GetRoot().ID) - } - }, - }, - { - name: "test Parent of tree structure", - number: 6, - tree: map[int]int{1: 0, 2: 0, 3: 1, 4: 1, 5: 2}, - answer: []int{-1, 0, 0, 1, 1, 2}, - expect: func(t *testing.T, peers []*supervisor.Peer, number int, answer []int) { - assert := assert.New(t) - for i := 0; i < number; i++ { - parent, success := peers[i].GetParent() - if answer[i] < 0 { - assert.Equal((*supervisor.Peer)(nil), parent) - assert.False(success) - } else { - assert.Equal(strconv.Itoa(answer[i]), parent.ID) - assert.True(success) - } - } - }, - }, - { - name: "test Ancestor of tree structure", - number: 6, - tree: map[int]int{1: 0, 2: 0, 3: 1, 4: 1, 5: 2}, - answer: []int{}, - expect: func(t *testing.T, peers []*supervisor.Peer, number int, answer []int) { - assert := assert.New(t) - assert.False(peers[0].IsAncestor(peers[0])) - 
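In TestPeer_Tree above, the tree field encodes the topology as child index to parent index, so {1: 0, 2: 0, 3: 1, 4: 1, 5: 2} is a three-level tree rooted at peer 0. A small sketch that decodes the same map and recomputes the expected depths (plain structs, not the supervisor API):

package main

import "fmt"

func main() {
    tree := map[int]int{1: 0, 2: 0, 3: 1, 4: 1, 5: 2}

    // depth computes 1-based depth by walking child -> parent edges;
    // node 0 is the root and has no entry in the map.
    var depth func(n int) int
    depth = func(n int) int {
        parent, ok := tree[n]
        if !ok {
            return 1
        }
        return 1 + depth(parent)
    }

    for i := 0; i < 6; i++ {
        fmt.Println(i, depth(i)) // expected depths: 1 2 2 3 3 3
    }
}

This matches the answer slice {1, 2, 2, 3, 3, 3} in the TreeDepth case.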
assert.False(peers[0].IsAncestor(nil)) - - assert.True(peers[0].IsAncestor(peers[5])) - assert.False(peers[5].IsAncestor(peers[0])) - - assert.True(peers[1].IsAncestor(peers[4])) - assert.False(peers[4].IsAncestor(peers[1])) - }, - }, - { - name: "test Descendant of tree structure", - number: 6, - tree: map[int]int{1: 0, 2: 0, 3: 1, 4: 1, 5: 2}, - answer: []int{}, - expect: func(t *testing.T, peers []*supervisor.Peer, number int, answer []int) { - assert := assert.New(t) - assert.False(peers[0].IsDescendant(peers[0])) - assert.False(peers[0].IsDescendant(nil)) - - assert.False(peers[0].IsDescendant(peers[5])) - assert.True(peers[5].IsDescendant(peers[0])) - - assert.False(peers[1].IsDescendant(peers[4])) - assert.True(peers[4].IsDescendant(peers[1])) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - var peers []*supervisor.Peer - task := mockATask("task") - for i := 0; i < tc.number; i++ { - index := strconv.Itoa(i) - peer := mockAPeer(index, task) - if i > 0 { - peer.ReplaceParent(peers[tc.tree[i]]) - } - peers = append(peers, peer) - } - tc.expect(t, peers, tc.number, tc.answer) - }) - } -} - -func TestPeer_Status(t *testing.T) { - tests := []struct { - name string - status supervisor.PeerStatus - statusName string - judgeArray []bool - expect func(t *testing.T, peer *supervisor.Peer, status supervisor.PeerStatus, statusName string, judgeArray []bool) - }{ - { - name: "status Waiting", - status: supervisor.PeerStatusWaiting, - statusName: "Waiting", - judgeArray: []bool{false, true, false, false, false, false}, - expect: func(t *testing.T, peer *supervisor.Peer, status supervisor.PeerStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.PeerStatus.String(status), statusName) - assert.Equal(peer.GetStatus(), status) - - statutusJudgeArray := []bool{ - peer.IsRunning(), peer.IsWaiting(), peer.IsSuccess(), - peer.IsDone(), peer.IsBad(), peer.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "status Running", - status: supervisor.PeerStatusRunning, - statusName: "Running", - judgeArray: []bool{true, false, false, false, false, false}, - expect: func(t *testing.T, peer *supervisor.Peer, status supervisor.PeerStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.PeerStatus.String(status), statusName) - assert.Equal(peer.GetStatus(), status) - - statutusJudgeArray := []bool{ - peer.IsRunning(), peer.IsWaiting(), peer.IsSuccess(), - peer.IsDone(), peer.IsBad(), peer.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "status Zombie", - status: supervisor.PeerStatusZombie, - statusName: "Zombie", - judgeArray: []bool{false, false, false, false, true, false}, - expect: func(t *testing.T, peer *supervisor.Peer, status supervisor.PeerStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.PeerStatus.String(status), statusName) - assert.Equal(peer.GetStatus(), status) - - statutusJudgeArray := []bool{ - peer.IsRunning(), peer.IsWaiting(), peer.IsSuccess(), - peer.IsDone(), peer.IsBad(), peer.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "status Fail", - status: supervisor.PeerStatusFail, - statusName: "Fail", - judgeArray: []bool{false, false, false, true, true, true}, - expect: func(t *testing.T, peer *supervisor.Peer, status supervisor.PeerStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - 
assert.Equal(supervisor.PeerStatus.String(status), statusName) - assert.Equal(peer.GetStatus(), status) - - statutusJudgeArray := []bool{ - peer.IsRunning(), peer.IsWaiting(), peer.IsSuccess(), - peer.IsDone(), peer.IsBad(), peer.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "status Success", - status: supervisor.PeerStatusSuccess, - statusName: "Success", - judgeArray: []bool{false, false, true, true, false, false}, - expect: func(t *testing.T, peer *supervisor.Peer, status supervisor.PeerStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.PeerStatus.String(status), statusName) - assert.Equal(peer.GetStatus(), status) - - statutusJudgeArray := []bool{ - peer.IsRunning(), peer.IsWaiting(), peer.IsSuccess(), - peer.IsDone(), peer.IsBad(), peer.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "unknown", - status: 100, - statusName: "unknown", - judgeArray: []bool{false, false, false, false, false, false}, - expect: func(t *testing.T, peer *supervisor.Peer, status supervisor.PeerStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.PeerStatus.String(status), statusName) - assert.Equal(peer.GetStatus(), status) - - statutusJudgeArray := []bool{ - peer.IsRunning(), peer.IsWaiting(), peer.IsSuccess(), - peer.IsDone(), peer.IsBad(), peer.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - task := mockATask("task") - peer := mockAPeer("peer", task) - peer.SetStatus(tc.status) - - tc.expect(t, peer, tc.status, tc.statusName, tc.judgeArray) - }) - } -} - -func TestPeerManager_New(t *testing.T) { - tests := []struct { - name string - config *config.GCConfig - mock func(m *mocks.MockGCMockRecorder) - expect func(t *testing.T, peerManager supervisor.PeerManager, err error) - }{ - { - name: "create with default config", - config: config.New().Scheduler.GC, - mock: func(m *mocks.MockGCMockRecorder) { - m.Add(gomock.Any()).Return(nil).AnyTimes() - }, - expect: func(t *testing.T, peerManager supervisor.PeerManager, err error) { - assert := assert.New(t) - assert.NotNil(peerManager) - assert.Nil(err) - }, - }, - { - name: "create with strange int", - config: &config.GCConfig{ - PeerGCInterval: 1, - TaskGCInterval: 1 >> 69, - PeerTTL: 1 << 62, - PeerTTI: 1, - TaskTTL: 1, - TaskTTI: 1, - }, - mock: func(m *mocks.MockGCMockRecorder) { - m.Add(gomock.Any()).Return(nil).AnyTimes() - }, - expect: func(t *testing.T, peerManager supervisor.PeerManager, err error) { - assert := assert.New(t) - assert.NotNil(peerManager) - assert.Nil(err) - }, - }, - { - name: "gc failed", - config: config.New().Scheduler.GC, - mock: func(m *mocks.MockGCMockRecorder) { - m.Add(gomock.Any()).Return(errors.New("mockError")).AnyTimes() - }, - expect: func(t *testing.T, peerManager supervisor.PeerManager, err error) { - assert := assert.New(t) - assert.Nil(peerManager) - assert.Error(err) - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - mockHostManager := mocks.NewMockHostManager(ctl) - mockGC := mocks.NewMockGC(ctl) - tc.mock(mockGC.EXPECT()) - - peerManager, err := supervisor.NewPeerManager(tc.config, mockGC, mockHostManager) - tc.expect(t, peerManager, err) - }) - } -} - -func TestPeerManager_GetPeer(t *testing.T) { - tests := []struct { - name string - number int - fetch int - expect func(t 
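The constructor tests above hinge on a single seam: whether GC.Add succeeds decides whether the manager is built at all. A condensed sketch of that seam with a hand-rolled fake in place of the generated gomock types (all names here are illustrative):

package main

import (
    "errors"
    "fmt"
)

// gcManager is the one dependency these tests control: Add either
// registers the manager's GC task or fails construction outright.
type gcManager interface {
    Add(task string) error
}

type fakeGC struct{ err error }

func (f fakeGC) Add(string) error { return f.err }

// newManager mirrors the constructor shape: it propagates gc.Add failure.
func newManager(gc gcManager) (string, error) {
    if err := gc.Add("peer"); err != nil {
        return "", err
    }
    return "peerManager", nil
}

func main() {
    if _, err := newManager(fakeGC{}); err == nil {
        fmt.Println("create ok")
    }
    if _, err := newManager(fakeGC{err: errors.New("mockError")}); err != nil {
        fmt.Println("gc failed:", err)
    }
}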
*testing.T, peer *supervisor.Peer, success bool, err error) - }{ - { - name: "fetch first peer", - number: 3, - fetch: 0, - expect: func(t *testing.T, peer *supervisor.Peer, success bool, err error) { - assert := assert.New(t) - assert.Equal("0", peer.ID) - assert.True(success) - assert.Nil(err) - }, - }, - { - name: "fetch last peer", - number: 3, - fetch: 2, - expect: func(t *testing.T, peer *supervisor.Peer, success bool, err error) { - assert := assert.New(t) - assert.Equal("2", peer.ID) - assert.True(success) - assert.Nil(err) - }, - }, - { - name: "fetch not exist peer", - number: 3, - fetch: -1, - expect: func(t *testing.T, peer *supervisor.Peer, success bool, err error) { - assert := assert.New(t) - assert.Equal((*supervisor.Peer)(nil), peer) - assert.False(success) - assert.Nil(err) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - mockHostManager := mocks.NewMockHostManager(ctl) - mockGC := mocks.NewMockGC(ctl) - mockGC.EXPECT().Add(gomock.Any()).Return(nil).AnyTimes() - - cfg := config.New() - peerManager, err := supervisor.NewPeerManager(cfg.Scheduler.GC, mockGC, mockHostManager) - task := mockATask("123") - for i := 0; i < tc.number; i++ { - index := strconv.Itoa(i) - peer := mockAPeer(index, task) - peerManager.Add(peer) - } - peer, success := peerManager.Get(strconv.Itoa(tc.fetch)) - tc.expect(t, peer, success, err) - }) - } -} - -func TestPeerManager_Add(t *testing.T) { - tests := []struct { - name string - ID []int - expect func(t *testing.T, peer []*supervisor.Peer) - }{ - { - name: "add separate peers", - ID: []int{1, 2, 3}, - expect: func(t *testing.T, peers []*supervisor.Peer) { - assert := assert.New(t) - assert.Len(peers, 3) - }, - }, - { - name: "add duplicate peers", - ID: []int{1, 1, 1}, - expect: func(t *testing.T, peers []*supervisor.Peer) { - assert := assert.New(t) - assert.Len(peers, 1) - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - mockHostManager := mocks.NewMockHostManager(ctl) - mockGC := mocks.NewMockGC(ctl) - mockGC.EXPECT().Add(gomock.Any()).Return(nil).AnyTimes() - - cfg := config.New() - peerManager, _ := supervisor.NewPeerManager(cfg.Scheduler.GC, mockGC, mockHostManager) - task := mockATask("123") - for _, i := range tc.ID { - index := strconv.Itoa(i) - peer := mockAPeer(index, task) - peerManager.Add(peer) - } - peers := peerManager.GetPeersByTask("123") - tc.expect(t, peers) - }) - } -} - -func TestPeerManager_GetPeersByTask(t *testing.T) { - tests := []struct { - name string - tasks map[*supervisor.Task]int - fetch string - expect func(t *testing.T, peer []*supervisor.Peer) - }{ - { - name: "peer for a task", - tasks: map[*supervisor.Task]int{mockATask("123"): 3}, - fetch: "123", - expect: func(t *testing.T, peers []*supervisor.Peer) { - assert := assert.New(t) - assert.Len(peers, 3) - }, - }, - { - name: "one of two tasks", - tasks: map[*supervisor.Task]int{mockATask("123"): 2, mockATask("456"): 3}, - fetch: "123", - expect: func(t *testing.T, peers []*supervisor.Peer) { - assert := assert.New(t) - assert.Len(peers, 2) - }, - }, - { - name: "no peer for a task", - tasks: map[*supervisor.Task]int{mockATask("123"): 1}, - fetch: "456", - expect: func(t *testing.T, peers []*supervisor.Peer) { - assert := assert.New(t) - assert.Len(peers, 0) - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctl := gomock.NewController(t) - defer 
ctl.Finish() - mockHostManager := mocks.NewMockHostManager(ctl) - mockGC := mocks.NewMockGC(ctl) - mockGC.EXPECT().Add(gomock.Any()).Return(nil).AnyTimes() - - cfg := config.New() - peerManager, _ := supervisor.NewPeerManager(cfg.Scheduler.GC, mockGC, mockHostManager) - nowAt := 0 - for task, num := range tc.tasks { - for i := nowAt; i < nowAt+num; i++ { - index := strconv.Itoa(i) - peer := mockAPeer(index, task) - t.Log(i, index, nowAt, peer.ID, num) - peerManager.Add(peer) - } - nowAt += num - } - peers := peerManager.GetPeersByTask(tc.fetch) - tc.expect(t, peers) - }) - } -} - -func TestPeerManager_Delete(t *testing.T) { - tests := []struct { - name string - number int - delete int - fetch int - expect func(t *testing.T, peer *supervisor.Peer, success bool) - }{ - { - name: "delete exist peer", - number: 1, - delete: 0, - fetch: 0, - expect: func(t *testing.T, peer *supervisor.Peer, success bool) { - assert := assert.New(t) - assert.Nil(peer) - assert.False(success) - }, - }, - { - name: "delete not exist peer", - number: 1, - delete: 100, - fetch: 0, - expect: func(t *testing.T, peer *supervisor.Peer, success bool) { - assert := assert.New(t) - assert.NotNil(peer) - assert.True(success) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - mockHostManager := mocks.NewMockHostManager(ctl) - mockGC := mocks.NewMockGC(ctl) - mockGC.EXPECT().Add(gomock.Any()).Return(nil).AnyTimes() - - cfg := config.New() - peerManager, _ := supervisor.NewPeerManager(cfg.Scheduler.GC, mockGC, mockHostManager) - task := mockATask("123") - for i := 0; i < tc.number; i++ { - index := strconv.Itoa(i) - peer := mockAPeer(index, task) - peerManager.Add(peer) - } - peerManager.Delete(strconv.Itoa(tc.delete)) - peer, success := peerManager.Get(strconv.Itoa(tc.fetch)) - - tc.expect(t, peer, success) - }) - } -} - -func mockAPeer(ID string, task *supervisor.Task) *supervisor.Peer { - host := supervisor.NewClientHost(ID, "127.0.0.1", "Client", 8080, 8081, "", "", "") - return supervisor.NewPeer(ID, task, host) -} diff --git a/scheduler/supervisor/task.go b/scheduler/supervisor/task.go deleted file mode 100644 index 9ce5cc4b744..00000000000 --- a/scheduler/supervisor/task.go +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -//go:generate mockgen -destination ./mocks/task_mock.go -package mocks d7y.io/dragonfly/v2/scheduler/supervisor TaskManager - -package supervisor - -import ( - "sync" - "time" - - "go.uber.org/atomic" - - logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/container/list" - gc "d7y.io/dragonfly/v2/pkg/gc" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/scheduler/config" -) - -const ( - TaskGCID = "task" - TinyFileSize = 128 -) - -type TaskManager interface { - // Add task - Add(*Task) - // Get task - Get(string) (*Task, bool) - // Delete task - Delete(string) - // Get task if it exists, otherwise add task - GetOrAdd(*Task) (*Task, bool) -} - -type taskManager struct { - // peerManager is peer manager - peerManager PeerManager - // taskTTL is task TTL - taskTTL time.Duration - // taskTTI is task TTI - taskTTI time.Duration - // tasks is task map - tasks *sync.Map -} - -func NewTaskManager(cfg *config.GCConfig, gcManager gc.GC, peerManager PeerManager) (TaskManager, error) { - m := &taskManager{ - peerManager: peerManager, - taskTTL: cfg.TaskTTL, - taskTTI: cfg.TaskTTI, - tasks: &sync.Map{}, - } - - if err := gcManager.Add(gc.Task{ - ID: TaskGCID, - Interval: cfg.PeerGCInterval, - Timeout: cfg.PeerGCInterval, - Runner: m, - }); err != nil { - return nil, err - } - - return m, nil -} - -func (m *taskManager) Delete(id string) { - m.tasks.Delete(id) -} - -func (m *taskManager) Add(task *Task) { - m.tasks.Store(task.ID, task) -} - -func (m *taskManager) Get(id string) (*Task, bool) { - task, ok := m.tasks.Load(id) - if !ok { - return nil, false - } - - return task.(*Task), ok -} - -func (m *taskManager) GetOrAdd(t *Task) (*Task, bool) { - task, ok := m.tasks.LoadOrStore(t.ID, t) - return task.(*Task), ok -} - -func (m *taskManager) RunGC() error { - m.tasks.Range(func(key, value interface{}) bool { - taskID := key.(string) - task := value.(*Task) - elapsed := time.Since(task.lastAccessAt.Load()) - if elapsed > m.taskTTI && task.IsSuccess() { - task.Log().Info("elapsed larger than task TTI, task status becomes zombie") - task.SetStatus(TaskStatusZombie) - } - - if task.GetPeers().Len() == 0 { - task.Log().Info("peers is empty, task status becomes waiting") - task.SetStatus(TaskStatusWaiting) - } - - if elapsed > m.taskTTL { - // TODO lock - peers := m.peerManager.GetPeersByTask(taskID) - for _, peer := range peers { - task.Log().Infof("delete peer %s because task TTL has expired", peer.ID) - m.peerManager.Delete(peer.ID) - } - task.Log().Info("delete task because elapsed larger than task TTL") - m.Delete(taskID) - } - return true - }) - return nil -} - -type TaskStatus uint8 - -func (status TaskStatus) String() string { - switch status { - case TaskStatusWaiting: - return "Waiting" - case TaskStatusRunning: - return "Running" - case TaskStatusSeeding: - return "Seeding" - case TaskStatusSuccess: - return "Success" - case TaskStatusZombie: - return "Zombie" - case TaskStatusFail: - return "Fail" - default: - return "unknown" - } -} - -const ( - TaskStatusWaiting TaskStatus = iota - TaskStatusRunning - TaskStatusSeeding - TaskStatusSuccess - TaskStatusZombie - TaskStatusFail -) -
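RunGC above is a two-threshold sweep: TTI (idle time) demotes a finished task to zombie, an empty peer list resets it to waiting, and TTL evicts the task along with its peers. A compact sketch of just the eviction decision (durations, statuses, and the single-action return are simplifications of the original, which applies the checks independently):

package main

import (
    "fmt"
    "time"
)

type action int

const (
    keep action = iota
    markZombie
    evict
)

// gcDecision condenses the deleted taskManager.RunGC: TTI only demotes
// successful tasks; TTL removes the task regardless of status.
func gcDecision(elapsed, tti, ttl time.Duration, isSuccess bool) action {
    if elapsed > ttl {
        return evict
    }
    if elapsed > tti && isSuccess {
        return markZombie
    }
    return keep
}

func main() {
    tti, ttl := 10*time.Minute, 24*time.Hour
    fmt.Println(gcDecision(5*time.Minute, tti, ttl, true))  // 0: keep
    fmt.Println(gcDecision(30*time.Minute, tti, ttl, true)) // 1: markZombie
    fmt.Println(gcDecision(25*time.Hour, tti, ttl, false))  // 2: evict
}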
-type Task struct { - // ID is task id - ID string - // URL is task download url - URL string - // URLMeta is task download url meta - URLMeta *base.UrlMeta - // DirectPiece is tiny piece data - DirectPiece []byte - // ContentLength is task total content length - ContentLength atomic.Int64 - // CreateAt is task create time - CreateAt *atomic.Time - // LastTriggerAt is task last trigger time - LastTriggerAt *atomic.Time - // lastAccessAt is task last access time - lastAccessAt *atomic.Time - // status is task status and type is TaskStatus - status atomic.Value - // peers is peer sorted unique list - peers list.SortedUniqueList - // BackToSourceWeight is back-to-source peer weight - BackToSourceWeight atomic.Int32 - // backToSourcePeers is back-to-source peers list - backToSourcePeers []string - // pieces is piece map - pieces *sync.Map - // TotalPieceCount is total piece count - TotalPieceCount atomic.Int32 - // task logger - logger *logger.SugaredLoggerOnWith - // task lock - lock sync.RWMutex -} - -func NewTask(id, url string, meta *base.UrlMeta) *Task { - now := time.Now() - task := &Task{ - ID: id, - URL: url, - URLMeta: meta, - CreateAt: atomic.NewTime(now), - LastTriggerAt: atomic.NewTime(now), - lastAccessAt: atomic.NewTime(now), - backToSourcePeers: []string{}, - pieces: &sync.Map{}, - peers: list.NewSortedUniqueList(), - logger: logger.WithTaskID(id), - } - - task.status.Store(TaskStatusWaiting) - return task -} - -func (task *Task) SetStatus(status TaskStatus) { - task.status.Store(status) -} - -func (task *Task) GetStatus() TaskStatus { - return task.status.Load().(TaskStatus) -} - -// IsSuccess determines whether task status is success. -func (task *Task) IsSuccess() bool { - return task.GetStatus() == TaskStatusSuccess -} - -// CanSchedule determines whether task can be scheduled. -// Only tasks whose status is seeding or success can be scheduled. -func (task *Task) CanSchedule() bool { - return task.GetStatus() == TaskStatusSeeding || task.GetStatus() == TaskStatusSuccess -} - -// IsWaiting determines whether task is waiting -func (task *Task) IsWaiting() bool { - return task.GetStatus() == TaskStatusWaiting -} - -// IsHealth determines whether task is healthy -func (task *Task) IsHealth() bool { - return task.GetStatus() == TaskStatusRunning || task.GetStatus() == TaskStatusSeeding || task.GetStatus() == TaskStatusSuccess -} - -// IsFail determines whether task has failed -func (task *Task) IsFail() bool { - return task.GetStatus() == TaskStatusFail -} - -func (task *Task) Touch() { - task.lastAccessAt.Store(time.Now()) -} - -func (task *Task) UpdateSuccess(pieceCount int32, contentLength int64) { - task.lock.Lock() - defer task.lock.Unlock() - - if task.GetStatus() != TaskStatusSuccess { - task.SetStatus(TaskStatusSuccess) - task.TotalPieceCount.Store(pieceCount) - task.ContentLength.Store(contentLength) - } -} - -func (task *Task) AddPeer(peer *Peer) { - task.peers.Insert(peer) -} - -func (task *Task) UpdatePeer(peer *Peer) { - task.peers.Insert(peer) -} - -func (task *Task) DeletePeer(peer *Peer) { - task.peers.Remove(peer) -} - -func (task *Task) GetPeers() list.SortedUniqueList { - return task.peers -} - -func (task *Task) GetPiece(n int32) (*base.PieceInfo, bool) { - piece, ok := task.pieces.Load(n) - if !ok { - return nil, false - } - - return piece.(*base.PieceInfo), ok -} - -func (task *Task) GetOrAddPiece(p *base.PieceInfo) (*base.PieceInfo, bool) { - piece, ok := task.pieces.LoadOrStore(p.PieceNum, p) - return piece.(*base.PieceInfo), ok -} - -func (task *Task) GetSizeScope() base.SizeScope { - if task.ContentLength.Load() <= TinyFileSize { - return base.SizeScope_TINY - } - - if task.TotalPieceCount.Load() == 1 { - return base.SizeScope_SMALL - } - - return base.SizeScope_NORMAL -} - -func (task *Task) CanBackToSource() bool { - return task.BackToSourceWeight.Load() > 0 -} - -func (task *Task) ContainsBackToSourcePeer(peerID string) bool { - task.lock.RLock() - defer 
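GetSizeScope above buckets a task for scheduling: at most 128 bytes (TinyFileSize) means TINY, where the content can ride inline in DirectPiece; exactly one piece means SMALL; everything else is NORMAL. The same thresholds in isolation:

package main

import "fmt"

const tinyFileSize = 128 // matches the TinyFileSize constant above

// sizeScope mirrors the deleted Task.GetSizeScope decision order:
// byte threshold first, then piece count.
func sizeScope(contentLength int64, totalPieces int32) string {
    if contentLength <= tinyFileSize {
        return "TINY"
    }
    if totalPieces == 1 {
        return "SMALL"
    }
    return "NORMAL"
}

func main() {
    fmt.Println(sizeScope(100, 1))     // TINY
    fmt.Println(sizeScope(4<<20, 1))   // SMALL
    fmt.Println(sizeScope(1<<30, 256)) // NORMAL
}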
task.lock.RUnlock() - - for _, backToSourcePeer := range task.backToSourcePeers { - if backToSourcePeer == peerID { - return true - } - } - return false -} - -func (task *Task) AddBackToSourcePeer(peerID string) { - if ok := task.ContainsBackToSourcePeer(peerID); ok { - return - } - - if task.BackToSourceWeight.Load() <= 0 { - return - } - - task.lock.Lock() - defer task.lock.Unlock() - - task.backToSourcePeers = append(task.backToSourcePeers, peerID) - task.BackToSourceWeight.Dec() -} - -func (task *Task) GetBackToSourcePeers() []string { - task.lock.RLock() - defer task.lock.RUnlock() - - return task.backToSourcePeers -} - -func (task *Task) Pick(limit int, pickFn func(peer *Peer) bool) []*Peer { - var peers []*Peer - - task.GetPeers().Range(func(item list.Item) bool { - if len(peers) >= limit { - return false - } - peer, ok := item.(*Peer) - if !ok { - return true - } - - if pickFn(peer) { - peers = append(peers, peer) - } - return true - }) - - return peers -} - -func (task *Task) PickReverse(limit int, pickFn func(peer *Peer) bool) []*Peer { - var peers []*Peer - - task.GetPeers().ReverseRange(func(item list.Item) bool { - if len(peers) >= limit { - return false - } - peer, ok := item.(*Peer) - if !ok { - return true - } - - if pickFn(peer) { - peers = append(peers, peer) - } - return true - }) - - return peers -} - -func (task *Task) Log() *logger.SugaredLoggerOnWith { - return task.logger -} diff --git a/scheduler/supervisor/task_test.go b/scheduler/supervisor/task_test.go deleted file mode 100644 index 297cb7d0d13..00000000000 --- a/scheduler/supervisor/task_test.go +++ /dev/null @@ -1,569 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
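Pick and PickReverse above are bounded filters over the task's sorted peer list: scan in order (or reverse), keep peers the predicate accepts, and stop once limit matches are collected. The same shape over a plain slice, with the sorted-list Range API replaced by a loop:

package main

import "fmt"

// pick mirrors the deleted Task.Pick: scan in order, keep entries the
// predicate accepts, and stop as soon as limit matches are collected.
func pick(ids []string, limit int, fn func(string) bool) []string {
    var out []string
    for _, id := range ids {
        if len(out) >= limit {
            break
        }
        if fn(id) {
            out = append(out, id)
        }
    }
    return out
}

func main() {
    ids := []string{"0", "1", "2", "3", "4", "5"}
    odd := func(id string) bool { return (id[0]-'0')%2 == 1 }
    fmt.Println(pick(ids, 2, odd)) // [1 3]
}

Because the peer list is kept sorted by SortedValue, the first limit matches are also the highest-priority ones, which is why the scheduler never needs a full scan.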
- */ - -package supervisor_test - -import ( - "strconv" - "testing" - - "github.com/golang/mock/gomock" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/scheduler/config" - "d7y.io/dragonfly/v2/scheduler/supervisor" - "d7y.io/dragonfly/v2/scheduler/supervisor/mocks" -) - -func TestTask_New(t *testing.T) { - tests := []struct { - name string - task *supervisor.Task - expect func(t *testing.T, task *supervisor.Task) - }{ - { - name: "create by normal config", - task: supervisor.NewTask("main", "127.0.0.1", &base.UrlMeta{}), - expect: func(t *testing.T, task *supervisor.Task) { - assert := assert.New(t) - assert.Equal("main", task.ID) - }, - }, - { - name: "create by special symbol", - task: supervisor.NewTask("\x07\b%$!!\x7F✌ (>‿<)✌", "d7y.io/dragonfly", &base.UrlMeta{Tag: "d7y-test"}), - expect: func(t *testing.T, task *supervisor.Task) { - assert := assert.New(t) - assert.Equal("\x07\b%$!!\x7F✌ (>‿<)✌", task.ID) - }, - }, - { - name: "create by http url", - task: supervisor.NewTask("task", "http://370.moe/", &base.UrlMeta{}), - expect: func(t *testing.T, task *supervisor.Task) { - assert := assert.New(t) - assert.Equal("task", task.ID) - }, - }, - { - name: "create by normal config", - task: supervisor.NewTask("task", "android://370.moe", &base.UrlMeta{}), - expect: func(t *testing.T, task *supervisor.Task) { - assert := assert.New(t) - assert.Equal("task", task.ID) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - tc.expect(t, tc.task) - }) - } -} - -func TestTask_Status(t *testing.T) { - tests := []struct { - name string - status supervisor.TaskStatus - statusName string - judgeArray []bool - expect func(t *testing.T, task *supervisor.Task, status supervisor.TaskStatus, statusName string, judgeArray []bool) - }{ - { - name: "status Waiting", - status: supervisor.TaskStatusWaiting, - statusName: "Waiting", - judgeArray: []bool{false, false, true, false, false}, - expect: func(t *testing.T, task *supervisor.Task, status supervisor.TaskStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.TaskStatus.String(status), statusName) - assert.Equal(task.GetStatus(), status) - - statutusJudgeArray := []bool{ - task.IsSuccess(), task.CanSchedule(), - task.IsWaiting(), task.IsHealth(), task.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "status Running", - status: supervisor.TaskStatusRunning, - statusName: "Running", - judgeArray: []bool{false, false, false, true, false}, - expect: func(t *testing.T, task *supervisor.Task, status supervisor.TaskStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.TaskStatus.String(status), statusName) - assert.Equal(task.GetStatus(), status) - - statutusJudgeArray := []bool{ - task.IsSuccess(), task.CanSchedule(), - task.IsWaiting(), task.IsHealth(), task.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "status seeding", - status: supervisor.TaskStatusSeeding, - statusName: "Seeding", - judgeArray: []bool{false, true, false, true, false}, - expect: func(t *testing.T, task *supervisor.Task, status supervisor.TaskStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.TaskStatus.String(status), statusName) - assert.Equal(task.GetStatus(), status) - - statutusJudgeArray := []bool{ - task.IsSuccess(), task.CanSchedule(), - task.IsWaiting(), task.IsHealth(), 
task.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "status success", - status: supervisor.TaskStatusSuccess, - statusName: "Success", - judgeArray: []bool{true, true, false, true, false}, - expect: func(t *testing.T, task *supervisor.Task, status supervisor.TaskStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.TaskStatus.String(status), statusName) - assert.Equal(task.GetStatus(), status) - - statutusJudgeArray := []bool{ - task.IsSuccess(), task.CanSchedule(), - task.IsWaiting(), task.IsHealth(), task.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "status zombie", - status: supervisor.TaskStatusZombie, - statusName: "Zombie", - judgeArray: []bool{false, false, false, false, false}, - expect: func(t *testing.T, task *supervisor.Task, status supervisor.TaskStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.TaskStatus.String(status), statusName) - assert.Equal(task.GetStatus(), status) - - statutusJudgeArray := []bool{ - task.IsSuccess(), task.CanSchedule(), - task.IsWaiting(), task.IsHealth(), task.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "status Fail", - status: supervisor.TaskStatusFail, - statusName: "Fail", - judgeArray: []bool{false, false, false, false, true}, - expect: func(t *testing.T, task *supervisor.Task, status supervisor.TaskStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.TaskStatus.String(status), statusName) - assert.Equal(task.GetStatus(), status) - - statutusJudgeArray := []bool{ - task.IsSuccess(), task.CanSchedule(), - task.IsWaiting(), task.IsHealth(), task.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - { - name: "unknown", - status: 100, - statusName: "unknown", - judgeArray: []bool{false, false, false, false, false}, - expect: func(t *testing.T, task *supervisor.Task, status supervisor.TaskStatus, statusName string, judgeArray []bool) { - assert := assert.New(t) - assert.Equal(supervisor.TaskStatus.String(status), statusName) - assert.Equal(task.GetStatus(), status) - - statutusJudgeArray := []bool{ - task.IsSuccess(), task.CanSchedule(), - task.IsWaiting(), task.IsHealth(), task.IsFail(), - } - assert.Equal(statutusJudgeArray, judgeArray) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - task := mockATask("task") - task.SetStatus(tc.status) - tc.expect(t, task, tc.status, tc.statusName, tc.judgeArray) - }) - } -} - -func TestTask_BackToSourcePeer(t *testing.T) { - tests := []struct { - name string - initialWeight int32 - add []string - expect func(t *testing.T, task *supervisor.Task, add []string) - }{ - { - name: "able to backsource", - initialWeight: 4, - add: []string{"0", "1", "2"}, - expect: func(t *testing.T, task *supervisor.Task, add []string) { - assert := assert.New(t) - assert.EqualValues(task.BackToSourceWeight.Load(), 1) - assert.True(task.CanBackToSource()) - assert.ElementsMatch(task.GetBackToSourcePeers(), add) - for _, ID := range add { - contain := task.ContainsBackToSourcePeer(ID) - assert.True(contain) - } - - }, - }, - { - name: "unable to backsource", - initialWeight: -1, - add: []string{}, - expect: func(t *testing.T, task *supervisor.Task, add []string) { - assert := assert.New(t) - assert.EqualValues(task.BackToSourceWeight.Load(), -1) - assert.False(task.CanBackToSource()) - - }, - }, - } - for _, tc := range tests { - 
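AddBackToSourcePeer in the deleted task.go treats BackToSourceWeight as a countdown of how many peers may bypass the P2P network: each admission decrements it, duplicates are ignored, and a non-positive weight rejects new peers. A worked example matching the first case above (weight 4, three distinct peers admitted, weight 1 left); the admission type is a stand-in for the task's lock-guarded slice:

package main

import "fmt"

// admission condenses the deleted Task.AddBackToSourcePeer bookkeeping:
// no duplicates, and only while weight remains positive.
type admission struct {
    admitted map[string]bool
    weight   int32
}

func (a *admission) add(peerID string) {
    if a.admitted[peerID] || a.weight <= 0 {
        return
    }
    a.admitted[peerID] = true
    a.weight--
}

func main() {
    a := &admission{admitted: map[string]bool{}, weight: 4}
    for _, id := range []string{"0", "1", "2", "1"} { // "1" repeats
        a.add(id)
    }
    fmt.Println(len(a.admitted), a.weight) // 3 1
}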
t.Run(tc.name, func(t *testing.T) { - task := mockATask("task") - task.BackToSourceWeight.Store(tc.initialWeight) - for _, ID := range tc.add { - task.AddBackToSourcePeer(ID) - } - tc.expect(t, task, tc.add) - }) - } -} - -func TestTask_Pick(t *testing.T) { - tests := []struct { - name string - number int - pick func(peer *supervisor.Peer) bool - reverse bool - limit int - answer []string - }{ - { - name: "pick three odd", - number: 10, - pick: func(peer *supervisor.Peer) bool { - id, _ := strconv.Atoi(peer.ID) - return id%2 != 0 - }, - reverse: false, - limit: 3, - answer: []string{"1", "3", "5"}, - }, - { - name: "pick three odd in reverse", - number: 10, - pick: func(peer *supervisor.Peer) bool { - id, _ := strconv.Atoi(peer.ID) - return id%2 != 0 - }, - reverse: true, - limit: 3, - answer: []string{"5", "7", "9"}, - }, - { - name: "pick all odd", - number: 10, - pick: func(peer *supervisor.Peer) bool { - id, _ := strconv.Atoi(peer.ID) - return id%2 != 0 - }, - reverse: false, - limit: 100, - answer: []string{"1", "3", "5", "7", "9"}, - }, - { - name: "pick all", - number: 10, - pick: func(peer *supervisor.Peer) bool { - return true - }, - reverse: false, - limit: 100, - answer: []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}, - }, - { - name: "pick none", - number: 10, - pick: func(peer *supervisor.Peer) bool { - return false - }, - reverse: false, - limit: 100, - answer: []string{}, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - task := mockATask("task") - for i := 0; i < tc.number; i++ { - index := strconv.Itoa(i) - peer := mockAPeer(index, task) - peer.UpdateProgress((int32)(i), i) - task.AddPeer(peer) - } - var peers []*supervisor.Peer - if tc.reverse { - peers = task.PickReverse(tc.limit, tc.pick) - } else { - peers = task.Pick(tc.limit, tc.pick) - } - var peerIDs []string - for _, peer := range peers { - peerIDs = append(peerIDs, peer.ID) - } - assert := assert.New(t) - assert.ElementsMatch(peerIDs, tc.answer) - }) - } -} - -func TestTaskManager_New(t *testing.T) { - tests := []struct { - name string - config *config.GCConfig - mock func(m *mocks.MockGCMockRecorder) - expect func(t *testing.T, taskManager supervisor.TaskManager, err error) - }{ - { - name: "simple create", - config: config.New().Scheduler.GC, - mock: func(m *mocks.MockGCMockRecorder) { - m.Add(gomock.Any()).Return(nil).AnyTimes() - }, - expect: func(t *testing.T, taskManager supervisor.TaskManager, err error) { - assert := assert.New(t) - assert.NotNil(taskManager) - assert.Nil(err) - }, - }, - { - name: "gc failed", - config: config.New().Scheduler.GC, - mock: func(m *mocks.MockGCMockRecorder) { - m.Add(gomock.Any()).Return(errors.New("mockError")).AnyTimes() - }, - expect: func(t *testing.T, taskManager supervisor.TaskManager, err error) { - assert := assert.New(t) - assert.Nil(taskManager) - assert.Error(err) - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - mockAPeerManager := mocks.NewMockPeerManager(ctl) - mockGC := mocks.NewMockGC(ctl) - tc.mock(mockGC.EXPECT()) - - taskManager, err := supervisor.NewTaskManager(tc.config, mockGC, mockAPeerManager) - tc.expect(t, taskManager, err) - }) - } -} - -func TestTaskManager_Get(t *testing.T) { - tests := []struct { - name string - number int - fetch int - expect func(t *testing.T, task *supervisor.Task, success bool) - }{ - { - name: "fetch first task", - number: 3, - fetch: 0, - expect: func(t *testing.T, task *supervisor.Task, success bool) { - 
assert := assert.New(t) - assert.Equal("0", task.ID) - assert.True(success) - }, - }, - { - name: "fetch last task", - number: 3, - fetch: 2, - expect: func(t *testing.T, task *supervisor.Task, success bool) { - assert := assert.New(t) - assert.Equal("2", task.ID) - assert.True(success) - }, - }, - { - name: "fetch not exist task", - number: 3, - fetch: -1, - expect: func(t *testing.T, task *supervisor.Task, success bool) { - assert := assert.New(t) - assert.Equal((*supervisor.Task)(nil), task) - assert.False(success) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockGC := mocks.NewMockGC(ctl) - mockGC.EXPECT().Add(gomock.Any()).Return(nil).AnyTimes() - - cfg := config.New() - taskManager, _ := supervisor.NewTaskManager(cfg.Scheduler.GC, mockGC, mockPeerManager) - for i := 0; i < tc.number; i++ { - index := strconv.Itoa(i) - task := mockATask(index) - taskManager.Add(task) - } - task, success := taskManager.Get(strconv.Itoa(tc.fetch)) - tc.expect(t, task, success) - }) - } -} - -func TestTaskManager_GetOrAdd(t *testing.T) { - tests := []struct { - name string - create int - add int - expect func(t *testing.T, task *supervisor.Task, success bool) - }{ - { - name: "add not exist task", - create: 3, - add: 0, - expect: func(t *testing.T, task *supervisor.Task, success bool) { - assert := assert.New(t) - assert.Equal("2", task.ID) - assert.False(success) - }, - }, - { - name: "get exist task", - create: 3, - add: 3, - expect: func(t *testing.T, task *supervisor.Task, success bool) { - assert := assert.New(t) - assert.Equal("2", task.ID) - assert.True(success) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - mockPeerManager := mocks.NewMockPeerManager(ctl) - mockGC := mocks.NewMockGC(ctl) - mockGC.EXPECT().Add(gomock.Any()).Return(nil).AnyTimes() - - cfg := config.New() - taskManager, _ := supervisor.NewTaskManager(cfg.Scheduler.GC, mockGC, mockPeerManager) - var tasks []*supervisor.Task - for i := 0; i < tc.create; i++ { - index := strconv.Itoa(i) - task := mockATask(index) - tasks = append(tasks, task) - } - for i := 0; i < tc.add; i++ { - taskManager.Add(tasks[i]) - } - task, success := taskManager.GetOrAdd(tasks[len(tasks)-1]) - tc.expect(t, task, success) - }) - } -} -
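The GetOrAdd cases above come down to sync.Map.LoadOrStore semantics: the boolean is true when an existing task was loaded and returned, false when the caller's argument was stored. A minimal demonstration:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var tasks sync.Map

    // First call stores: loaded is false, the caller's value is kept.
    v, loaded := tasks.LoadOrStore("task-2", "fresh")
    fmt.Println(v, loaded) // fresh false

    // Second call loads: loaded is true, the stored value wins.
    v, loaded = tasks.LoadOrStore("task-2", "ignored")
    fmt.Println(v, loaded) // fresh true
}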
-func TestTaskManager_Delete(t *testing.T) { - tests := []struct { - name string - number int - delete int - fetch int - expect func(t *testing.T, task *supervisor.Task, success bool) - }{ - { - name: "delete exist task", - number: 1, - delete: 0, - fetch: 0, - expect: func(t *testing.T, task *supervisor.Task, success bool) { - assert := assert.New(t) - assert.Nil(task) - assert.False(success) - }, - }, - { - name: "delete not exist task", - number: 1, - delete: 100, - fetch: 0, - expect: func(t *testing.T, task *supervisor.Task, success bool) { - assert := assert.New(t) - assert.NotNil(task) - assert.True(success) - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - mockAPeerManager := mocks.NewMockPeerManager(ctl) - mockGC := mocks.NewMockGC(ctl) - mockGC.EXPECT().Add(gomock.Any()).Return(nil).AnyTimes() - - cfg := config.New() - taskManager, _ := supervisor.NewTaskManager(cfg.Scheduler.GC, mockGC, mockAPeerManager) - for i := 0; i < tc.number; i++ { - index := strconv.Itoa(i) - task := mockATask(index) - taskManager.Add(task) - } - taskManager.Delete(strconv.Itoa(tc.delete)) - task, success := taskManager.Get(strconv.Itoa(tc.fetch)) - - tc.expect(t, task, success) - }) - } -} - -func mockATask(ID string) *supervisor.Task { - urlMeta := &base.UrlMeta{ - Tag: "d7y-test", - } - return supervisor.NewTask(ID, "d7y.io/dragonfly", urlMeta) -}