-
Notifications
You must be signed in to change notification settings - Fork 1
/
server.go
331 lines (281 loc) · 9.01 KB
/
server.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
package totem
import (
"context"
"fmt"
"io"
"log"
"log/slog"
"reflect"
"sync"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/resource"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Server multiplexes RPCs over a single bidirectional stream. It implements
// grpc.ServiceRegistrar (see RegisterService) and takes control of the
// stream once Serve is called.
type Server struct {
	ServerOptions
	// stream is the underlying bidirectional stream the server runs on.
	stream Stream
	// lock guards controller service registration and splicedControllers.
	// NOTE(review): Serve acquires this as a read lock and releases it from a
	// background goroutine — see Serve for details.
	lock sync.RWMutex
	// controller drives message handling for the main stream.
	controller *StreamController
	// splicedControllers are controllers for streams attached via Splice;
	// discovered services are replicated to them when Serve runs.
	splicedControllers []*StreamController
	tracer             trace.Tracer
	// setupCtx and setupSpan cover the initialization phase (construction
	// through service discovery); the span ends at the end of Serve.
	setupCtx  context.Context
	setupSpan trace.Span
}
// ServerOptions holds the configurable parameters for a Server. Values are
// set via ServerOption functions passed to NewServer or Splice.
type ServerOptions struct {
	// logger receives structured log output; defaults to a no-op logger.
	logger *slog.Logger
	// name identifies this server in logs and trace attributes.
	name string
	// discoveryHopLimit bounds service discovery; NewServer defaults it to -1
	// (presumably meaning "no limit" — confirm against discoverServices).
	discoveryHopLimit int32
	// interceptors are applied to locally-invoked and outgoing RPCs; see
	// InterceptorConfig for the exact semantics.
	interceptors InterceptorConfig
	// metrics, when non-nil, exports RPC metrics via OpenTelemetry.
	metrics *MetricsExporter
	// tracerOptions configure the OpenTelemetry resource used for tracing.
	tracerOptions []resource.Option
}
// InterceptorConfig configures optional unary interceptors for RPCs handled
// by a totem server. Neither interceptor applies to RPCs that are passed
// through to a spliced stream.
type InterceptorConfig struct {
	// This interceptor functions similarly to a standard unary server interceptor,
	// and will be called for RPCs that are about to be invoked locally. When
	// an RPC is passed through to a spliced stream, this interceptor will not
	// be called.
	Incoming grpc.UnaryServerInterceptor
	// This interceptor functions similarly to a standard unary client interceptor,
	// with the one caveat that the [grpc.ClientConn] passed to the interceptor
	// will always be nil, and must not be used. The interceptor should still
	// forward the nil argument to the invoker for potential forward compatibility.
	// This interceptor is not called for RPCs being passed through to a spliced
	// stream.
	Outgoing grpc.UnaryClientInterceptor
}
// WithInterceptors installs the given incoming/outgoing unary interceptors
// on the server. See InterceptorConfig for when each interceptor fires.
func WithInterceptors(config InterceptorConfig) ServerOption {
	return func(so *ServerOptions) { so.interceptors = config }
}
// WithMetrics enables metrics export through the given meter provider.
// staticAttrs are attached to every exported measurement.
func WithMetrics(provider *metric.MeterProvider, staticAttrs ...attribute.KeyValue) ServerOption {
	return func(so *ServerOptions) {
		so.metrics = NewMetricsExporter(provider, staticAttrs...)
	}
}
// WithTracerOptions sets the OpenTelemetry resource options used to
// construct the server's tracer provider.
func WithTracerOptions(opts ...resource.Option) ServerOption {
	return func(so *ServerOptions) { so.tracerOptions = opts }
}
// WithDiscoveryHopLimit bounds the number of hops traversed during service
// discovery.
func WithDiscoveryHopLimit(limit int32) ServerOption {
	return func(so *ServerOptions) { so.discoveryHopLimit = limit }
}
// ServerOption mutates a ServerOptions during server construction.
type ServerOption func(*ServerOptions)

// apply runs each option against o, in order.
func (o *ServerOptions) apply(opts ...ServerOption) {
	for _, opt := range opts {
		opt(o)
	}
}
// WithName sets a human-readable name for the server, used in logs and
// trace attributes.
func WithName(name string) ServerOption {
	return func(so *ServerOptions) { so.name = name }
}
// WithLogger sets the structured logger used by the server and its stream
// controllers.
func WithLogger(logger *slog.Logger) ServerOption {
	return func(so *ServerOptions) { so.logger = logger }
}
var defaultNoopLogger = slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{Level: slog.LevelError}))
// NewServer creates a totem server wrapping the given stream. The server
// does not take control of the stream until Serve is called.
//
// The returned error is currently always nil; the signature keeps it for
// forward compatibility.
func NewServer(stream Stream, opts ...ServerOption) (*Server, error) {
	options := ServerOptions{
		// -1 presumably means "unlimited hops" — confirm in discoverServices.
		discoveryHopLimit: -1,
		logger:            defaultNoopLogger,
	}
	options.apply(opts...)
	ctrl := NewStreamController(stream, StreamControllerOptions{
		Logger:           options.logger,
		Name:             options.name,
		Metrics:          options.metrics,
		WorkerPoolParams: DefaultWorkerPoolParams(),
		TracerOptions:    options.tracerOptions,
	})
	tracer := TracerProvider(options.tracerOptions...).Tracer(TracerName)
	// Start a new root span covering the setup phase; it is ended in Serve
	// once service discovery completes.
	setupCtx, span := tracer.Start(stream.Context(), "Stream Initialization",
		trace.WithAttributes(attribute.String("name", options.name)),
		trace.WithNewRoot(),
	)
	srv := &Server{
		ServerOptions: options,
		stream:        stream,
		controller:    ctrl,
		setupCtx:      setupCtx,
		setupSpan:     span,
		tracer:        tracer,
	}
	return srv, nil
}
// RegisterService implements grpc.ServiceRegistrar. It validates that impl
// satisfies the service's handler interface and registers it on the main
// controller; like grpc-go, validation failures are fatal.
func (r *Server) RegisterService(desc *grpc.ServiceDesc, impl interface{}) {
	if impl == nil {
		log.Fatalf("grpc: Server.RegisterService found nil service implementation")
	}
	handlerType := reflect.TypeOf(desc.HandlerType).Elem()
	implType := reflect.TypeOf(impl)
	if !implType.Implements(handlerType) {
		log.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", implType, handlerType)
	}
	r.register(desc, impl)
}
// Splice configures this server to forward any incoming RPCs for the given
// service(s) to a different totem stream.
// The totem server will handle closing the spliced stream.
//
// Options default to this server's own settings and may be overridden per
// splice via opts. Returns an error if service discovery on the spliced
// stream fails.
func (r *Server) Splice(stream Stream, opts ...ServerOption) error {
	// Seed with the parent server's settings, then apply caller overrides.
	options := ServerOptions{
		name:              r.name,
		discoveryHopLimit: r.discoveryHopLimit,
		metrics:           r.metrics,
		tracerOptions:     r.tracerOptions,
	}
	options.apply(opts...)

	r.lock.Lock()
	defer r.lock.Unlock()

	ctrl := NewStreamController(stream, StreamControllerOptions{
		Logger:            r.logger,
		Name:              options.name,
		Metrics:           options.metrics,
		WorkerPoolParams:  DefaultWorkerPoolParams(),
		TracerOptions:     options.tracerOptions,
		BaseTopologyFlags: TopologySpliced,
	})

	// Replicate local services onto the spliced controller, but only those
	// whose visibility options explicitly allow spliced clients to see them.
	r.controller.services.Range(func(key string, value *ServiceHandlerList) bool {
		value.Range(func(sh *ServiceHandler) bool {
			if sh.TopologyFlags&TopologyLocal == 0 {
				return true
			}
			if proto.HasExtension(sh.Descriptor.Options, E_Visibility) {
				vis := proto.GetExtension(sh.Descriptor.Options, E_Visibility).(*Visibility)
				if vis.SplicedClients {
					r.logger.Debug("enabling local service on spliced controller due to visibility option", "service", sh.Descriptor.GetName())
					ctrl.RegisterServiceHandler(sh)
				}
			}
			return true
		})
		return true
	})

	// Run the spliced controller for the lifetime of its stream; a Canceled
	// status is a normal shutdown, anything else is logged as a warning.
	go func() {
		if err := ctrl.Run(stream.Context()); err != nil {
			if status.Code(err) == codes.Canceled {
				r.logger.Debug("stream closed")
			} else {
				r.logger.Warn("stream exited with error", "error", err)
			}
		}
	}()

	// Fix: use options.discoveryHopLimit so a WithDiscoveryHopLimit option
	// passed to Splice takes effect; previously the parent server's limit was
	// always used, silently ignoring the override.
	info, err := discoverServices(r.setupCtx, ctrl, discoverOptions{
		MaxHops: options.discoveryHopLimit,
	})
	if err != nil {
		return fmt.Errorf("service discovery failed: %w", err)
	}
	r.logger.Debug("splicing stream", "methods", info.MethodNames())

	// Expose every discovered service on the main controller, forwarding
	// invocations through the spliced controller.
	handlerInvoker := ctrl.NewInvoker()
	for _, desc := range info.Services {
		r.controller.RegisterServiceHandler(NewDefaultServiceHandler(stream.Context(), desc, handlerInvoker))
	}

	// these are tracked so that when the server starts and runs discovery for
	// the main controller, discovered service handlers can be replicated to
	// spliced clients
	r.splicedControllers = append(r.splicedControllers, ctrl)
	return nil
}
// register loads the reflection descriptor for serviceDesc and installs a
// local service handler on the main controller. Exits the process via
// log.Fatalf if the descriptor cannot be loaded.
func (r *Server) register(serviceDesc *grpc.ServiceDesc, impl interface{}) {
	r.lock.Lock()
	defer r.lock.Unlock()

	name := serviceDesc.ServiceName
	r.logger.Debug("registering service", "service", name)
	if TracingEnabled {
		r.setupSpan.AddEvent("Registering Local Service", trace.WithAttributes(
			attribute.String("service", name),
		))
	}

	svcDesc, err := LoadServiceDesc(serviceDesc)
	if err != nil {
		log.Fatalf("totem: failed to load service descriptor: %v", err)
	}

	invoker := newLocalServiceInvoker(impl, serviceDesc, r.logger, r.interceptors.Incoming, r.metrics, 0)
	r.controller.RegisterServiceHandler(NewDefaultServiceHandler(r.Context(), svcDesc, invoker))
}
// Serve starts the totem server, which takes control of the stream and begins
// handling incoming and outgoing RPCs.
//
// It returns a client connection for invoking RPCs over the stream, and a
// buffered channel that receives the controller's terminal error (or nil)
// once the stream exits, or the discovery error if setup fails.
func (r *Server) Serve() (grpc.ClientConnInterface, <-chan error) {
	// NOTE(review): this read lock is released by the goroutine below only
	// after the controller exits, then upgraded to a write lock for teardown.
	// The cross-goroutine RLock/RUnlock handoff appears intentional (it holds
	// off writers while the server runs) — confirm before restructuring.
	r.lock.RLock()
	r.logger.Debug("starting totem server")
	if TracingEnabled {
		r.setupSpan.AddEvent("Starting Server")
	}
	// Capacity 2: both the run goroutine and the discovery-failure path below
	// may send without blocking.
	ch := make(chan error, 2)
	go func() {
		runErr := r.controller.Run(r.Context())
		if runErr != nil {
			if status.Code(runErr) == codes.Canceled {
				r.logger.Debug("stream canceled", "error", runErr)
			} else {
				r.logger.Warn("stream exited with error", "error", runErr)
			}
		} else {
			r.logger.Debug("stream closed")
		}
		// Release the read lock taken in Serve, then take the write lock to
		// tear down spliced controllers.
		r.lock.RUnlock()
		r.lock.Lock()
		defer r.lock.Unlock()
		if runErr != nil {
			r.logger.Debug("kicking spliced controllers", "error", runErr,
				"numControllers", len(r.splicedControllers))
		}
		// Propagate a non-nil exit error to each spliced controller, then
		// close them all regardless.
		for _, spliced := range r.splicedControllers {
			if runErr != nil {
				spliced.Kick(runErr)
			}
			spliced.CloseOrRecv()
		}
		r.controller.CloseOrRecv()
		ch <- runErr
	}()
	// Discover services reachable through the main controller before handing
	// the connection back to the caller.
	info, err := discoverServices(r.setupCtx, r.controller, discoverOptions{
		MaxHops: int32(r.discoveryHopLimit),
	})
	if err != nil {
		r.controller.Kick(fmt.Errorf("service discovery failed: %w", err))
		r.controller.CloseOrRecv()
		ch <- err
		return nil, ch
	}
	r.logger.Debug("service discovery complete", "methods", info.MethodNames())
	r.setupSpan.AddEvent("Service Discovery Complete", trace.WithAttributes(
		attribute.StringSlice("services", info.ServiceNames()),
	))
	// Replicate every discovered service to each spliced controller so that
	// spliced clients can reach them through this server.
	invoker := r.controller.NewInvoker()
	for _, ctrl := range r.splicedControllers {
		for _, svcDesc := range info.Services {
			ctrl.RegisterServiceHandler(NewDefaultServiceHandler(r.Context(), svcDesc, invoker))
		}
		if TracingEnabled {
			r.setupSpan.AddEvent("Syncing Spliced Controller", trace.WithAttributes(
				attribute.String("controller", ctrl.Name),
			))
		}
	}
	// Setup is complete; end the span started in NewServer.
	r.setupSpan.End(trace.WithTimestamp(time.Now()))
	return &ClientConn{
		controller:  r.controller,
		interceptor: r.interceptors.Outgoing,
		logger:      r.logger.WithGroup("cc"),
		metrics:     r.metrics,
	}, ch
}
// Context returns the server's stream context. Only valid after Serve has
// been called.
func (r *Server) Context() context.Context {
	return r.stream.Context()
}