From 104aa6b11850ad49a05f1d6d9da02c9f6c9abc87 Mon Sep 17 00:00:00 2001
From: Richard Ramos
Date: Fri, 4 Oct 2024 08:56:59 -0400
Subject: [PATCH 1/3] chore_: bump go-waku with filter loop fix

---
 go.mod | 82 +-
 go.sum | 188 +-
 vendor/github.com/cespare/xxhash/v2/README.md | 2 +
 vendor/github.com/cespare/xxhash/v2/xxhash.go | 29 +-
 .../cespare/xxhash/v2/xxhash_asm.go | 2 +-
 .../cespare/xxhash/v2/xxhash_other.go | 2 +-
 .../cespare/xxhash/v2/xxhash_safe.go | 2 +-
 .../cespare/xxhash/v2/xxhash_unsafe.go | 2 +-
 .../github.com/elastic/gosigar/CHANGELOG.md | 10 +-
 vendor/github.com/elastic/gosigar/README.md | 2 +-
 .../elastic/gosigar/sigar_common_darwin.go | 28 +-
 .../go-task/slim-sprig/{ => v3}/.editorconfig | 0
 .../slim-sprig/{ => v3}/.gitattributes | 0
 .../go-task/slim-sprig/{ => v3}/.gitignore | 0
 .../go-task/slim-sprig/{ => v3}/CHANGELOG.md | 19 +
 .../go-task/slim-sprig/{ => v3}/LICENSE.txt | 0
 .../go-task/slim-sprig/{ => v3}/README.md | 2 +-
 .../go-task/slim-sprig/{ => v3}/Taskfile.yml | 2 +-
 .../go-task/slim-sprig/{ => v3}/crypto.go | 0
 .../go-task/slim-sprig/{ => v3}/date.go | 0
 .../go-task/slim-sprig/{ => v3}/defaults.go | 0
 .../go-task/slim-sprig/{ => v3}/dict.go | 0
 .../go-task/slim-sprig/{ => v3}/doc.go | 0
 .../go-task/slim-sprig/{ => v3}/functions.go | 0
 .../go-task/slim-sprig/{ => v3}/list.go | 0
 .../go-task/slim-sprig/{ => v3}/network.go | 0
 .../go-task/slim-sprig/{ => v3}/numeric.go | 0
 .../go-task/slim-sprig/{ => v3}/reflect.go | 0
 .../go-task/slim-sprig/{ => v3}/regex.go | 0
 .../go-task/slim-sprig/{ => v3}/strings.go | 0
 .../go-task/slim-sprig/{ => v3}/url.go | 0
 .../google/pprof/profile/profile.go | 2 +-
 vendor/github.com/google/uuid/CHANGELOG.md | 20 +
 vendor/github.com/google/uuid/hash.go | 6 +
 vendor/github.com/google/uuid/time.go | 21 +-
 vendor/github.com/google/uuid/uuid.go | 53 +
 vendor/github.com/google/uuid/version6.go | 56 +
 vendor/github.com/google/uuid/version7.go | 104 +
 .../klauspost/compress/zstd/dict.go | 31 +
 .../zstd/internal/xxhash/xxhash_arm64.s | 4 +-
 .../klauspost/compress/zstd/matchlen_amd64.s | 10 +-
 .../github.com/klauspost/cpuid/v2/README.md | 1 +
 vendor/github.com/klauspost/cpuid/v2/cpuid.go | 2 +
 .../klauspost/cpuid/v2/featureid_string.go | 369 +-
 .../libp2p/go-libp2p-pubsub/backoff.go | 1 -
 .../libp2p/go-libp2p-pubsub/comm.go | 41 +-
 .../libp2p/go-libp2p-pubsub/floodsub.go | 12 +-
 .../libp2p/go-libp2p-pubsub/gossipsub.go | 248 +-
 .../libp2p/go-libp2p-pubsub/gossipsub_feat.go | 10 +-
 .../libp2p/go-libp2p-pubsub/notify.go | 75 -
 .../libp2p/go-libp2p-pubsub/pb/rpc.pb.go | 328 +-
 .../libp2p/go-libp2p-pubsub/pb/rpc.proto | 8 +-
 .../libp2p/go-libp2p-pubsub/pb/trace.pb.go | 391 +-
 .../libp2p/go-libp2p-pubsub/pb/trace.proto | 5 +
 .../libp2p/go-libp2p-pubsub/peer_notify.go | 112 +
 .../libp2p/go-libp2p-pubsub/pubsub.go | 96 +-
 .../libp2p/go-libp2p-pubsub/randomsub.go | 12 +-
 .../libp2p/go-libp2p-pubsub/rpc_queue.go | 147 +
 .../go-libp2p-pubsub/timecache/time_cache.go | 4 -
 .../libp2p/go-libp2p-pubsub/trace.go | 20 +-
 .../libp2p/go-libp2p-pubsub/tracer.go | 1 +
 .../libp2p/go-libp2p-pubsub/version.json | 3 +
 vendor/github.com/libp2p/go-libp2p/README.md | 4 +-
 .../libp2p/go-libp2p/config/config.go | 203 +-
 .../go-libp2p/core/crypto/pb/crypto.pb.go | 10 +-
 .../libp2p/go-libp2p/core/network/network.go | 3 +
 .../go-libp2p/core/peer/pb/peer_record.pb.go | 10 +-
 .../go-libp2p/core/record/pb/envelope.pb.go | 8 +-
 .../core/sec/insecure/pb/plaintext.pb.go | 8 +-
 .../github.com/libp2p/go-libp2p/defaults.go | 29 +
 vendor/github.com/libp2p/go-libp2p/options.go | 26 +
.../p2p/host/autonat/pb/autonat.pb.go | 14 +- .../go-libp2p/p2p/host/basic/basic_host.go | 200 +- .../libp2p/go-libp2p/p2p/host/blank/blank.go | 233 + .../go-libp2p/p2p/metricshelper/conn.go | 15 +- .../p2p/net/swarm/black_hole_detector.go | 200 +- .../go-libp2p/p2p/net/swarm/dial_worker.go | 39 +- .../libp2p/go-libp2p/p2p/net/swarm/swarm.go | 43 +- .../go-libp2p/p2p/net/swarm/swarm_conn.go | 9 + .../go-libp2p/p2p/net/swarm/swarm_dial.go | 5 + .../go-libp2p/p2p/net/swarm/swarm_listen.go | 40 +- .../go-libp2p/p2p/net/swarm/swarm_metrics.go | 22 +- .../p2p/protocol/autonatv2/autonat.go | 236 + .../p2p/protocol/autonatv2/client.go | 342 + .../p2p/protocol/autonatv2/metrics.go | 97 + .../p2p/protocol/autonatv2/msg_reader.go | 38 + .../p2p/protocol/autonatv2/options.go | 64 + .../p2p/protocol/autonatv2/pb/autonatv2.pb.go | 818 +++ .../p2p/protocol/autonatv2/pb/autonatv2.proto | 64 + .../p2p/protocol/autonatv2/server.go | 509 ++ .../p2p/protocol/circuitv2/pb/circuit.pb.go | 26 +- .../p2p/protocol/circuitv2/pb/voucher.pb.go | 10 +- .../p2p/protocol/holepunch/pb/holepunch.pb.go | 8 +- .../p2p/protocol/identify/obsaddr.go | 44 + .../p2p/protocol/identify/pb/identify.pb.go | 8 +- .../go-libp2p/p2p/protocol/ping/ping.go | 7 +- .../p2p/security/noise/pb/payload.pb.go | 10 +- .../go-libp2p/p2p/transport/quic/transport.go | 6 + .../p2p/transport/quicreuse/connmgr.go | 63 +- .../transport/quicreuse/nonquic_packetconn.go | 74 + .../p2p/transport/quicreuse/options.go | 10 +- .../p2p/transport/webrtc/connection.go | 60 +- .../p2p/transport/webrtc/listener.go | 1 + .../go-libp2p/p2p/transport/webrtc/logger.go | 22 +- .../p2p/transport/webrtc/pb/message.pb.go | 8 +- .../p2p/transport/webrtc/transport.go | 51 +- .../p2p/transport/webrtc/udpmux/mux.go | 5 +- .../p2p/transport/websocket/listener.go | 23 +- .../github.com/libp2p/go-libp2p/version.json | 2 +- vendor/github.com/miekg/dns/README.md | 6 + vendor/github.com/miekg/dns/defaults.go | 8 +- vendor/github.com/miekg/dns/edns.go | 62 +- vendor/github.com/miekg/dns/msg.go | 2 +- vendor/github.com/miekg/dns/scan.go | 11 +- vendor/github.com/miekg/dns/scan_rr.go | 69 +- vendor/github.com/miekg/dns/server.go | 19 +- vendor/github.com/miekg/dns/svcb.go | 50 +- vendor/github.com/miekg/dns/types.go | 14 + vendor/github.com/miekg/dns/version.go | 2 +- vendor/github.com/miekg/dns/xfr.go | 11 +- vendor/github.com/miekg/dns/zduplicate.go | 9 + vendor/github.com/miekg/dns/zmsg.go | 11 + vendor/github.com/miekg/dns/ztypes.go | 12 + .../multiformats/go-multiaddr/protocols.go | 9 + .../multiformats/go-multiaddr/transcoders.go | 28 + .../multiformats/go-multiaddr/version.json | 2 +- vendor/github.com/munnerz/goautoneg/LICENSE | 31 + vendor/github.com/munnerz/goautoneg/Makefile | 13 + .../ww => munnerz}/goautoneg/README.txt | 0 .../ww => munnerz}/goautoneg/autoneg.go | 127 +- .../v2/ginkgo/generators/bootstrap_command.go | 2 +- .../v2/ginkgo/generators/generate_command.go | 3 +- .../onsi/ginkgo/v2/ginkgo/internal/compile.go | 14 +- .../ginkgo/v2/ginkgo/internal/gocovmerge.go | 129 + .../ginkgo/internal/profiles_and_reports.go | 44 +- .../ginkgo/v2/ginkgo/watch/package_hash.go | 9 + .../ginkgo/v2/reporters/default_reporter.go | 61 +- .../onsi/ginkgo/v2/reporters/junit_report.go | 2 + .../github.com/onsi/ginkgo/v2/types/config.go | 26 +- .../github.com/onsi/ginkgo/v2/types/flags.go | 15 +- .../onsi/ginkgo/v2/types/label_filter.go | 229 +- .../onsi/ginkgo/v2/types/version.go | 2 +- .../github.com/pion/datachannel/.golangci.yml | 7 +- vendor/github.com/pion/datachannel/message.go 
| 15 + .../pion/datachannel/message_channel_open.go | 2 +- .../extension/srtp_protection_profile.go | 8 + .../pion/dtls/v2/srtp_protection_profile.go | 4 + vendor/github.com/pion/ice/v2/agent.go | 28 +- .../github.com/pion/ice/v2/agent_handlers.go | 49 +- .../github.com/pion/ice/v2/candidate_base.go | 11 +- .../github.com/pion/interceptor/attributes.go | 4 +- .../github.com/pion/rtp/codecs/h264_packet.go | 6 +- vendor/github.com/pion/rtp/codecs/vp9/bits.go | 65 + .../github.com/pion/rtp/codecs/vp9/header.go | 221 + .../github.com/pion/rtp/codecs/vp9_packet.go | 152 +- vendor/github.com/pion/rtp/error.go | 2 + vendor/github.com/pion/rtp/packet.go | 7 + vendor/github.com/pion/rtp/sequencer.go | 10 +- vendor/github.com/pion/sctp/ack_timer.go | 22 +- vendor/github.com/pion/sctp/association.go | 96 +- .../pion/sctp/chunk_payload_data.go | 2 +- .../pion/sctp/error_cause_header.go | 7 + vendor/github.com/pion/sctp/packet.go | 16 + .../sctp/param_requested_hmac_algorithm.go | 8 +- vendor/github.com/pion/sctp/payload_queue.go | 147 +- vendor/github.com/pion/sctp/queue.go | 70 + .../pion/sctp/receive_payload_queue.go | 186 + vendor/github.com/pion/sctp/rtx_timer.go | 113 +- vendor/github.com/pion/srtp/v2/context.go | 125 +- vendor/github.com/pion/srtp/v2/errors.go | 12 +- .../github.com/pion/srtp/v2/key_derivation.go | 8 +- vendor/github.com/pion/srtp/v2/keying.go | 4 +- vendor/github.com/pion/srtp/v2/option.go | 47 + .../pion/srtp/v2/protection_profile.go | 70 +- vendor/github.com/pion/srtp/v2/srtcp.go | 33 +- vendor/github.com/pion/srtp/v2/srtp.go | 33 +- vendor/github.com/pion/srtp/v2/srtp_cipher.go | 11 +- .../pion/srtp/v2/srtp_cipher_aead_aes_gcm.go | 132 +- .../srtp/v2/srtp_cipher_aes_cm_hmac_sha1.go | 149 +- .../pion/transport/v2/packetio/buffer.go | 27 +- .../pion/transport/v2/stdnet/net.go | 9 +- .../github.com/pion/webrtc/v3/datachannel.go | 62 + .../pion/webrtc/v3/dtlstransport.go | 16 +- .../github.com/pion/webrtc/v3/icegatherer.go | 22 +- .../github.com/pion/webrtc/v3/icetransport.go | 24 +- .../github.com/pion/webrtc/v3/interceptor.go | 13 + .../pion/webrtc/v3/internal/fmtp/av1.go | 41 + .../pion/webrtc/v3/internal/fmtp/fmtp.go | 45 +- .../pion/webrtc/v3/internal/fmtp/vp9.go | 41 + .../pion/webrtc/v3/internal/mux/mux.go | 4 + .../github.com/pion/webrtc/v3/mediaengine.go | 54 +- .../github.com/pion/webrtc/v3/operations.go | 88 +- .../pion/webrtc/v3/peerconnection.go | 190 +- .../pion/webrtc/v3/peerconnectionstate.go | 11 - .../github.com/pion/webrtc/v3/rtpreceiver.go | 16 +- .../pion/webrtc/v3/sctptransport.go | 32 +- vendor/github.com/pion/webrtc/v3/sdp.go | 49 +- .../github.com/pion/webrtc/v3/track_remote.go | 3 +- .../prometheus/client_golang/NOTICE | 5 - .../internal/github.com/golang/gddo/LICENSE | 27 + .../golang/gddo/httputil/header/header.go | 145 + .../golang/gddo/httputil/negotiate.go | 36 + .../client_golang/prometheus/go_collector.go | 55 +- .../prometheus/go_collector_latest.go | 19 +- .../client_golang/prometheus/histogram.go | 226 +- .../internal/go_collector_options.go | 2 + .../client_golang/prometheus/metric.go | 2 +- .../prometheus/process_collector.go | 27 +- .../prometheus/process_collector_other.go | 14 + .../prometheus/promhttp/delegator.go | 6 + .../client_golang/prometheus/promhttp/http.go | 111 +- .../client_golang/prometheus/registry.go | 17 +- .../client_golang/prometheus/summary.go | 42 + .../client_golang/prometheus/vec.go | 2 +- .../prometheus/common/expfmt/decode.go | 6 +- .../prometheus/common/expfmt/encode.go | 13 +- 
.../prometheus/common/expfmt/expfmt.go | 22 +- .../common/expfmt/openmetrics_create.go | 202 +- .../prometheus/common/model/alert.go | 27 +- .../prometheus/common/model/labelset.go | 11 - .../common/model/labelset_string.go | 45 + .../common/model/labelset_string_go120.go | 39 + .../prometheus/common/model/metric.go | 1 + .../prometheus/procfs/.golangci.yml | 7 + .../prometheus/procfs/MAINTAINERS.md | 3 +- .../prometheus/procfs/Makefile.common | 26 +- vendor/github.com/prometheus/procfs/arp.go | 6 +- .../github.com/prometheus/procfs/buddyinfo.go | 6 +- .../github.com/prometheus/procfs/cpuinfo.go | 4 +- vendor/github.com/prometheus/procfs/crypto.go | 6 +- .../github.com/prometheus/procfs/fscache.go | 4 +- vendor/github.com/prometheus/procfs/ipvs.go | 6 +- .../github.com/prometheus/procfs/loadavg.go | 2 +- vendor/github.com/prometheus/procfs/mdstat.go | 60 +- .../github.com/prometheus/procfs/meminfo.go | 220 +- .../github.com/prometheus/procfs/mountinfo.go | 2 +- .../prometheus/procfs/mountstats.go | 11 +- .../prometheus/procfs/net_conntrackstat.go | 4 +- .../prometheus/procfs/net_ip_socket.go | 46 +- .../prometheus/procfs/net_sockstat.go | 4 +- .../prometheus/procfs/net_softnet.go | 2 +- .../prometheus/procfs/net_tls_stat.go | 119 + .../github.com/prometheus/procfs/net_unix.go | 14 +- .../prometheus/procfs/net_wireless.go | 22 +- vendor/github.com/prometheus/procfs/proc.go | 8 +- .../prometheus/procfs/proc_limits.go | 2 +- .../github.com/prometheus/procfs/proc_ns.go | 4 +- .../github.com/prometheus/procfs/proc_psi.go | 2 +- .../prometheus/procfs/proc_smaps.go | 2 +- .../github.com/prometheus/procfs/proc_stat.go | 7 + .../prometheus/procfs/proc_status.go | 29 +- .../github.com/prometheus/procfs/proc_sys.go | 2 +- .../github.com/prometheus/procfs/softirqs.go | 22 +- vendor/github.com/prometheus/procfs/stat.go | 22 +- vendor/github.com/prometheus/procfs/swaps.go | 6 +- vendor/github.com/prometheus/procfs/thread.go | 2 +- .../github.com/prometheus/procfs/zoneinfo.go | 4 +- vendor/github.com/quic-go/quic-go/.gitignore | 1 + vendor/github.com/quic-go/quic-go/client.go | 2 +- .../github.com/quic-go/quic-go/connection.go | 265 +- .../quic-go/quic-go/connection_logging.go | 173 + .../quic-go/quic-go/crypto_stream.go | 49 +- .../quic-go/quic-go/crypto_stream_manager.go | 51 +- vendor/github.com/quic-go/quic-go/framer.go | 136 +- .../quic-go/quic-go/http3/client.go | 9 +- .../github.com/quic-go/quic-go/http3/conn.go | 33 + .../quic-go/quic-go/http3/roundtrip.go | 2 + .../quic-go/quic-go/http3/server.go | 103 +- .../github.com/quic-go/quic-go/interface.go | 38 +- .../ackhandler/received_packet_history.go | 104 +- .../ackhandler/sent_packet_handler.go | 8 +- .../ackhandler/sent_packet_history.go | 11 +- .../flowcontrol/connection_flow_controller.go | 10 +- .../quic-go/internal/flowcontrol/interface.go | 6 +- .../flowcontrol/stream_flow_controller.go | 21 +- .../internal/handshake/crypto_setup.go | 54 +- .../quic-go/internal/handshake/interface.go | 3 +- .../internal/handshake/token_generator.go | 2 +- .../internal/handshake/token_protector.go | 20 +- .../quic-go/internal/logutils/frame.go | 50 - .../internal/{handshake => qtls}/conn.go | 2 +- .../quic-go/quic-go/internal/qtls/qtls.go | 34 +- .../quic-go/internal/utils/byteorder.go | 21 - .../internal/utils/byteorder_big_endian.go | 103 - .../quic-go/quic-go/internal/utils/ip.go | 10 - .../quic-go/quic-go/internal/utils/minmax.go | 36 - .../quic-go/internal/utils/rtt_stats.go | 2 +- .../quic-go/internal/wire/ack_frame.go | 4 +- 
.../quic-go/internal/wire/extended_header.go | 68 +- .../quic-go/quic-go/internal/wire/header.go | 64 +- .../quic-go/internal/wire/short_header.go | 17 +- .../quic-go/logging/connection_tracer.go | 8 +- .../quic-go/quic-go/logging/interface.go | 19 +- .../quic-go/quic-go/logging/tracer.go | 4 +- .../quic-go/metrics/connection_tracer.go | 192 + .../quic-go/quic-go/metrics/pool.go | 27 + .../quic-go/quic-go/metrics/tracer.go | 147 + vendor/github.com/quic-go/quic-go/mockgen.go | 11 +- .../quic-go/quic-go/mtu_discoverer.go | 136 +- .../quic-go/quic-go/packet_packer.go | 11 +- .../quic-go/quic-go/packet_unpacker.go | 23 +- .../quic-go/quic-go/qlog/connection_tracer.go | 24 +- .../github.com/quic-go/quic-go/qlog/event.go | 13 +- .../quic-go/quic-go/qlog/packet_header.go | 4 +- .../quic-go/quic-go/qlog/qlog_dir.go | 14 +- .../github.com/quic-go/quic-go/qlog/trace.go | 4 +- .../github.com/quic-go/quic-go/qlog/tracer.go | 6 +- .../github.com/quic-go/quic-go/qlog/types.go | 4 +- .../github.com/quic-go/quic-go/qlog/writer.go | 14 + .../quic-go/quic-go/receive_stream.go | 86 +- .../github.com/quic-go/quic-go/send_stream.go | 132 +- vendor/github.com/quic-go/quic-go/server.go | 42 +- vendor/github.com/quic-go/quic-go/stream.go | 41 +- .../github.com/quic-go/quic-go/streams_map.go | 31 +- .../quic-go/quic-go/streams_map_outgoing.go | 2 +- .../github.com/quic-go/quic-go/transport.go | 12 + .../quic-go/quic-go/window_update_queue.go | 71 - .../go-waku/waku/persistence/store.go | 3 + .../go-waku/waku/v2/api/filter/filter.go | 44 +- .../waku/v2/api/filter/filter_manager.go | 17 +- .../waku/v2/api/missing/missing_messages.go | 4 + .../waku/v2/api/publish/message_check.go | 2 + .../waku/v2/api/publish/message_queue.go | 2 + .../go-waku/waku/v2/discv5/discover.go | 17 +- .../go-waku/waku/v2/discv5/filters.go | 12 +- .../waku/v2/discv5/mock_peer_discoverer.go | 2 + .../go-waku/waku/v2/node/connectedness.go | 2 + .../go-waku/waku/v2/node/keepalive.go | 3 + .../go-waku/waku/v2/node/localnode.go | 3 + .../go-waku/waku/v2/node/wakunode2.go | 11 +- .../go-waku/waku/v2/node/wakuoptions.go | 31 + .../waku/v2/peermanager/connection_gater.go | 2 + .../v2/peermanager/fastest_peer_selector.go | 5 +- .../waku/v2/peermanager/peer_connector.go | 15 +- .../waku/v2/peermanager/peer_discovery.go | 2 + .../waku/v2/peermanager/peer_manager.go | 40 +- .../waku/v2/peermanager/peer_selection.go | 7 +- .../v2/peermanager/topic_event_handler.go | 2 + .../go-waku/waku/v2/protocol/enr/enr.go | 17 + .../go-waku/waku/v2/protocol/filter/client.go | 9 +- .../v2/protocol/filter/filter_health_check.go | 3 + .../go-waku/waku/v2/protocol/filter/server.go | 10 +- .../v2/protocol/filter/subscribers_map.go | 3 + .../legacy_store/waku_store_client.go | 5 +- .../legacy_store/waku_store_protocol.go | 3 + .../v2/protocol/lightpush/waku_lightpush.go | 6 +- .../v2/protocol/metadata/waku_metadata.go | 2 + .../waku/v2/protocol/peer_exchange/client.go | 8 +- .../v2/protocol/peer_exchange/protocol.go | 2 + .../waku/v2/protocol/relay/broadcast.go | 2 + .../go-waku/waku/v2/protocol/relay/config.go | 5 + .../go-waku/waku/v2/protocol/relay/metrics.go | 2 + .../waku/v2/protocol/relay/topic_events.go | 2 + .../waku/v2/protocol/relay/waku_relay.go | 2 + .../dynamic/membership_fetcher.go | 2 + .../waku/v2/protocol/rln/nullifier_log.go | 2 + .../go-waku/waku/v2/protocol/store/client.go | 48 +- .../go-waku/waku/v2/protocol/store/options.go | 9 + .../waku/v2/protocol/store/pb/validation.go | 6 +- .../go-waku/waku/v2/protocol/store/result.go | 4 +- 
.../waku-org/go-waku/waku/v2/rendezvous/db.go | 2 + .../go-waku/waku/v2/rendezvous/rendezvous.go | 3 + .../v2/service/common_discovery_service.go | 2 + .../go-waku/waku/v2/timesource/ntp.go | 3 + .../waku-org/go-waku/waku/v2/utils/logger.go | 8 + .../waku-org/go-waku/waku/v2/utils/peer.go | 11 + vendor/github.com/wlynxg/anet/.gitignore | 1 + vendor/github.com/wlynxg/anet/LICENSE | 28 + vendor/github.com/wlynxg/anet/README.md | 120 + vendor/github.com/wlynxg/anet/README_zh.md | 25 + vendor/github.com/wlynxg/anet/interface.go | 31 + .../wlynxg/anet/interface_android.go | 434 ++ .../github.com/wlynxg/anet/netlink_android.go | 179 + vendor/go.uber.org/dig/.golangci.yml | 89 + vendor/go.uber.org/dig/CHANGELOG.md | 9 + vendor/go.uber.org/dig/Makefile | 49 +- vendor/go.uber.org/dig/callback.go | 7 +- vendor/go.uber.org/dig/check_license.sh | 17 - vendor/go.uber.org/dig/constructor.go | 6 +- vendor/go.uber.org/dig/container.go | 19 + vendor/go.uber.org/dig/decorate.go | 6 +- vendor/go.uber.org/dig/error.go | 2 - vendor/go.uber.org/dig/glide.yaml | 7 - .../dig/internal/digclock/clock.go | 82 + vendor/go.uber.org/dig/invoke.go | 3 +- vendor/go.uber.org/dig/param.go | 3 +- vendor/go.uber.org/dig/scope.go | 13 +- vendor/go.uber.org/dig/version.go | 2 +- vendor/go.uber.org/dig/visualize.go | 10 +- vendor/go.uber.org/fx/CHANGELOG.md | 8 + vendor/go.uber.org/fx/broadcast.go | 179 + vendor/go.uber.org/fx/shutdown.go | 2 +- vendor/go.uber.org/fx/signal.go | 130 +- vendor/go.uber.org/fx/version.go | 2 +- vendor/golang.org/x/crypto/LICENSE | 4 +- vendor/golang.org/x/crypto/blake2s/blake2s.go | 10 +- .../golang.org/x/crypto/blake2s/register.go | 21 - .../chacha20poly1305/chacha20poly1305.go | 2 +- .../x/crypto/cryptobyte/asn1/asn1.go | 2 +- .../golang.org/x/crypto/cryptobyte/string.go | 2 +- .../x/crypto/curve25519/curve25519.go | 39 +- .../x/crypto/curve25519/curve25519_compat.go | 105 - .../x/crypto/curve25519/curve25519_go120.go | 46 - .../x/crypto/curve25519/internal/field/README | 7 - .../x/crypto/curve25519/internal/field/fe.go | 416 -- .../curve25519/internal/field/fe_amd64.go | 15 - .../curve25519/internal/field/fe_amd64.s | 378 -- .../internal/field/fe_amd64_noasm.go | 11 - .../curve25519/internal/field/fe_arm64.go | 15 - .../curve25519/internal/field/fe_arm64.s | 42 - .../internal/field/fe_arm64_noasm.go | 11 - .../curve25519/internal/field/fe_generic.go | 264 - .../curve25519/internal/field/sync.checkpoint | 1 - .../crypto/curve25519/internal/field/sync.sh | 19 - vendor/golang.org/x/crypto/hkdf/hkdf.go | 2 +- vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 2 +- .../x/crypto/ripemd160/ripemd160.go | 2 +- .../x/crypto/salsa20/salsa/hsalsa20.go | 2 +- vendor/golang.org/x/crypto/scrypt/scrypt.go | 2 +- vendor/golang.org/x/crypto/sha3/doc.go | 2 +- vendor/golang.org/x/crypto/sha3/hashes.go | 42 +- .../x/crypto/sha3/hashes_generic.go | 27 - .../golang.org/x/crypto/sha3/hashes_noasm.go | 23 + .../golang.org/x/crypto/sha3/keccakf_amd64.s | 5787 +++++++++++++++-- vendor/golang.org/x/crypto/sha3/register.go | 18 - vendor/golang.org/x/crypto/sha3/sha3.go | 62 +- vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 48 +- vendor/golang.org/x/crypto/sha3/shake.go | 16 +- .../golang.org/x/crypto/sha3/shake_generic.go | 19 - .../golang.org/x/crypto/sha3/shake_noasm.go | 15 + vendor/golang.org/x/crypto/sha3/xor.go | 45 +- .../golang.org/x/crypto/sha3/xor_generic.go | 28 - .../golang.org/x/crypto/sha3/xor_unaligned.go | 66 - vendor/golang.org/x/exp/LICENSE | 4 +- vendor/golang.org/x/exp/slices/sort.go | 4 +- 
vendor/golang.org/x/mod/LICENSE | 4 +- vendor/golang.org/x/mod/modfile/read.go | 7 +- vendor/golang.org/x/mod/modfile/rule.go | 184 +- vendor/golang.org/x/mod/modfile/work.go | 54 +- vendor/golang.org/x/net/LICENSE | 4 +- vendor/golang.org/x/net/proxy/per_host.go | 8 +- vendor/golang.org/x/sync/LICENSE | 4 +- vendor/golang.org/x/sys/LICENSE | 4 +- vendor/golang.org/x/sys/cpu/cpu.go | 2 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 12 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 5 + vendor/golang.org/x/sys/unix/mkerrors.sh | 3 + vendor/golang.org/x/sys/unix/mremap.go | 5 + .../golang.org/x/sys/unix/syscall_darwin.go | 24 + vendor/golang.org/x/sys/unix/syscall_linux.go | 1 + .../golang.org/x/sys/unix/syscall_openbsd.go | 1 + vendor/golang.org/x/sys/unix/syscall_unix.go | 9 + .../x/sys/unix/zerrors_darwin_amd64.go | 5 + .../x/sys/unix/zerrors_darwin_arm64.go | 5 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 58 +- .../x/sys/unix/zerrors_linux_386.go | 3 + .../x/sys/unix/zerrors_linux_amd64.go | 3 + .../x/sys/unix/zerrors_linux_arm.go | 2 + .../x/sys/unix/zerrors_linux_arm64.go | 3 + .../x/sys/unix/zerrors_linux_loong64.go | 2 + .../x/sys/unix/zerrors_linux_mips.go | 2 + .../x/sys/unix/zerrors_linux_mips64.go | 2 + .../x/sys/unix/zerrors_linux_mips64le.go | 2 + .../x/sys/unix/zerrors_linux_mipsle.go | 2 + .../x/sys/unix/zerrors_linux_ppc.go | 2 + .../x/sys/unix/zerrors_linux_ppc64.go | 2 + .../x/sys/unix/zerrors_linux_ppc64le.go | 2 + .../x/sys/unix/zerrors_linux_riscv64.go | 2 + .../x/sys/unix/zerrors_linux_s390x.go | 2 + .../x/sys/unix/zerrors_linux_sparc64.go | 2 + .../x/sys/unix/zsyscall_darwin_amd64.go | 81 + .../x/sys/unix/zsyscall_darwin_amd64.s | 20 + .../x/sys/unix/zsyscall_darwin_arm64.go | 81 + .../x/sys/unix/zsyscall_darwin_arm64.s | 20 + .../golang.org/x/sys/unix/zsyscall_linux.go | 16 + .../x/sys/unix/zsyscall_openbsd_386.go | 24 + .../x/sys/unix/zsyscall_openbsd_386.s | 5 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 24 + .../x/sys/unix/zsyscall_openbsd_amd64.s | 5 + .../x/sys/unix/zsyscall_openbsd_arm.go | 24 + .../x/sys/unix/zsyscall_openbsd_arm.s | 5 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 24 + .../x/sys/unix/zsyscall_openbsd_arm64.s | 5 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 24 + .../x/sys/unix/zsyscall_openbsd_mips64.s | 5 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 24 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 24 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 5 + .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_loong64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 47 +- .../x/sys/windows/security_windows.go | 25 +- .../x/sys/windows/syscall_windows.go | 12 +- .../golang.org/x/sys/windows/types_windows.go | 71 +- .../x/sys/windows/zsyscall_windows.go | 51 + vendor/golang.org/x/term/LICENSE | 4 +- vendor/golang.org/x/text/LICENSE | 4 +- vendor/golang.org/x/tools/LICENSE | 4 +- 
.../x/tools/go/ast/astutil/enclosing.go | 24 +- .../golang.org/x/tools/go/ast/astutil/util.go | 1 + .../tools/go/internal/packagesdriver/sizes.go | 53 - vendor/golang.org/x/tools/go/packages/doc.go | 8 - .../x/tools/go/packages/external.go | 22 +- .../golang.org/x/tools/go/packages/golist.go | 126 +- .../x/tools/go/packages/packages.go | 104 +- .../golang.org/x/tools/go/packages/visit.go | 9 + .../x/tools/go/types/objectpath/objectpath.go | 93 +- .../x/tools/internal/aliases/aliases_go121.go | 12 +- .../x/tools/internal/aliases/aliases_go122.go | 36 + .../x/tools/internal/gocommand/invoke.go | 113 +- .../x/tools/internal/imports/fix.go | 357 +- .../x/tools/internal/imports/mod.go | 8 +- .../x/tools/internal/pkgbits/decoder.go | 4 - .../x/tools/internal/stdlib/manifest.go | 111 + .../x/tools/internal/typesinternal/types.go | 15 + .../x/tools/internal/versions/constraint.go | 13 + .../internal/versions/constraint_go121.go | 14 + .../x/tools/internal/versions/types_go122.go | 2 +- .../protoc-gen-go/internal_gengo/reflect.go | 6 +- .../internal_gengo/well_known_types.go | 40 +- .../protobuf/compiler/protogen/protogen.go | 2 +- .../protobuf/encoding/protojson/decode.go | 4 +- .../protobuf/encoding/prototext/decode.go | 4 +- .../protobuf/internal/encoding/json/decode.go | 2 +- .../protobuf/internal/encoding/text/decode.go | 2 +- .../protobuf/internal/errors/errors.go | 6 +- .../protobuf/internal/filedesc/desc.go | 4 + .../protobuf/internal/filedesc/desc_init.go | 2 +- .../protobuf/internal/filedesc/desc_lazy.go | 5 + .../protobuf/internal/filetype/build.go | 4 +- .../protobuf/internal/genid/descriptor_gen.go | 3 + .../protobuf/internal/impl/api_export.go | 6 +- .../protobuf/internal/impl/checkinit.go | 2 +- .../protobuf/internal/impl/codec_extension.go | 22 + .../internal/impl/codec_messageset.go | 22 + .../protobuf/internal/impl/convert.go | 2 +- .../protobuf/internal/impl/convert_list.go | 2 +- .../protobuf/internal/impl/convert_map.go | 2 +- .../protobuf/internal/impl/encode.go | 48 +- .../protobuf/internal/impl/extension.go | 8 +- .../protobuf/internal/impl/legacy_enum.go | 2 +- .../protobuf/internal/impl/legacy_message.go | 4 +- .../protobuf/internal/impl/message.go | 8 +- .../protobuf/internal/impl/message_reflect.go | 14 +- .../internal/impl/message_reflect_gen.go | 4 +- .../protobuf/internal/impl/pointer_reflect.go | 6 +- .../protobuf/internal/impl/pointer_unsafe.go | 4 +- .../protobuf/internal/msgfmt/format.go | 4 +- .../protobuf/internal/order/range.go | 4 +- .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/extension.go | 6 +- .../reflect/protodesc/desc_resolve.go | 5 + .../reflect/protodesc/desc_validate.go | 12 - .../reflect/protoreflect/source_gen.go | 2 + .../protobuf/reflect/protoreflect/type.go | 6 +- .../reflect/protoreflect/value_pure.go | 14 +- .../reflect/protoreflect/value_union.go | 14 +- .../protoreflect/value_unsafe_go120.go | 6 +- .../protoreflect/value_unsafe_go121.go | 8 +- .../reflect/protoregistry/registry.go | 14 +- .../types/descriptorpb/descriptor.pb.go | 995 +-- .../protobuf/types/dynamicpb/dynamic.go | 16 +- .../types/gofeaturespb/go_features.pb.go | 44 +- .../protobuf/types/known/anypb/any.pb.go | 4 +- .../types/known/durationpb/duration.pb.go | 4 +- .../types/known/timestamppb/timestamp.pb.go | 4 +- .../protobuf/types/pluginpb/plugin.pb.go | 10 +- vendor/lukechampine.com/blake3/README.md | 4 - vendor/lukechampine.com/blake3/bao.go | 151 - vendor/lukechampine.com/blake3/bao/bao.go | 385 ++ vendor/lukechampine.com/blake3/blake3.go | 154 +- 
.../lukechampine.com/blake3/compress_amd64.go | 144 - .../blake3/compress_generic.go | 143 - .../lukechampine.com/blake3/compress_noasm.go | 93 - .../blake3/guts/compress_amd64.go | 135 + .../{blake3_amd64.s => guts/compress_amd64.s} | 5 +- .../blake3/guts/compress_generic.go | 145 + .../blake3/guts/compress_noasm.go | 67 + .../lukechampine.com/blake3/{ => guts}/cpu.go | 3 +- .../blake3/{ => guts}/cpu_darwin.go | 2 +- vendor/lukechampine.com/blake3/guts/node.go | 48 + vendor/modules.txt | 110 +- 596 files changed, 21744 insertions(+), 7298 deletions(-) rename vendor/github.com/go-task/slim-sprig/{ => v3}/.editorconfig (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/.gitattributes (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/.gitignore (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/CHANGELOG.md (95%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/LICENSE.txt (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/README.md (88%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/Taskfile.yml (89%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/crypto.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/date.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/defaults.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/dict.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/doc.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/functions.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/list.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/network.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/numeric.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/reflect.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/regex.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/strings.go (100%) rename vendor/github.com/go-task/slim-sprig/{ => v3}/url.go (100%) create mode 100644 vendor/github.com/google/uuid/version6.go create mode 100644 vendor/github.com/google/uuid/version7.go delete mode 100644 vendor/github.com/libp2p/go-libp2p-pubsub/notify.go create mode 100644 vendor/github.com/libp2p/go-libp2p-pubsub/peer_notify.go create mode 100644 vendor/github.com/libp2p/go-libp2p-pubsub/rpc_queue.go create mode 100644 vendor/github.com/libp2p/go-libp2p-pubsub/version.json create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/autonat.go create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/client.go create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/metrics.go create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/msg_reader.go create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/options.go create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb/autonatv2.pb.go create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb/autonatv2.proto create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/server.go create mode 100644 vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/nonquic_packetconn.go create mode 100644 vendor/github.com/munnerz/goautoneg/LICENSE create mode 100644 vendor/github.com/munnerz/goautoneg/Makefile rename vendor/github.com/{prometheus/common/internal/bitbucket.org/ww => munnerz}/goautoneg/README.txt (100%) rename 
vendor/github.com/{prometheus/common/internal/bitbucket.org/ww => munnerz}/goautoneg/autoneg.go (52%) create mode 100644 vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go create mode 100644 vendor/github.com/pion/rtp/codecs/vp9/bits.go create mode 100644 vendor/github.com/pion/rtp/codecs/vp9/header.go create mode 100644 vendor/github.com/pion/sctp/queue.go create mode 100644 vendor/github.com/pion/sctp/receive_payload_queue.go create mode 100644 vendor/github.com/pion/webrtc/v3/internal/fmtp/av1.go create mode 100644 vendor/github.com/pion/webrtc/v3/internal/fmtp/vp9.go create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go create mode 100644 vendor/github.com/prometheus/common/model/labelset_string.go create mode 100644 vendor/github.com/prometheus/common/model/labelset_string_go120.go create mode 100644 vendor/github.com/prometheus/procfs/net_tls_stat.go create mode 100644 vendor/github.com/quic-go/quic-go/connection_logging.go delete mode 100644 vendor/github.com/quic-go/quic-go/internal/logutils/frame.go rename vendor/github.com/quic-go/quic-go/internal/{handshake => qtls}/conn.go (97%) delete mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go delete mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go delete mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/ip.go delete mode 100644 vendor/github.com/quic-go/quic-go/internal/utils/minmax.go create mode 100644 vendor/github.com/quic-go/quic-go/metrics/connection_tracer.go create mode 100644 vendor/github.com/quic-go/quic-go/metrics/pool.go create mode 100644 vendor/github.com/quic-go/quic-go/metrics/tracer.go delete mode 100644 vendor/github.com/quic-go/quic-go/window_update_queue.go create mode 100644 vendor/github.com/wlynxg/anet/.gitignore create mode 100644 vendor/github.com/wlynxg/anet/LICENSE create mode 100644 vendor/github.com/wlynxg/anet/README.md create mode 100644 vendor/github.com/wlynxg/anet/README_zh.md create mode 100644 vendor/github.com/wlynxg/anet/interface.go create mode 100644 vendor/github.com/wlynxg/anet/interface_android.go create mode 100644 vendor/github.com/wlynxg/anet/netlink_android.go create mode 100644 vendor/go.uber.org/dig/.golangci.yml delete mode 100644 vendor/go.uber.org/dig/check_license.sh delete mode 100644 vendor/go.uber.org/dig/glide.yaml create mode 100644 vendor/go.uber.org/dig/internal/digclock/clock.go create mode 100644 vendor/go.uber.org/fx/broadcast.go delete mode 100644 vendor/golang.org/x/crypto/blake2s/register.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_compat.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_go120.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/README delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go delete mode 100644 
vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint delete mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh delete mode 100644 vendor/golang.org/x/crypto/sha3/hashes_generic.go create mode 100644 vendor/golang.org/x/crypto/sha3/hashes_noasm.go delete mode 100644 vendor/golang.org/x/crypto/sha3/register.go delete mode 100644 vendor/golang.org/x/crypto/sha3/shake_generic.go create mode 100644 vendor/golang.org/x/crypto/sha3/shake_noasm.go delete mode 100644 vendor/golang.org/x/crypto/sha3/xor_generic.go delete mode 100644 vendor/golang.org/x/crypto/sha3/xor_unaligned.go delete mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go create mode 100644 vendor/golang.org/x/tools/internal/versions/constraint.go create mode 100644 vendor/golang.org/x/tools/internal/versions/constraint_go121.go delete mode 100644 vendor/lukechampine.com/blake3/bao.go create mode 100644 vendor/lukechampine.com/blake3/bao/bao.go delete mode 100644 vendor/lukechampine.com/blake3/compress_amd64.go delete mode 100644 vendor/lukechampine.com/blake3/compress_generic.go delete mode 100644 vendor/lukechampine.com/blake3/compress_noasm.go create mode 100644 vendor/lukechampine.com/blake3/guts/compress_amd64.go rename vendor/lukechampine.com/blake3/{blake3_amd64.s => guts/compress_amd64.s} (99%) create mode 100644 vendor/lukechampine.com/blake3/guts/compress_generic.go create mode 100644 vendor/lukechampine.com/blake3/guts/compress_noasm.go rename vendor/lukechampine.com/blake3/{ => guts}/cpu.go (83%) rename vendor/lukechampine.com/blake3/{ => guts}/cpu_darwin.go (96%) create mode 100644 vendor/lukechampine.com/blake3/guts/node.go diff --git a/go.mod b/go.mod index 68c78fee3f8..b3abfac22bc 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ replace github.com/forPelevin/gomoji => github.com/status-im/gomoji v1.1.3-0.202 replace github.com/mutecomm/go-sqlcipher/v4 v4.4.2 => github.com/status-im/go-sqlcipher/v4 v4.5.4-status.3 -replace github.com/libp2p/go-libp2p-pubsub v0.11.0 => github.com/waku-org/go-libp2p-pubsub v0.0.0-20240703191659-2cbb09eac9b5 +replace github.com/libp2p/go-libp2p-pubsub v0.12.0 => github.com/waku-org/go-libp2p-pubsub v0.12.0-gowaku.0.20240823143342-b0f2429ca27f require ( github.com/anacrolix/torrent v1.41.0 @@ -28,7 +28,7 @@ require ( github.com/ethereum/go-ethereum v1.10.26 github.com/forPelevin/gomoji v1.1.2 github.com/golang/protobuf v1.5.3 - github.com/google/uuid v1.4.0 + github.com/google/uuid v1.6.0 github.com/hashicorp/go-version v1.2.0 github.com/imdario/mergo v0.3.12 github.com/ipfs/go-cid v0.4.1 @@ -36,11 +36,11 @@ require ( github.com/keighl/metabolize v0.0.0-20150915210303-97ab655d4034 github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f github.com/lib/pq v1.10.4 - github.com/libp2p/go-libp2p v0.35.2 - github.com/libp2p/go-libp2p-pubsub v0.11.0 + github.com/libp2p/go-libp2p v0.36.2 + github.com/libp2p/go-libp2p-pubsub v0.12.0 github.com/lucasb-eyer/go-colorful v1.0.3 github.com/mat/besticon v0.0.0-20210314201728-1579f269edb7 - github.com/multiformats/go-multiaddr v0.12.4 + github.com/multiformats/go-multiaddr v0.13.0 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 github.com/multiformats/go-varint v0.0.7 @@ -49,7 +49,7 @@ require 
( github.com/oliamb/cutter v0.2.2 github.com/pborman/uuid v1.2.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.0 github.com/russolsen/transit v0.0.0-20180705123435-0794b4c4505a github.com/status-im/doubleratchet v3.0.0+incompatible github.com/status-im/markdown v0.0.0-20240404192634-b7e33c6ac3d4 @@ -66,9 +66,9 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 github.com/zenthangplus/goccm v0.0.0-20211005163543-2f2e522aca15 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.23.0 + golang.org/x/crypto v0.26.0 golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb - google.golang.org/protobuf v1.34.1 + google.golang.org/protobuf v1.34.2 gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/go-playground/validator.v9 v9.31.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 @@ -95,17 +95,17 @@ require ( github.com/schollz/peerdiscovery v1.7.0 github.com/siphiuel/lc-proxy-wrapper v0.0.0-20230516150924-246507cee8c7 github.com/urfave/cli/v2 v2.27.2 - github.com/waku-org/go-waku v0.8.1-0.20240904143057-f9e7895202da + github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb github.com/wk8/go-ordered-map/v2 v2.1.7 github.com/yeqown/go-qrcode/v2 v2.2.1 github.com/yeqown/go-qrcode/writer/standard v1.2.1 go.uber.org/mock v0.4.0 go.uber.org/multierr v1.11.0 - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 - golang.org/x/net v0.25.0 - golang.org/x/text v0.15.0 + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa + golang.org/x/net v0.28.0 + golang.org/x/text v0.17.0 golang.org/x/time v0.5.0 - golang.org/x/tools v0.21.0 + golang.org/x/tools v0.24.0 ) require ( @@ -140,7 +140,7 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect github.com/buger/jsonparser v1.1.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect @@ -150,7 +150,7 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/edsrzf/mmap-go v1.0.0 // indirect - github.com/elastic/gosigar v0.14.2 // indirect + github.com/elastic/gosigar v0.14.3 // indirect github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect @@ -159,7 +159,7 @@ require ( github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect github.com/go-stack/stack v1.8.1 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.3.0 // indirect @@ -169,7 +169,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.0.1 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect + github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/gorilla/securecookie v1.1.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect @@ -182,8 +182,8 @@ require ( github.com/huin/goupnp v1.3.0 // indirect github.com/jackpal/go-nat-pmp 
v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.17.8 // indirect - github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect @@ -201,7 +201,7 @@ require ( github.com/mattn/go-colorable v0.1.8 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect - github.com/miekg/dns v1.1.58 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect @@ -215,33 +215,34 @@ require ( github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/onsi/ginkgo/v2 v2.15.0 // indirect + github.com/onsi/ginkgo/v2 v2.20.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pion/datachannel v1.5.6 // indirect - github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.25 // indirect - github.com/pion/interceptor v0.1.29 // indirect + github.com/pion/datachannel v1.5.8 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/ice/v2 v2.3.34 // indirect + github.com/pion/interceptor v0.1.30 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect github.com/pion/randutil v0.1.0 // indirect github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.6 // indirect - github.com/pion/sctp v1.8.16 // indirect + github.com/pion/rtp v1.8.9 // indirect + github.com/pion/sctp v1.8.33 // indirect github.com/pion/sdp/v3 v3.0.9 // indirect - github.com/pion/srtp/v2 v2.0.18 // indirect + github.com/pion/srtp/v2 v2.0.20 // indirect github.com/pion/stun v0.6.1 // indirect - github.com/pion/transport/v2 v2.2.5 // indirect + github.com/pion/transport/v2 v2.2.10 // indirect github.com/pion/turn/v2 v2.1.6 // indirect - github.com/pion/webrtc/v3 v3.2.40 // indirect + github.com/pion/webrtc/v3 v3.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/tsdb v0.10.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/quic-go v0.44.0 // indirect + github.com/quic-go/quic-go v0.46.0 // indirect github.com/quic-go/webtransport-go v0.8.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect @@ -267,6 +268,7 @@ require ( github.com/waku-org/go-zerokit-rln-arm v0.0.0-20230916171929-1dd9494ff065 // indirect github.com/waku-org/go-zerokit-rln-x86_64 v0.0.0-20230916171518-2a77c3734dd1 // indirect github.com/wk8/go-ordered-map v1.0.0 // indirect + 
github.com/wlynxg/anet v0.0.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect @@ -274,15 +276,15 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect go.uber.org/atomic v1.11.0 // indirect - go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.22.1 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/term v0.20.0 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.22.2 // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/term v0.23.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.3.0 // indirect modernc.org/libc v1.11.82 // indirect modernc.org/mathutil v1.4.1 // indirect modernc.org/memory v1.0.5 // indirect diff --git a/go.sum b/go.sum index 9850a4982a3..9112614af33 100644 --- a/go.sum +++ b/go.sum @@ -476,8 +476,8 @@ github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= @@ -733,8 +733,8 @@ github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= -github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= +github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elliotchance/orderedmap v1.2.0/go.mod h1:8hdSl6jmveQw8ScByd3AaNHNk51RhbTazdqtTty+NFw= @@ -837,8 +837,8 @@ github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= @@ -874,8 +874,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -1033,8 +1033,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -1043,8 +1043,8 @@ github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -1300,13 +1300,13 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= -github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= @@ -1333,6 +1333,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= @@ -1357,8 +1358,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.35.2 h1:287oHbuplkrLdAF+syB0n/qDgd50AUBtEODqS0e0HDs= -github.com/libp2p/go-libp2p v0.35.2/go.mod h1:RKCDNt30IkFipGL0tl8wQW/3zVWEGFUZo8g2gAKxwjU= +github.com/libp2p/go-libp2p v0.36.2 
h1:BbqRkDaGC3/5xfaJakLV/BrpjlAuYqSB0lRvtzL3B/U= +github.com/libp2p/go-libp2p v0.36.2/go.mod h1:XO3joasRE4Eup8yCTTP/+kX+g92mOgRaadk46LmPhHY= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -1462,8 +1463,8 @@ github.com/meirf/gopart v0.0.0-20180520194036-37e9492a85a8/go.mod h1:Uz8uoD6o+eQ github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= @@ -1530,8 +1531,8 @@ github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= github.com/multiformats/go-multiaddr v0.3.2/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc= -github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= +github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= +github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -1555,6 +1556,7 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -1607,8 +1609,8 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= +github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -1623,8 +1625,8 @@ github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQ github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1692,8 +1694,8 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pion/datachannel v1.4.21/go.mod h1:oiNyP4gHx2DIwRzX/MFyH0Rz/Gz05OgBlayAI2hAWjg= github.com/pion/datachannel v1.5.2/go.mod h1:FTGQWaHrdCwIJ1rw6xBIfZVkslikjShim5yr05XFuCQ= -github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg= -github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4= +github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= +github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= github.com/pion/dtls/v2 v2.0.1/go.mod h1:uMQkz2W0cSqY00xav7WByQ4Hb+18xeQh2oH2fRezr5U= github.com/pion/dtls/v2 v2.0.2/go.mod h1:27PEO3MDdaCfo21heT59/vsdmZc0zMt9wQPcSlLu/1I= github.com/pion/dtls/v2 v2.0.4/go.mod h1:qAkFscX0ZHoI1E07RfYPoRw3manThveu+mlTDdOxoGI= @@ -1702,23 +1704,23 @@ github.com/pion/dtls/v2 v2.0.9/go.mod h1:O0Wr7si/Zj5/EBFlDzDd6UtVxx25CE1r7XM7BQK github.com/pion/dtls/v2 v2.1.1/go.mod h1:qG3gA7ZPZemBqpEFqRKyURYdKEwFZQCGb7gv9T3ON3Y= github.com/pion/dtls/v2 v2.1.2/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= -github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= -github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod 
h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= github.com/pion/ice v0.7.18/go.mod h1:+Bvnm3nYC6Nnp7VV6glUkuOfToB/AtMRZpOU8ihuf4c= github.com/pion/ice/v2 v2.0.15/go.mod h1:ZIiVGevpgAxF/cXiIVmuIUtCb3Xs4gCzCbXB6+nFkSI= github.com/pion/ice/v2 v2.1.7/go.mod h1:kV4EODVD5ux2z8XncbLHIOtcXKtYXVgLVCeVqnpoeP0= github.com/pion/ice/v2 v2.1.10/go.mod h1:kV4EODVD5ux2z8XncbLHIOtcXKtYXVgLVCeVqnpoeP0= github.com/pion/ice/v2 v2.1.12/go.mod h1:ovgYHUmwYLlRvcCLI67PnQ5YGe+upXZbGgllBDG/ktU= github.com/pion/ice/v2 v2.1.20/go.mod h1:hEAldRzBhTtAfvlU1V/2/nLCMvveQWFKPNCop+63/Iw= -github.com/pion/ice/v2 v2.3.25 h1:M5rJA07dqhi3nobJIg+uPtcVjFECTrhcR3n0ns8kDZs= -github.com/pion/ice/v2 v2.3.25/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= +github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= +github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= github.com/pion/interceptor v0.0.9/go.mod h1:dHgEP5dtxOTf21MObuBAjJeAayPxLUAZjerGH8Xr07c= github.com/pion/interceptor v0.0.12/go.mod h1:qzeuWuD/ZXvPqOnxNcnhWfkCZ2e1kwwslicyyPnhoK4= github.com/pion/interceptor v0.0.13/go.mod h1:svsW2QoLHLoGLUr4pDoSopGBEWk8FZwlfxId/OKRKzo= github.com/pion/interceptor v0.0.15/go.mod h1:pg3J253eGi5bqyKzA74+ej5Y19ez2jkWANVnF+Z9Dfk= github.com/pion/interceptor v0.1.7/go.mod h1:Lh3JSl/cbJ2wP8I3ccrjh1K/deRGRn3UlSPuOTiHb6U= -github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= -github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= +github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA= +github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/mdns v0.0.4/go.mod h1:R1sL0p50l42S5lJs91oNdUL58nm0QHrhxnSegr++qC0= @@ -1745,16 +1747,15 @@ github.com/pion/rtp v1.7.0/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko github.com/pion/rtp v1.7.2/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= github.com/pion/rtp v1.7.4/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw= -github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= +github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= github.com/pion/sctp v1.7.10/go.mod h1:EhpTUQu1/lcK3xI+eriS6/96fWetHGCvBi9MSsnaBN0= github.com/pion/sctp v1.7.11/go.mod h1:EhpTUQu1/lcK3xI+eriS6/96fWetHGCvBi9MSsnaBN0= github.com/pion/sctp v1.7.12/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s= github.com/pion/sctp v1.8.0/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s= github.com/pion/sctp v1.8.2/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s= -github.com/pion/sctp v1.8.13/go.mod h1:YKSgO/bO/6aOMP9LCie1DuD7m+GamiK2yIiPM6vH+GA= -github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY= -github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE= +github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= +github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= github.com/pion/sdp/v2 v2.4.0/go.mod h1:L2LxrOpSTJbAns244vfPChbciR/ReU1KWfG04OpkR7E= github.com/pion/sdp/v3 
v3.0.4/go.mod h1:bNiSknmJE0HYBprTHXKPQ3+JjacTv5uap92ueJZKsRk= github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= @@ -1764,8 +1765,8 @@ github.com/pion/srtp v1.5.2/go.mod h1:NiBff/MSxUwMUwx/fRNyD/xGE+dVvf8BOCeXhjCXZ9 github.com/pion/srtp/v2 v2.0.1/go.mod h1:c8NWHhhkFf/drmHTAblkdu8++lsISEBBdAuiyxgqIsE= github.com/pion/srtp/v2 v2.0.2/go.mod h1:VEyLv4CuxrwGY8cxM+Ng3bmVy8ckz/1t6A0q/msKOw0= github.com/pion/srtp/v2 v2.0.5/go.mod h1:8k6AJlal740mrZ6WYxc4Dg6qDqqhxoRG2GSjlUhDF0A= -github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo= -github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= +github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= github.com/pion/stun v0.3.5/go.mod h1:gDMim+47EeEtfWogA37n6qXZS88L5V6LqFcf+DZA2UA= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= @@ -1779,14 +1780,13 @@ github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZ github.com/pion/transport v0.13.0 h1:KWTA5ZrQogizzYwPEciGtHPLwpAjE91FgXnyu+Hv2uY= github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.2/go.mod h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc= github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc= -github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4= -github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= github.com/pion/turn/v2 v2.0.4/go.mod h1:1812p4DcGVbYVBTiraUmP50XoKye++AMkbfp+N27mog= github.com/pion/turn/v2 v2.0.5/go.mod h1:APg43CFyt/14Uy7heYUOGWdkem/Wu4PhCO/bjyrTqMw= github.com/pion/turn/v2 v2.0.6/go.mod h1:+y7xl719J8bAEVpSXBXvTxStjJv3hbz9YFflvkpcGPw= @@ -1800,8 +1800,8 @@ github.com/pion/webrtc/v3 v3.0.11/go.mod h1:WEvXneGTeqNmiR59v5jTsxMc4yXQyOQcRsrd github.com/pion/webrtc/v3 v3.0.27/go.mod h1:QpLDmsU5a/a05n230gRtxZRvfHhFzn9ukGUL2x4G5ic= github.com/pion/webrtc/v3 v3.0.32/go.mod h1:wX3V5dQQUGCifhT1mYftC2kCrDQX6ZJ3B7Yad0R9JK0= github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38/go.mod h1:L5S/oAhL0Fzt/rnftVQRrP80/j5jygY7XRZzWwFx6P4= -github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU= -github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY= +github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= +github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= github.com/pkg/browser 
v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -1829,8 +1829,8 @@ github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= +github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1854,8 +1854,8 @@ github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1873,15 +1873,15 @@ github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.2/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic= github.com/prometheus/tsdb v0.10.0/go.mod 
h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0= -github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek= +github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y= +github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -2132,12 +2132,12 @@ github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1 github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/waku-org/go-discover v0.0.0-20240506173252-4912704efdc5 h1:4K3IS97JryAEV8pRXB//qPcg+8bPXl/O+AOLt3FeCKc= github.com/waku-org/go-discover v0.0.0-20240506173252-4912704efdc5/go.mod h1:eBHgM6T4EG0RZzxpxKy+rGz/6Dw2Nd8DWxS0lm9ESDw= -github.com/waku-org/go-libp2p-pubsub v0.0.0-20240703191659-2cbb09eac9b5 h1:9u16Et17jSGWM3mQhlIOZYiG+O+rlX5BsrBumw5flxk= -github.com/waku-org/go-libp2p-pubsub v0.0.0-20240703191659-2cbb09eac9b5/go.mod h1:QEb+hEV9WL9wCiUAnpY29FZR6W3zK8qYlaml8R4q6gQ= +github.com/waku-org/go-libp2p-pubsub v0.12.0-gowaku.0.20240823143342-b0f2429ca27f h1:4dQcthDPhxQIY96BFm6Ab2m57+I/z9+FgaYnIER6IfM= +github.com/waku-org/go-libp2p-pubsub v0.12.0-gowaku.0.20240823143342-b0f2429ca27f/go.mod h1:Oi0zw9aw8/Y5GC99zt+Ef2gYAl+0nZlwdJonDyOz/sE= github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0 h1:R4YYx2QamhBRl/moIxkDCNW+OP7AHbyWLBygDc/xIMo= github.com/waku-org/go-libp2p-rendezvous v0.0.0-20240110193335-a67d1cc760a0/go.mod h1:EhZP9fee0DYjKH/IOQvoNSy1tSHp2iZadsHGphcAJgY= -github.com/waku-org/go-waku v0.8.1-0.20240904143057-f9e7895202da h1:bkAJVlJL4Ba83frABWjI9p9MeLGmEHuD/QcjYu3HNbQ= -github.com/waku-org/go-waku v0.8.1-0.20240904143057-f9e7895202da/go.mod h1:VNbVmh5UYg3vIvhGV4hCw8QEykq3RScDACo2Y2dIFfg= +github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb h1:E3J49PH9iXpjaOOI/VrEX/VhSk3obKjxVehGEDzZgXI= +github.com/waku-org/go-waku v0.8.1-0.20241004054019-0ed94ce0b1cb/go.mod h1:1BRnyg2mQ2aBNLTBaPq6vEvobzywGykPOhGQFbHGf74= github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59 h1:jisj+OCI6QydLtFq3Pyhu49wl9ytPN7oAHjMfepHDrA= github.com/waku-org/go-zerokit-rln v0.1.14-0.20240102145250-fa738c0bdf59/go.mod h1:1PdBdPzyTaKt3VnpAHk3zj+r9dXPFOr3IHZP9nFle6E= github.com/waku-org/go-zerokit-rln-apple v0.0.0-20230916172309-ee0ee61dde2b h1:KgZVhsLkxsj5gb/FfndSCQu6VYwALrCOgYI3poR95yE= @@ -2163,6 +2163,9 @@ github.com/wk8/go-ordered-map v1.0.0 h1:BV7z+2PaK8LTSd/mWgY12HyMAo5CEgkHqbkVq2th github.com/wk8/go-ordered-map v1.0.0/go.mod h1:9ZIbRunKbuvfPKyBP1SIKLcXNlv74YCOZ3t3VTS6gRk= github.com/wk8/go-ordered-map/v2 v2.1.7 h1:aUZ1xBMdbvY8wnNt77qqo4nyT3y0pX4Usat48Vm+hik= github.com/wk8/go-ordered-map/v2 v2.1.7/go.mod h1:9Xvgm2mV2kSq2SAm0Y608tBmu8akTzI7c2bz7/G7ZN4= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw= +github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= 
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= @@ -2259,10 +2262,10 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= -go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= -go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -2332,13 +2335,10 @@ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2353,8 +2353,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod 
h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2393,8 +2393,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2489,13 +2489,10 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2532,8 +2529,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 
h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2695,13 +2692,10 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2711,13 +2705,10 @@ golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2730,11 +2721,10 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text 
v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2835,8 +2825,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -3033,8 +3023,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -3145,8 +3135,8 @@ k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= 
+lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= modernc.org/cc/v3 v3.31.5-0.20210308123301-7a3e9dab9009/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b7815..33c88305c46 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c9dc..78bddf1ceed 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a40c1..78f95f25610 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bba4b..118e49e819e 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. 
func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5fd8e..05f5e7dfe7b 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd88a..cf9d42aed53 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/elastic/gosigar/CHANGELOG.md b/vendor/github.com/elastic/gosigar/CHANGELOG.md index ebc11d81642..08bc4d07e4d 100644 --- a/vendor/github.com/elastic/gosigar/CHANGELOG.md +++ b/vendor/github.com/elastic/gosigar/CHANGELOG.md @@ -15,6 +15,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Deprecated +## [0.14.3] + +### Fixed + +- darwin: Fix checkptr error in `(*Mem).Get` for Go 1.22. + ## [0.14.2] ### Fixed @@ -196,7 +202,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Fixed `ProcStatus.PPID` value is wrong on Windows. 
[#55](https://github.com/elastic/gosigar/pull/55) - Fixed `ProcStatus.Username` error on Windows XP [#56](https://github.com/elastic/gosigar/pull/56) -[Unreleased]: https://github.com/elastic/gosigar/compare/v0.14.1...HEAD +[Unreleased]: https://github.com/elastic/gosigar/compare/v0.14.3...HEAD +[0.14.3]: https://github.com/elastic/gosigar/releases/tag/v0.14.3 +[0.14.2]: https://github.com/elastic/gosigar/releases/tag/v0.14.2 [0.14.1]: https://github.com/elastic/gosigar/releases/tag/v0.14.1 [0.14.0]: https://github.com/elastic/gosigar/releases/tag/v0.14.0 [0.13.0]: https://github.com/elastic/gosigar/releases/tag/v0.13.0 diff --git a/vendor/github.com/elastic/gosigar/README.md b/vendor/github.com/elastic/gosigar/README.md index ab8f5bcfe66..18cabdb4ddb 100644 --- a/vendor/github.com/elastic/gosigar/README.md +++ b/vendor/github.com/elastic/gosigar/README.md @@ -1,4 +1,4 @@ -# Go sigar [![Build Status](https://beats-ci.elastic.co/job/Beats/job/gosigar/job/master/badge/icon)](https://beats-ci.elastic.co/job/Beats/job/gosigar/job/master/) +# Go sigar [![ci](https://github.com/elastic/gosigar/actions/workflows/ci.yml/badge.svg)](https://github.com/elastic/gosigar/actions/workflows/ci.yml) ## Overview diff --git a/vendor/github.com/elastic/gosigar/sigar_common_darwin.go b/vendor/github.com/elastic/gosigar/sigar_common_darwin.go index db02df71d5b..89e660a0806 100644 --- a/vendor/github.com/elastic/gosigar/sigar_common_darwin.go +++ b/vendor/github.com/elastic/gosigar/sigar_common_darwin.go @@ -26,6 +26,8 @@ import ( "syscall" "time" "unsafe" + + "golang.org/x/sys/unix" ) // Get fetches LoadAverage data @@ -471,21 +473,23 @@ func vmInfo(vmstat *C.vm_statistics_data_t) error { // generic Sysctl buffer unmarshalling func sysctlbyname(name string, data interface{}) (err error) { - val, err := syscall.Sysctl(name) - if err != nil { - return err - } - - buf := []byte(val) - switch v := data.(type) { case *uint64: - *v = *(*uint64)(unsafe.Pointer(&buf[0])) - return - } + res, err := unix.SysctlUint64(name) + if err != nil { + return err + } + *v = res + return nil + default: + val, err := syscall.Sysctl(name) + if err != nil { + return err + } - bbuf := bytes.NewBuffer([]byte(val)) - return binary.Read(bbuf, binary.LittleEndian, data) + bbuf := bytes.NewBuffer([]byte(val)) + return binary.Read(bbuf, binary.LittleEndian, data) + } } func taskInfo(pid int, info *C.struct_proc_taskallinfo) error { diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.editorconfig rename to vendor/github.com/go-task/slim-sprig/v3/.editorconfig diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitattributes rename to vendor/github.com/go-task/slim-sprig/v3/.gitattributes diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitignore rename to vendor/github.com/go-task/slim-sprig/v3/.gitignore diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md similarity index 95% rename from vendor/github.com/go-task/slim-sprig/CHANGELOG.md rename to vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md index 61d8ebffc37..2ce45dd4eca 100644 --- 
a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md +++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + ## Release 3.2.0 (2020-12-14) ### Added diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt similarity index 100% rename from vendor/github.com/go-task/slim-sprig/LICENSE.txt rename to vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md similarity index 88% rename from vendor/github.com/go-task/slim-sprig/README.md rename to vendor/github.com/go-task/slim-sprig/v3/README.md index 72579471ff0..b5ab564254f 100644 --- a/vendor/github.com/go-task/slim-sprig/README.md +++ b/vendor/github.com/go-task/slim-sprig/v3/README.md @@ -1,4 +1,4 @@ -# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) +# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with all functions that depend on external (non standard library) or crypto packages diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml similarity index 89% rename from vendor/github.com/go-task/slim-sprig/Taskfile.yml rename to vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml index cdcfd223b71..8e6346bb19e 100644 --- a/vendor/github.com/go-task/slim-sprig/Taskfile.yml +++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml @@ -1,6 +1,6 @@ # https://taskfile.dev -version: '2' +version: '3' tasks: default: diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/crypto.go rename to vendor/github.com/go-task/slim-sprig/v3/crypto.go diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/date.go rename to vendor/github.com/go-task/slim-sprig/v3/date.go diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/defaults.go rename to vendor/github.com/go-task/slim-sprig/v3/defaults.go diff --git a/vendor/github.com/go-task/slim-sprig/dict.go 
b/vendor/github.com/go-task/slim-sprig/v3/dict.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/dict.go rename to vendor/github.com/go-task/slim-sprig/v3/dict.go diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/doc.go rename to vendor/github.com/go-task/slim-sprig/v3/doc.go diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/functions.go rename to vendor/github.com/go-task/slim-sprig/v3/functions.go diff --git a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/v3/list.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/list.go rename to vendor/github.com/go-task/slim-sprig/v3/list.go diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/network.go rename to vendor/github.com/go-task/slim-sprig/v3/network.go diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/numeric.go rename to vendor/github.com/go-task/slim-sprig/v3/numeric.go diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/reflect.go rename to vendor/github.com/go-task/slim-sprig/v3/reflect.go diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/regex.go rename to vendor/github.com/go-task/slim-sprig/v3/regex.go diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/strings.go rename to vendor/github.com/go-task/slim-sprig/v3/strings.go diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/url.go rename to vendor/github.com/go-task/slim-sprig/v3/url.go diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 62df80a5563..5551eb0bfa4 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -847,7 +847,7 @@ func (p *Profile) HasFileLines() bool { // "[vdso]", [vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" } // Copy makes a fully independent copy of a profile. 
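A minimal usage sketch (illustrative only, not applied by the hunks) of the google/pprof change above, assuming the vendored package is imported as github.com/google/pprof/profile: with this bump, anonymous "//anon" mappings are also reported as unsymbolizable.

package main

import (
	"fmt"

	"github.com/google/pprof/profile"
)

func main() {
	// Previously only "[...]"-style, "linux-vdso*" and "/dev/dri/*" mappings were
	// skipped; the updated Unsymbolizable also skips anonymous "//anon" mappings.
	m := &profile.Mapping{File: "//anon"}
	fmt.Println(m.Unsymbolizable()) // true with the vendored change above
}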
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index 7ed347d3ad7..7ec5ac7ea90 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + ## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec27..dc60082d3b3 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cdc87..c351129279f 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. 
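A short usage sketch (illustrative only, not applied by the hunks) of the google/uuid v1.6.0 additions vendored here — the Max constant and version 6/7 time support above, plus the Validate helper and NewV7 constructor in the hunks below — assuming the module is imported as github.com/google/uuid.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Max is the new all-ones UUID added in hash.go above.
	fmt.Println(uuid.Max) // ffffffff-ffff-ffff-ffff-ffffffffffff

	// NewV7 (added in version7.go below) returns a time-ordered UUID;
	// Time() now also decodes the timestamp of version 6 and 7 UUIDs.
	u, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Version(), int64(u.Time()))

	// Validate (added in uuid.go below) checks a string without building a new UUID.
	fmt.Println(uuid.Validate(u.String())) // <nil>
}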
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index dc75f7d9909..5232b486780 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -186,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 00000000000..339a959a7a2 --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. 
+func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 00000000000..3167b643d45 --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. 
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8d5567fe64c..b7b83164bc7 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { enc.Encode(&block, b) addValues(&remain, block.literals) litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } seqs += len(block.sequences) block.genCodes() addHist(&ll, block.coders.llEnc.Histogram()) @@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if offset == 0 { continue } + if int(offset) >= len(o.History) { + continue + } if offset > 3 { newOffsets[offset-3]++ } else { @@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if seqs/nUsed < 512 { // Use 512 as minimum. nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } } copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { hist := dst.Histogram() @@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { fakeLength += v hist[i] = uint32(v) } + + // Ensure we aren't trying to represent RLE. 
+ if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + dst.HistogramFinished(maxSym, maxCount) dst.reUsed = false dst.useRLE = false diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 17901e08040..ae7d4d3295a 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -162,12 +162,12 @@ finalize: MOVD h, ret+24(FP) RET -// func writeBlocks(d *Digest, b []byte) int +// func writeBlocks(s *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest + MOVD s+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s index 9a7655c0f76..0782b86e3d1 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md index 30f8d2963e2..21508edbdb3 100644 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -310,6 +310,7 @@ Exit Code 1 | AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one | | AVXVNNI | AVX (VEX encoded) VNNI neural network instructions | | AVXVNNIINT8 | AVX-VNNI-INT8 instructions | +| AVXVNNIINT16 | AVX-VNNI-INT16 instructions | | BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 | | BMI1 | Bit Manipulation Instruction Set 1 | | BMI2 | Bit Manipulation Instruction Set 2 | diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index 805f5e7b4ed..53bc18ca719 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -104,6 +104,7 @@ const ( AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one AVXVNNI // AVX (VEX encoded) VNNI neural network instructions AVXVNNIINT8 // AVX-VNNI-INT8 instructions + AVXVNNIINT16 // AVX-VNNI-INT16 instructions BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 BMI1 // Bit Manipulation Instruction Set 1 BMI2 // Bit Manipulation Instruction Set 2 @@ -1242,6 +1243,7 @@ func support() flagSet { // CPUID.(EAX=7, ECX=1).EDX fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8) fs.setIf(edx1&(1<<5) != 0, 
AVXNECONVERT) + fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16) fs.setIf(edx1&(1<<14) != 0, PREFETCHI) fs.setIf(edx1&(1<<19) != 0, AVX10) fs.setIf(edx1&(1<<21) != 0, APX_F) diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 57a085a53bf..3a256031039 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -44,194 +44,195 @@ func _() { _ = x[AVXSLOW-34] _ = x[AVXVNNI-35] _ = x[AVXVNNIINT8-36] - _ = x[BHI_CTRL-37] - _ = x[BMI1-38] - _ = x[BMI2-39] - _ = x[CETIBT-40] - _ = x[CETSS-41] - _ = x[CLDEMOTE-42] - _ = x[CLMUL-43] - _ = x[CLZERO-44] - _ = x[CMOV-45] - _ = x[CMPCCXADD-46] - _ = x[CMPSB_SCADBS_SHORT-47] - _ = x[CMPXCHG8-48] - _ = x[CPBOOST-49] - _ = x[CPPC-50] - _ = x[CX16-51] - _ = x[EFER_LMSLE_UNS-52] - _ = x[ENQCMD-53] - _ = x[ERMS-54] - _ = x[F16C-55] - _ = x[FLUSH_L1D-56] - _ = x[FMA3-57] - _ = x[FMA4-58] - _ = x[FP128-59] - _ = x[FP256-60] - _ = x[FSRM-61] - _ = x[FXSR-62] - _ = x[FXSROPT-63] - _ = x[GFNI-64] - _ = x[HLE-65] - _ = x[HRESET-66] - _ = x[HTT-67] - _ = x[HWA-68] - _ = x[HYBRID_CPU-69] - _ = x[HYPERVISOR-70] - _ = x[IA32_ARCH_CAP-71] - _ = x[IA32_CORE_CAP-72] - _ = x[IBPB-73] - _ = x[IBPB_BRTYPE-74] - _ = x[IBRS-75] - _ = x[IBRS_PREFERRED-76] - _ = x[IBRS_PROVIDES_SMP-77] - _ = x[IBS-78] - _ = x[IBSBRNTRGT-79] - _ = x[IBSFETCHSAM-80] - _ = x[IBSFFV-81] - _ = x[IBSOPCNT-82] - _ = x[IBSOPCNTEXT-83] - _ = x[IBSOPSAM-84] - _ = x[IBSRDWROPCNT-85] - _ = x[IBSRIPINVALIDCHK-86] - _ = x[IBS_FETCH_CTLX-87] - _ = x[IBS_OPDATA4-88] - _ = x[IBS_OPFUSE-89] - _ = x[IBS_PREVENTHOST-90] - _ = x[IBS_ZEN4-91] - _ = x[IDPRED_CTRL-92] - _ = x[INT_WBINVD-93] - _ = x[INVLPGB-94] - _ = x[KEYLOCKER-95] - _ = x[KEYLOCKERW-96] - _ = x[LAHF-97] - _ = x[LAM-98] - _ = x[LBRVIRT-99] - _ = x[LZCNT-100] - _ = x[MCAOVERFLOW-101] - _ = x[MCDT_NO-102] - _ = x[MCOMMIT-103] - _ = x[MD_CLEAR-104] - _ = x[MMX-105] - _ = x[MMXEXT-106] - _ = x[MOVBE-107] - _ = x[MOVDIR64B-108] - _ = x[MOVDIRI-109] - _ = x[MOVSB_ZL-110] - _ = x[MOVU-111] - _ = x[MPX-112] - _ = x[MSRIRC-113] - _ = x[MSRLIST-114] - _ = x[MSR_PAGEFLUSH-115] - _ = x[NRIPS-116] - _ = x[NX-117] - _ = x[OSXSAVE-118] - _ = x[PCONFIG-119] - _ = x[POPCNT-120] - _ = x[PPIN-121] - _ = x[PREFETCHI-122] - _ = x[PSFD-123] - _ = x[RDPRU-124] - _ = x[RDRAND-125] - _ = x[RDSEED-126] - _ = x[RDTSCP-127] - _ = x[RRSBA_CTRL-128] - _ = x[RTM-129] - _ = x[RTM_ALWAYS_ABORT-130] - _ = x[SBPB-131] - _ = x[SERIALIZE-132] - _ = x[SEV-133] - _ = x[SEV_64BIT-134] - _ = x[SEV_ALTERNATIVE-135] - _ = x[SEV_DEBUGSWAP-136] - _ = x[SEV_ES-137] - _ = x[SEV_RESTRICTED-138] - _ = x[SEV_SNP-139] - _ = x[SGX-140] - _ = x[SGXLC-141] - _ = x[SHA-142] - _ = x[SME-143] - _ = x[SME_COHERENT-144] - _ = x[SPEC_CTRL_SSBD-145] - _ = x[SRBDS_CTRL-146] - _ = x[SRSO_MSR_FIX-147] - _ = x[SRSO_NO-148] - _ = x[SRSO_USER_KERNEL_NO-149] - _ = x[SSE-150] - _ = x[SSE2-151] - _ = x[SSE3-152] - _ = x[SSE4-153] - _ = x[SSE42-154] - _ = x[SSE4A-155] - _ = x[SSSE3-156] - _ = x[STIBP-157] - _ = x[STIBP_ALWAYSON-158] - _ = x[STOSB_SHORT-159] - _ = x[SUCCOR-160] - _ = x[SVM-161] - _ = x[SVMDA-162] - _ = x[SVMFBASID-163] - _ = x[SVML-164] - _ = x[SVMNP-165] - _ = x[SVMPF-166] - _ = x[SVMPFT-167] - _ = x[SYSCALL-168] - _ = x[SYSEE-169] - _ = x[TBM-170] - _ = x[TDX_GUEST-171] - _ = x[TLB_FLUSH_NESTED-172] - _ = x[TME-173] - _ = x[TOPEXT-174] - _ = x[TSCRATEMSR-175] - _ = x[TSXLDTRK-176] - _ = x[VAES-177] - _ = x[VMCBCLEAN-178] - _ = x[VMPL-179] - _ = 
x[VMSA_REGPROT-180] - _ = x[VMX-181] - _ = x[VPCLMULQDQ-182] - _ = x[VTE-183] - _ = x[WAITPKG-184] - _ = x[WBNOINVD-185] - _ = x[WRMSRNS-186] - _ = x[X87-187] - _ = x[XGETBV1-188] - _ = x[XOP-189] - _ = x[XSAVE-190] - _ = x[XSAVEC-191] - _ = x[XSAVEOPT-192] - _ = x[XSAVES-193] - _ = x[AESARM-194] - _ = x[ARMCPUID-195] - _ = x[ASIMD-196] - _ = x[ASIMDDP-197] - _ = x[ASIMDHP-198] - _ = x[ASIMDRDM-199] - _ = x[ATOMICS-200] - _ = x[CRC32-201] - _ = x[DCPOP-202] - _ = x[EVTSTRM-203] - _ = x[FCMA-204] - _ = x[FP-205] - _ = x[FPHP-206] - _ = x[GPA-207] - _ = x[JSCVT-208] - _ = x[LRCPC-209] - _ = x[PMULL-210] - _ = x[SHA1-211] - _ = x[SHA2-212] - _ = x[SHA3-213] - _ = x[SHA512-214] - _ = x[SM3-215] - _ = x[SM4-216] - _ = x[SVE-217] - _ = x[lastID-218] + _ = x[AVXVNNIINT16-37] + _ = x[BHI_CTRL-38] + _ = x[BMI1-39] + _ = x[BMI2-40] + _ = x[CETIBT-41] + _ = x[CETSS-42] + _ = x[CLDEMOTE-43] + _ = x[CLMUL-44] + _ = x[CLZERO-45] + _ = x[CMOV-46] + _ = x[CMPCCXADD-47] + _ = x[CMPSB_SCADBS_SHORT-48] + _ = x[CMPXCHG8-49] + _ = x[CPBOOST-50] + _ = x[CPPC-51] + _ = x[CX16-52] + _ = x[EFER_LMSLE_UNS-53] + _ = x[ENQCMD-54] + _ = x[ERMS-55] + _ = x[F16C-56] + _ = x[FLUSH_L1D-57] + _ = x[FMA3-58] + _ = x[FMA4-59] + _ = x[FP128-60] + _ = x[FP256-61] + _ = x[FSRM-62] + _ = x[FXSR-63] + _ = x[FXSROPT-64] + _ = x[GFNI-65] + _ = x[HLE-66] + _ = x[HRESET-67] + _ = x[HTT-68] + _ = x[HWA-69] + _ = x[HYBRID_CPU-70] + _ = x[HYPERVISOR-71] + _ = x[IA32_ARCH_CAP-72] + _ = x[IA32_CORE_CAP-73] + _ = x[IBPB-74] + _ = x[IBPB_BRTYPE-75] + _ = x[IBRS-76] + _ = x[IBRS_PREFERRED-77] + _ = x[IBRS_PROVIDES_SMP-78] + _ = x[IBS-79] + _ = x[IBSBRNTRGT-80] + _ = x[IBSFETCHSAM-81] + _ = x[IBSFFV-82] + _ = x[IBSOPCNT-83] + _ = x[IBSOPCNTEXT-84] + _ = x[IBSOPSAM-85] + _ = x[IBSRDWROPCNT-86] + _ = x[IBSRIPINVALIDCHK-87] + _ = x[IBS_FETCH_CTLX-88] + _ = x[IBS_OPDATA4-89] + _ = x[IBS_OPFUSE-90] + _ = x[IBS_PREVENTHOST-91] + _ = x[IBS_ZEN4-92] + _ = x[IDPRED_CTRL-93] + _ = x[INT_WBINVD-94] + _ = x[INVLPGB-95] + _ = x[KEYLOCKER-96] + _ = x[KEYLOCKERW-97] + _ = x[LAHF-98] + _ = x[LAM-99] + _ = x[LBRVIRT-100] + _ = x[LZCNT-101] + _ = x[MCAOVERFLOW-102] + _ = x[MCDT_NO-103] + _ = x[MCOMMIT-104] + _ = x[MD_CLEAR-105] + _ = x[MMX-106] + _ = x[MMXEXT-107] + _ = x[MOVBE-108] + _ = x[MOVDIR64B-109] + _ = x[MOVDIRI-110] + _ = x[MOVSB_ZL-111] + _ = x[MOVU-112] + _ = x[MPX-113] + _ = x[MSRIRC-114] + _ = x[MSRLIST-115] + _ = x[MSR_PAGEFLUSH-116] + _ = x[NRIPS-117] + _ = x[NX-118] + _ = x[OSXSAVE-119] + _ = x[PCONFIG-120] + _ = x[POPCNT-121] + _ = x[PPIN-122] + _ = x[PREFETCHI-123] + _ = x[PSFD-124] + _ = x[RDPRU-125] + _ = x[RDRAND-126] + _ = x[RDSEED-127] + _ = x[RDTSCP-128] + _ = x[RRSBA_CTRL-129] + _ = x[RTM-130] + _ = x[RTM_ALWAYS_ABORT-131] + _ = x[SBPB-132] + _ = x[SERIALIZE-133] + _ = x[SEV-134] + _ = x[SEV_64BIT-135] + _ = x[SEV_ALTERNATIVE-136] + _ = x[SEV_DEBUGSWAP-137] + _ = x[SEV_ES-138] + _ = x[SEV_RESTRICTED-139] + _ = x[SEV_SNP-140] + _ = x[SGX-141] + _ = x[SGXLC-142] + _ = x[SHA-143] + _ = x[SME-144] + _ = x[SME_COHERENT-145] + _ = x[SPEC_CTRL_SSBD-146] + _ = x[SRBDS_CTRL-147] + _ = x[SRSO_MSR_FIX-148] + _ = x[SRSO_NO-149] + _ = x[SRSO_USER_KERNEL_NO-150] + _ = x[SSE-151] + _ = x[SSE2-152] + _ = x[SSE3-153] + _ = x[SSE4-154] + _ = x[SSE42-155] + _ = x[SSE4A-156] + _ = x[SSSE3-157] + _ = x[STIBP-158] + _ = x[STIBP_ALWAYSON-159] + _ = x[STOSB_SHORT-160] + _ = x[SUCCOR-161] + _ = x[SVM-162] + _ = x[SVMDA-163] + _ = x[SVMFBASID-164] + _ = x[SVML-165] + _ = x[SVMNP-166] + _ = x[SVMPF-167] + _ = x[SVMPFT-168] + _ = x[SYSCALL-169] + _ = 
x[SYSEE-170] + _ = x[TBM-171] + _ = x[TDX_GUEST-172] + _ = x[TLB_FLUSH_NESTED-173] + _ = x[TME-174] + _ = x[TOPEXT-175] + _ = x[TSCRATEMSR-176] + _ = x[TSXLDTRK-177] + _ = x[VAES-178] + _ = x[VMCBCLEAN-179] + _ = x[VMPL-180] + _ = x[VMSA_REGPROT-181] + _ = x[VMX-182] + _ = x[VPCLMULQDQ-183] + _ = x[VTE-184] + _ = x[WAITPKG-185] + _ = x[WBNOINVD-186] + _ = x[WRMSRNS-187] + _ = x[X87-188] + _ = x[XGETBV1-189] + _ = x[XOP-190] + _ = x[XSAVE-191] + _ = x[XSAVEC-192] + _ = x[XSAVEOPT-193] + _ = x[XSAVES-194] + _ = x[AESARM-195] + _ = x[ARMCPUID-196] + _ = x[ASIMD-197] + _ = x[ASIMDDP-198] + _ = x[ASIMDHP-199] + _ = x[ASIMDRDM-200] + _ = x[ATOMICS-201] + _ = x[CRC32-202] + _ = x[DCPOP-203] + _ = x[EVTSTRM-204] + _ = x[FCMA-205] + _ = x[FP-206] + _ = x[FPHP-207] + _ = x[GPA-208] + _ = x[JSCVT-209] + _ = x[LRCPC-210] + _ = x[PMULL-211] + _ = x[SHA1-212] + _ = x[SHA2-213] + _ = x[SHA3-214] + _ = x[SHA512-215] + _ = x[SM3-216] + _ = x[SM4-217] + _ = x[SVE-218] + _ = x[lastID-219] _ = x[firstID-0] } -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 319, 323, 327, 333, 338, 346, 351, 357, 361, 370, 388, 396, 403, 407, 411, 425, 431, 435, 439, 448, 452, 456, 461, 466, 470, 474, 481, 485, 488, 494, 497, 500, 510, 520, 533, 546, 550, 561, 565, 579, 596, 599, 609, 620, 626, 634, 645, 653, 665, 681, 695, 706, 716, 731, 739, 750, 760, 767, 776, 786, 790, 793, 800, 805, 816, 823, 830, 838, 841, 847, 852, 861, 868, 876, 880, 883, 889, 896, 909, 914, 916, 923, 930, 936, 940, 949, 953, 958, 964, 970, 976, 986, 989, 1005, 1009, 1018, 1021, 1030, 1045, 1058, 1064, 1078, 1085, 1088, 1093, 1096, 1099, 1111, 1125, 1135, 1147, 1154, 1173, 1176, 1180, 1184, 1188, 1193, 1198, 1203, 1208, 1222, 1233, 1239, 1242, 1247, 1256, 1260, 1265, 1270, 1276, 1283, 1288, 1291, 1300, 1316, 1319, 1325, 1335, 1343, 1347, 1356, 1360, 1372, 1375, 1385, 1388, 1395, 1403, 1410, 1413, 1420, 1423, 1428, 1434, 1442, 1448, 1454, 1462, 1467, 1474, 1481, 1489, 1496, 1501, 1506, 1513, 1517, 1519, 1523, 1526, 1531, 1536, 1541, 1545, 1549, 1553, 1559, 1562, 1565, 1568, 1574} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 323, 331, 335, 339, 345, 350, 358, 363, 369, 373, 382, 400, 408, 415, 419, 423, 437, 443, 447, 451, 460, 464, 468, 473, 478, 482, 486, 493, 497, 500, 506, 509, 512, 522, 532, 545, 558, 562, 573, 577, 591, 608, 611, 621, 632, 638, 646, 657, 665, 677, 693, 707, 718, 728, 743, 751, 762, 772, 779, 788, 798, 802, 805, 812, 817, 828, 835, 842, 850, 853, 859, 864, 873, 880, 888, 892, 895, 901, 908, 921, 926, 928, 935, 942, 948, 952, 961, 965, 970, 976, 982, 988, 998, 1001, 1017, 1021, 1030, 1033, 1042, 1057, 1070, 1076, 1090, 1097, 1100, 1105, 1108, 
1111, 1123, 1137, 1147, 1159, 1166, 1185, 1188, 1192, 1196, 1200, 1205, 1210, 1215, 1220, 1234, 1245, 1251, 1254, 1259, 1268, 1272, 1277, 1282, 1288, 1295, 1300, 1303, 1312, 1328, 1331, 1337, 1347, 1355, 1359, 1368, 1372, 1384, 1387, 1397, 1400, 1407, 1415, 1422, 1425, 1432, 1435, 1440, 1446, 1454, 1460, 1466, 1474, 1479, 1486, 1493, 1501, 1508, 1513, 1518, 1525, 1529, 1531, 1535, 1538, 1543, 1548, 1553, 1557, 1561, 1565, 1571, 1574, 1577, 1580, 1586} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/backoff.go b/vendor/github.com/libp2p/go-libp2p-pubsub/backoff.go index 4909e153745..99ca7fd03dc 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/backoff.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/backoff.go @@ -43,7 +43,6 @@ func newBackoff(ctx context.Context, sizeThreshold int, cleanupInterval time.Dur info: make(map[peer.ID]*backoffHistory), } - rand.Seed(time.Now().UnixNano()) // used for jitter go b.cleanupLoop(ctx) return b diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/comm.go b/vendor/github.com/libp2p/go-libp2p-pubsub/comm.go index 2dee9b2e5c6..d38cce080dc 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/comm.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/comm.go @@ -114,7 +114,7 @@ func (p *PubSub) notifyPeerDead(pid peer.ID) { } } -func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing <-chan *RPC) { +func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing *rpcQueue) { s, err := p.host.NewStream(p.ctx, pid, p.rt.Protocols()...) if err != nil { log.Debug("opening new stream to peer: ", err, pid) @@ -135,7 +135,7 @@ func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing <-chan } } -func (p *PubSub) handleNewPeerWithBackoff(ctx context.Context, pid peer.ID, backoff time.Duration, outgoing <-chan *RPC) { +func (p *PubSub) handleNewPeerWithBackoff(ctx context.Context, pid peer.ID, backoff time.Duration, outgoing *rpcQueue) { select { case <-time.After(backoff): p.handleNewPeer(ctx, pid, outgoing) @@ -156,7 +156,7 @@ func (p *PubSub) handlePeerDead(s network.Stream) { p.notifyPeerDead(pid) } -func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing <-chan *RPC) { +func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing *rpcQueue) { writeRpc := func(rpc *RPC) error { size := uint64(rpc.Size()) @@ -174,20 +174,17 @@ func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, ou } defer s.Close() - for { - select { - case rpc, ok := <-outgoing: - if !ok { - return - } + for ctx.Err() == nil { + rpc, err := outgoing.Pop(ctx) + if err != nil { + log.Debugf("popping message from the queue to send to %s: %s", s.Conn().RemotePeer(), err) + return + } - err := writeRpc(rpc) - if err != nil { - s.Reset() - log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err) - return - } - case <-ctx.Done(): + err = writeRpc(rpc) + if err != nil { + s.Reset() + log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err) return } } @@ -209,15 +206,17 @@ func rpcWithControl(msgs []*pb.Message, ihave []*pb.ControlIHave, iwant []*pb.ControlIWant, graft []*pb.ControlGraft, - prune []*pb.ControlPrune) *RPC { + prune []*pb.ControlPrune, + idontwant []*pb.ControlIDontWant) *RPC { return &RPC{ RPC: pb.RPC{ Publish: msgs, Control: &pb.ControlMessage{ - Ihave: ihave, - Iwant: iwant, - Graft: graft, - Prune: prune, + Ihave: ihave, + Iwant: 
iwant, + Graft: graft, + Prune: prune, + Idontwant: idontwant, }, }, } diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/floodsub.go b/vendor/github.com/libp2p/go-libp2p-pubsub/floodsub.go index 20f592e2d75..45b3fdeef77 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/floodsub.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/floodsub.go @@ -71,6 +71,8 @@ func (fs *FloodSubRouter) AcceptFrom(peer.ID) AcceptStatus { return AcceptAll } +func (fs *FloodSubRouter) PreValidation([]*Message) {} + func (fs *FloodSubRouter) HandleRPC(rpc *RPC) {} func (fs *FloodSubRouter) Publish(msg *Message) { @@ -83,19 +85,19 @@ func (fs *FloodSubRouter) Publish(msg *Message) { continue } - mch, ok := fs.p.peers[pid] + q, ok := fs.p.peers[pid] if !ok { continue } - select { - case mch <- out: - fs.tracer.SendRPC(out, pid) - default: + err := q.Push(out, false) + if err != nil { log.Infof("dropping message to peer %s: queue full", pid) fs.tracer.DropRPC(out, pid) // Drop it. The peer is too slow. + continue } + fs.tracer.SendRPC(out, pid) } } diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub.go b/vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub.go index 21f34bece3e..7d01d02fe22 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub.go @@ -2,7 +2,9 @@ package pubsub import ( "context" + "crypto/sha256" "fmt" + "io" "math/rand" "sort" "time" @@ -21,13 +23,19 @@ import ( const ( // GossipSubID_v10 is the protocol ID for version 1.0.0 of the GossipSub protocol. - // It is advertised along with GossipSubID_v11 for backwards compatibility. + // It is advertised along with GossipSubID_v11 and GossipSubID_v12 for backwards compatibility. GossipSubID_v10 = protocol.ID("/meshsub/1.0.0") // GossipSubID_v11 is the protocol ID for version 1.1.0 of the GossipSub protocol. + // It is advertised along with GossipSubID_v12 for backwards compatibility. // See the spec for details about how v1.1.0 compares to v1.0.0: // https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md GossipSubID_v11 = protocol.ID("/meshsub/1.1.0") + + // GossipSubID_v12 is the protocol ID for version 1.2.0 of the GossipSub protocol. + // See the spec for details about how v1.2.0 compares to v1.1.0: + // https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md + GossipSubID_v12 = protocol.ID("/meshsub/1.2.0") ) // Defines the default gossipsub parameters. @@ -58,9 +66,17 @@ var ( GossipSubGraftFloodThreshold = 10 * time.Second GossipSubMaxIHaveLength = 5000 GossipSubMaxIHaveMessages = 10 + GossipSubMaxIDontWantMessages = 1000 GossipSubIWantFollowupTime = 3 * time.Second + GossipSubIDontWantMessageThreshold = 1024 // 1KB + GossipSubIDontWantMessageTTL = 3 // 3 heartbeats ) +type checksum struct { + payload [32]byte + length uint8 +} + // GossipSubParams defines all the gossipsub specific parameters. type GossipSubParams struct { // overlay parameters. @@ -200,10 +216,21 @@ type GossipSubParams struct { // MaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer within a heartbeat. MaxIHaveMessages int + // MaxIDontWantMessages is the maximum number of IDONTWANT messages to accept from a peer within a heartbeat. + MaxIDontWantMessages int + // Time to wait for a message requested through IWANT following an IHAVE advertisement. // If the message is not received within this window, a broken promise is declared and // the router may apply bahavioural penalties. 
IWantFollowupTime time.Duration + + // IDONTWANT is only sent for messages larger than the threshold. This should be greater than + // D_high * the size of the message id. Otherwise, the attacker can do the amplication attack by sending + // small messages while the receiver replies back with larger IDONTWANT messages. + IDontWantMessageThreshold int + + // IDONTWANT is cleared when it's older than the TTL. + IDontWantMessageTTL int } // NewGossipSub returns a new PubSub object using the default GossipSubRouter as the router. @@ -222,23 +249,25 @@ func NewGossipSubWithRouter(ctx context.Context, h host.Host, rt PubSubRouter, o func DefaultGossipSubRouter(h host.Host) *GossipSubRouter { params := DefaultGossipSubParams() return &GossipSubRouter{ - peers: make(map[peer.ID]protocol.ID), - mesh: make(map[string]map[peer.ID]struct{}), - fanout: make(map[string]map[peer.ID]struct{}), - lastpub: make(map[string]int64), - gossip: make(map[peer.ID][]*pb.ControlIHave), - control: make(map[peer.ID]*pb.ControlMessage), - backoff: make(map[string]map[peer.ID]time.Time), - peerhave: make(map[peer.ID]int), - iasked: make(map[peer.ID]int), - outbound: make(map[peer.ID]bool), - connect: make(chan connectInfo, params.MaxPendingConnections), - cab: pstoremem.NewAddrBook(), - mcache: NewMessageCache(params.HistoryGossip, params.HistoryLength), - protos: GossipSubDefaultProtocols, - feature: GossipSubDefaultFeatures, - tagTracer: newTagTracer(h.ConnManager()), - params: params, + peers: make(map[peer.ID]protocol.ID), + mesh: make(map[string]map[peer.ID]struct{}), + fanout: make(map[string]map[peer.ID]struct{}), + lastpub: make(map[string]int64), + gossip: make(map[peer.ID][]*pb.ControlIHave), + control: make(map[peer.ID]*pb.ControlMessage), + backoff: make(map[string]map[peer.ID]time.Time), + peerhave: make(map[peer.ID]int), + peerdontwant: make(map[peer.ID]int), + unwanted: make(map[peer.ID]map[checksum]int), + iasked: make(map[peer.ID]int), + outbound: make(map[peer.ID]bool), + connect: make(chan connectInfo, params.MaxPendingConnections), + cab: pstoremem.NewAddrBook(), + mcache: NewMessageCache(params.HistoryGossip, params.HistoryLength), + protos: GossipSubDefaultProtocols, + feature: GossipSubDefaultFeatures, + tagTracer: newTagTracer(h.ConnManager()), + params: params, } } @@ -272,7 +301,10 @@ func DefaultGossipSubParams() GossipSubParams { GraftFloodThreshold: GossipSubGraftFloodThreshold, MaxIHaveLength: GossipSubMaxIHaveLength, MaxIHaveMessages: GossipSubMaxIHaveMessages, + MaxIDontWantMessages: GossipSubMaxIDontWantMessages, IWantFollowupTime: GossipSubIWantFollowupTime, + IDontWantMessageThreshold: GossipSubIDontWantMessageThreshold, + IDontWantMessageTTL: GossipSubIDontWantMessageTTL, SlowHeartbeatWarning: 0.1, } } @@ -421,20 +453,22 @@ func WithGossipSubParams(cfg GossipSubParams) Option { // is the fanout map. Fanout peer lists are expired if we don't publish any // messages to their topic for GossipSubFanoutTTL. 
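// Editor's note: the following is an illustrative sketch, not vendored code. It
// shows how an application could tune the IDONTWANT parameters introduced in this
// patch via WithGossipSubParams; the topic name and the values used are arbitrary
// examples. Per the field documentation above, the threshold should stay above
// D_high multiplied by the expected message-ID size so that IDONTWANT replies
// cannot be larger than the messages that trigger them.

package main

import (
	"context"
	"log"

	"github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func main() {
	ctx := context.Background()

	h, err := libp2p.New()
	if err != nil {
		log.Fatal(err)
	}

	// Start from the library defaults and raise the IDONTWANT threshold: only
	// messages larger than 2 KiB are announced with IDONTWANT, and announcements
	// are remembered for three heartbeats.
	params := pubsub.DefaultGossipSubParams()
	params.IDontWantMessageThreshold = 2048
	params.IDontWantMessageTTL = 3

	ps, err := pubsub.NewGossipSub(ctx, h, pubsub.WithGossipSubParams(params))
	if err != nil {
		log.Fatal(err)
	}

	topic, err := ps.Join("example-topic")
	if err != nil {
		log.Fatal(err)
	}

	// A payload above the threshold; mesh peers that speak gossipsub v1.2 will
	// announce IDONTWANT for its message ID before validating it.
	if err := topic.Publish(ctx, make([]byte, 4096)); err != nil {
		log.Fatal(err)
	}
}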
type GossipSubRouter struct { - p *PubSub - peers map[peer.ID]protocol.ID // peer protocols - direct map[peer.ID]struct{} // direct peers - mesh map[string]map[peer.ID]struct{} // topic meshes - fanout map[string]map[peer.ID]struct{} // topic fanout - lastpub map[string]int64 // last publish time for fanout topics - gossip map[peer.ID][]*pb.ControlIHave // pending gossip - control map[peer.ID]*pb.ControlMessage // pending control messages - peerhave map[peer.ID]int // number of IHAVEs received from peer in the last heartbeat - iasked map[peer.ID]int // number of messages we have asked from peer in the last heartbeat - outbound map[peer.ID]bool // connection direction cache, marks peers with outbound connections - backoff map[string]map[peer.ID]time.Time // prune backoff - connect chan connectInfo // px connection requests - cab peerstore.AddrBook + p *PubSub + peers map[peer.ID]protocol.ID // peer protocols + direct map[peer.ID]struct{} // direct peers + mesh map[string]map[peer.ID]struct{} // topic meshes + fanout map[string]map[peer.ID]struct{} // topic fanout + lastpub map[string]int64 // last publish time for fanout topics + gossip map[peer.ID][]*pb.ControlIHave // pending gossip + control map[peer.ID]*pb.ControlMessage // pending control messages + peerhave map[peer.ID]int // number of IHAVEs received from peer in the last heartbeat + peerdontwant map[peer.ID]int // number of IDONTWANTs received from peer in the last heartbeat + unwanted map[peer.ID]map[checksum]int // TTL of the message ids peers don't want + iasked map[peer.ID]int // number of messages we have asked from peer in the last heartbeat + outbound map[peer.ID]bool // connection direction cache, marks peers with outbound connections + backoff map[string]map[peer.ID]time.Time // prune backoff + connect chan connectInfo // px connection requests + cab peerstore.AddrBook protos []protocol.ID feature GossipSubFeatureTest @@ -543,6 +577,13 @@ func (gs *GossipSubRouter) manageAddrBook() { for { select { case <-gs.p.ctx.Done(): + cabCloser, ok := gs.cab.(io.Closer) + if ok { + errClose := cabCloser.Close() + if errClose != nil { + log.Warnf("failed to close addr book: %v", errClose) + } + } return case ev := <-sub.Out(): switch ev := ev.(type) { @@ -655,6 +696,36 @@ func (gs *GossipSubRouter) AcceptFrom(p peer.ID) AcceptStatus { return gs.gate.AcceptFrom(p) } +// PreValidation sends the IDONTWANT control messages to all the mesh +// peers. They need to be sent right before the validation because they +// should be seen by the peers as soon as possible. 
+func (gs *GossipSubRouter) PreValidation(msgs []*Message) { + tmids := make(map[string][]string) + for _, msg := range msgs { + if len(msg.GetData()) < gs.params.IDontWantMessageThreshold { + continue + } + topic := msg.GetTopic() + tmids[topic] = append(tmids[topic], gs.p.idGen.ID(msg)) + } + for topic, mids := range tmids { + if len(mids) == 0 { + continue + } + // shuffle the messages got from the RPC envelope + shuffleStrings(mids) + // send IDONTWANT to all the mesh peers + for p := range gs.mesh[topic] { + // send to only peers that support IDONTWANT + if gs.feature(GossipSubFeatureIdontwant, gs.peers[p]) { + idontwant := []*pb.ControlIDontWant{{MessageIDs: mids}} + out := rpcWithControl(nil, nil, nil, nil, nil, idontwant) + gs.sendRPC(p, out, true) + } + } + } +} + func (gs *GossipSubRouter) HandleRPC(rpc *RPC) { ctl := rpc.GetControl() if ctl == nil { @@ -665,13 +736,14 @@ func (gs *GossipSubRouter) HandleRPC(rpc *RPC) { ihave := gs.handleIWant(rpc.from, ctl) prune := gs.handleGraft(rpc.from, ctl) gs.handlePrune(rpc.from, ctl) + gs.handleIDontWant(rpc.from, ctl) if len(iwant) == 0 && len(ihave) == 0 && len(prune) == 0 { return } - out := rpcWithControl(ihave, nil, iwant, nil, prune) - gs.sendRPC(rpc.from, out) + out := rpcWithControl(ihave, nil, iwant, nil, prune, nil) + gs.sendRPC(rpc.from, out, false) } func (gs *GossipSubRouter) handleIHave(p peer.ID, ctl *pb.ControlMessage) []*pb.ControlIWant { @@ -923,6 +995,26 @@ func (gs *GossipSubRouter) handlePrune(p peer.ID, ctl *pb.ControlMessage) { } } +func (gs *GossipSubRouter) handleIDontWant(p peer.ID, ctl *pb.ControlMessage) { + if gs.unwanted[p] == nil { + gs.unwanted[p] = make(map[checksum]int) + } + + // IDONTWANT flood protection + if gs.peerdontwant[p] >= gs.params.MaxIDontWantMessages { + log.Debugf("IDONWANT: peer %s has advertised too many times (%d) within this heartbeat interval; ignoring", p, gs.peerdontwant[p]) + return + } + gs.peerdontwant[p]++ + + // Remember all the unwanted message ids + for _, idontwant := range ctl.GetIdontwant() { + for _, mid := range idontwant.GetMessageIDs() { + gs.unwanted[p][computeChecksum(mid)] = gs.params.IDontWantMessageTTL + } + } +} + func (gs *GossipSubRouter) addBackoff(p peer.ID, topic string, isUnsubscribe bool) { backoff := gs.params.PruneBackoff if isUnsubscribe { @@ -1083,6 +1175,12 @@ func (gs *GossipSubRouter) Publish(msg *Message) { } for p := range gmap { + mid := gs.p.idGen.ID(msg) + // Check if it has already received an IDONTWANT for the message. 
+ // If so, don't send it to the peer + if _, ok := gs.unwanted[p][computeChecksum(mid)]; ok { + continue + } tosend[p] = struct{}{} } } @@ -1093,7 +1191,7 @@ func (gs *GossipSubRouter) Publish(msg *Message) { continue } - gs.sendRPC(pid, out) + gs.sendRPC(pid, out, false) } } @@ -1178,17 +1276,17 @@ func (gs *GossipSubRouter) Leave(topic string) { func (gs *GossipSubRouter) sendGraft(p peer.ID, topic string) { graft := []*pb.ControlGraft{{TopicID: &topic}} - out := rpcWithControl(nil, nil, nil, graft, nil) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, graft, nil, nil) + gs.sendRPC(p, out, false) } func (gs *GossipSubRouter) sendPrune(p peer.ID, topic string, isUnsubscribe bool) { prune := []*pb.ControlPrune{gs.makePrune(p, topic, gs.doPX, isUnsubscribe)} - out := rpcWithControl(nil, nil, nil, nil, prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, nil, prune, nil) + gs.sendRPC(p, out, false) } -func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) { +func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC, urgent bool) { // do we own the RPC? own := false @@ -1212,14 +1310,14 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) { delete(gs.gossip, p) } - mch, ok := gs.p.peers[p] + q, ok := gs.p.peers[p] if !ok { return } // If we're below the max message size, go ahead and send if out.Size() < gs.p.maxMessageSize { - gs.doSendRPC(out, p, mch) + gs.doSendRPC(out, p, q, urgent) return } @@ -1231,7 +1329,7 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) { gs.doDropRPC(out, p, fmt.Sprintf("Dropping oversized RPC. Size: %d, limit: %d. (Over by %d bytes)", rpc.Size(), gs.p.maxMessageSize, rpc.Size()-gs.p.maxMessageSize)) continue } - gs.doSendRPC(rpc, p, mch) + gs.doSendRPC(rpc, p, q, urgent) } } @@ -1245,13 +1343,18 @@ func (gs *GossipSubRouter) doDropRPC(rpc *RPC, p peer.ID, reason string) { } } -func (gs *GossipSubRouter) doSendRPC(rpc *RPC, p peer.ID, mch chan *RPC) { - select { - case mch <- rpc: - gs.tracer.SendRPC(rpc, p) - default: +func (gs *GossipSubRouter) doSendRPC(rpc *RPC, p peer.ID, q *rpcQueue, urgent bool) { + var err error + if urgent { + err = q.UrgentPush(rpc, false) + } else { + err = q.Push(rpc, false) + } + if err != nil { gs.doDropRPC(rpc, p, "queue full") + return } + gs.tracer.SendRPC(rpc, p) } // appendOrMergeRPC appends the given RPCs to the slice, merging them if possible. 
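Editor's note: doSendRPC above now distinguishes urgent pushes (used by PreValidation for IDONTWANT) from ordinary ones, so that control traffic can overtake RPCs that are already queued toward a peer. The sketch below is a deliberately simplified stand-in for that idea, assuming nothing about the vendored rpc_queue.go beyond the Push/UrgentPush/Pop calls visible in this diff; it is not the actual implementation.

package sketch

import (
	"errors"
	"sync"
)

var errQueueFull = errors.New("queue full")

// twoLaneQueue keeps two FIFO lanes behind one mutex: urgent entries are always
// drained first, and pushes fail once the combined queue is full, mirroring how
// doSendRPC drops and traces an RPC when the peer's queue cannot accept it.
type twoLaneQueue[T any] struct {
	mu     sync.Mutex
	urgent []T
	normal []T
	limit  int
}

func newTwoLaneQueue[T any](limit int) *twoLaneQueue[T] {
	return &twoLaneQueue[T]{limit: limit}
}

func (q *twoLaneQueue[T]) push(v T, urgent bool) error {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.urgent)+len(q.normal) >= q.limit {
		return errQueueFull // caller drops the RPC, as doSendRPC does
	}
	if urgent {
		q.urgent = append(q.urgent, v)
	} else {
		q.normal = append(q.normal, v)
	}
	return nil
}

func (q *twoLaneQueue[T]) tryPop() (T, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.urgent) > 0 {
		v := q.urgent[0]
		q.urgent = q.urgent[1:]
		return v, true
	}
	if len(q.normal) > 0 {
		v := q.normal[0]
		q.normal = q.normal[1:]
		return v, true
	}
	var zero T
	return zero, false
}

In the vendored router the pop side additionally blocks on a context, which is why handleSendingMessages in this diff loops on outgoing.Pop(ctx) instead of selecting on a channel.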
@@ -1433,6 +1536,9 @@ func (gs *GossipSubRouter) heartbeat() { // clean up iasked counters gs.clearIHaveCounters() + // clean up IDONTWANT counters + gs.clearIDontWantCounters() + // apply IWANT request penalties gs.applyIwantPenalties() @@ -1685,6 +1791,23 @@ func (gs *GossipSubRouter) clearIHaveCounters() { } } +func (gs *GossipSubRouter) clearIDontWantCounters() { + if len(gs.peerdontwant) > 0 { + // throw away the old map and make a new one + gs.peerdontwant = make(map[peer.ID]int) + } + + // decrement TTLs of all the IDONTWANTs and delete it from the cache when it reaches zero + for _, mids := range gs.unwanted { + for mid := range mids { + mids[mid]-- + if mids[mid] == 0 { + delete(mids, mid) + } + } + } +} + func (gs *GossipSubRouter) applyIwantPenalties() { for p, count := range gs.gossipTracer.GetBrokenPromises() { log.Infof("peer %s didn't follow up in %d IWANT requests; adding penalty", p, count) @@ -1759,8 +1882,8 @@ func (gs *GossipSubRouter) sendGraftPrune(tograft, toprune map[peer.ID][]string, } } - out := rpcWithControl(nil, nil, nil, graft, prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, graft, prune, nil) + gs.sendRPC(p, out, false) } for p, topics := range toprune { @@ -1769,8 +1892,8 @@ func (gs *GossipSubRouter) sendGraftPrune(tograft, toprune map[peer.ID][]string, prune = append(prune, gs.makePrune(p, topic, gs.doPX && !noPX[p], false)) } - out := rpcWithControl(nil, nil, nil, nil, prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, nil, prune, nil) + gs.sendRPC(p, out, false) } } @@ -1836,15 +1959,15 @@ func (gs *GossipSubRouter) flush() { // send gossip first, which will also piggyback pending control for p, ihave := range gs.gossip { delete(gs.gossip, p) - out := rpcWithControl(nil, ihave, nil, nil, nil) - gs.sendRPC(p, out) + out := rpcWithControl(nil, ihave, nil, nil, nil, nil) + gs.sendRPC(p, out, false) } // send the remaining control messages that wasn't merged with gossip for p, ctl := range gs.control { delete(gs.control, p) - out := rpcWithControl(nil, nil, nil, ctl.Graft, ctl.Prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, ctl.Graft, ctl.Prune, nil) + gs.sendRPC(p, out, false) } } @@ -1865,9 +1988,10 @@ func (gs *GossipSubRouter) piggybackGossip(p peer.ID, out *RPC, ihave []*pb.Cont } func (gs *GossipSubRouter) pushControl(p peer.ID, ctl *pb.ControlMessage) { - // remove IHAVE/IWANT from control message, gossip is not retried + // remove IHAVE/IWANT/IDONTWANT from control message, gossip is not retried ctl.Ihave = nil ctl.Iwant = nil + ctl.Idontwant = nil if ctl.Graft != nil || ctl.Prune != nil { gs.control[p] = ctl } @@ -2045,3 +2169,13 @@ func shuffleStrings(lst []string) { lst[i], lst[j] = lst[j], lst[i] } } + +func computeChecksum(mid string) checksum { + var cs checksum + if len(mid) > 32 || len(mid) == 0 { + cs.payload = sha256.Sum256([]byte(mid)) + } else { + cs.length = uint8(copy(cs.payload[:], mid)) + } + return cs +} diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub_feat.go b/vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub_feat.go index d5750af3ba6..49c7423c2d3 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub_feat.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/gossipsub_feat.go @@ -18,18 +18,22 @@ const ( GossipSubFeatureMesh = iota // Protocol supports Peer eXchange on prune -- gossipsub-v1.1 compatible GossipSubFeaturePX + // Protocol supports IDONTWANT -- gossipsub-v1.2 compatible + GossipSubFeatureIdontwant ) // GossipSubDefaultProtocols is the 
default gossipsub router protocol list -var GossipSubDefaultProtocols = []protocol.ID{GossipSubID_v11, GossipSubID_v10, FloodSubID} +var GossipSubDefaultProtocols = []protocol.ID{GossipSubID_v12, GossipSubID_v11, GossipSubID_v10, FloodSubID} // GossipSubDefaultFeatures is the feature test function for the default gossipsub protocols func GossipSubDefaultFeatures(feat GossipSubFeature, proto protocol.ID) bool { switch feat { case GossipSubFeatureMesh: - return proto == GossipSubID_v11 || proto == GossipSubID_v10 + return proto == GossipSubID_v12 || proto == GossipSubID_v11 || proto == GossipSubID_v10 case GossipSubFeaturePX: - return proto == GossipSubID_v11 + return proto == GossipSubID_v12 || proto == GossipSubID_v11 + case GossipSubFeatureIdontwant: + return proto == GossipSubID_v12 default: return false } diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/notify.go b/vendor/github.com/libp2p/go-libp2p-pubsub/notify.go deleted file mode 100644 index f560d398448..00000000000 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/notify.go +++ /dev/null @@ -1,75 +0,0 @@ -package pubsub - -import ( - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -var _ network.Notifiee = (*PubSubNotif)(nil) - -type PubSubNotif PubSub - -func (p *PubSubNotif) OpenedStream(n network.Network, s network.Stream) { -} - -func (p *PubSubNotif) ClosedStream(n network.Network, s network.Stream) { -} - -func (p *PubSubNotif) Connected(n network.Network, c network.Conn) { - // ignore transient connections - if c.Stat().Limited { - return - } - - go func() { - p.newPeersPrioLk.RLock() - p.newPeersMx.Lock() - p.newPeersPend[c.RemotePeer()] = struct{}{} - p.newPeersMx.Unlock() - p.newPeersPrioLk.RUnlock() - - select { - case p.newPeers <- struct{}{}: - default: - } - }() -} - -func (p *PubSubNotif) Disconnected(n network.Network, c network.Conn) { -} - -func (p *PubSubNotif) Listen(n network.Network, _ ma.Multiaddr) { -} - -func (p *PubSubNotif) ListenClose(n network.Network, _ ma.Multiaddr) { -} - -func (p *PubSubNotif) Initialize() { - isTransient := func(pid peer.ID) bool { - for _, c := range p.host.Network().ConnsToPeer(pid) { - if !c.Stat().Limited { - return false - } - } - - return true - } - - p.newPeersPrioLk.RLock() - p.newPeersMx.Lock() - for _, pid := range p.host.Network().Peers() { - if isTransient(pid) { - continue - } - - p.newPeersPend[pid] = struct{}{} - } - p.newPeersMx.Unlock() - p.newPeersPrioLk.RUnlock() - - select { - case p.newPeers <- struct{}{}: - default: - } -} diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.pb.go b/vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.pb.go index c6a2475f678..151cb44dcda 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.pb.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.pb.go @@ -228,13 +228,14 @@ func (m *Message) GetKey() []byte { } type ControlMessage struct { - Ihave []*ControlIHave `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` - Iwant []*ControlIWant `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"` - Graft []*ControlGraft `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` - Prune []*ControlPrune `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Ihave []*ControlIHave `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` + Iwant []*ControlIWant `protobuf:"bytes,2,rep,name=iwant" 
json:"iwant,omitempty"` + Graft []*ControlGraft `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` + Prune []*ControlPrune `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` + Idontwant []*ControlIDontWant `protobuf:"bytes,5,rep,name=idontwant" json:"idontwant,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ControlMessage) Reset() { *m = ControlMessage{} } @@ -298,6 +299,13 @@ func (m *ControlMessage) GetPrune() []*ControlPrune { return nil } +func (m *ControlMessage) GetIdontwant() []*ControlIDontWant { + if m != nil { + return m.Idontwant + } + return nil +} + type ControlIHave struct { TopicID *string `protobuf:"bytes,1,opt,name=topicID" json:"topicID,omitempty"` // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings @@ -512,6 +520,54 @@ func (m *ControlPrune) GetBackoff() uint64 { return 0 } +type ControlIDontWant struct { + // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + MessageIDs []string `protobuf:"bytes,1,rep,name=messageIDs" json:"messageIDs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControlIDontWant) Reset() { *m = ControlIDontWant{} } +func (m *ControlIDontWant) String() string { return proto.CompactTextString(m) } +func (*ControlIDontWant) ProtoMessage() {} +func (*ControlIDontWant) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{7} +} +func (m *ControlIDontWant) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControlIDontWant) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ControlIDontWant.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ControlIDontWant) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControlIDontWant.Merge(m, src) +} +func (m *ControlIDontWant) XXX_Size() int { + return m.Size() +} +func (m *ControlIDontWant) XXX_DiscardUnknown() { + xxx_messageInfo_ControlIDontWant.DiscardUnknown(m) +} + +var xxx_messageInfo_ControlIDontWant proto.InternalMessageInfo + +func (m *ControlIDontWant) GetMessageIDs() []string { + if m != nil { + return m.MessageIDs + } + return nil +} + type PeerInfo struct { PeerID []byte `protobuf:"bytes,1,opt,name=peerID" json:"peerID,omitempty"` SignedPeerRecord []byte `protobuf:"bytes,2,opt,name=signedPeerRecord" json:"signedPeerRecord,omitempty"` @@ -524,7 +580,7 @@ func (m *PeerInfo) Reset() { *m = PeerInfo{} } func (m *PeerInfo) String() string { return proto.CompactTextString(m) } func (*PeerInfo) ProtoMessage() {} func (*PeerInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{7} + return fileDescriptor_77a6da22d6a3feb1, []int{8} } func (m *PeerInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -576,43 +632,46 @@ func init() { proto.RegisterType((*ControlIWant)(nil), "pubsub.pb.ControlIWant") proto.RegisterType((*ControlGraft)(nil), "pubsub.pb.ControlGraft") proto.RegisterType((*ControlPrune)(nil), "pubsub.pb.ControlPrune") + proto.RegisterType((*ControlIDontWant)(nil), "pubsub.pb.ControlIDontWant") proto.RegisterType((*PeerInfo)(nil), "pubsub.pb.PeerInfo") } func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var 
fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 480 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x3c, - 0x10, 0xc7, 0xe5, 0x6d, 0xbb, 0xd9, 0xcc, 0xe6, 0xfb, 0xb4, 0x32, 0x68, 0x31, 0x08, 0x55, 0x55, - 0x4e, 0x01, 0x41, 0x0e, 0xcb, 0x95, 0x0b, 0xb4, 0x12, 0x9b, 0x03, 0x50, 0x99, 0x03, 0x67, 0x27, - 0x75, 0xba, 0xd1, 0x6e, 0x63, 0x63, 0x3b, 0x8b, 0x78, 0x08, 0xde, 0x8b, 0x03, 0x07, 0x1e, 0x01, - 0xf5, 0xc6, 0x5b, 0x20, 0x3b, 0x4e, 0x9a, 0xa5, 0x94, 0x9b, 0xe7, 0xef, 0xdf, 0xcc, 0xfc, 0x3d, - 0x1e, 0x08, 0x95, 0x2c, 0x52, 0xa9, 0x84, 0x11, 0x38, 0x94, 0x4d, 0xae, 0x9b, 0x3c, 0x95, 0x79, - 0xfc, 0x0b, 0xc1, 0x88, 0x2e, 0xe7, 0xf8, 0x25, 0xfc, 0xa7, 0x9b, 0x5c, 0x17, 0xaa, 0x92, 0xa6, - 0x12, 0xb5, 0x26, 0x68, 0x36, 0x4a, 0x4e, 0x2f, 0xce, 0xd3, 0x1e, 0x4d, 0xe9, 0x72, 0x9e, 0x7e, - 0x68, 0xf2, 0xf7, 0xd2, 0x68, 0x7a, 0x17, 0xc6, 0xcf, 0x20, 0x90, 0x4d, 0x7e, 0x53, 0xe9, 0x2b, - 0x72, 0xe4, 0xf2, 0xf0, 0x20, 0xef, 0x2d, 0xd7, 0x9a, 0xad, 0x39, 0xed, 0x10, 0xfc, 0x02, 0x82, - 0x42, 0xd4, 0x46, 0x89, 0x1b, 0x32, 0x9a, 0xa1, 0xe4, 0xf4, 0xe2, 0xe1, 0x80, 0x9e, 0xb7, 0x37, - 0x7d, 0x92, 0x27, 0x1f, 0xbd, 0x82, 0xc0, 0x37, 0xc7, 0x8f, 0x21, 0xf4, 0xed, 0x73, 0x4e, 0xd0, - 0x0c, 0x25, 0x27, 0x74, 0x27, 0x60, 0x02, 0x81, 0x11, 0xb2, 0x2a, 0xaa, 0x15, 0x39, 0x9a, 0xa1, - 0x24, 0xa4, 0x5d, 0x18, 0x7f, 0x45, 0x10, 0xf8, 0xba, 0x18, 0xc3, 0xb8, 0x54, 0x62, 0xe3, 0xd2, - 0x23, 0xea, 0xce, 0x56, 0x5b, 0x31, 0xc3, 0x5c, 0x5a, 0x44, 0xdd, 0x19, 0xdf, 0x87, 0x89, 0xe6, - 0x9f, 0x6a, 0xe1, 0x9c, 0x46, 0xb4, 0x0d, 0xac, 0xea, 0x8a, 0x92, 0xb1, 0xeb, 0xd0, 0x06, 0xce, - 0x57, 0xb5, 0xae, 0x99, 0x69, 0x14, 0x27, 0x13, 0xc7, 0xef, 0x04, 0x7c, 0x06, 0xa3, 0x6b, 0xfe, - 0x85, 0x1c, 0x3b, 0xdd, 0x1e, 0xe3, 0xef, 0x08, 0xfe, 0xbf, 0xfb, 0x5c, 0xfc, 0x1c, 0x26, 0xd5, - 0x15, 0xbb, 0xe5, 0x7e, 0xfc, 0x0f, 0xf6, 0x07, 0x93, 0x5d, 0xb2, 0x5b, 0x4e, 0x5b, 0xca, 0xe1, - 0x9f, 0x59, 0x6d, 0xfc, 0xd4, 0xff, 0x86, 0x7f, 0x64, 0xb5, 0xa1, 0x2d, 0x65, 0xf1, 0xb5, 0x62, - 0xa5, 0x21, 0xa3, 0x43, 0xf8, 0x1b, 0x7b, 0x4d, 0x5b, 0xca, 0xe2, 0x52, 0x35, 0x35, 0x27, 0xe3, - 0x43, 0xf8, 0xd2, 0x5e, 0xd3, 0x96, 0x8a, 0x2f, 0x21, 0x1a, 0x7a, 0xec, 0x3f, 0x22, 0x5b, 0xb8, - 0x29, 0x77, 0x1f, 0x91, 0x2d, 0xf0, 0x14, 0x60, 0xd3, 0x3e, 0x38, 0x5b, 0x68, 0xe7, 0x3d, 0xa4, - 0x03, 0x25, 0x4e, 0x77, 0x95, 0xac, 0xfd, 0x3f, 0x78, 0xb4, 0xc7, 0x27, 0x3d, 0xef, 0xfc, 0x1f, - 0xee, 0x1c, 0x6f, 0x7a, 0xd2, 0x59, 0xff, 0x87, 0xc7, 0x27, 0x30, 0x91, 0x9c, 0x2b, 0xed, 0x47, - 0x7b, 0x6f, 0xf0, 0xf8, 0x25, 0xe7, 0x2a, 0xab, 0x4b, 0x41, 0x5b, 0xc2, 0x16, 0xc9, 0x59, 0x71, - 0x2d, 0xca, 0xd2, 0x6d, 0xc9, 0x98, 0x76, 0x61, 0xfc, 0x0e, 0x4e, 0x3a, 0x18, 0x9f, 0xc3, 0xb1, - 0xc5, 0x7d, 0xa7, 0x88, 0xfa, 0x08, 0x3f, 0x85, 0x33, 0xbb, 0x24, 0x7c, 0x65, 0x49, 0xca, 0x0b, - 0xa1, 0x56, 0x7e, 0x03, 0xf7, 0xf4, 0xd7, 0xd1, 0xb7, 0xed, 0x14, 0xfd, 0xd8, 0x4e, 0xd1, 0xcf, - 0xed, 0x14, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xb2, 0xf8, 0xc4, 0x6e, 0xd2, 0x03, 0x00, 0x00, + // 511 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcd, 0x6e, 0x13, 0x31, + 0x10, 0xc7, 0xe5, 0x7c, 0x34, 0xdd, 0xe9, 0x82, 0x22, 0x83, 0x8a, 0xf9, 0x50, 0x14, 0xed, 0x29, + 0x20, 0xd8, 0x43, 0x38, 0x21, 0x71, 0x81, 0x44, 0xa2, 0x39, 0x00, 0x91, 0x39, 0x70, 0xde, 0xdd, + 0x38, 0xe9, 0xaa, 0x8d, 0x6d, 0x6c, 0x6f, 0x11, 0x4f, 0xc0, 0x89, 0xf7, 0xe2, 0xc8, 0x23, 0xa0, + 0xdc, 0x78, 0x0b, 0xe4, 0x59, 0xe7, 0xa3, 0x4d, 0x03, 0x37, 0xcf, 0xf8, 0x37, 
0xfe, 0xff, 0x67, + 0xc6, 0x10, 0x19, 0x5d, 0xa4, 0xda, 0x28, 0xa7, 0x68, 0xa4, 0xab, 0xdc, 0x56, 0x79, 0xaa, 0xf3, + 0xe4, 0x0f, 0x81, 0x26, 0x9f, 0x8e, 0xe8, 0x6b, 0xb8, 0x63, 0xab, 0xdc, 0x16, 0xa6, 0xd4, 0xae, + 0x54, 0xd2, 0x32, 0xd2, 0x6f, 0x0e, 0x4e, 0x86, 0xa7, 0xe9, 0x06, 0x4d, 0xf9, 0x74, 0x94, 0x7e, + 0xaa, 0xf2, 0x8f, 0xda, 0x59, 0x7e, 0x1d, 0xa6, 0xcf, 0xa1, 0xa3, 0xab, 0xfc, 0xb2, 0xb4, 0xe7, + 0xac, 0x81, 0x75, 0x74, 0xa7, 0xee, 0xbd, 0xb0, 0x36, 0x5b, 0x08, 0xbe, 0x46, 0xe8, 0x4b, 0xe8, + 0x14, 0x4a, 0x3a, 0xa3, 0x2e, 0x59, 0xb3, 0x4f, 0x06, 0x27, 0xc3, 0x87, 0x3b, 0xf4, 0xa8, 0xbe, + 0xd9, 0x14, 0x05, 0xf2, 0xd1, 0x1b, 0xe8, 0x04, 0x71, 0xfa, 0x04, 0xa2, 0x20, 0x9f, 0x0b, 0x46, + 0xfa, 0x64, 0x70, 0xcc, 0xb7, 0x09, 0xca, 0xa0, 0xe3, 0x94, 0x2e, 0x8b, 0x72, 0xc6, 0x1a, 0x7d, + 0x32, 0x88, 0xf8, 0x3a, 0x4c, 0x7e, 0x10, 0xe8, 0x84, 0x77, 0x29, 0x85, 0xd6, 0xdc, 0xa8, 0x25, + 0x96, 0xc7, 0x1c, 0xcf, 0x3e, 0x37, 0xcb, 0x5c, 0x86, 0x65, 0x31, 0xc7, 0x33, 0xbd, 0x0f, 0x6d, + 0x2b, 0xbe, 0x48, 0x85, 0x4e, 0x63, 0x5e, 0x07, 0x3e, 0x8b, 0x8f, 0xb2, 0x16, 0x2a, 0xd4, 0x01, + 0xfa, 0x2a, 0x17, 0x32, 0x73, 0x95, 0x11, 0xac, 0x8d, 0xfc, 0x36, 0x41, 0xbb, 0xd0, 0xbc, 0x10, + 0xdf, 0xd8, 0x11, 0xe6, 0xfd, 0x31, 0xf9, 0xde, 0x80, 0xbb, 0xd7, 0xdb, 0xa5, 0x2f, 0xa0, 0x5d, + 0x9e, 0x67, 0x57, 0x22, 0x8c, 0xff, 0xc1, 0xfe, 0x60, 0x26, 0x67, 0xd9, 0x95, 0xe0, 0x35, 0x85, + 0xf8, 0xd7, 0x4c, 0xba, 0x30, 0xf5, 0xdb, 0xf0, 0xcf, 0x99, 0x74, 0xbc, 0xa6, 0x3c, 0xbe, 0x30, + 0xd9, 0xdc, 0xb1, 0xe6, 0x21, 0xfc, 0x9d, 0xbf, 0xe6, 0x35, 0xe5, 0x71, 0x6d, 0x2a, 0x29, 0x58, + 0xeb, 0x10, 0x3e, 0xf5, 0xd7, 0xbc, 0xa6, 0xe8, 0x2b, 0x88, 0xca, 0x99, 0x92, 0x0e, 0x0d, 0xb5, + 0xb1, 0xe4, 0xf1, 0x2d, 0x86, 0xc6, 0x4a, 0x3a, 0x34, 0xb5, 0xa5, 0x93, 0x33, 0x88, 0x77, 0xdb, + 0xdb, 0xec, 0x70, 0x32, 0xc6, 0x05, 0xad, 0x77, 0x38, 0x19, 0xd3, 0x1e, 0xc0, 0xb2, 0x9e, 0xd5, + 0x64, 0x6c, 0xb1, 0xed, 0x88, 0xef, 0x64, 0x92, 0x74, 0xfb, 0x92, 0x17, 0xb9, 0xc1, 0x93, 0x3d, + 0x7e, 0xb0, 0xe1, 0xb1, 0xf5, 0xc3, 0xca, 0xc9, 0x72, 0x43, 0x62, 0xd7, 0xff, 0xf0, 0xf8, 0x14, + 0xda, 0x5a, 0x08, 0x63, 0xc3, 0x56, 0xee, 0xed, 0x0c, 0x61, 0x2a, 0x84, 0x99, 0xc8, 0xb9, 0xe2, + 0x35, 0xe1, 0x1f, 0xc9, 0xb3, 0xe2, 0x42, 0xcd, 0xe7, 0xf8, 0xc1, 0x5a, 0x7c, 0x1d, 0x26, 0x43, + 0xe8, 0xde, 0x9c, 0xd8, 0x7f, 0x9b, 0xf9, 0x00, 0xc7, 0x6b, 0x01, 0x7a, 0x0a, 0x47, 0x5e, 0x22, + 0xb8, 0x8b, 0x79, 0x88, 0xe8, 0x33, 0xe8, 0xfa, 0x3f, 0x29, 0x66, 0x9e, 0xe4, 0xa2, 0x50, 0x66, + 0x16, 0x3e, 0xfc, 0x5e, 0xfe, 0x6d, 0xfc, 0x73, 0xd5, 0x23, 0xbf, 0x56, 0x3d, 0xf2, 0x7b, 0xd5, + 0x23, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xba, 0x73, 0x8e, 0xbf, 0x41, 0x04, 0x00, 0x00, } func (m *RPC) Marshal() (dAtA []byte, err error) { @@ -819,6 +878,20 @@ func (m *ControlMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Idontwant) > 0 { + for iNdEx := len(m.Idontwant) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Idontwant[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if len(m.Prune) > 0 { for iNdEx := len(m.Prune) - 1; iNdEx >= 0; iNdEx-- { { @@ -1044,6 +1117,42 @@ func (m *ControlPrune) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ControlIDontWant) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err 
+ } + return dAtA[:n], nil +} + +func (m *ControlIDontWant) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControlIDontWant) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.MessageIDs) > 0 { + for iNdEx := len(m.MessageIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MessageIDs[iNdEx]) + copy(dAtA[i:], m.MessageIDs[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.MessageIDs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *PeerInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1209,6 +1318,12 @@ func (m *ControlMessage) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if len(m.Idontwant) > 0 { + for _, e := range m.Idontwant { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1296,6 +1411,24 @@ func (m *ControlPrune) Size() (n int) { return n } +func (m *ControlIDontWant) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MessageIDs) > 0 { + for _, s := range m.MessageIDs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *PeerInfo) Size() (n int) { if m == nil { return 0 @@ -2001,6 +2134,40 @@ func (m *ControlMessage) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Idontwant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Idontwant = append(m.Idontwant, &ControlIDontWant{}) + if err := m.Idontwant[len(m.Idontwant)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -2444,6 +2611,89 @@ func (m *ControlPrune) Unmarshal(dAtA []byte) error { } return nil } +func (m *ControlIDontWant) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControlIDontWant: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControlIDontWant: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageIDs = append(m.MessageIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PeerInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.proto b/vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.proto index e5df8401956..bd0234c3c92 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.proto +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/pb/rpc.proto @@ -28,6 +28,7 @@ message ControlMessage { repeated ControlIWant iwant = 2; repeated ControlGraft graft = 3; repeated ControlPrune prune = 4; + repeated ControlIDontWant idontwant = 5; } message ControlIHave { @@ -51,7 +52,12 @@ message ControlPrune { optional uint64 backoff = 3; } +message ControlIDontWant { + // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + repeated string messageIDs = 1; +} + message PeerInfo { optional bytes peerID = 1; optional bytes signedPeerRecord = 2; -} \ No newline at end of file +} diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.pb.go b/vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.pb.go index dd806155253..04f1ec1b237 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.pb.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.pb.go @@ -1159,13 +1159,14 @@ func (m *TraceEvent_SubMeta) GetTopic() string { } type TraceEvent_ControlMeta struct { - Ihave []*TraceEvent_ControlIHaveMeta `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` - Iwant []*TraceEvent_ControlIWantMeta `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"` - Graft []*TraceEvent_ControlGraftMeta `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` - Prune []*TraceEvent_ControlPruneMeta `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Ihave []*TraceEvent_ControlIHaveMeta `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` + Iwant []*TraceEvent_ControlIWantMeta `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"` + Graft []*TraceEvent_ControlGraftMeta `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` + Prune []*TraceEvent_ControlPruneMeta `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` + Idontwant []*TraceEvent_ControlIDontWantMeta `protobuf:"bytes,5,rep,name=idontwant" json:"idontwant,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *TraceEvent_ControlMeta) Reset() { *m = TraceEvent_ControlMeta{} } @@ -1229,6 +1230,13 @@ func (m *TraceEvent_ControlMeta) GetPrune() []*TraceEvent_ControlPruneMeta { return nil } +func (m *TraceEvent_ControlMeta) GetIdontwant() []*TraceEvent_ControlIDontWantMeta { + if m != nil { + return m.Idontwant + } + return nil +} + type 
TraceEvent_ControlIHaveMeta struct { Topic *string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` MessageIDs [][]byte `protobuf:"bytes,2,rep,name=messageIDs" json:"messageIDs,omitempty"` @@ -1433,6 +1441,53 @@ func (m *TraceEvent_ControlPruneMeta) GetPeers() [][]byte { return nil } +type TraceEvent_ControlIDontWantMeta struct { + MessageIDs [][]byte `protobuf:"bytes,1,rep,name=messageIDs" json:"messageIDs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceEvent_ControlIDontWantMeta) Reset() { *m = TraceEvent_ControlIDontWantMeta{} } +func (m *TraceEvent_ControlIDontWantMeta) String() string { return proto.CompactTextString(m) } +func (*TraceEvent_ControlIDontWantMeta) ProtoMessage() {} +func (*TraceEvent_ControlIDontWantMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_0571941a1d628a80, []int{0, 21} +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TraceEvent_ControlIDontWantMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceEvent_ControlIDontWantMeta.Merge(m, src) +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Size() int { + return m.Size() +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TraceEvent_ControlIDontWantMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceEvent_ControlIDontWantMeta proto.InternalMessageInfo + +func (m *TraceEvent_ControlIDontWantMeta) GetMessageIDs() [][]byte { + if m != nil { + return m.MessageIDs + } + return nil +} + type TraceEventBatch struct { Batch []*TraceEvent `protobuf:"bytes,1,rep,name=batch" json:"batch,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1504,76 +1559,79 @@ func init() { proto.RegisterType((*TraceEvent_ControlIWantMeta)(nil), "pubsub.pb.TraceEvent.ControlIWantMeta") proto.RegisterType((*TraceEvent_ControlGraftMeta)(nil), "pubsub.pb.TraceEvent.ControlGraftMeta") proto.RegisterType((*TraceEvent_ControlPruneMeta)(nil), "pubsub.pb.TraceEvent.ControlPruneMeta") + proto.RegisterType((*TraceEvent_ControlIDontWantMeta)(nil), "pubsub.pb.TraceEvent.ControlIDontWantMeta") proto.RegisterType((*TraceEventBatch)(nil), "pubsub.pb.TraceEventBatch") } func init() { proto.RegisterFile("trace.proto", fileDescriptor_0571941a1d628a80) } var fileDescriptor_0571941a1d628a80 = []byte{ - // 999 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0x51, 0x6f, 0xda, 0x56, - 0x14, 0xc7, 0xe7, 0x00, 0x01, 0x0e, 0x84, 0x78, 0x77, 0x6d, 0x65, 0xb1, 0x36, 0x62, 0x59, 0x55, - 0x21, 0x4d, 0x42, 0x6a, 0xa4, 0xa9, 0x0f, 0x6b, 0xab, 0x11, 0xec, 0x26, 0x44, 0x24, 0xb1, 0x0e, - 0x24, 0x7b, 0xcc, 0x0c, 0xdc, 0x35, 0x8e, 0xc0, 0xb6, 0xec, 0x0b, 0x53, 0x9f, 0xf6, 0xb4, 0xef, - 0xd6, 0xb7, 0xed, 0x23, 0x54, 0xf9, 0x24, 0xd3, 0xbd, 0xd7, 0x36, 0x36, 0xd8, 0xb4, 0x8b, 0xfa, - 0xe6, 0x73, 0xf3, 0xff, 0x9d, 0x7b, 0xce, 0xbd, 0xe7, 0x7f, 0x03, 0xd4, 0x98, 0x6f, 0x4d, 0x68, - 0xc7, 0xf3, 0x5d, 0xe6, 0x92, 0xaa, 0xb7, 0x18, 0x07, 0x8b, 0x71, 0xc7, 0x1b, 0x1f, 0x7e, 0x7a, - 0x02, 0x30, 0xe2, 0x7f, 0x32, 0x96, 0xd4, 0x61, 0xa4, 0x03, 0x45, 0xf6, 0xc1, 
0xa3, 0x9a, 0xd2, - 0x52, 0xda, 0x8d, 0xa3, 0x66, 0x27, 0x16, 0x76, 0x56, 0xa2, 0xce, 0xe8, 0x83, 0x47, 0x51, 0xe8, - 0xc8, 0x13, 0xd8, 0xf5, 0x28, 0xf5, 0xfb, 0xba, 0xb6, 0xd3, 0x52, 0xda, 0x75, 0x0c, 0x23, 0xf2, - 0x14, 0xaa, 0xcc, 0x9e, 0xd3, 0x80, 0x59, 0x73, 0x4f, 0x2b, 0xb4, 0x94, 0x76, 0x01, 0x57, 0x0b, - 0x64, 0x00, 0x0d, 0x6f, 0x31, 0x9e, 0xd9, 0xc1, 0xed, 0x39, 0x0d, 0x02, 0xeb, 0x3d, 0xd5, 0x8a, - 0x2d, 0xa5, 0x5d, 0x3b, 0x7a, 0x9e, 0xbd, 0x9f, 0x99, 0xd2, 0xe2, 0x1a, 0x4b, 0xfa, 0xb0, 0xe7, - 0xd3, 0x3b, 0x3a, 0x61, 0x51, 0xb2, 0x92, 0x48, 0xf6, 0x63, 0x76, 0x32, 0x4c, 0x4a, 0x31, 0x4d, - 0x12, 0x04, 0x75, 0xba, 0xf0, 0x66, 0xf6, 0xc4, 0x62, 0x34, 0xca, 0xb6, 0x2b, 0xb2, 0xbd, 0xc8, - 0xce, 0xa6, 0xaf, 0xa9, 0x71, 0x83, 0xe7, 0xcd, 0x4e, 0xe9, 0xcc, 0x5e, 0x52, 0x3f, 0xca, 0x58, - 0xde, 0xd6, 0xac, 0x9e, 0xd2, 0xe2, 0x1a, 0x4b, 0x5e, 0x41, 0xd9, 0x9a, 0x4e, 0x4d, 0x4a, 0x7d, - 0xad, 0x22, 0xd2, 0x3c, 0xcb, 0x4e, 0xd3, 0x95, 0x22, 0x8c, 0xd4, 0xe4, 0x57, 0x00, 0x9f, 0xce, - 0xdd, 0x25, 0x15, 0x6c, 0x55, 0xb0, 0xad, 0xbc, 0x23, 0x8a, 0x74, 0x98, 0x60, 0xf8, 0xd6, 0x3e, - 0x9d, 0x2c, 0xd1, 0xec, 0x69, 0xb0, 0x6d, 0x6b, 0x94, 0x22, 0x8c, 0xd4, 0x1c, 0x0c, 0xa8, 0x33, - 0xe5, 0x60, 0x6d, 0x1b, 0x38, 0x94, 0x22, 0x8c, 0xd4, 0x1c, 0x9c, 0xfa, 0xae, 0xc7, 0xc1, 0xfa, - 0x36, 0x50, 0x97, 0x22, 0x8c, 0xd4, 0x7c, 0x8c, 0xef, 0x5c, 0xdb, 0xd1, 0xf6, 0x04, 0x95, 0x33, - 0xc6, 0x67, 0xae, 0xed, 0xa0, 0xd0, 0x91, 0x97, 0x50, 0x9a, 0x51, 0x6b, 0x49, 0xb5, 0x86, 0x00, - 0xbe, 0xcf, 0x06, 0x06, 0x5c, 0x82, 0x52, 0xc9, 0x91, 0xf7, 0xbe, 0xf5, 0x07, 0xd3, 0xf6, 0xb7, - 0x21, 0x27, 0x5c, 0x82, 0x52, 0xc9, 0x11, 0xcf, 0x5f, 0x38, 0x54, 0x53, 0xb7, 0x21, 0x26, 0x97, - 0xa0, 0x54, 0x36, 0x75, 0x68, 0xa4, 0xa7, 0x9f, 0x3b, 0x6b, 0x2e, 0x3f, 0xfb, 0xba, 0xb0, 0x69, - 0x1d, 0x57, 0x0b, 0xe4, 0x11, 0x94, 0x98, 0xeb, 0xd9, 0x13, 0x61, 0xc7, 0x2a, 0xca, 0xa0, 0xf9, - 0x17, 0xec, 0xa5, 0xc6, 0xfe, 0x33, 0x49, 0x0e, 0xa1, 0xee, 0xd3, 0x09, 0xb5, 0x97, 0x74, 0xfa, - 0xce, 0x77, 0xe7, 0xa1, 0xb5, 0x53, 0x6b, 0xdc, 0xf8, 0x3e, 0xb5, 0x02, 0xd7, 0x11, 0xee, 0xae, - 0x62, 0x18, 0xad, 0x0a, 0x28, 0x26, 0x0b, 0xb8, 0x03, 0x75, 0xdd, 0x29, 0x5f, 0xa1, 0x86, 0x78, - 0xaf, 0x42, 0x72, 0xaf, 0x5b, 0x68, 0xa4, 0x3d, 0xf4, 0x90, 0x23, 0xdb, 0xd8, 0xbf, 0xb0, 0xb9, - 0x7f, 0xf3, 0x15, 0x94, 0x43, 0x9b, 0x25, 0xde, 0x41, 0x25, 0xf5, 0x0e, 0x3e, 0xe2, 0x57, 0xee, - 0x32, 0x37, 0x4a, 0x2e, 0x82, 0xe6, 0x73, 0x80, 0x95, 0xc7, 0xf2, 0xd8, 0xe6, 0xef, 0x50, 0x0e, - 0xad, 0xb4, 0x51, 0x8d, 0x92, 0x71, 0x1a, 0x2f, 0xa1, 0x38, 0xa7, 0xcc, 0x12, 0x3b, 0xe5, 0x7b, - 0xd3, 0xec, 0x9d, 0x53, 0x66, 0xa1, 0x90, 0x36, 0x47, 0x50, 0x0e, 0x3d, 0xc7, 0x8b, 0xe0, 0xae, - 0x1b, 0xb9, 0x51, 0x11, 0x32, 0x7a, 0x60, 0xd6, 0xd0, 0x90, 0x5f, 0x33, 0xeb, 0x53, 0x28, 0x72, - 0xc3, 0xae, 0xae, 0x4b, 0x49, 0x5e, 0xfa, 0x33, 0x28, 0x09, 0x77, 0xe6, 0x18, 0xe0, 0x67, 0x28, - 0x09, 0x27, 0x6e, 0xbb, 0xa7, 0x6c, 0x4c, 0xb8, 0xf1, 0x7f, 0x62, 0x1f, 0x15, 0x28, 0x87, 0xc5, - 0x93, 0x37, 0x50, 0x09, 0x47, 0x2d, 0xd0, 0x94, 0x56, 0xa1, 0x5d, 0x3b, 0xfa, 0x21, 0xbb, 0xdb, - 0x70, 0x58, 0x45, 0xc7, 0x31, 0x42, 0xba, 0x50, 0x0f, 0x16, 0xe3, 0x60, 0xe2, 0xdb, 0x1e, 0xb3, - 0x5d, 0x47, 0xdb, 0x11, 0x29, 0xf2, 0xde, 0xcf, 0xc5, 0x58, 0xe0, 0x29, 0x84, 0xfc, 0x02, 0xe5, - 0x89, 0xeb, 0x30, 0xdf, 0x9d, 0x89, 0x21, 0xce, 0x2d, 0xa0, 0x27, 0x45, 0x22, 0x43, 0x44, 0x34, - 0xbb, 0x50, 0x4b, 0x14, 0xf6, 0xa0, 0xc7, 0xe7, 0x0d, 0x94, 0xc3, 0xc2, 0x38, 0x1e, 0x96, 0x36, - 0x96, 0x3f, 0x31, 0x2a, 0xb8, 0x5a, 0xc8, 0xc1, 0xff, 0xde, 0x81, 0x5a, 0xa2, 0x34, 0xf2, 0x1a, - 0x4a, 
0xf6, 0x2d, 0x7f, 0xaa, 0xe5, 0x69, 0xbe, 0xd8, 0xda, 0x4c, 0xff, 0xd4, 0x5a, 0xca, 0x23, - 0x95, 0x90, 0xa0, 0xff, 0xb4, 0x1c, 0x16, 0x1e, 0xe4, 0x67, 0xe8, 0xdf, 0x2c, 0x87, 0x85, 0x34, - 0x87, 0x38, 0x2d, 0xdf, 0xfc, 0xc2, 0x17, 0xd0, 0x62, 0xe0, 0x24, 0x2d, 0x9f, 0xff, 0xd7, 0xd1, - 0xf3, 0x5f, 0xfc, 0x02, 0x5a, 0xcc, 0x9d, 0xa4, 0xe5, 0x7f, 0x82, 0x53, 0x50, 0xd7, 0x9b, 0xca, - 0xf6, 0x02, 0x39, 0x00, 0x88, 0xef, 0x24, 0x10, 0x8d, 0xd6, 0x31, 0xb1, 0xd2, 0x3c, 0x5a, 0x65, - 0x8a, 0x1a, 0x5c, 0x63, 0x94, 0x0d, 0xa6, 0x1d, 0x33, 0x71, 0x5b, 0x39, 0x4e, 0x7c, 0x1b, 0x2b, - 0xe3, 0x16, 0x72, 0xea, 0xe4, 0x6f, 0x23, 0xa5, 0x7e, 0x54, 0xa2, 0x0c, 0x0e, 0xff, 0x51, 0xa0, - 0xc8, 0x7f, 0x60, 0x92, 0xef, 0x60, 0xdf, 0xbc, 0x3a, 0x1e, 0xf4, 0x87, 0xa7, 0x37, 0xe7, 0xc6, - 0x70, 0xd8, 0x3d, 0x31, 0xd4, 0x6f, 0x08, 0x81, 0x06, 0x1a, 0x67, 0x46, 0x6f, 0x14, 0xaf, 0x29, - 0xe4, 0x31, 0x7c, 0xab, 0x5f, 0x99, 0x83, 0x7e, 0xaf, 0x3b, 0x32, 0xe2, 0xe5, 0x1d, 0xce, 0xeb, - 0xc6, 0xa0, 0x7f, 0x6d, 0x60, 0xbc, 0x58, 0x20, 0x75, 0xa8, 0x74, 0x75, 0xfd, 0xc6, 0x34, 0x0c, - 0x54, 0x8b, 0x64, 0x1f, 0x6a, 0x68, 0x9c, 0x5f, 0x5e, 0x1b, 0x72, 0xa1, 0xc4, 0xff, 0x8c, 0x46, - 0xef, 0xfa, 0x06, 0xcd, 0x9e, 0xba, 0xcb, 0xa3, 0xa1, 0x71, 0xa1, 0x8b, 0xa8, 0xcc, 0x23, 0x1d, - 0x2f, 0x4d, 0x11, 0x55, 0x48, 0x05, 0x8a, 0x67, 0x97, 0xfd, 0x0b, 0xb5, 0x4a, 0xaa, 0x50, 0x1a, - 0x18, 0xdd, 0x6b, 0x43, 0x05, 0xfe, 0x79, 0x82, 0xdd, 0x77, 0x23, 0xb5, 0xc6, 0x3f, 0x4d, 0xbc, - 0xba, 0x30, 0xd4, 0xfa, 0xe1, 0x5b, 0xd8, 0x5f, 0xdd, 0xef, 0xb1, 0xc5, 0x26, 0xb7, 0xe4, 0x27, - 0x28, 0x8d, 0xf9, 0x47, 0x38, 0xc4, 0x8f, 0x33, 0x47, 0x01, 0xa5, 0xe6, 0xb8, 0xfe, 0xf1, 0xfe, - 0x40, 0xf9, 0xf7, 0xfe, 0x40, 0xf9, 0x74, 0x7f, 0xa0, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xdb, - 0x3a, 0x1c, 0xe4, 0xc9, 0x0b, 0x00, 0x00, + // 1027 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xdf, 0x6e, 0xe2, 0x46, + 0x14, 0xc6, 0xeb, 0x80, 0x03, 0x1c, 0x08, 0x71, 0xa7, 0xd9, 0xd6, 0x72, 0x77, 0x23, 0x9a, 0xae, + 0x56, 0xa8, 0x95, 0x90, 0x36, 0x52, 0xbb, 0x17, 0xdd, 0x5d, 0x95, 0x60, 0x6f, 0x42, 0x44, 0x12, + 0x6b, 0x20, 0xe9, 0x65, 0x6a, 0x60, 0xba, 0x71, 0x04, 0xb6, 0x65, 0x0f, 0x54, 0x7b, 0xd5, 0xd7, + 0xdb, 0xbb, 0xed, 0x23, 0x54, 0x79, 0x92, 0x6a, 0x66, 0xfc, 0x07, 0x83, 0xed, 0xec, 0x46, 0xb9, + 0xf3, 0x19, 0xbe, 0xdf, 0x99, 0x33, 0x67, 0xce, 0x37, 0x02, 0xea, 0xd4, 0xb7, 0x26, 0xa4, 0xe3, + 0xf9, 0x2e, 0x75, 0x51, 0xcd, 0x5b, 0x8c, 0x83, 0xc5, 0xb8, 0xe3, 0x8d, 0x0f, 0xee, 0xbe, 0x03, + 0x18, 0xb1, 0x9f, 0x8c, 0x25, 0x71, 0x28, 0xea, 0x40, 0x99, 0x7e, 0xf0, 0x88, 0x2a, 0xb5, 0xa4, + 0x76, 0xf3, 0x50, 0xeb, 0xc4, 0xc2, 0x4e, 0x22, 0xea, 0x8c, 0x3e, 0x78, 0x04, 0x73, 0x1d, 0xfa, + 0x16, 0xb6, 0x3d, 0x42, 0xfc, 0xbe, 0xae, 0x6e, 0xb5, 0xa4, 0x76, 0x03, 0x87, 0x11, 0x7a, 0x0a, + 0x35, 0x6a, 0xcf, 0x49, 0x40, 0xad, 0xb9, 0xa7, 0x96, 0x5a, 0x52, 0xbb, 0x84, 0x93, 0x05, 0x34, + 0x80, 0xa6, 0xb7, 0x18, 0xcf, 0xec, 0xe0, 0xe6, 0x8c, 0x04, 0x81, 0xf5, 0x9e, 0xa8, 0xe5, 0x96, + 0xd4, 0xae, 0x1f, 0x3e, 0xcf, 0xde, 0xcf, 0x4c, 0x69, 0xf1, 0x1a, 0x8b, 0xfa, 0xb0, 0xe3, 0x93, + 0x5b, 0x32, 0xa1, 0x51, 0x32, 0x99, 0x27, 0xfb, 0x31, 0x3b, 0x19, 0x5e, 0x95, 0xe2, 0x34, 0x89, + 0x30, 0x28, 0xd3, 0x85, 0x37, 0xb3, 0x27, 0x16, 0x25, 0x51, 0xb6, 0x6d, 0x9e, 0xed, 0x45, 0x76, + 0x36, 0x7d, 0x4d, 0x8d, 0x37, 0x78, 0x76, 0xd8, 0x29, 0x99, 0xd9, 0x4b, 0xe2, 0x47, 0x19, 0x2b, + 0x45, 0x87, 0xd5, 0x53, 0x5a, 0xbc, 0xc6, 0xa2, 0x57, 0x50, 0xb1, 0xa6, 0x53, 0x93, 0x10, 0x5f, + 0xad, 0xf2, 0x34, 0xcf, 0xb2, 0xd3, 
0x74, 0x85, 0x08, 0x47, 0x6a, 0xf4, 0x3b, 0x80, 0x4f, 0xe6, + 0xee, 0x92, 0x70, 0xb6, 0xc6, 0xd9, 0x56, 0x5e, 0x8b, 0x22, 0x1d, 0x5e, 0x61, 0xd8, 0xd6, 0x3e, + 0x99, 0x2c, 0xb1, 0xd9, 0x53, 0xa1, 0x68, 0x6b, 0x2c, 0x44, 0x38, 0x52, 0x33, 0x30, 0x20, 0xce, + 0x94, 0x81, 0xf5, 0x22, 0x70, 0x28, 0x44, 0x38, 0x52, 0x33, 0x70, 0xea, 0xbb, 0x1e, 0x03, 0x1b, + 0x45, 0xa0, 0x2e, 0x44, 0x38, 0x52, 0xb3, 0x31, 0xbe, 0x75, 0x6d, 0x47, 0xdd, 0xe1, 0x54, 0xce, + 0x18, 0x9f, 0xba, 0xb6, 0x83, 0xb9, 0x0e, 0xbd, 0x04, 0x79, 0x46, 0xac, 0x25, 0x51, 0x9b, 0x1c, + 0xf8, 0x3e, 0x1b, 0x18, 0x30, 0x09, 0x16, 0x4a, 0x86, 0xbc, 0xf7, 0xad, 0xbf, 0xa8, 0xba, 0x5b, + 0x84, 0x1c, 0x33, 0x09, 0x16, 0x4a, 0x86, 0x78, 0xfe, 0xc2, 0x21, 0xaa, 0x52, 0x84, 0x98, 0x4c, + 0x82, 0x85, 0x52, 0xd3, 0xa1, 0x99, 0x9e, 0x7e, 0xe6, 0xac, 0xb9, 0xf8, 0xec, 0xeb, 0xdc, 0xa6, + 0x0d, 0x9c, 0x2c, 0xa0, 0x3d, 0x90, 0xa9, 0xeb, 0xd9, 0x13, 0x6e, 0xc7, 0x1a, 0x16, 0x81, 0xf6, + 0x0f, 0xec, 0xa4, 0xc6, 0xfe, 0x9e, 0x24, 0x07, 0xd0, 0xf0, 0xc9, 0x84, 0xd8, 0x4b, 0x32, 0x7d, + 0xe7, 0xbb, 0xf3, 0xd0, 0xda, 0xa9, 0x35, 0x66, 0x7c, 0x9f, 0x58, 0x81, 0xeb, 0x70, 0x77, 0xd7, + 0x70, 0x18, 0x25, 0x05, 0x94, 0x57, 0x0b, 0xb8, 0x05, 0x65, 0xdd, 0x29, 0x8f, 0x50, 0x43, 0xbc, + 0x57, 0x69, 0x75, 0xaf, 0x1b, 0x68, 0xa6, 0x3d, 0xf4, 0x90, 0x96, 0x6d, 0xec, 0x5f, 0xda, 0xdc, + 0x5f, 0x7b, 0x05, 0x95, 0xd0, 0x66, 0x2b, 0xef, 0xa0, 0x94, 0x7a, 0x07, 0xf7, 0xd8, 0x95, 0xbb, + 0xd4, 0x8d, 0x92, 0xf3, 0x40, 0x7b, 0x0e, 0x90, 0x78, 0x2c, 0x8f, 0xd5, 0xfe, 0x84, 0x4a, 0x68, + 0xa5, 0x8d, 0x6a, 0xa4, 0x8c, 0x6e, 0xbc, 0x84, 0xf2, 0x9c, 0x50, 0x8b, 0xef, 0x94, 0xef, 0x4d, + 0xb3, 0x77, 0x46, 0xa8, 0x85, 0xb9, 0x54, 0x1b, 0x41, 0x25, 0xf4, 0x1c, 0x2b, 0x82, 0xb9, 0x6e, + 0xe4, 0x46, 0x45, 0x88, 0xe8, 0x81, 0x59, 0x43, 0x43, 0x3e, 0x66, 0xd6, 0xa7, 0x50, 0x66, 0x86, + 0x4d, 0xae, 0x4b, 0x5a, 0xbd, 0xf4, 0x67, 0x20, 0x73, 0x77, 0xe6, 0x18, 0xe0, 0x17, 0x90, 0xb9, + 0x13, 0x8b, 0xee, 0x29, 0x1b, 0xe3, 0x6e, 0xfc, 0x42, 0xec, 0xa3, 0x04, 0x95, 0xb0, 0x78, 0xf4, + 0x06, 0xaa, 0xe1, 0xa8, 0x05, 0xaa, 0xd4, 0x2a, 0xb5, 0xeb, 0x87, 0x3f, 0x64, 0x9f, 0x36, 0x1c, + 0x56, 0x7e, 0xe2, 0x18, 0x41, 0x5d, 0x68, 0x04, 0x8b, 0x71, 0x30, 0xf1, 0x6d, 0x8f, 0xda, 0xae, + 0xa3, 0x6e, 0xf1, 0x14, 0x79, 0xef, 0xe7, 0x62, 0xcc, 0xf1, 0x14, 0x82, 0x7e, 0x83, 0xca, 0xc4, + 0x75, 0xa8, 0xef, 0xce, 0xf8, 0x10, 0xe7, 0x16, 0xd0, 0x13, 0x22, 0x9e, 0x21, 0x22, 0xb4, 0x2e, + 0xd4, 0x57, 0x0a, 0x7b, 0xd0, 0xe3, 0xf3, 0x06, 0x2a, 0x61, 0x61, 0x0c, 0x0f, 0x4b, 0x1b, 0x8b, + 0xbf, 0x18, 0x55, 0x9c, 0x2c, 0xe4, 0xe0, 0x9f, 0xb6, 0xa0, 0xbe, 0x52, 0x1a, 0x7a, 0x0d, 0xb2, + 0x7d, 0xc3, 0x9e, 0x6a, 0xd1, 0xcd, 0x17, 0x85, 0x87, 0xe9, 0x9f, 0x58, 0x4b, 0xd1, 0x52, 0x01, + 0x71, 0xfa, 0x6f, 0xcb, 0xa1, 0x61, 0x23, 0xef, 0xa1, 0xff, 0xb0, 0x1c, 0x1a, 0xd2, 0x0c, 0x62, + 0xb4, 0x78, 0xf3, 0x4b, 0x9f, 0x41, 0xf3, 0x81, 0x13, 0xb4, 0x78, 0xfe, 0x5f, 0x47, 0xcf, 0x7f, + 0xf9, 0x33, 0x68, 0x3e, 0x77, 0x82, 0xe6, 0x10, 0x3a, 0x81, 0x9a, 0x3d, 0x75, 0x1d, 0xca, 0xab, + 0x97, 0x79, 0x86, 0x9f, 0x8a, 0xab, 0xd7, 0x5d, 0x87, 0xc6, 0x27, 0x48, 0x60, 0xed, 0x04, 0x94, + 0xf5, 0xf6, 0x64, 0xbb, 0x0a, 0xed, 0x03, 0xc4, 0xb7, 0x1b, 0xf0, 0x96, 0x35, 0xf0, 0xca, 0x8a, + 0x76, 0x98, 0x64, 0x8a, 0x36, 0x5a, 0x63, 0xa4, 0x0d, 0xa6, 0x1d, 0x33, 0x71, 0x83, 0x72, 0x3c, + 0xfd, 0x36, 0x56, 0xc6, 0xcd, 0xc8, 0xa9, 0x93, 0xbd, 0xb2, 0x84, 0xf8, 0x51, 0x89, 0x22, 0xd0, + 0x7e, 0x85, 0xbd, 0xac, 0x56, 0xdc, 0x57, 0xe1, 0xc1, 0x27, 0x09, 0xca, 0xec, 0x2f, 0x2e, 0xfa, + 0x06, 0x76, 0xcd, 0xcb, 0xa3, 0x41, 0x7f, 0x78, 0x72, 0x7d, 
0x66, 0x0c, 0x87, 0xdd, 0x63, 0x43, + 0xf9, 0x0a, 0x21, 0x68, 0x62, 0xe3, 0xd4, 0xe8, 0x8d, 0xe2, 0x35, 0x09, 0x3d, 0x81, 0xaf, 0xf5, + 0x4b, 0x73, 0xd0, 0xef, 0x75, 0x47, 0x46, 0xbc, 0xbc, 0xc5, 0x78, 0xdd, 0x18, 0xf4, 0xaf, 0x0c, + 0x1c, 0x2f, 0x96, 0x50, 0x03, 0xaa, 0x5d, 0x5d, 0xbf, 0x36, 0x0d, 0x03, 0x2b, 0x65, 0xb4, 0x0b, + 0x75, 0x6c, 0x9c, 0x5d, 0x5c, 0x19, 0x62, 0x41, 0x66, 0x3f, 0x63, 0xa3, 0x77, 0x75, 0x8d, 0xcd, + 0x9e, 0xb2, 0xcd, 0xa2, 0xa1, 0x71, 0xae, 0xf3, 0xa8, 0xc2, 0x22, 0x1d, 0x5f, 0x98, 0x3c, 0xaa, + 0xa2, 0x2a, 0x94, 0x4f, 0x2f, 0xfa, 0xe7, 0x4a, 0x0d, 0xd5, 0x40, 0x1e, 0x18, 0xdd, 0x2b, 0x43, + 0x01, 0xf6, 0x79, 0x8c, 0xbb, 0xef, 0x46, 0x4a, 0x9d, 0x7d, 0x9a, 0xf8, 0xf2, 0xdc, 0x50, 0x1a, + 0x07, 0x6f, 0x61, 0x37, 0x99, 0x8f, 0x23, 0x8b, 0x4e, 0x6e, 0xd0, 0xcf, 0x20, 0x8f, 0xd9, 0x47, + 0x68, 0xa3, 0x27, 0x99, 0xa3, 0x84, 0x85, 0xe6, 0xa8, 0xf1, 0xf1, 0x6e, 0x5f, 0xfa, 0xf7, 0x6e, + 0x5f, 0xfa, 0xef, 0x6e, 0x5f, 0xfa, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x17, 0x7f, 0xbd, 0x0d, 0x4b, + 0x0c, 0x00, 0x00, } func (m *TraceEvent) Marshal() (dAtA []byte, err error) { @@ -2509,6 +2567,20 @@ func (m *TraceEvent_ControlMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Idontwant) > 0 { + for iNdEx := len(m.Idontwant) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Idontwant[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if len(m.Prune) > 0 { for iNdEx := len(m.Prune) - 1; iNdEx >= 0; iNdEx-- { { @@ -2724,6 +2796,42 @@ func (m *TraceEvent_ControlPruneMeta) MarshalToSizedBuffer(dAtA []byte) (int, er return len(dAtA) - i, nil } +func (m *TraceEvent_ControlIDontWantMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceEvent_ControlIDontWantMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TraceEvent_ControlIDontWantMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.MessageIDs) > 0 { + for iNdEx := len(m.MessageIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MessageIDs[iNdEx]) + copy(dAtA[i:], m.MessageIDs[iNdEx]) + i = encodeVarintTrace(dAtA, i, uint64(len(m.MessageIDs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *TraceEventBatch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3211,6 +3319,12 @@ func (m *TraceEvent_ControlMeta) Size() (n int) { n += 1 + l + sovTrace(uint64(l)) } } + if len(m.Idontwant) > 0 { + for _, e := range m.Idontwant { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3295,6 +3409,24 @@ func (m *TraceEvent_ControlPruneMeta) Size() (n int) { return n } +func (m *TraceEvent_ControlIDontWantMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MessageIDs) > 0 { + for _, b := range m.MessageIDs { + l = len(b) + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *TraceEventBatch) Size() (n int) { if m == nil { return 0 
@@ -6032,6 +6164,40 @@ func (m *TraceEvent_ControlMeta) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Idontwant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Idontwant = append(m.Idontwant, &TraceEvent_ControlIDontWantMeta{}) + if err := m.Idontwant[len(m.Idontwant)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTrace(dAtA[iNdEx:]) @@ -6453,6 +6619,89 @@ func (m *TraceEvent_ControlPruneMeta) Unmarshal(dAtA []byte) error { } return nil } +func (m *TraceEvent_ControlIDontWantMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControlIDontWantMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControlIDontWantMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageIDs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageIDs = append(m.MessageIDs, make([]byte, postIndex-iNdEx)) + copy(m.MessageIDs[len(m.MessageIDs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TraceEventBatch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.proto b/vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.proto index 7f834020cc3..5ee8401c147 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.proto +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/pb/trace.proto @@ -124,6 +124,7 @@ message TraceEvent { repeated ControlIWantMeta iwant = 2; repeated ControlGraftMeta graft = 3; repeated ControlPruneMeta prune = 4; + repeated ControlIDontWantMeta idontwant = 5; } message ControlIHaveMeta { @@ -143,6 +144,10 @@ message TraceEvent { optional string topic = 1; repeated bytes peers = 2; } + + message ControlIDontWantMeta { + repeated bytes messageIDs = 1; + } } message TraceEventBatch { diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/peer_notify.go b/vendor/github.com/libp2p/go-libp2p-pubsub/peer_notify.go new file mode 100644 index 00000000000..44aceeefbbd --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/peer_notify.go @@ -0,0 +1,112 @@ +package pubsub + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +func (ps *PubSub) watchForNewPeers(ctx context.Context) { + // We don't bother subscribing to "connectivity" events because we always run identify after + // every new connection. + sub, err := ps.host.EventBus().Subscribe([]interface{}{ + &event.EvtPeerIdentificationCompleted{}, + &event.EvtPeerProtocolsUpdated{}, + }) + if err != nil { + log.Errorf("failed to subscribe to peer identification events: %v", err) + return + } + defer sub.Close() + + ps.newPeersPrioLk.RLock() + ps.newPeersMx.Lock() + for _, pid := range ps.host.Network().Peers() { + if ps.host.Network().Connectedness(pid) != network.Connected { + continue + } + ps.newPeersPend[pid] = struct{}{} + } + ps.newPeersMx.Unlock() + ps.newPeersPrioLk.RUnlock() + + select { + case ps.newPeers <- struct{}{}: + default: + } + + var supportsProtocol func(protocol.ID) bool + if ps.protoMatchFunc != nil { + var supportedProtocols []func(protocol.ID) bool + for _, proto := range ps.rt.Protocols() { + + supportedProtocols = append(supportedProtocols, ps.protoMatchFunc(proto)) + } + supportsProtocol = func(proto protocol.ID) bool { + for _, fn := range supportedProtocols { + if (fn)(proto) { + return true + } + } + return false + } + } else { + supportedProtocols := make(map[protocol.ID]struct{}) + for _, proto := range ps.rt.Protocols() { + supportedProtocols[proto] = struct{}{} + } + supportsProtocol = func(proto protocol.ID) bool { + _, ok := supportedProtocols[proto] + return ok + } + } + + for ctx.Err() == nil { + var ev any + select { + case <-ctx.Done(): + return + case ev = <-sub.Out(): + } + + var protos []protocol.ID + var peer peer.ID + switch ev := ev.(type) { + case event.EvtPeerIdentificationCompleted: + peer = ev.Peer + protos = ev.Protocols + case event.EvtPeerProtocolsUpdated: + peer = ev.Peer + protos = ev.Added + default: + continue + } + + // We don't bother checking connectivity (connected and non-"limited") here because + // we'll check when actually handling the new peer. 
+ + for _, p := range protos { + if supportsProtocol(p) { + ps.notifyNewPeer(peer) + break + } + } + } + +} + +func (ps *PubSub) notifyNewPeer(peer peer.ID) { + ps.newPeersPrioLk.RLock() + ps.newPeersMx.Lock() + ps.newPeersPend[peer] = struct{}{} + ps.newPeersMx.Unlock() + ps.newPeersPrioLk.RUnlock() + + select { + case ps.newPeers <- struct{}{}: + default: + } +} diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/pubsub.go b/vendor/github.com/libp2p/go-libp2p-pubsub/pubsub.go index a9d646e853f..ae961a95273 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/pubsub.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/pubsub.go @@ -150,7 +150,7 @@ type PubSub struct { blacklist Blacklist blacklistPeer chan peer.ID - peers map[peer.ID]chan *RPC + peers map[peer.ID]*rpcQueue inboundStreamsMx sync.Mutex inboundStreams map[peer.ID]network.Stream @@ -199,11 +199,14 @@ type PubSubRouter interface { // EnoughPeers returns whether the router needs more peers before it's ready to publish new records. // Suggested (if greater than 0) is a suggested number of peers that the router should need. EnoughPeers(topic string, suggested int) bool - // AcceptFrom is invoked on any incoming message before pushing it to the validation pipeline + // AcceptFrom is invoked on any RPC envelope before pushing it to the validation pipeline // or processing control information. // Allows routers with internal scoring to vet peers before committing any processing resources // to the message and implement an effective graylist and react to validation queue overload. AcceptFrom(peer.ID) AcceptStatus + // PreValidation is invoked on messages in the RPC envelope right before pushing it to + // the validation pipeline + PreValidation([]*Message) // HandleRPC is invoked to process control messages in the RPC envelope. // It is invoked after subscriptions and payload messages have been processed. HandleRPC(*RPC) @@ -289,7 +292,7 @@ func NewPubSub(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option mySubs: make(map[string]map[*Subscription]struct{}), myRelays: make(map[string]int), topics: make(map[string]map[peer.ID]struct{}), - peers: make(map[peer.ID]chan *RPC), + peers: make(map[peer.ID]*rpcQueue), inboundStreams: make(map[peer.ID]network.Stream), blacklist: NewMapBlacklist(), blacklistPeer: make(chan peer.ID), @@ -331,14 +334,12 @@ func NewPubSub(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option h.SetStreamHandler(id, ps.handleNewStream) } } - h.Network().Notify((*PubSubNotif)(ps)) + go ps.watchForNewPeers(ctx) ps.val.Start(ps) go ps.processLoop(ctx) - (*PubSubNotif)(ps).Initialize() - return ps, nil } @@ -565,8 +566,8 @@ func WithAppSpecificRpcInspector(inspector func(peer.ID, *RPC) error) Option { func (p *PubSub) processLoop(ctx context.Context) { defer func() { // Clean up go routines. 
- for _, ch := range p.peers { - close(ch) + for _, queue := range p.peers { + queue.Close() } p.peers = nil p.topics = nil @@ -581,7 +582,7 @@ func (p *PubSub) processLoop(ctx context.Context) { case s := <-p.newPeerStream: pid := s.Conn().RemotePeer() - ch, ok := p.peers[pid] + q, ok := p.peers[pid] if !ok { log.Warn("new stream for unknown peer: ", pid) s.Reset() @@ -590,7 +591,7 @@ func (p *PubSub) processLoop(ctx context.Context) { if p.blacklist.Contains(pid) { log.Warn("closing stream for blacklisted peer: ", pid) - close(ch) + q.Close() delete(p.peers, pid) s.Reset() continue @@ -665,9 +666,9 @@ func (p *PubSub) processLoop(ctx context.Context) { log.Infof("Blacklisting peer %s", pid) p.blacklist.Add(pid) - ch, ok := p.peers[pid] + q, ok := p.peers[pid] if ok { - close(ch) + q.Close() delete(p.peers, pid) for t, tmap := range p.topics { if _, ok := tmap[pid]; ok { @@ -698,6 +699,8 @@ func (p *PubSub) handlePendingPeers() { p.newPeersPrioLk.Unlock() for pid := range newPeers { + // Make sure we have a non-limited connection. We do this late because we may have + // disconnected in the meantime. if p.host.Network().Connectedness(pid) != network.Connected { continue } @@ -712,10 +715,10 @@ func (p *PubSub) handlePendingPeers() { continue } - messages := make(chan *RPC, p.peerOutboundQueueSize) - messages <- p.getHelloPacket() - go p.handleNewPeer(p.ctx, pid, messages) - p.peers[pid] = messages + rpcQueue := newRpcQueue(p.peerOutboundQueueSize) + rpcQueue.Push(p.getHelloPacket(), true) + go p.handleNewPeer(p.ctx, pid, rpcQueue) + p.peers[pid] = rpcQueue } } @@ -732,12 +735,12 @@ func (p *PubSub) handleDeadPeers() { p.peerDeadPrioLk.Unlock() for pid := range deadPeers { - ch, ok := p.peers[pid] + q, ok := p.peers[pid] if !ok { continue } - close(ch) + q.Close() delete(p.peers, pid) for t, tmap := range p.topics { @@ -759,10 +762,10 @@ func (p *PubSub) handleDeadPeers() { // still connected, must be a duplicate connection being closed. // we respawn the writer as we need to ensure there is a stream active log.Debugf("peer declared dead but still connected; respawning writer: %s", pid) - messages := make(chan *RPC, p.peerOutboundQueueSize) - messages <- p.getHelloPacket() - p.peers[pid] = messages - go p.handleNewPeerWithBackoff(p.ctx, pid, backoffDelay, messages) + rpcQueue := newRpcQueue(p.peerOutboundQueueSize) + rpcQueue.Push(p.getHelloPacket(), true) + p.peers[pid] = rpcQueue + go p.handleNewPeerWithBackoff(p.ctx, pid, backoffDelay, rpcQueue) } } } @@ -926,14 +929,14 @@ func (p *PubSub) announce(topic string, sub bool) { out := rpcWithSubs(subopt) for pid, peer := range p.peers { - select { - case peer <- out: - p.tracer.SendRPC(out, pid) - default: + err := peer.Push(out, false) + if err != nil { log.Infof("Can't send announce message to peer %s: queue full; scheduling retry", pid) p.tracer.DropRPC(out, pid) go p.announceRetry(pid, topic, sub) + continue } + p.tracer.SendRPC(out, pid) } } @@ -969,14 +972,14 @@ func (p *PubSub) doAnnounceRetry(pid peer.ID, topic string, sub bool) { } out := rpcWithSubs(subopt) - select { - case peer <- out: - p.tracer.SendRPC(out, pid) - default: + err := peer.Push(out, false) + if err != nil { log.Infof("Can't send announce message to peer %s: queue full; scheduling retry", pid) p.tracer.DropRPC(out, pid) go p.announceRetry(pid, topic, sub) + return } + p.tracer.SendRPC(out, pid) } // notifySubs sends a given message to all corresponding subscribers. 
@@ -1102,13 +1105,21 @@ func (p *PubSub) handleIncomingRPC(rpc *RPC) { p.tracer.ThrottlePeer(rpc.from) case AcceptAll: + var toPush []*Message for _, pmsg := range rpc.GetPublish() { if !(p.subscribedToMsg(pmsg) || p.canRelayMsg(pmsg)) { log.Debug("received message in topic we didn't subscribe to; ignoring message") continue } - p.pushMsg(&Message{pmsg, "", rpc.from, nil, false}) + msg := &Message{pmsg, "", rpc.from, nil, false} + if p.shouldPush(msg) { + toPush = append(toPush, msg) + } + } + p.rt.PreValidation(toPush) + for _, msg := range toPush { + p.pushMsg(msg) } } @@ -1125,27 +1136,28 @@ func DefaultPeerFilter(pid peer.ID, topic string) bool { return true } -// pushMsg pushes a message performing validation as necessary -func (p *PubSub) pushMsg(msg *Message) { +// shouldPush filters a message before validating and pushing it +// It returns true if the message can be further validated and pushed +func (p *PubSub) shouldPush(msg *Message) bool { src := msg.ReceivedFrom // reject messages from blacklisted peers if p.blacklist.Contains(src) { log.Debugf("dropping message from blacklisted peer %s", src) p.tracer.RejectMessage(msg, RejectBlacklstedPeer) - return + return false } // even if they are forwarded by good peers if p.blacklist.Contains(msg.GetFrom()) { log.Debugf("dropping message from blacklisted source %s", src) p.tracer.RejectMessage(msg, RejectBlacklistedSource) - return + return false } err := p.checkSigningPolicy(msg) if err != nil { log.Debugf("dropping message from %s: %s", src, err) - return + return false } // reject messages claiming to be from ourselves but not locally published @@ -1153,16 +1165,24 @@ func (p *PubSub) pushMsg(msg *Message) { if peer.ID(msg.GetFrom()) == self && src != self { log.Debugf("dropping message claiming to be from self but forwarded from %s", src) p.tracer.RejectMessage(msg, RejectSelfOrigin) - return + return false } // have we already seen and validated this message? 
id := p.idGen.ID(msg) if p.seenMessage(id) { p.tracer.DuplicateMessage(msg) - return + return false } + return true +} + +// pushMsg pushes a message performing validation as necessary +func (p *PubSub) pushMsg(msg *Message) { + src := msg.ReceivedFrom + id := p.idGen.ID(msg) + if !p.val.Push(src, msg) { return } diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/randomsub.go b/vendor/github.com/libp2p/go-libp2p-pubsub/randomsub.go index f29b923f94a..4e410f5fa94 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/randomsub.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/randomsub.go @@ -94,6 +94,8 @@ func (rs *RandomSubRouter) AcceptFrom(peer.ID) AcceptStatus { return AcceptAll } +func (rs *RandomSubRouter) PreValidation([]*Message) {} + func (rs *RandomSubRouter) HandleRPC(rpc *RPC) {} func (rs *RandomSubRouter) Publish(msg *Message) { @@ -144,18 +146,18 @@ func (rs *RandomSubRouter) Publish(msg *Message) { out := rpcWithMessages(msg.Message) for p := range tosend { - mch, ok := rs.p.peers[p] + q, ok := rs.p.peers[p] if !ok { continue } - select { - case mch <- out: - rs.tracer.SendRPC(out, p) - default: + err := q.Push(out, false) + if err != nil { log.Infof("dropping message to peer %s: queue full", p) rs.tracer.DropRPC(out, p) + continue } + rs.tracer.SendRPC(out, p) } } diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/rpc_queue.go b/vendor/github.com/libp2p/go-libp2p-pubsub/rpc_queue.go new file mode 100644 index 00000000000..e5c22935b02 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/rpc_queue.go @@ -0,0 +1,147 @@ +package pubsub + +import ( + "context" + "errors" + "sync" +) + +var ( + ErrQueueCancelled = errors.New("rpc queue operation cancelled") + ErrQueueClosed = errors.New("rpc queue closed") + ErrQueueFull = errors.New("rpc queue full") + ErrQueuePushOnClosed = errors.New("push on closed rpc queue") +) + +type priorityQueue struct { + normal []*RPC + priority []*RPC +} + +func (q *priorityQueue) Len() int { + return len(q.normal) + len(q.priority) +} + +func (q *priorityQueue) NormalPush(rpc *RPC) { + q.normal = append(q.normal, rpc) +} + +func (q *priorityQueue) PriorityPush(rpc *RPC) { + q.priority = append(q.priority, rpc) +} + +func (q *priorityQueue) Pop() *RPC { + var rpc *RPC + + if len(q.priority) > 0 { + rpc = q.priority[0] + q.priority[0] = nil + q.priority = q.priority[1:] + } else if len(q.normal) > 0 { + rpc = q.normal[0] + q.normal[0] = nil + q.normal = q.normal[1:] + } + + return rpc +} + +type rpcQueue struct { + dataAvailable sync.Cond + spaceAvailable sync.Cond + // Mutex used to access queue + queueMu sync.Mutex + queue priorityQueue + + closed bool + maxSize int +} + +func newRpcQueue(maxSize int) *rpcQueue { + q := &rpcQueue{maxSize: maxSize} + q.dataAvailable.L = &q.queueMu + q.spaceAvailable.L = &q.queueMu + return q +} + +func (q *rpcQueue) Push(rpc *RPC, block bool) error { + return q.push(rpc, false, block) +} + +func (q *rpcQueue) UrgentPush(rpc *RPC, block bool) error { + return q.push(rpc, true, block) +} + +func (q *rpcQueue) push(rpc *RPC, urgent bool, block bool) error { + q.queueMu.Lock() + defer q.queueMu.Unlock() + + if q.closed { + panic(ErrQueuePushOnClosed) + } + + for q.queue.Len() == q.maxSize { + if block { + q.spaceAvailable.Wait() + // It can receive a signal because the queue is closed. 
+ if q.closed { + panic(ErrQueuePushOnClosed) + } + } else { + return ErrQueueFull + } + } + if urgent { + q.queue.PriorityPush(rpc) + } else { + q.queue.NormalPush(rpc) + } + + q.dataAvailable.Signal() + return nil +} + +// Note that, when the queue is empty and there are two blocked Pop calls, it +// doesn't mean that the first Pop will get the item from the next Push. The +// second Pop will probably get it instead. +func (q *rpcQueue) Pop(ctx context.Context) (*RPC, error) { + q.queueMu.Lock() + defer q.queueMu.Unlock() + + if q.closed { + return nil, ErrQueueClosed + } + + unregisterAfterFunc := context.AfterFunc(ctx, func() { + // Wake up all the waiting routines. The only routine that correponds + // to this Pop call will return from the function. Note that this can + // be expensive, if there are too many waiting routines. + q.dataAvailable.Broadcast() + }) + defer unregisterAfterFunc() + + for q.queue.Len() == 0 { + select { + case <-ctx.Done(): + return nil, ErrQueueCancelled + default: + } + q.dataAvailable.Wait() + // It can receive a signal because the queue is closed. + if q.closed { + return nil, ErrQueueClosed + } + } + rpc := q.queue.Pop() + q.spaceAvailable.Signal() + return rpc, nil +} + +func (q *rpcQueue) Close() { + q.queueMu.Lock() + defer q.queueMu.Unlock() + + q.closed = true + q.dataAvailable.Broadcast() + q.spaceAvailable.Broadcast() +} diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/timecache/time_cache.go b/vendor/github.com/libp2p/go-libp2p-pubsub/timecache/time_cache.go index e33bc3542a6..ee34fd5b433 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/timecache/time_cache.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/timecache/time_cache.go @@ -2,12 +2,8 @@ package timecache import ( "time" - - logger "github.com/ipfs/go-log/v2" ) -var log = logger.Logger("pubsub/timecache") - // Stategy is the TimeCache expiration strategy to use. 
type Strategy uint8 diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/trace.go b/vendor/github.com/libp2p/go-libp2p-pubsub/trace.go index 27fac289f82..7dbb5409953 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/trace.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/trace.go @@ -402,11 +402,23 @@ func (t *pubsubTracer) traceRPCMeta(rpc *RPC) *pb.TraceEvent_RPCMeta { }) } + var idontwant []*pb.TraceEvent_ControlIDontWantMeta + for _, ctl := range rpc.Control.Idontwant { + var mids [][]byte + for _, mid := range ctl.MessageIDs { + mids = append(mids, []byte(mid)) + } + idontwant = append(idontwant, &pb.TraceEvent_ControlIDontWantMeta{ + MessageIDs: mids, + }) + } + rpcMeta.Control = &pb.TraceEvent_ControlMeta{ - Ihave: ihave, - Iwant: iwant, - Graft: graft, - Prune: prune, + Ihave: ihave, + Iwant: iwant, + Graft: graft, + Prune: prune, + Idontwant: idontwant, } } diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/tracer.go b/vendor/github.com/libp2p/go-libp2p-pubsub/tracer.go index 8e744c91141..cbb92ad77ad 100644 --- a/vendor/github.com/libp2p/go-libp2p-pubsub/tracer.go +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/tracer.go @@ -17,6 +17,7 @@ import ( "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) diff --git a/vendor/github.com/libp2p/go-libp2p-pubsub/version.json b/vendor/github.com/libp2p/go-libp2p-pubsub/version.json new file mode 100644 index 00000000000..ea22ea59197 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-pubsub/version.json @@ -0,0 +1,3 @@ +{ + "version": "v0.11.0" +} diff --git a/vendor/github.com/libp2p/go-libp2p/README.md b/vendor/github.com/libp2p/go-libp2p/README.md index 960d89e4f87..ebebfb3953f 100644 --- a/vendor/github.com/libp2p/go-libp2p/README.md +++ b/vendor/github.com/libp2p/go-libp2p/README.md @@ -99,10 +99,12 @@ Some notable users of go-libp2p are: - [Status go](https://github.com/status-im/status-go) - Status bindings for go-ethereum, built by [Status.im](https://status.im/) - [Flow](https://github.com/onflow/flow-go) - A blockchain built to support games, apps, and digital assets built by [Dapper Labs](https://www.dapperlabs.com/) - [Swarm Bee](https://github.com/ethersphere/bee) - A client for connecting to the [Swarm network](https://www.ethswarm.org/) -- [Elrond Go](https://github.com/multiversx/mx-chain-go) - The Go implementation of the the Elrond network protocol +- [MultiversX Node](https://github.com/multiversx/mx-chain-go) - The Go implementation of the MultiversX network protocol - [Sonr](https://github.com/sonr-io/sonr) - A platform to integrate DID Documents, WebAuthn, and IPFS and manage digital identity and assets. - [EdgeVPN](https://github.com/mudler/edgevpn) - A decentralized, immutable, portable VPN and reverse proxy over p2p. - [Kairos](https://github.com/kairos-io/kairos) - A Kubernetes-focused, Cloud Native Linux meta-distribution. - [Oasis Core](https://github.com/oasisprotocol/oasis-core) - The consensus and runtime layers of the [Oasis protocol](https://oasisprotocol.org/). +- [Spacemesh](https://github.com/spacemeshos/go-spacemesh/) - The Go implementation of the [Spacemesh protocol](https://spacemesh.io/), a novel layer one blockchain +- [Tau](https://github.com/taubyte/tau/) - Open source distributed Platform as a Service (PaaS) Please open a pull request if you want your project (min. 250 GitHub stars) to be added here. 
diff --git a/vendor/github.com/libp2p/go-libp2p/config/config.go b/vendor/github.com/libp2p/go-libp2p/config/config.go index cc689baf7bf..d0a71664f7c 100644 --- a/vendor/github.com/libp2p/go-libp2p/config/config.go +++ b/vendor/github.com/libp2p/go-libp2p/config/config.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "errors" "fmt" + "net" "time" "github.com/libp2p/go-libp2p/core/connmgr" @@ -24,6 +25,7 @@ import ( "github.com/libp2p/go-libp2p/p2p/host/autonat" "github.com/libp2p/go-libp2p/p2p/host/autorelay" bhost "github.com/libp2p/go-libp2p/p2p/host/basic" + blankhost "github.com/libp2p/go-libp2p/p2p/host/blank" "github.com/libp2p/go-libp2p/p2p/host/eventbus" "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" @@ -34,10 +36,12 @@ import ( relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay" "github.com/libp2p/go-libp2p/p2p/protocol/holepunch" "github.com/libp2p/go-libp2p/p2p/transport/quicreuse" + libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc" "github.com/prometheus/client_golang/prometheus" ma "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" + manet "github.com/multiformats/go-multiaddr/net" "github.com/quic-go/quic-go" "go.uber.org/fx" "go.uber.org/fx/fxevent" @@ -131,6 +135,13 @@ type Config struct { SwarmOpts []swarm.Option DisableIdentifyAddressDiscovery bool + + EnableAutoNATv2 bool + + UDPBlackHoleSuccessCounter *swarm.BlackHoleSuccessCounter + CustomUDPBlackHoleSuccessCounter bool + IPv6BlackHoleSuccessCounter *swarm.BlackHoleSuccessCounter + CustomIPv6BlackHoleSuccessCounter bool } func (cfg *Config) makeSwarm(eventBus event.Bus, enableMetrics bool) (*swarm.Swarm, error) { @@ -165,7 +176,10 @@ func (cfg *Config) makeSwarm(eventBus event.Bus, enableMetrics bool) (*swarm.Swa return nil, err } - opts := cfg.SwarmOpts + opts := append(cfg.SwarmOpts, + swarm.WithUDPBlackHoleSuccessCounter(cfg.UDPBlackHoleSuccessCounter), + swarm.WithIPv6BlackHoleSuccessCounter(cfg.IPv6BlackHoleSuccessCounter), + ) if cfg.Reporter != nil { opts = append(opts, swarm.WithMetrics(cfg.Reporter)) } @@ -193,6 +207,77 @@ func (cfg *Config) makeSwarm(eventBus event.Bus, enableMetrics bool) (*swarm.Swa return swarm.NewSwarm(pid, cfg.Peerstore, eventBus, opts...) 
} +func (cfg *Config) makeAutoNATV2Host() (host.Host, error) { + autonatPrivKey, _, err := crypto.GenerateEd25519Key(rand.Reader) + if err != nil { + return nil, err + } + ps, err := pstoremem.NewPeerstore() + if err != nil { + return nil, err + } + + autoNatCfg := Config{ + Transports: cfg.Transports, + Muxers: cfg.Muxers, + SecurityTransports: cfg.SecurityTransports, + Insecure: cfg.Insecure, + PSK: cfg.PSK, + ConnectionGater: cfg.ConnectionGater, + Reporter: cfg.Reporter, + PeerKey: autonatPrivKey, + Peerstore: ps, + DialRanker: swarm.NoDelayDialRanker, + UDPBlackHoleSuccessCounter: cfg.UDPBlackHoleSuccessCounter, + IPv6BlackHoleSuccessCounter: cfg.IPv6BlackHoleSuccessCounter, + ResourceManager: cfg.ResourceManager, + SwarmOpts: []swarm.Option{ + // Don't update black hole state for failed autonat dials + swarm.WithReadOnlyBlackHoleDetector(), + }, + } + fxopts, err := autoNatCfg.addTransports() + if err != nil { + return nil, err + } + var dialerHost host.Host + fxopts = append(fxopts, + fx.Provide(eventbus.NewBus), + fx.Provide(func(lifecycle fx.Lifecycle, b event.Bus) (*swarm.Swarm, error) { + lifecycle.Append(fx.Hook{ + OnStop: func(context.Context) error { + return ps.Close() + }}) + sw, err := autoNatCfg.makeSwarm(b, false) + return sw, err + }), + fx.Provide(func(sw *swarm.Swarm) *blankhost.BlankHost { + return blankhost.NewBlankHost(sw) + }), + fx.Provide(func(bh *blankhost.BlankHost) host.Host { + return bh + }), + fx.Provide(func() crypto.PrivKey { return autonatPrivKey }), + fx.Provide(func(bh host.Host) peer.ID { return bh.ID() }), + fx.Invoke(func(bh *blankhost.BlankHost) { + dialerHost = bh + }), + ) + app := fx.New(fxopts...) + if err := app.Err(); err != nil { + return nil, err + } + err = app.Start(context.Background()) + if err != nil { + return nil, err + } + go func() { + <-dialerHost.Network().(*swarm.Swarm).Done() + app.Stop(context.Background()) + }() + return dialerHost, nil +} + func (cfg *Config) addTransports() ([]fx.Option, error) { fxopts := []fx.Option{ fx.WithLogger(func() fxevent.Logger { return getFXLogger() }), @@ -202,6 +287,29 @@ func (cfg *Config) addTransports() ([]fx.Option, error) { fx.Provide(func() pnet.PSK { return cfg.PSK }), fx.Provide(func() network.ResourceManager { return cfg.ResourceManager }), fx.Provide(func() *madns.Resolver { return cfg.MultiaddrResolver }), + fx.Provide(func(cm *quicreuse.ConnManager, sw *swarm.Swarm) libp2pwebrtc.ListenUDPFn { + hasQuicAddrPortFor := func(network string, laddr *net.UDPAddr) bool { + quicAddrPorts := map[string]struct{}{} + for _, addr := range sw.ListenAddresses() { + if _, err := addr.ValueForProtocol(ma.P_QUIC_V1); err == nil { + netw, addr, err := manet.DialArgs(addr) + if err != nil { + return false + } + quicAddrPorts[netw+"_"+addr] = struct{}{} + } + } + _, ok := quicAddrPorts[network+"_"+laddr.String()] + return ok + } + + return func(network string, laddr *net.UDPAddr) (net.PacketConn, error) { + if hasQuicAddrPortFor(network, laddr) { + return cm.SharedNonQUICPacketConn(network, laddr) + } + return net.ListenUDP(network, laddr) + } + }), } fxopts = append(fxopts, cfg.Transports...) if cfg.Insecure { @@ -260,8 +368,12 @@ func (cfg *Config) addTransports() ([]fx.Option, error) { fxopts = append(fxopts, cfg.QUICReuse...) 
} else { fxopts = append(fxopts, - fx.Provide(func(key quic.StatelessResetKey, tokenGenerator quic.TokenGeneratorKey, _ *swarm.Swarm, lifecycle fx.Lifecycle) (*quicreuse.ConnManager, error) { - cm, err := quicreuse.NewConnManager(key, tokenGenerator) + fx.Provide(func(key quic.StatelessResetKey, tokenGenerator quic.TokenGeneratorKey, lifecycle fx.Lifecycle) (*quicreuse.ConnManager, error) { + var opts []quicreuse.Option + if !cfg.DisableMetrics { + opts = append(opts, quicreuse.EnableMetrics(cfg.PrometheusRegisterer)) + } + cm, err := quicreuse.NewConnManager(key, tokenGenerator, opts...) if err != nil { return nil, err } @@ -291,6 +403,14 @@ func (cfg *Config) addTransports() ([]fx.Option, error) { } func (cfg *Config) newBasicHost(swrm *swarm.Swarm, eventBus event.Bus) (*bhost.BasicHost, error) { + var autonatv2Dialer host.Host + if cfg.EnableAutoNATv2 { + ah, err := cfg.makeAutoNATV2Host() + if err != nil { + return nil, err + } + autonatv2Dialer = ah + } h, err := bhost.NewHost(swrm, &bhost.HostOpts{ EventBus: eventBus, ConnManager: cfg.ConnManager, @@ -306,6 +426,8 @@ func (cfg *Config) newBasicHost(swrm *swarm.Swarm, eventBus event.Bus) (*bhost.B EnableMetrics: !cfg.DisableMetrics, PrometheusRegisterer: cfg.PrometheusRegisterer, DisableIdentifyAddressDiscovery: cfg.DisableIdentifyAddressDiscovery, + EnableAutoNATv2: cfg.EnableAutoNATv2, + AutoNATv2Dialer: autonatv2Dialer, }) if err != nil { return nil, err @@ -315,9 +437,9 @@ func (cfg *Config) newBasicHost(swrm *swarm.Swarm, eventBus event.Bus) (*bhost.B // addresses by default. // // TODO: We shouldn't be doing this here. - oldFactory := h.AddrsFactory + originalAddrFactory := h.AddrsFactory h.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr { - return oldFactory(autorelay.Filter(addrs)) + return originalAddrFactory(autorelay.Filter(addrs)) } } return h, nil @@ -347,18 +469,17 @@ func (cfg *Config) NewNode() (host.Host, error) { fx.Provide(func() event.Bus { return eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer(eventbus.WithRegisterer(cfg.PrometheusRegisterer)))) }), - fx.Provide(func(eventBus event.Bus, lifecycle fx.Lifecycle) (*swarm.Swarm, error) { - sw, err := cfg.makeSwarm(eventBus, !cfg.DisableMetrics) - if err != nil { - return nil, err - } - lifecycle.Append(fx.StopHook(sw.Close)) - return sw, nil + fx.Provide(func() crypto.PrivKey { + return cfg.PeerKey }), // Make sure the swarm constructor depends on the quicreuse.ConnManager. // That way, the ConnManager will be started before the swarm, and more importantly, // the swarm will be stopped before the ConnManager. - fx.Decorate(func(sw *swarm.Swarm, _ *quicreuse.ConnManager, lifecycle fx.Lifecycle) *swarm.Swarm { + fx.Provide(func(eventBus event.Bus, _ *quicreuse.ConnManager, lifecycle fx.Lifecycle) (*swarm.Swarm, error) { + sw, err := cfg.makeSwarm(eventBus, !cfg.DisableMetrics) + if err != nil { + return nil, err + } lifecycle.Append(fx.Hook{ OnStart: func(context.Context) error { // TODO: This method succeeds if listening on one address succeeds. 
We @@ -369,14 +490,13 @@ func (cfg *Config) NewNode() (host.Host, error) { return sw.Close() }, }) - return sw + return sw, nil }), fx.Provide(cfg.newBasicHost), fx.Provide(func(bh *bhost.BasicHost) host.Host { return bh }), fx.Provide(func(h *swarm.Swarm) peer.ID { return h.LocalPeer() }), - fx.Provide(func(h *swarm.Swarm) crypto.PrivKey { return h.Peerstore().PrivKey(h.LocalPeer()) }), } transportOpts, err := cfg.addTransports() if err != nil { @@ -394,26 +514,36 @@ func (cfg *Config) NewNode() (host.Host, error) { ) } - // Note: h.AddrsFactory may be changed by relayFinder, but non-relay version is - // used by AutoNAT below. - if cfg.EnableAutoRelay { - if !cfg.DisableMetrics { - mt := autorelay.WithMetricsTracer( - autorelay.NewMetricsTracer(autorelay.WithRegisterer(cfg.PrometheusRegisterer))) - mtOpts := []autorelay.Option{mt} - cfg.AutoRelayOpts = append(mtOpts, cfg.AutoRelayOpts...) - } - fxopts = append(fxopts, - fx.Invoke(func(h *bhost.BasicHost, lifecycle fx.Lifecycle) (*autorelay.AutoRelay, error) { + // originalAddrFactory is the AddrFactory before it's modified by autorelay + // we need this for checking reachability via autonat + originalAddrFactory := func(addrs []ma.Multiaddr) []ma.Multiaddr { + return addrs + } + + // enable autorelay + fxopts = append(fxopts, + fx.Invoke(func(h *bhost.BasicHost) { + originalAddrFactory = h.AddrsFactory + }), + fx.Invoke(func(h *bhost.BasicHost, lifecycle fx.Lifecycle) error { + if cfg.EnableAutoRelay { + if !cfg.DisableMetrics { + mt := autorelay.WithMetricsTracer( + autorelay.NewMetricsTracer(autorelay.WithRegisterer(cfg.PrometheusRegisterer))) + mtOpts := []autorelay.Option{mt} + cfg.AutoRelayOpts = append(mtOpts, cfg.AutoRelayOpts...) + } + ar, err := autorelay.NewAutoRelay(h, cfg.AutoRelayOpts...) 
if err != nil { - return nil, err + return err } lifecycle.Append(fx.StartStopHook(ar.Start, ar.Close)) - return ar, nil - }), - ) - } + return nil + } + return nil + }), + ) var bh *bhost.BasicHost fxopts = append(fxopts, fx.Invoke(func(bho *bhost.BasicHost) { bh = bho })) @@ -431,7 +561,7 @@ func (cfg *Config) NewNode() (host.Host, error) { return nil, err } - if err := cfg.addAutoNAT(bh); err != nil { + if err := cfg.addAutoNAT(bh, originalAddrFactory); err != nil { app.Stop(context.Background()) if cfg.Routing != nil { rh.Close() @@ -447,8 +577,7 @@ func (cfg *Config) NewNode() (host.Host, error) { return &closableBasicHost{App: app, BasicHost: bh}, nil } -func (cfg *Config) addAutoNAT(h *bhost.BasicHost) error { - addrF := h.AddrsFactory +func (cfg *Config) addAutoNAT(h *bhost.BasicHost, addrF AddrsFactory) error { autonatOpts := []autonat.Option{ autonat.UsingAddresses(func() []ma.Multiaddr { return addrF(h.AllAddrs()) @@ -487,10 +616,10 @@ func (cfg *Config) addAutoNAT(h *bhost.BasicHost) error { PeerKey: autonatPrivKey, Peerstore: ps, DialRanker: swarm.NoDelayDialRanker, + ResourceManager: cfg.ResourceManager, SwarmOpts: []swarm.Option{ - // It is better to disable black hole detection and just attempt a dial for autonat - swarm.WithUDPBlackHoleConfig(false, 0, 0), - swarm.WithIPv6BlackHoleConfig(false, 0, 0), + swarm.WithUDPBlackHoleSuccessCounter(nil), + swarm.WithIPv6BlackHoleSuccessCounter(nil), }, } diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go index 0b4067941bc..8f48c7bba17 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: pb/crypto.proto package pb @@ -229,7 +229,7 @@ func file_pb_crypto_proto_rawDescGZIP() []byte { var file_pb_crypto_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_pb_crypto_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_pb_crypto_proto_goTypes = []interface{}{ +var file_pb_crypto_proto_goTypes = []any{ (KeyType)(0), // 0: crypto.pb.KeyType (*PublicKey)(nil), // 1: crypto.pb.PublicKey (*PrivateKey)(nil), // 2: crypto.pb.PrivateKey @@ -250,7 +250,7 @@ func file_pb_crypto_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_pb_crypto_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pb_crypto_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*PublicKey); i { case 0: return &v.state @@ -262,7 +262,7 @@ func file_pb_crypto_proto_init() { return nil } } - file_pb_crypto_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_pb_crypto_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*PrivateKey); i { case 0: return &v.state diff --git a/vendor/github.com/libp2p/go-libp2p/core/network/network.go b/vendor/github.com/libp2p/go-libp2p/core/network/network.go index 22efbf235d8..d2e2bc818d7 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/network/network.go +++ b/vendor/github.com/libp2p/go-libp2p/core/network/network.go @@ -194,6 +194,9 @@ type Dialer interface { // Notify/StopNotify register and unregister a notifiee for signals Notify(Notifiee) StopNotify(Notifiee) + + // CanDial returns whether the dialer can dial peer p at addr + CanDial(p peer.ID, addr ma.Multiaddr) bool } // AddrDelay provides an address along with the delay after which the address diff --git a/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go b/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go index 2aa8a07aaac..9b37df65b73 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: pb/peer_record.proto package pb @@ -174,7 +174,7 @@ func file_pb_peer_record_proto_rawDescGZIP() []byte { } var file_pb_peer_record_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_pb_peer_record_proto_goTypes = []interface{}{ +var file_pb_peer_record_proto_goTypes = []any{ (*PeerRecord)(nil), // 0: peer.pb.PeerRecord (*PeerRecord_AddressInfo)(nil), // 1: peer.pb.PeerRecord.AddressInfo } @@ -193,7 +193,7 @@ func file_pb_peer_record_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_pb_peer_record_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pb_peer_record_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*PeerRecord); i { case 0: return &v.state @@ -205,7 +205,7 @@ func file_pb_peer_record_proto_init() { return nil } } - file_pb_peer_record_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_pb_peer_record_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*PeerRecord_AddressInfo); i { case 0: return &v.state diff --git a/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go b/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go index 1d3c7f25bc7..58a1be1d6ee 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: pb/envelope.proto package pb @@ -139,7 +139,7 @@ func file_pb_envelope_proto_rawDescGZIP() []byte { } var file_pb_envelope_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_pb_envelope_proto_goTypes = []interface{}{ +var file_pb_envelope_proto_goTypes = []any{ (*Envelope)(nil), // 0: record.pb.Envelope (*pb.PublicKey)(nil), // 1: crypto.pb.PublicKey } @@ -158,7 +158,7 @@ func file_pb_envelope_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_pb_envelope_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pb_envelope_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Envelope); i { case 0: return &v.state diff --git a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go index 16b910b4d06..35d46a0bd1c 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: pb/plaintext.proto package pb @@ -103,7 +103,7 @@ func file_pb_plaintext_proto_rawDescGZIP() []byte { } var file_pb_plaintext_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_pb_plaintext_proto_goTypes = []interface{}{ +var file_pb_plaintext_proto_goTypes = []any{ (*Exchange)(nil), // 0: plaintext.pb.Exchange (*pb.PublicKey)(nil), // 1: crypto.pb.PublicKey } @@ -122,7 +122,7 @@ func file_pb_plaintext_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_pb_plaintext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pb_plaintext_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Exchange); i { case 0: return &v.state diff --git a/vendor/github.com/libp2p/go-libp2p/defaults.go b/vendor/github.com/libp2p/go-libp2p/defaults.go index 8f7dc946716..daf8c461c69 100644 --- a/vendor/github.com/libp2p/go-libp2p/defaults.go +++ b/vendor/github.com/libp2p/go-libp2p/defaults.go @@ -10,10 +10,12 @@ import ( rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" "github.com/libp2p/go-libp2p/p2p/muxer/yamux" "github.com/libp2p/go-libp2p/p2p/net/connmgr" + "github.com/libp2p/go-libp2p/p2p/net/swarm" "github.com/libp2p/go-libp2p/p2p/security/noise" tls "github.com/libp2p/go-libp2p/p2p/security/tls" quic "github.com/libp2p/go-libp2p/p2p/transport/quic" "github.com/libp2p/go-libp2p/p2p/transport/tcp" + libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc" ws "github.com/libp2p/go-libp2p/p2p/transport/websocket" webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport" "github.com/prometheus/client_golang/prometheus" @@ -46,6 +48,7 @@ var DefaultTransports = ChainOptions( Transport(quic.NewTransport), Transport(ws.New), Transport(webtransport.New), + Transport(libp2pwebrtc.New), ) // DefaultPrivateTransports are the default libp2p transports when a PSK is supplied. @@ -81,9 +84,11 @@ var DefaultListenAddrs = func(cfg *Config) error { "/ip4/0.0.0.0/tcp/0", "/ip4/0.0.0.0/udp/0/quic-v1", "/ip4/0.0.0.0/udp/0/quic-v1/webtransport", + "/ip4/0.0.0.0/udp/0/webrtc-direct", "/ip6/::/tcp/0", "/ip6/::/udp/0/quic-v1", "/ip6/::/udp/0/quic-v1/webtransport", + "/ip6/::/udp/0/webrtc-direct", } listenAddrs := make([]multiaddr.Multiaddr, 0, len(addrs)) for _, s := range addrs { @@ -133,6 +138,18 @@ var DefaultPrometheusRegisterer = func(cfg *Config) error { return cfg.Apply(PrometheusRegisterer(prometheus.DefaultRegisterer)) } +var defaultUDPBlackHoleDetector = func(cfg *Config) error { + // A black hole is a binary property. On a network if UDP dials are blocked, all dials will + // fail. So a low success rate of 5 out 100 dials is good enough. + return cfg.Apply(UDPBlackHoleSuccessCounter(&swarm.BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "UDP"})) +} + +var defaultIPv6BlackHoleDetector = func(cfg *Config) error { + // A black hole is a binary property. On a network if there is no IPv6 connectivity, all + // dials will fail. So a low success rate of 5 out 100 dials is good enough. + return cfg.Apply(IPv6BlackHoleSuccessCounter(&swarm.BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "IPv6"})) +} + // Complete list of default options and when to fallback on them. // // Please *DON'T* specify default options any other way. 
Putting this all here @@ -189,6 +206,18 @@ var defaults = []struct { fallback: func(cfg *Config) bool { return !cfg.DisableMetrics && cfg.PrometheusRegisterer == nil }, opt: DefaultPrometheusRegisterer, }, + { + fallback: func(cfg *Config) bool { + return !cfg.CustomUDPBlackHoleSuccessCounter && cfg.UDPBlackHoleSuccessCounter == nil + }, + opt: defaultUDPBlackHoleDetector, + }, + { + fallback: func(cfg *Config) bool { + return !cfg.CustomIPv6BlackHoleSuccessCounter && cfg.IPv6BlackHoleSuccessCounter == nil + }, + opt: defaultIPv6BlackHoleDetector, + }, } // Defaults configures libp2p to use the default options. Can be combined with diff --git a/vendor/github.com/libp2p/go-libp2p/options.go b/vendor/github.com/libp2p/go-libp2p/options.go index de95251ad3f..1a8bc5dd550 100644 --- a/vendor/github.com/libp2p/go-libp2p/options.go +++ b/vendor/github.com/libp2p/go-libp2p/options.go @@ -609,3 +609,29 @@ func DisableIdentifyAddressDiscovery() Option { return nil } } + +// EnableAutoNATv2 enables autonat v2 +func EnableAutoNATv2() Option { + return func(cfg *Config) error { + cfg.EnableAutoNATv2 = true + return nil + } +} + +// UDPBlackHoleSuccessCounter configures libp2p to use f as the black hole filter for UDP addrs +func UDPBlackHoleSuccessCounter(f *swarm.BlackHoleSuccessCounter) Option { + return func(cfg *Config) error { + cfg.UDPBlackHoleSuccessCounter = f + cfg.CustomUDPBlackHoleSuccessCounter = true + return nil + } +} + +// IPv6BlackHoleSuccessCounter configures libp2p to use f as the black hole filter for IPv6 addrs +func IPv6BlackHoleSuccessCounter(f *swarm.BlackHoleSuccessCounter) Option { + return func(cfg *Config) error { + cfg.IPv6BlackHoleSuccessCounter = f + cfg.CustomIPv6BlackHoleSuccessCounter = true + return nil + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go index 2764883f445..41df2cb7eac 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: pb/autonat.proto package pb @@ -426,7 +426,7 @@ func file_pb_autonat_proto_rawDescGZIP() []byte { var file_pb_autonat_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_pb_autonat_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_pb_autonat_proto_goTypes = []interface{}{ +var file_pb_autonat_proto_goTypes = []any{ (Message_MessageType)(0), // 0: autonat.pb.Message.MessageType (Message_ResponseStatus)(0), // 1: autonat.pb.Message.ResponseStatus (*Message)(nil), // 2: autonat.pb.Message @@ -453,7 +453,7 @@ func file_pb_autonat_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_pb_autonat_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pb_autonat_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -465,7 +465,7 @@ func file_pb_autonat_proto_init() { return nil } } - file_pb_autonat_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_pb_autonat_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Message_PeerInfo); i { case 0: return &v.state @@ -477,7 +477,7 @@ func file_pb_autonat_proto_init() { return nil } } - file_pb_autonat_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_pb_autonat_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Message_Dial); i { case 0: return &v.state @@ -489,7 +489,7 @@ func file_pb_autonat_proto_init() { return nil } } - file_pb_autonat_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_pb_autonat_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Message_DialResponse); i { case 0: return &v.state diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go index 8fc808e6b62..7b7f8855fb3 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go @@ -24,6 +24,7 @@ import ( "github.com/libp2p/go-libp2p/p2p/host/eventbus" "github.com/libp2p/go-libp2p/p2p/host/pstoremanager" "github.com/libp2p/go-libp2p/p2p/host/relaysvc" + "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2" relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay" "github.com/libp2p/go-libp2p/p2p/protocol/holepunch" "github.com/libp2p/go-libp2p/p2p/protocol/identify" @@ -105,6 +106,8 @@ type BasicHost struct { caBook peerstore.CertifiedAddrBook autoNat autonat.AutoNAT + + autonatv2 *autonatv2.AutoNAT } var _ host.Host = (*BasicHost)(nil) @@ -167,6 +170,8 @@ type HostOpts struct { // DisableIdentifyAddressDiscovery disables address discovery using peer provided observed addresses in identify DisableIdentifyAddressDiscovery bool + EnableAutoNATv2 bool + AutoNATv2Dialer host.Host } // NewHost constructs a new *BasicHost and activates it by attaching its stream and connection handlers to the given inet.Network. @@ -279,6 +284,20 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) { if opts.AddrsFactory != nil { h.AddrsFactory = opts.AddrsFactory } + // This is a terrible hack. + // We want to use this AddrsFactory for autonat. Wrapping AddrsFactory here ensures + // that autonat receives addresses with the correct certhashes. + // + // This logic cannot be in Addrs method as autonat cannot use the Addrs method directly. 
+ // The autorelay package updates AddrsFactory to only provide p2p-circuit addresses when + // reachability is Private. + // + // Wrapping it here allows us to provide the wrapped AddrsFactory to autonat before + // autorelay updates it. + addrFactory := h.AddrsFactory + h.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr { + return h.addCertHashes(addrFactory(addrs)) + } if opts.NATManager != nil { h.natmgr = opts.NATManager(n) @@ -310,6 +329,17 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) { h.pings = ping.NewPingService(h) } + if opts.EnableAutoNATv2 { + var mt autonatv2.MetricsTracer + if opts.EnableMetrics { + mt = autonatv2.NewMetricsTracer(opts.PrometheusRegisterer) + } + h.autonatv2, err = autonatv2.New(h, opts.AutoNATv2Dialer, autonatv2.WithMetricsTracer(mt)) + if err != nil { + return nil, fmt.Errorf("failed to create autonatv2: %w", err) + } + } + n.SetStreamHandler(h.newStreamHandler) // register to be notified when the network's listen addrs change, @@ -398,6 +428,12 @@ func (h *BasicHost) Start() { h.psManager.Start() h.refCount.Add(1) h.ids.Start() + if h.autonatv2 != nil { + err := h.autonatv2.Start() + if err != nil { + log.Errorf("autonat v2 failed to start: %s", err) + } + } go h.background() } @@ -640,7 +676,7 @@ func (h *BasicHost) RemoveStreamHandler(pid protocol.ID) { // header with given protocol.ID. If there is no connection to p, attempts // to create one. If ProtocolID is "", writes no header. // (Thread-safe) -func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) { +func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (str network.Stream, strErr error) { // If the caller wants to prevent the host from dialing, it should use the NoDial option. if nodial, _ := network.GetNoDial(ctx); !nodial { err := h.Connect(ctx, peer.AddrInfo{ID: p}) @@ -658,6 +694,11 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I } return nil, fmt.Errorf("failed to open stream: %w", err) } + defer func() { + if strErr != nil && s != nil { + s.Reset() + } + }() // Wait for any in-progress identifies on the connection to finish. This // is faster than negotiating. @@ -667,13 +708,11 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I select { case <-h.ids.IdentifyWait(s.Conn()): case <-ctx.Done(): - _ = s.Reset() return nil, fmt.Errorf("identify failed to complete: %w", ctx.Err()) } pref, err := h.preferredProtocol(p, pids) if err != nil { - _ = s.Reset() return nil, err } @@ -698,7 +737,6 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I select { case err = <-errCh: if err != nil { - s.Reset() return nil, fmt.Errorf("failed to negotiate protocol: %w", err) } case <-ctx.Done(): @@ -708,8 +746,10 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I return nil, fmt.Errorf("failed to negotiate protocol: %w", ctx.Err()) } - s.SetProtocol(selected) - h.Peerstore().AddProtocols(p, selected) + if err := s.SetProtocol(selected); err != nil { + return nil, err + } + _ = h.Peerstore().AddProtocols(p, selected) // adding the protocol to the peerstore isn't critical return s, nil } @@ -778,47 +818,13 @@ func (h *BasicHost) ConnManager() connmgr.ConnManager { // Addrs returns listening addresses that are safe to announce to the network. // The output is the same as AllAddrs, but processed by AddrsFactory. 
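For context, a hedged caller-side sketch (not part of this patch; the package layout and filtering logic are illustrative): a user-supplied AddrsFactory only needs to filter or rewrite addresses, while the wrapping done in the constructor above ensures the announced webtransport and webrtc-direct addresses still carry their certhashes.

package main

import (
	"github.com/libp2p/go-libp2p"
	ma "github.com/multiformats/go-multiaddr"
	manet "github.com/multiformats/go-multiaddr/net"
)

func main() {
	h, err := libp2p.New(
		libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
			// Announce only public addresses; certhashes are appended by the
			// wrapped factory inside BasicHost, not here.
			out := make([]ma.Multiaddr, 0, len(addrs))
			for _, a := range addrs {
				if manet.IsPublicAddr(a) {
					out = append(out, a)
				}
			}
			return out
		}),
	)
	if err != nil {
		panic(err)
	}
	defer h.Close()
	_ = h.Addrs()
}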
func (h *BasicHost) Addrs() []ma.Multiaddr { - // This is a temporary workaround/hack that fixes #2233. Once we have a - // proper address pipeline, rework this. See the issue for more context. - type transportForListeninger interface { - TransportForListening(a ma.Multiaddr) transport.Transport - } - - type addCertHasher interface { - AddCertHashes(m ma.Multiaddr) (ma.Multiaddr, bool) - } - + // We don't need to append certhashes here, the user provided addrsFactory was + // wrapped with addCertHashes in the constructor. addrs := h.AddrsFactory(h.AllAddrs()) - - s, ok := h.Network().(transportForListeninger) - if !ok { - return addrs - } - - // Copy addrs slice since we'll be modifying it. - addrsOld := addrs - addrs = make([]ma.Multiaddr, len(addrsOld)) - copy(addrs, addrsOld) - - for i, addr := range addrs { - wtOK, wtN := libp2pwebtransport.IsWebtransportMultiaddr(addr) - webrtcOK, webrtcN := libp2pwebrtc.IsWebRTCDirectMultiaddr(addr) - if (wtOK && wtN == 0) || (webrtcOK && webrtcN == 0) { - t := s.TransportForListening(addr) - tpt, ok := t.(addCertHasher) - if !ok { - continue - } - addrWithCerthash, added := tpt.AddCertHashes(addr) - if !added { - log.Debugf("Couldn't add certhashes to multiaddr: %s", addr) - continue - } - addrs[i] = addrWithCerthash - } - } - - return addrs + // Make a copy. Consumers can modify the slice elements + res := make([]ma.Multiaddr, len(addrs)) + copy(res, addrs) + return res } // NormalizeMultiaddr returns a multiaddr suitable for equality checks. @@ -838,8 +844,9 @@ func (h *BasicHost) NormalizeMultiaddr(addr ma.Multiaddr) ma.Multiaddr { return addr } -// AllAddrs returns all the addresses of BasicHost at this moment in time. -// It's ok to not include addresses if they're not available to be used now. +// AllAddrs returns all the addresses the host is listening on except circuit addresses. +// The output has webtransport addresses inferred from quic addresses. +// All the addresses have the correct func (h *BasicHost) AllAddrs() []ma.Multiaddr { listenAddrs := h.Network().ListenAddresses() if len(listenAddrs) == 0 { @@ -932,82 +939,48 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr { finalAddrs = append(finalAddrs, observedAddrs...) } finalAddrs = ma.Unique(finalAddrs) - finalAddrs = inferWebtransportAddrsFromQuic(finalAddrs) - return finalAddrs } -var wtComponent = ma.StringCast("/webtransport") - -// inferWebtransportAddrsFromQuic infers more webtransport addresses from QUIC addresses. -// This is useful when we discover our public QUIC address, but haven't discovered our public WebTransport addrs. -// If we see that we are listening on the same port for QUIC and WebTransport, -// we can be pretty sure that the WebTransport addr will be reachable if the -// QUIC one is. -// We assume the input is deduped. -func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr { - // We need to check if we are listening on the same ip+port for QUIC and WebTransport. - // If not, there's nothing to do since we can't infer anything. - - // Count the number of QUIC addrs, this will let us allocate just once at the beginning. - quicAddrCount := 0 - for _, addr := range in { - if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 { - quicAddrCount++ - } +func (h *BasicHost) addCertHashes(addrs []ma.Multiaddr) []ma.Multiaddr { + // This is a temporary workaround/hack that fixes #2233. Once we have a + // proper address pipeline, rework this. See the issue for more context. 
+ type transportForListeninger interface { + TransportForListening(a ma.Multiaddr) transport.Transport } - quicOrWebtransportAddrs := make(map[string]struct{}, quicAddrCount) - webtransportAddrs := make(map[string]struct{}, quicAddrCount) - foundSameListeningAddr := false - for _, addr := range in { - isWebtransport, numCertHashes := libp2pwebtransport.IsWebtransportMultiaddr(addr) - if isWebtransport { - for i := 0; i < numCertHashes; i++ { - // Remove certhashes - addr, _ = ma.SplitLast(addr) - } - webtransportAddrs[string(addr.Bytes())] = struct{}{} - // Remove webtransport component, now it's a multiaddr that ends in /quic-v1 - addr, _ = ma.SplitLast(addr) - } - if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 { - bytes := addr.Bytes() - if _, ok := quicOrWebtransportAddrs[string(bytes)]; ok { - foundSameListeningAddr = true - } else { - quicOrWebtransportAddrs[string(bytes)] = struct{}{} - } - } + type addCertHasher interface { + AddCertHashes(m ma.Multiaddr) (ma.Multiaddr, bool) } - if !foundSameListeningAddr { - return in + s, ok := h.Network().(transportForListeninger) + if !ok { + return addrs } - if len(webtransportAddrs) == 0 { - // No webtransport addresses, we aren't listening on any webtransport - // address, so we shouldn't add any. - return in - } + // Copy addrs slice since we'll be modifying it. + addrsOld := addrs + addrs = make([]ma.Multiaddr, len(addrsOld)) + copy(addrs, addrsOld) - out := make([]ma.Multiaddr, 0, len(in)+(quicAddrCount-len(webtransportAddrs))) - for _, addr := range in { - // Add all the original addresses - out = append(out, addr) - if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 { - // Convert quic to webtransport - addr = addr.Encapsulate(wtComponent) - if _, ok := webtransportAddrs[string(addr.Bytes())]; ok { - // We already have this address + for i, addr := range addrs { + wtOK, wtN := libp2pwebtransport.IsWebtransportMultiaddr(addr) + webrtcOK, webrtcN := libp2pwebrtc.IsWebRTCDirectMultiaddr(addr) + if (wtOK && wtN == 0) || (webrtcOK && webrtcN == 0) { + t := s.TransportForListening(addr) + tpt, ok := t.(addCertHasher) + if !ok { + continue + } + addrWithCerthash, added := tpt.AddCertHashes(addr) + if !added { + log.Debugf("Couldn't add certhashes to multiaddr: %s", addr) continue } - // Add the new inferred address - out = append(out, addr) + addrs[i] = addrWithCerthash } } - - return out + return addrs } func trimHostAddrList(addrs []ma.Multiaddr, maxSize int) []ma.Multiaddr { @@ -1100,10 +1073,17 @@ func (h *BasicHost) Close() error { if h.hps != nil { h.hps.Close() } + if h.autonatv2 != nil { + h.autonatv2.Close() + } _ = h.emitters.evtLocalProtocolsUpdated.Close() _ = h.emitters.evtLocalAddrsUpdated.Close() + if err := h.network.Close(); err != nil { + log.Errorf("swarm close failed: %v", err) + } + h.psManager.Close() if h.Peerstore() != nil { h.Peerstore().Close() diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go new file mode 100644 index 00000000000..0cf6642f692 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go @@ -0,0 +1,233 @@ +package blankhost + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + 
"github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" + "github.com/libp2p/go-libp2p/core/record" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" + + logging "github.com/ipfs/go-log/v2" + + ma "github.com/multiformats/go-multiaddr" + mstream "github.com/multiformats/go-multistream" +) + +var log = logging.Logger("blankhost") + +// BlankHost is the thinnest implementation of the host.Host interface +type BlankHost struct { + n network.Network + mux *mstream.MultistreamMuxer[protocol.ID] + cmgr connmgr.ConnManager + eventbus event.Bus + emitters struct { + evtLocalProtocolsUpdated event.Emitter + } +} + +type config struct { + cmgr connmgr.ConnManager + eventBus event.Bus +} + +type Option = func(cfg *config) + +func WithConnectionManager(cmgr connmgr.ConnManager) Option { + return func(cfg *config) { + cfg.cmgr = cmgr + } +} + +func WithEventBus(eventBus event.Bus) Option { + return func(cfg *config) { + cfg.eventBus = eventBus + } +} + +func NewBlankHost(n network.Network, options ...Option) *BlankHost { + cfg := config{ + cmgr: &connmgr.NullConnMgr{}, + } + for _, opt := range options { + opt(&cfg) + } + + bh := &BlankHost{ + n: n, + cmgr: cfg.cmgr, + mux: mstream.NewMultistreamMuxer[protocol.ID](), + eventbus: cfg.eventBus, + } + if bh.eventbus == nil { + bh.eventbus = eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer())) + } + + // subscribe the connection manager to network notifications (has no effect with NullConnMgr) + n.Notify(bh.cmgr.Notifee()) + + var err error + if bh.emitters.evtLocalProtocolsUpdated, err = bh.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}); err != nil { + return nil + } + + n.SetStreamHandler(bh.newStreamHandler) + + // persist a signed peer record for self to the peerstore. 
+ if err := bh.initSignedRecord(); err != nil { + log.Errorf("error creating blank host, err=%s", err) + return nil + } + + return bh +} + +func (bh *BlankHost) initSignedRecord() error { + cab, ok := peerstore.GetCertifiedAddrBook(bh.n.Peerstore()) + if !ok { + log.Error("peerstore does not support signed records") + return errors.New("peerstore does not support signed records") + } + rec := peer.PeerRecordFromAddrInfo(peer.AddrInfo{ID: bh.ID(), Addrs: bh.Addrs()}) + ev, err := record.Seal(rec, bh.Peerstore().PrivKey(bh.ID())) + if err != nil { + log.Errorf("failed to create signed record for self, err=%s", err) + return fmt.Errorf("failed to create signed record for self, err=%s", err) + } + _, err = cab.ConsumePeerRecord(ev, peerstore.PermanentAddrTTL) + if err != nil { + log.Errorf("failed to persist signed record to peerstore,err=%s", err) + return fmt.Errorf("failed to persist signed record for self, err=%s", err) + } + return err +} + +var _ host.Host = (*BlankHost)(nil) + +func (bh *BlankHost) Addrs() []ma.Multiaddr { + addrs, err := bh.n.InterfaceListenAddresses() + if err != nil { + log.Debug("error retrieving network interface addrs: ", err) + return nil + } + + return addrs +} + +func (bh *BlankHost) Close() error { + return bh.n.Close() +} + +func (bh *BlankHost) Connect(ctx context.Context, ai peer.AddrInfo) error { + // absorb addresses into peerstore + bh.Peerstore().AddAddrs(ai.ID, ai.Addrs, peerstore.TempAddrTTL) + + cs := bh.n.ConnsToPeer(ai.ID) + if len(cs) > 0 { + return nil + } + + _, err := bh.Network().DialPeer(ctx, ai.ID) + if err != nil { + return fmt.Errorf("failed to dial: %w", err) + } + return err +} + +func (bh *BlankHost) Peerstore() peerstore.Peerstore { + return bh.n.Peerstore() +} + +func (bh *BlankHost) ID() peer.ID { + return bh.n.LocalPeer() +} + +func (bh *BlankHost) NewStream(ctx context.Context, p peer.ID, protos ...protocol.ID) (network.Stream, error) { + s, err := bh.n.NewStream(ctx, p) + if err != nil { + return nil, fmt.Errorf("failed to open stream: %w", err) + } + + selected, err := mstream.SelectOneOf(protos, s) + if err != nil { + s.Reset() + return nil, fmt.Errorf("failed to negotiate protocol: %w", err) + } + + s.SetProtocol(selected) + bh.Peerstore().AddProtocols(p, selected) + + return s, nil +} + +func (bh *BlankHost) RemoveStreamHandler(pid protocol.ID) { + bh.Mux().RemoveHandler(pid) + bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{ + Removed: []protocol.ID{pid}, + }) +} + +func (bh *BlankHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) { + bh.Mux().AddHandler(pid, func(p protocol.ID, rwc io.ReadWriteCloser) error { + is := rwc.(network.Stream) + is.SetProtocol(p) + handler(is) + return nil + }) + bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{ + Added: []protocol.ID{pid}, + }) +} + +func (bh *BlankHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) { + bh.Mux().AddHandlerWithFunc(pid, m, func(p protocol.ID, rwc io.ReadWriteCloser) error { + is := rwc.(network.Stream) + is.SetProtocol(p) + handler(is) + return nil + }) + bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{ + Added: []protocol.ID{pid}, + }) +} + +// newStreamHandler is the remote-opened stream handler for network.Network +func (bh *BlankHost) newStreamHandler(s network.Stream) { + protoID, handle, err := bh.Mux().Negotiate(s) + if err != nil { + log.Infow("protocol negotiation failed", "error", err) + s.Reset() + return + } + 
+ s.SetProtocol(protoID) + + handle(protoID, s) +} + +// TODO: i'm not sure this really needs to be here +func (bh *BlankHost) Mux() protocol.Switch { + return bh.mux +} + +// TODO: also not sure this fits... Might be better ways around this (leaky abstractions) +func (bh *BlankHost) Network() network.Network { + return bh.n +} + +func (bh *BlankHost) ConnManager() connmgr.ConnManager { + return bh.cmgr +} + +func (bh *BlankHost) EventBus() event.Bus { + return bh.eventbus +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/conn.go index b07016ce849..5fd8a114a59 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/conn.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/conn.go @@ -5,6 +5,9 @@ import ma "github.com/multiformats/go-multiaddr" var transports = [...]int{ma.P_CIRCUIT, ma.P_WEBRTC, ma.P_WEBRTC_DIRECT, ma.P_WEBTRANSPORT, ma.P_QUIC, ma.P_QUIC_V1, ma.P_WSS, ma.P_WS, ma.P_TCP} func GetTransport(a ma.Multiaddr) string { + if a == nil { + return "other" + } for _, t := range transports { if _, err := a.ValueForProtocol(t); err == nil { return ma.ProtocolWithCode(t).Name @@ -15,15 +18,17 @@ func GetTransport(a ma.Multiaddr) string { func GetIPVersion(addr ma.Multiaddr) string { version := "unknown" + if addr == nil { + return version + } ma.ForEach(addr, func(c ma.Component) bool { - if c.Protocol().Code == ma.P_IP4 { + switch c.Protocol().Code { + case ma.P_IP4, ma.P_DNS4: version = "ip4" - return false - } else if c.Protocol().Code == ma.P_IP6 { + case ma.P_IP6, ma.P_DNS6: version = "ip6" - return false } - return true + return false }) return version } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/black_hole_detector.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/black_hole_detector.go index dd7849eea67..54782c1c01f 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/black_hole_detector.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/black_hole_detector.go @@ -29,35 +29,26 @@ func (st blackHoleState) String() string { } } -type blackHoleResult int - -const ( - blackHoleResultAllowed blackHoleResult = iota - blackHoleResultProbing - blackHoleResultBlocked -) - -// blackHoleFilter provides black hole filtering for dials. This filter should be used in -// concert with a UDP of IPv6 address filter to detect UDP or IPv6 black hole. In a black -// holed environments dial requests are blocked and only periodic probes to check the -// state of the black hole are allowed. -// -// Requests are blocked if the number of successes in the last n dials is less than -// minSuccesses. If a request succeeds in Blocked state, the filter state is reset and n -// subsequent requests are allowed before reevaluating black hole state. Dials cancelled -// when some other concurrent dial succeeded are counted as failures. A sufficiently large -// n prevents false negatives in such cases. -type blackHoleFilter struct { - // n serves the dual purpose of being the minimum number of requests after which we - // probe the state of the black hole in blocked state and the minimum number of - // completed dials required before evaluating black hole state. - n int - // minSuccesses is the minimum number of Success required in the last n dials +// BlackHoleSuccessCounter provides black hole filtering for dials. This filter should be used in concert +// with a UDP or IPv6 address filter to detect UDP or IPv6 black hole. 
In a black holed environment, + dial requests are refused. Requests are blocked if the number of successes in the last N dials is + less than MinSuccesses. + If a request succeeds in Blocked state, the filter state is reset and N subsequent requests are + allowed before reevaluating black hole state. Dials cancelled when some other concurrent dial + succeeded are counted as failures. A sufficiently large N prevents false negatives in such cases. +type BlackHoleSuccessCounter struct { + // N is + // 1. The minimum number of completed dials required before evaluating black hole state + // 2. the minimum number of requests after which we probe the state of the black hole in + // blocked state + N int + // MinSuccesses is the minimum number of Success required in the last n dials // to consider we are not blocked. - minSuccesses int - // name for the detector. - name string + MinSuccesses int + // Name for the detector. + Name string + mu sync.Mutex // requests counts number of dial requests to peers. We handle request at a peer // level and record results at individual address dial level. requests int @@ -67,22 +58,19 @@ type blackHoleFilter struct { successes int // state is the current state of the detector state blackHoleState - - mu sync.Mutex - metricsTracer MetricsTracer } -// RecordResult records the outcome of a dial. A successful dial will change the state -// of the filter to Allowed. A failed dial only blocks subsequent requests if the success +// RecordResult records the outcome of a dial. A successful dial in Blocked state will change the +// state of the filter to Probing. A failed dial only blocks subsequent requests if the success // fraction over the last n outcomes is less than the minSuccessFraction of the filter. -func (b *blackHoleFilter) RecordResult(success bool) { +func (b *BlackHoleSuccessCounter) RecordResult(success bool) { b.mu.Lock() defer b.mu.Unlock() if b.state == blackHoleStateBlocked && success { // If the call succeeds in a blocked state we reset to allowed. // This is better than slowly accumulating values till we cross the minSuccessFraction - // threshold since a blackhole is a binary property. + // threshold since a black hole is a binary property. b.reset() return } @@ -92,7 +80,7 @@ func (b *blackHoleFilter) RecordResult(success bool) { } b.dialResults = append(b.dialResults, success) - if len(b.dialResults) > b.n { + if len(b.dialResults) > b.N { if b.dialResults[0] { b.successes-- } @@ -100,58 +88,68 @@ func (b *blackHoleFilter) RecordResult(success bool) { } b.updateState() - b.trackMetrics() } // HandleRequest returns the result of applying the black hole filter for the request.
-func (b *blackHoleFilter) HandleRequest() blackHoleResult { +func (b *BlackHoleSuccessCounter) HandleRequest() blackHoleState { b.mu.Lock() defer b.mu.Unlock() b.requests++ - b.trackMetrics() - if b.state == blackHoleStateAllowed { - return blackHoleResultAllowed - } else if b.state == blackHoleStateProbing || b.requests%b.n == 0 { - return blackHoleResultProbing + return blackHoleStateAllowed + } else if b.state == blackHoleStateProbing || b.requests%b.N == 0 { + return blackHoleStateProbing } else { - return blackHoleResultBlocked + return blackHoleStateBlocked } } -func (b *blackHoleFilter) reset() { +func (b *BlackHoleSuccessCounter) reset() { b.successes = 0 b.dialResults = b.dialResults[:0] b.requests = 0 b.updateState() } -func (b *blackHoleFilter) updateState() { +func (b *BlackHoleSuccessCounter) updateState() { st := b.state - if len(b.dialResults) < b.n { + if len(b.dialResults) < b.N { b.state = blackHoleStateProbing - } else if b.successes >= b.minSuccesses { + } else if b.successes >= b.MinSuccesses { b.state = blackHoleStateAllowed } else { b.state = blackHoleStateBlocked } if st != b.state { - log.Debugf("%s blackHoleDetector state changed from %s to %s", b.name, st, b.state) + log.Debugf("%s blackHoleDetector state changed from %s to %s", b.Name, st, b.state) } } -func (b *blackHoleFilter) trackMetrics() { - if b.metricsTracer == nil { - return - } +func (b *BlackHoleSuccessCounter) State() blackHoleState { + b.mu.Lock() + defer b.mu.Unlock() + + return b.state +} - nextRequestAllowedAfter := 0 +type blackHoleInfo struct { + name string + state blackHoleState + nextProbeAfter int + successFraction float64 +} + +func (b *BlackHoleSuccessCounter) info() blackHoleInfo { + b.mu.Lock() + defer b.mu.Unlock() + + nextProbeAfter := 0 if b.state == blackHoleStateBlocked { - nextRequestAllowedAfter = b.n - (b.requests % b.n) + nextProbeAfter = b.N - (b.requests % b.N) } successFraction := 0.0 @@ -159,22 +157,27 @@ func (b *blackHoleFilter) trackMetrics() { successFraction = float64(b.successes) / float64(len(b.dialResults)) } - b.metricsTracer.UpdatedBlackHoleFilterState( - b.name, - b.state, - nextRequestAllowedAfter, - successFraction, - ) + return blackHoleInfo{ + name: b.Name, + state: b.state, + nextProbeAfter: nextProbeAfter, + successFraction: successFraction, + } } -// blackHoleDetector provides UDP and IPv6 black hole detection using a `blackHoleFilter` -// for each. For details of the black hole detection logic see `blackHoleFilter`. +// blackHoleDetector provides UDP and IPv6 black hole detection using a `BlackHoleSuccessCounter` for each. +// For details of the black hole detection logic see `BlackHoleSuccessCounter`. +// In Read Only mode, detector doesn't update the state of underlying filters and refuses requests +// when black hole state is unknown. This is useful for Swarms made specifically for services like +// AutoNAT where we care about accurately reporting the reachability of a peer. // -// black hole filtering is done at a peer dial level to ensure that periodic probes to -// detect change of the black hole state are actually dialed and are not skipped -// because of dial prioritisation logic. +// Black hole filtering is done at a peer dial level to ensure that periodic probes to detect change +// of the black hole state are actually dialed and are not skipped because of dial prioritisation +// logic. 
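A minimal sketch of the renamed counter's behaviour, written as if it lived inside the swarm package (the example function is hypothetical): after N dials with fewer than MinSuccesses successes the counter reports the blocked state, HandleRequest lets a probe through every N requests, and one successful dial resets it.

func exampleBlackHoleSuccessCounter() {
	c := &BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "UDP"}
	for i := 0; i < 100; i++ {
		c.RecordResult(false) // every UDP dial failed
	}
	// With fewer than MinSuccesses successes in the last N results the state is
	// blocked: HandleRequest refuses dials, except that every N-th request is
	// allowed through as a probe.
	_ = c.HandleRequest()
	c.RecordResult(true) // a successful probe resets the counter to the probing state
}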
type blackHoleDetector struct { - udp, ipv6 *blackHoleFilter + udp, ipv6 *BlackHoleSuccessCounter + mt MetricsTracer + readOnly bool } // FilterAddrs filters the peer's addresses removing black holed addresses @@ -192,14 +195,16 @@ func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) (valid []ma.Multia } } - udpRes := blackHoleResultAllowed + udpRes := blackHoleStateAllowed if d.udp != nil && hasUDP { - udpRes = d.udp.HandleRequest() + udpRes = d.getFilterState(d.udp) + d.trackMetrics(d.udp) } - ipv6Res := blackHoleResultAllowed + ipv6Res := blackHoleStateAllowed if d.ipv6 != nil && hasIPv6 { - ipv6Res = d.ipv6.HandleRequest() + ipv6Res = d.getFilterState(d.ipv6) + d.trackMetrics(d.ipv6) } blackHoled = make([]ma.Multiaddr, 0, len(addrs)) @@ -210,19 +215,19 @@ func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) (valid []ma.Multia return true } // allow all UDP addresses while probing irrespective of IPv6 black hole state - if udpRes == blackHoleResultProbing && isProtocolAddr(a, ma.P_UDP) { + if udpRes == blackHoleStateProbing && isProtocolAddr(a, ma.P_UDP) { return true } // allow all IPv6 addresses while probing irrespective of UDP black hole state - if ipv6Res == blackHoleResultProbing && isProtocolAddr(a, ma.P_IP6) { + if ipv6Res == blackHoleStateProbing && isProtocolAddr(a, ma.P_IP6) { return true } - if udpRes == blackHoleResultBlocked && isProtocolAddr(a, ma.P_UDP) { + if udpRes == blackHoleStateBlocked && isProtocolAddr(a, ma.P_UDP) { blackHoled = append(blackHoled, a) return false } - if ipv6Res == blackHoleResultBlocked && isProtocolAddr(a, ma.P_IP6) { + if ipv6Res == blackHoleStateBlocked && isProtocolAddr(a, ma.P_IP6) { blackHoled = append(blackHoled, a) return false } @@ -231,49 +236,36 @@ func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) (valid []ma.Multia ), blackHoled } -// RecordResult updates the state of the relevant `blackHoleFilter`s for addr +// RecordResult updates the state of the relevant BlackHoleSuccessCounters for addr func (d *blackHoleDetector) RecordResult(addr ma.Multiaddr, success bool) { - if !manet.IsPublicAddr(addr) { + if d.readOnly || !manet.IsPublicAddr(addr) { return } if d.udp != nil && isProtocolAddr(addr, ma.P_UDP) { d.udp.RecordResult(success) + d.trackMetrics(d.udp) } if d.ipv6 != nil && isProtocolAddr(addr, ma.P_IP6) { d.ipv6.RecordResult(success) + d.trackMetrics(d.ipv6) } } -// blackHoleConfig is the config used for black hole detection -type blackHoleConfig struct { - // Enabled enables black hole detection - Enabled bool - // N is the size of the sliding window used to evaluate black hole state - N int - // MinSuccesses is the minimum number of successes out of N required to not - // block requests - MinSuccesses int -} - -func newBlackHoleDetector(udpConfig, ipv6Config blackHoleConfig, mt MetricsTracer) *blackHoleDetector { - d := &blackHoleDetector{} - - if udpConfig.Enabled { - d.udp = &blackHoleFilter{ - n: udpConfig.N, - minSuccesses: udpConfig.MinSuccesses, - name: "UDP", - metricsTracer: mt, +func (d *blackHoleDetector) getFilterState(f *BlackHoleSuccessCounter) blackHoleState { + if d.readOnly { + if f.State() != blackHoleStateAllowed { + return blackHoleStateBlocked } + return blackHoleStateAllowed } + return f.HandleRequest() +} - if ipv6Config.Enabled { - d.ipv6 = &blackHoleFilter{ - n: ipv6Config.N, - minSuccesses: ipv6Config.MinSuccesses, - name: "IPv6", - metricsTracer: mt, - } +func (d *blackHoleDetector) trackMetrics(f *BlackHoleSuccessCounter) { + if d.readOnly || d.mt == nil { + return } - 
return d + // Track metrics only in non readOnly state + info := f.info() + d.mt.UpdatedBlackHoleSuccessCounter(info.name, info.state, info.nextProbeAfter, info.successFraction) } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go index 0cac6e4fa39..360a99e2ab1 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go @@ -254,7 +254,7 @@ loop: if simConnect, _, _ := network.GetSimultaneousConnect(ad.ctx); !simConnect { ad.ctx = network.WithSimultaneousConnect(ad.ctx, isClient, reason) // update the element in dq to use the simultaneous connect delay. - dq.Add(network.AddrDelay{ + dq.UpdateOrAdd(network.AddrDelay{ Addr: ad.addr, Delay: addrDelay[string(ad.addr.Bytes())], }) @@ -436,12 +436,31 @@ type dialQueue struct { // newDialQueue returns a new dialQueue func newDialQueue() *dialQueue { - return &dialQueue{q: make([]network.AddrDelay, 0, 16)} + return &dialQueue{ + q: make([]network.AddrDelay, 0, 16), + } } -// Add adds adelay to the queue. If another element exists in the queue with -// the same address, it replaces that element. +// Add adds a new element to the dialQueue. To update an element use UpdateOrAdd. func (dq *dialQueue) Add(adelay network.AddrDelay) { + for i := dq.Len() - 1; i >= 0; i-- { + if dq.q[i].Delay <= adelay.Delay { + // insert at pos i+1 + dq.q = append(dq.q, network.AddrDelay{}) // extend the slice + copy(dq.q[i+2:], dq.q[i+1:]) + dq.q[i+1] = adelay + return + } + } + // insert at position 0 + dq.q = append(dq.q, network.AddrDelay{}) // extend the slice + copy(dq.q[1:], dq.q[0:]) + dq.q[0] = adelay +} + +// UpdateOrAdd updates the elements with address adelay.Addr to the new delay +// Useful when hole punching +func (dq *dialQueue) UpdateOrAdd(adelay network.AddrDelay) { for i := 0; i < dq.Len(); i++ { if dq.q[i].Addr.Equal(adelay.Addr) { if dq.q[i].Delay == adelay.Delay { @@ -451,19 +470,9 @@ func (dq *dialQueue) Add(adelay network.AddrDelay) { // remove the element copy(dq.q[i:], dq.q[i+1:]) dq.q = dq.q[:len(dq.q)-1] - break - } - } - - for i := 0; i < dq.Len(); i++ { - if dq.q[i].Delay > adelay.Delay { - dq.q = append(dq.q, network.AddrDelay{}) // extend the slice - copy(dq.q[i+1:], dq.q[i:]) - dq.q[i] = adelay - return } } - dq.q = append(dq.q, adelay) + dq.Add(adelay) } // NextBatch returns all the elements in the queue with the highest priority diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go index 7897277cc70..01275555522 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go @@ -32,6 +32,8 @@ const ( // This includes the time between dialing the raw network connection, // protocol selection as well the handshake, if applicable. 
defaultDialTimeoutLocal = 5 * time.Second + + defaultNewStreamTimeout = 15 * time.Second ) var log = logging.Logger("swarm2") @@ -112,22 +114,33 @@ func WithDialRanker(d network.DialRanker) Option { } } -// WithUDPBlackHoleConfig configures swarm to use c as the config for UDP black hole detection +// WithUDPBlackHoleSuccessCounter configures swarm to use the provided config for UDP black hole detection // n is the size of the sliding window used to evaluate black hole state // min is the minimum number of successes out of n required to not block requests -func WithUDPBlackHoleConfig(enabled bool, n, min int) Option { +func WithUDPBlackHoleSuccessCounter(f *BlackHoleSuccessCounter) Option { return func(s *Swarm) error { - s.udpBlackHoleConfig = blackHoleConfig{Enabled: enabled, N: n, MinSuccesses: min} + s.udpBHF = f return nil } } -// WithIPv6BlackHoleConfig configures swarm to use c as the config for IPv6 black hole detection +// WithIPv6BlackHoleSuccessCounter configures swarm to use the provided config for IPv6 black hole detection // n is the size of the sliding window used to evaluate black hole state // min is the minimum number of successes out of n required to not block requests -func WithIPv6BlackHoleConfig(enabled bool, n, min int) Option { +func WithIPv6BlackHoleSuccessCounter(f *BlackHoleSuccessCounter) Option { return func(s *Swarm) error { - s.ipv6BlackHoleConfig = blackHoleConfig{Enabled: enabled, N: n, MinSuccesses: min} + s.ipv6BHF = f + return nil + } +} + +// WithReadOnlyBlackHoleDetector configures the swarm to use the black hole detector in +// read only mode. In Read Only mode dial requests are refused in unknown state and +// no updates to the detector state are made. This is useful for services like AutoNAT that +// care about accurately providing reachability info. +func WithReadOnlyBlackHoleDetector() Option { + return func(s *Swarm) error { + s.readOnlyBHD = true return nil } } @@ -203,10 +216,11 @@ type Swarm struct { dialRanker network.DialRanker - udpBlackHoleConfig blackHoleConfig - ipv6BlackHoleConfig blackHoleConfig - bhd *blackHoleDetector connectednessEventEmitter *connectednessEventEmitter + udpBHF *BlackHoleSuccessCounter + ipv6BHF *BlackHoleSuccessCounter + bhd *blackHoleDetector + readOnlyBHD bool } // NewSwarm constructs a Swarm. @@ -230,8 +244,8 @@ func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts // A black hole is a binary property. On a network if UDP dials are blocked or there is // no IPv6 connectivity, all dials will fail. So a low success rate of 5 out 100 dials // is good enough. 
- udpBlackHoleConfig: blackHoleConfig{Enabled: true, N: 100, MinSuccesses: 5}, - ipv6BlackHoleConfig: blackHoleConfig{Enabled: true, N: 100, MinSuccesses: 5}, + udpBHF: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "UDP"}, + ipv6BHF: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "IPv6"}, } s.conns.m = make(map[peer.ID][]*Conn) @@ -255,7 +269,12 @@ func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts s.limiter = newDialLimiter(s.dialAddr) s.backf.init(s.ctx) - s.bhd = newBlackHoleDetector(s.udpBlackHoleConfig, s.ipv6BlackHoleConfig, s.metricsTracer) + s.bhd = &blackHoleDetector{ + udp: s.udpBHF, + ipv6: s.ipv6BHF, + mt: s.metricsTracer, + readOnly: s.readOnlyBHD, + } return s, nil } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go index 38e942cce81..5fd41c8d9f2 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go @@ -209,9 +209,18 @@ func (c *Conn) NewStream(ctx context.Context) (network.Stream, error) { return nil, err } + if _, ok := ctx.Deadline(); !ok { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, defaultNewStreamTimeout) + defer cancel() + } + s, err := c.openAndAddStream(ctx, scope) if err != nil { scope.Done() + if errors.Is(err, context.DeadlineExceeded) { + err = fmt.Errorf("timed out: %w", err) + } return nil, err } return s, nil diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go index f639ce16a24..446ece45046 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go @@ -416,6 +416,11 @@ func (s *Swarm) dialNextAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr, return nil } +func (s *Swarm) CanDial(p peer.ID, addr ma.Multiaddr) bool { + dialable, _ := s.filterKnownUndialables(p, []ma.Multiaddr{addr}) + return len(dialable) > 0 +} + func (s *Swarm) nonProxyAddr(addr ma.Multiaddr) bool { t := s.TransportForDialing(addr) return !t.Proxy() diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go index 0905e84513f..e94db44a429 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go @@ -3,6 +3,7 @@ package swarm import ( "errors" "fmt" + "slices" "time" "github.com/libp2p/go-libp2p/core/canonicallog" @@ -12,13 +13,44 @@ import ( ma "github.com/multiformats/go-multiaddr" ) +type OrderedListener interface { + // Transports optionally implement this interface to indicate the relative + // ordering that listeners should be setup. Some transports may optionally + // make use of other listeners if they are setup. e.g. WebRTC may reuse the + // same UDP port as QUIC, but only when QUIC is setup first. + // lower values are setup first. + ListenOrder() int +} + // Listen sets up listeners for all of the given addresses. // It returns as long as we successfully listen on at least *one* address. 
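A small sketch of the new ordered listener setup (the transport type below is hypothetical, not part of this patch): transports reporting a lower ListenOrder are set up first, so a transport that reuses another listener's socket, as WebRTC does with QUIC's UDP port, returns a higher value.

// reusingTransport piggybacks on a listener created by an order-0 transport
// (e.g. QUIC), so it asks the swarm to set it up later.
type reusingTransport struct {
	transport.Transport // assumes the core/transport package is imported
}

func (t *reusingTransport) ListenOrder() int { return 1 }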
func (s *Swarm) Listen(addrs ...ma.Multiaddr) error { errs := make([]error, len(addrs)) var succeeded int - for i, a := range addrs { - if err := s.AddListenAddr(a); err != nil { + + type addrAndListener struct { + addr ma.Multiaddr + lTpt transport.Transport + } + sortedAddrsAndTpts := make([]addrAndListener, 0, len(addrs)) + for _, a := range addrs { + t := s.TransportForListening(a) + sortedAddrsAndTpts = append(sortedAddrsAndTpts, addrAndListener{addr: a, lTpt: t}) + } + slices.SortFunc(sortedAddrsAndTpts, func(a, b addrAndListener) int { + aOrder := 0 + bOrder := 0 + if l, ok := a.lTpt.(OrderedListener); ok { + aOrder = l.ListenOrder() + } + if l, ok := b.lTpt.(OrderedListener); ok { + bOrder = l.ListenOrder() + } + return aOrder - bOrder + }) + + for i, a := range sortedAddrsAndTpts { + if err := s.AddListenAddr(a.addr); err != nil { errs[i] = err } else { succeeded++ @@ -27,11 +59,11 @@ func (s *Swarm) Listen(addrs ...ma.Multiaddr) error { for i, e := range errs { if e != nil { - log.Warnw("listening failed", "on", addrs[i], "error", errs[i]) + log.Warnw("listening failed", "on", sortedAddrsAndTpts[i].addr, "error", errs[i]) } } - if succeeded == 0 && len(addrs) > 0 { + if succeeded == 0 && len(sortedAddrsAndTpts) > 0 { return fmt.Errorf("failed to listen on any addresses: %s", errs) } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go index b5c0f2e4990..929f3f49466 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go @@ -85,7 +85,7 @@ var ( Buckets: []float64{0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 2}, }, ) - blackHoleFilterState = prometheus.NewGaugeVec( + blackHoleSuccessCounterState = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: metricNamespace, Name: "black_hole_filter_state", @@ -93,7 +93,7 @@ var ( }, []string{"name"}, ) - blackHoleFilterSuccessFraction = prometheus.NewGaugeVec( + blackHoleSuccessCounterSuccessFraction = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: metricNamespace, Name: "black_hole_filter_success_fraction", @@ -101,7 +101,7 @@ var ( }, []string{"name"}, ) - blackHoleFilterNextRequestAllowedAfter = prometheus.NewGaugeVec( + blackHoleSuccessCounterNextRequestAllowedAfter = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: metricNamespace, Name: "black_hole_filter_next_request_allowed_after", @@ -118,9 +118,9 @@ var ( connHandshakeLatency, dialsPerPeer, dialRankingDelay, - blackHoleFilterSuccessFraction, - blackHoleFilterState, - blackHoleFilterNextRequestAllowedAfter, + blackHoleSuccessCounterSuccessFraction, + blackHoleSuccessCounterState, + blackHoleSuccessCounterNextRequestAllowedAfter, } ) @@ -131,7 +131,7 @@ type MetricsTracer interface { FailedDialing(ma.Multiaddr, error, error) DialCompleted(success bool, totalDials int) DialRankingDelay(d time.Duration) - UpdatedBlackHoleFilterState(name string, state blackHoleState, nextProbeAfter int, successFraction float64) + UpdatedBlackHoleSuccessCounter(name string, state blackHoleState, nextProbeAfter int, successFraction float64) } type metricsTracer struct{} @@ -274,14 +274,14 @@ func (m *metricsTracer) DialRankingDelay(d time.Duration) { dialRankingDelay.Observe(d.Seconds()) } -func (m *metricsTracer) UpdatedBlackHoleFilterState(name string, state blackHoleState, +func (m *metricsTracer) UpdatedBlackHoleSuccessCounter(name string, state blackHoleState, nextProbeAfter int, 
successFraction float64) { tags := metricshelper.GetStringSlice() defer metricshelper.PutStringSlice(tags) *tags = append(*tags, name) - blackHoleFilterState.WithLabelValues(*tags...).Set(float64(state)) - blackHoleFilterSuccessFraction.WithLabelValues(*tags...).Set(successFraction) - blackHoleFilterNextRequestAllowedAfter.WithLabelValues(*tags...).Set(float64(nextProbeAfter)) + blackHoleSuccessCounterState.WithLabelValues(*tags...).Set(float64(state)) + blackHoleSuccessCounterSuccessFraction.WithLabelValues(*tags...).Set(successFraction) + blackHoleSuccessCounterNextRequestAllowedAfter.WithLabelValues(*tags...).Set(float64(nextProbeAfter)) } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/autonat.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/autonat.go new file mode 100644 index 00000000000..1ae11edbfad --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/autonat.go @@ -0,0 +1,236 @@ +package autonatv2 + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb" + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "golang.org/x/exp/rand" + "golang.org/x/exp/slices" +) + +//go:generate protoc --go_out=. --go_opt=Mpb/autonatv2.proto=./pb pb/autonatv2.proto + +const ( + ServiceName = "libp2p.autonatv2" + DialBackProtocol = "/libp2p/autonat/2/dial-back" + DialProtocol = "/libp2p/autonat/2/dial-request" + + maxMsgSize = 8192 + streamTimeout = time.Minute + dialBackStreamTimeout = 5 * time.Second + dialBackDialTimeout = 30 * time.Second + dialBackMaxMsgSize = 1024 + minHandshakeSizeBytes = 30_000 // for amplification attack prevention + maxHandshakeSizeBytes = 100_000 + // maxPeerAddresses is the number of addresses in a dial request the server + // will inspect, rest are ignored. + maxPeerAddresses = 50 +) + +var ( + ErrNoValidPeers = errors.New("no valid peers for autonat v2") + ErrDialRefused = errors.New("dial refused") + + log = logging.Logger("autonatv2") +) + +// Request is the request to verify reachability of a single address +type Request struct { + // Addr is the multiaddr to verify + Addr ma.Multiaddr + // SendDialData indicates whether to send dial data if the server requests it for Addr + SendDialData bool +} + +// Result is the result of the CheckReachability call +type Result struct { + // Addr is the dialed address + Addr ma.Multiaddr + // Reachability of the dialed address + Reachability network.Reachability + // Status is the outcome of the dialback + Status pb.DialStatus +} + +// AutoNAT implements the AutoNAT v2 client and server. +// Users can check reachability for their addresses using the CheckReachability method. +// The server provides amplification attack prevention and rate limiting. +type AutoNAT struct { + host host.Host + + // for cleanly closing + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + + srv *server + cli *client + + mx sync.Mutex + peers *peersMap + // allowPrivateAddrs enables using private and localhost addresses for reachability checks. + // This is only useful for testing. + allowPrivateAddrs bool +} + +// New returns a new AutoNAT instance. +// host and dialerHost should have the same dialing capabilities. 
In case the host doesn't support +// a transport, dial back requests for address for that transport will be ignored. +func New(host host.Host, dialerHost host.Host, opts ...AutoNATOption) (*AutoNAT, error) { + s := defaultSettings() + for _, o := range opts { + if err := o(s); err != nil { + return nil, fmt.Errorf("failed to apply option: %w", err) + } + } + + ctx, cancel := context.WithCancel(context.Background()) + an := &AutoNAT{ + host: host, + ctx: ctx, + cancel: cancel, + srv: newServer(host, dialerHost, s), + cli: newClient(host), + allowPrivateAddrs: s.allowPrivateAddrs, + peers: newPeersMap(), + } + return an, nil +} + +func (an *AutoNAT) background(sub event.Subscription) { + for { + select { + case <-an.ctx.Done(): + sub.Close() + an.wg.Done() + return + case e := <-sub.Out(): + switch evt := e.(type) { + case event.EvtPeerProtocolsUpdated: + an.updatePeer(evt.Peer) + case event.EvtPeerConnectednessChanged: + an.updatePeer(evt.Peer) + case event.EvtPeerIdentificationCompleted: + an.updatePeer(evt.Peer) + } + } + } +} + +func (an *AutoNAT) Start() error { + // Listen on event.EvtPeerProtocolsUpdated, event.EvtPeerConnectednessChanged + // event.EvtPeerIdentificationCompleted to maintain our set of autonat supporting peers. + sub, err := an.host.EventBus().Subscribe([]interface{}{ + new(event.EvtPeerProtocolsUpdated), + new(event.EvtPeerConnectednessChanged), + new(event.EvtPeerIdentificationCompleted), + }) + if err != nil { + return fmt.Errorf("event subscription failed: %w", err) + } + an.cli.Start() + an.srv.Start() + + an.wg.Add(1) + go an.background(sub) + return nil +} + +func (an *AutoNAT) Close() { + an.cancel() + an.wg.Wait() + an.srv.Close() + an.cli.Close() + an.peers = nil +} + +// GetReachability makes a single dial request for checking reachability for requested addresses +func (an *AutoNAT) GetReachability(ctx context.Context, reqs []Request) (Result, error) { + if !an.allowPrivateAddrs { + for _, r := range reqs { + if !manet.IsPublicAddr(r.Addr) { + return Result{}, fmt.Errorf("private address cannot be verified by autonatv2: %s", r.Addr) + } + } + } + an.mx.Lock() + p := an.peers.GetRand() + an.mx.Unlock() + if p == "" { + return Result{}, ErrNoValidPeers + } + + res, err := an.cli.GetReachability(ctx, p, reqs) + if err != nil { + log.Debugf("reachability check with %s failed, err: %s", p, err) + return Result{}, fmt.Errorf("reachability check with %s failed: %w", p, err) + } + log.Debugf("reachability check with %s successful", p) + return res, nil +} + +func (an *AutoNAT) updatePeer(p peer.ID) { + an.mx.Lock() + defer an.mx.Unlock() + + // There are no ordering gurantees between identify and swarm events. Check peerstore + // and swarm for the current state + protos, err := an.host.Peerstore().SupportsProtocols(p, DialProtocol) + connectedness := an.host.Network().Connectedness(p) + if err == nil && slices.Contains(protos, DialProtocol) && connectedness == network.Connected { + an.peers.Put(p) + } else { + an.peers.Delete(p) + } +} + +// peersMap provides random access to a set of peers. This is useful when the map iteration order is +// not sufficiently random. 
+type peersMap struct { + peerIdx map[peer.ID]int + peers []peer.ID +} + +func newPeersMap() *peersMap { + return &peersMap{ + peerIdx: make(map[peer.ID]int), + peers: make([]peer.ID, 0), + } +} + +func (p *peersMap) GetRand() peer.ID { + if len(p.peers) == 0 { + return "" + } + return p.peers[rand.Intn(len(p.peers))] +} + +func (p *peersMap) Put(pid peer.ID) { + if _, ok := p.peerIdx[pid]; ok { + return + } + p.peers = append(p.peers, pid) + p.peerIdx[pid] = len(p.peers) - 1 +} + +func (p *peersMap) Delete(pid peer.ID) { + idx, ok := p.peerIdx[pid] + if !ok { + return + } + p.peers[idx] = p.peers[len(p.peers)-1] + p.peerIdx[p.peers[idx]] = idx + p.peers = p.peers[:len(p.peers)-1] + delete(p.peerIdx, pid) +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/client.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/client.go new file mode 100644 index 00000000000..f93cc31377a --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/client.go @@ -0,0 +1,342 @@ +package autonatv2 + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb" + "github.com/libp2p/go-msgio/pbio" + ma "github.com/multiformats/go-multiaddr" + "golang.org/x/exp/rand" +) + +//go:generate protoc --go_out=. --go_opt=Mpb/autonatv2.proto=./pb pb/autonatv2.proto + +// client implements the client for making dial requests for AutoNAT v2. It verifies successful +// dials and provides an option to send data for dial requests. +type client struct { + host host.Host + dialData []byte + normalizeMultiaddr func(ma.Multiaddr) ma.Multiaddr + + mu sync.Mutex + // dialBackQueues maps nonce to the channel for providing the local multiaddr of the connection + // the nonce was received on + dialBackQueues map[uint64]chan ma.Multiaddr +} + +type normalizeMultiaddrer interface { + NormalizeMultiaddr(ma.Multiaddr) ma.Multiaddr +} + +func newClient(h host.Host) *client { + normalizeMultiaddr := func(a ma.Multiaddr) ma.Multiaddr { return a } + if hn, ok := h.(normalizeMultiaddrer); ok { + normalizeMultiaddr = hn.NormalizeMultiaddr + } + return &client{ + host: h, + dialData: make([]byte, 4000), + normalizeMultiaddr: normalizeMultiaddr, + dialBackQueues: make(map[uint64]chan ma.Multiaddr), + } +} + +func (ac *client) Start() { + ac.host.SetStreamHandler(DialBackProtocol, ac.handleDialBack) +} + +func (ac *client) Close() { + ac.host.RemoveStreamHandler(DialBackProtocol) +} + +// GetReachability verifies address reachability with a AutoNAT v2 server p. 
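Both the dial-request and dial-back streams used below carry varint-length-delimited protobuf messages via go-msgio's pbio package. A self-contained sketch of that framing (not part of the patch; a bytes.Buffer stands in for the libp2p stream and the nonce value is arbitrary):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb"
	"github.com/libp2p/go-msgio/pbio"
)

func main() {
	var buf bytes.Buffer // stands in for the libp2p stream

	// Write a length-prefixed DialBack, as the server does during dial-back.
	w := pbio.NewDelimitedWriter(&buf)
	if err := w.WriteMsg(&pb.DialBack{Nonce: 42}); err != nil {
		log.Fatal(err)
	}

	// Read it back with the same framing the client uses in handleDialBack.
	r := pbio.NewDelimitedReader(&buf, 1024)
	var msg pb.DialBack
	if err := r.ReadMsg(&msg); err != nil {
		log.Fatal(err)
	}
	fmt.Println("nonce round-tripped:", msg.GetNonce())
}

GetReachability, defined next, drives this exchange on the dial-request stream: it writes a DialRequest carrying a random nonce, optionally answers a DialDataRequest, and then waits for both the DialResponse and the matching dial-back nonce.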
+func (ac *client) GetReachability(ctx context.Context, p peer.ID, reqs []Request) (Result, error) { + ctx, cancel := context.WithTimeout(ctx, streamTimeout) + defer cancel() + + s, err := ac.host.NewStream(ctx, p, DialProtocol) + if err != nil { + return Result{}, fmt.Errorf("open %s stream failed: %w", DialProtocol, err) + } + + if err := s.Scope().SetService(ServiceName); err != nil { + s.Reset() + return Result{}, fmt.Errorf("attach stream %s to service %s failed: %w", DialProtocol, ServiceName, err) + } + + if err := s.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil { + s.Reset() + return Result{}, fmt.Errorf("failed to reserve memory for stream %s: %w", DialProtocol, err) + } + defer s.Scope().ReleaseMemory(maxMsgSize) + + s.SetDeadline(time.Now().Add(streamTimeout)) + defer s.Close() + + nonce := rand.Uint64() + ch := make(chan ma.Multiaddr, 1) + ac.mu.Lock() + ac.dialBackQueues[nonce] = ch + ac.mu.Unlock() + defer func() { + ac.mu.Lock() + delete(ac.dialBackQueues, nonce) + ac.mu.Unlock() + }() + + msg := newDialRequest(reqs, nonce) + w := pbio.NewDelimitedWriter(s) + if err := w.WriteMsg(&msg); err != nil { + s.Reset() + return Result{}, fmt.Errorf("dial request write failed: %w", err) + } + + r := pbio.NewDelimitedReader(s, maxMsgSize) + if err := r.ReadMsg(&msg); err != nil { + s.Reset() + return Result{}, fmt.Errorf("dial msg read failed: %w", err) + } + + switch { + case msg.GetDialResponse() != nil: + break + // provide dial data if appropriate + case msg.GetDialDataRequest() != nil: + if err := ac.validateDialDataRequest(reqs, &msg); err != nil { + s.Reset() + return Result{}, fmt.Errorf("invalid dial data request: %w", err) + } + // dial data request is valid and we want to send data + if err := sendDialData(ac.dialData, int(msg.GetDialDataRequest().GetNumBytes()), w, &msg); err != nil { + s.Reset() + return Result{}, fmt.Errorf("dial data send failed: %w", err) + } + if err := r.ReadMsg(&msg); err != nil { + s.Reset() + return Result{}, fmt.Errorf("dial response read failed: %w", err) + } + if msg.GetDialResponse() == nil { + s.Reset() + return Result{}, fmt.Errorf("invalid response type: %T", msg.Msg) + } + default: + s.Reset() + return Result{}, fmt.Errorf("invalid msg type: %T", msg.Msg) + } + + resp := msg.GetDialResponse() + if resp.GetStatus() != pb.DialResponse_OK { + // E_DIAL_REFUSED has implication for deciding future address verificiation priorities + // wrap a distinct error for convenient errors.Is usage + if resp.GetStatus() == pb.DialResponse_E_DIAL_REFUSED { + return Result{}, fmt.Errorf("dial request failed: %w", ErrDialRefused) + } + return Result{}, fmt.Errorf("dial request failed: response status %d %s", resp.GetStatus(), + pb.DialResponse_ResponseStatus_name[int32(resp.GetStatus())]) + } + if resp.GetDialStatus() == pb.DialStatus_UNUSED { + return Result{}, fmt.Errorf("invalid response: invalid dial status UNUSED") + } + if int(resp.AddrIdx) >= len(reqs) { + return Result{}, fmt.Errorf("invalid response: addr index out of range: %d [0-%d)", resp.AddrIdx, len(reqs)) + } + + // wait for nonce from the server + var dialBackAddr ma.Multiaddr + if resp.GetDialStatus() == pb.DialStatus_OK { + timer := time.NewTimer(dialBackStreamTimeout) + select { + case at := <-ch: + dialBackAddr = at + case <-ctx.Done(): + case <-timer.C: + } + timer.Stop() + } + return ac.newResult(resp, reqs, dialBackAddr) +} + +func (ac *client) validateDialDataRequest(reqs []Request, msg *pb.Message) error { + idx := 
int(msg.GetDialDataRequest().AddrIdx) + if idx >= len(reqs) { // invalid address index + return fmt.Errorf("addr index out of range: %d [0-%d)", idx, len(reqs)) + } + if msg.GetDialDataRequest().NumBytes > maxHandshakeSizeBytes { // data request is too high + return fmt.Errorf("requested data too high: %d", msg.GetDialDataRequest().NumBytes) + } + if !reqs[idx].SendDialData { // low priority addr + return fmt.Errorf("low priority addr: %s index %d", reqs[idx].Addr, idx) + } + return nil +} + +func (ac *client) newResult(resp *pb.DialResponse, reqs []Request, dialBackAddr ma.Multiaddr) (Result, error) { + idx := int(resp.AddrIdx) + addr := reqs[idx].Addr + + var rch network.Reachability + switch resp.DialStatus { + case pb.DialStatus_OK: + if !ac.areAddrsConsistent(dialBackAddr, addr) { + // the server is misinforming us about the address it successfully dialed + // either we received no dialback or the address on the dialback is inconsistent with + // what the server is telling us + return Result{}, fmt.Errorf("invalid response: dialBackAddr: %s, respAddr: %s", dialBackAddr, addr) + } + rch = network.ReachabilityPublic + case pb.DialStatus_E_DIAL_ERROR: + rch = network.ReachabilityPrivate + case pb.DialStatus_E_DIAL_BACK_ERROR: + if ac.areAddrsConsistent(dialBackAddr, addr) { + // We received the dial back but the server claims the dial back errored. + // As long as we received the correct nonce in dial back it is safe to assume + // that we are public. + rch = network.ReachabilityPublic + } else { + rch = network.ReachabilityUnknown + } + default: + // Unexpected response code. Discard the response and fail. + log.Warnf("invalid status code received in response for addr %s: %d", addr, resp.DialStatus) + return Result{}, fmt.Errorf("invalid response: invalid status code for addr %s: %d", addr, resp.DialStatus) + } + + return Result{ + Addr: addr, + Reachability: rch, + Status: resp.DialStatus, + }, nil +} + +func sendDialData(dialData []byte, numBytes int, w pbio.Writer, msg *pb.Message) (err error) { + ddResp := &pb.DialDataResponse{Data: dialData} + *msg = pb.Message{ + Msg: &pb.Message_DialDataResponse{ + DialDataResponse: ddResp, + }, + } + for remain := numBytes; remain > 0; { + if remain < len(ddResp.Data) { + ddResp.Data = ddResp.Data[:remain] + } + if err := w.WriteMsg(msg); err != nil { + return fmt.Errorf("write failed: %w", err) + } + remain -= len(dialData) + } + return nil +} + +func newDialRequest(reqs []Request, nonce uint64) pb.Message { + addrbs := make([][]byte, len(reqs)) + for i, r := range reqs { + addrbs[i] = r.Addr.Bytes() + } + return pb.Message{ + Msg: &pb.Message_DialRequest{ + DialRequest: &pb.DialRequest{ + Addrs: addrbs, + Nonce: nonce, + }, + }, + } +} + +// handleDialBack receives the nonce on the dial-back stream +func (ac *client) handleDialBack(s network.Stream) { + if err := s.Scope().SetService(ServiceName); err != nil { + log.Debugf("failed to attach stream to service %s: %w", ServiceName, err) + s.Reset() + return + } + + if err := s.Scope().ReserveMemory(dialBackMaxMsgSize, network.ReservationPriorityAlways); err != nil { + log.Debugf("failed to reserve memory for stream %s: %w", DialBackProtocol, err) + s.Reset() + return + } + defer s.Scope().ReleaseMemory(dialBackMaxMsgSize) + + s.SetDeadline(time.Now().Add(dialBackStreamTimeout)) + defer s.Close() + + r := pbio.NewDelimitedReader(s, dialBackMaxMsgSize) + var msg pb.DialBack + if err := r.ReadMsg(&msg); err != nil { + log.Debugf("failed to read dialback msg from %s: %s", s.Conn().RemotePeer(), err) 
+ s.Reset() + return + } + nonce := msg.GetNonce() + + ac.mu.Lock() + ch := ac.dialBackQueues[nonce] + ac.mu.Unlock() + if ch == nil { + log.Debugf("dialback received with invalid nonce: localAdds: %s peer: %s nonce: %d", s.Conn().LocalMultiaddr(), s.Conn().RemotePeer(), nonce) + s.Reset() + return + } + select { + case ch <- s.Conn().LocalMultiaddr(): + default: + log.Debugf("multiple dialbacks received: localAddr: %s peer: %s", s.Conn().LocalMultiaddr(), s.Conn().RemotePeer()) + s.Reset() + return + } + w := pbio.NewDelimitedWriter(s) + res := pb.DialBackResponse{} + if err := w.WriteMsg(&res); err != nil { + log.Debugf("failed to write dialback response: %s", err) + s.Reset() + } +} + +func (ac *client) areAddrsConsistent(connLocalAddr, dialedAddr ma.Multiaddr) bool { + if connLocalAddr == nil || dialedAddr == nil { + return false + } + connLocalAddr = ac.normalizeMultiaddr(connLocalAddr) + dialedAddr = ac.normalizeMultiaddr(dialedAddr) + + localProtos := connLocalAddr.Protocols() + externalProtos := dialedAddr.Protocols() + if len(localProtos) != len(externalProtos) { + return false + } + for i := 0; i < len(localProtos); i++ { + if i == 0 { + switch externalProtos[i].Code { + case ma.P_DNS, ma.P_DNSADDR: + if localProtos[i].Code == ma.P_IP4 || localProtos[i].Code == ma.P_IP6 { + continue + } + return false + case ma.P_DNS4: + if localProtos[i].Code == ma.P_IP4 { + continue + } + return false + case ma.P_DNS6: + if localProtos[i].Code == ma.P_IP6 { + continue + } + return false + } + if localProtos[i].Code != externalProtos[i].Code { + return false + } + } else { + if localProtos[i].Code != externalProtos[i].Code { + return false + } + } + } + return true +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/metrics.go new file mode 100644 index 00000000000..baa467f6d29 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/metrics.go @@ -0,0 +1,97 @@ +package autonatv2 + +import ( + "github.com/libp2p/go-libp2p/p2p/metricshelper" + "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb" + ma "github.com/multiformats/go-multiaddr" + "github.com/prometheus/client_golang/prometheus" +) + +type MetricsTracer interface { + CompletedRequest(EventDialRequestCompleted) +} + +const metricNamespace = "libp2p_autonatv2" + +var ( + requestsCompleted = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "requests_completed_total", + Help: "Requests Completed", + }, + []string{"server_error", "response_status", "dial_status", "dial_data_required", "ip_or_dns_version", "transport"}, + ) +) + +type metricsTracer struct { +} + +func NewMetricsTracer(reg prometheus.Registerer) MetricsTracer { + metricshelper.RegisterCollectors(reg, requestsCompleted) + return &metricsTracer{} +} + +func (m *metricsTracer) CompletedRequest(e EventDialRequestCompleted) { + labels := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(labels) + + errStr := getErrString(e.Error) + + dialData := "false" + if e.DialDataRequired { + dialData = "true" + } + + var ip, transport string + if e.DialedAddr != nil { + ip = getIPOrDNSVersion(e.DialedAddr) + transport = metricshelper.GetTransport(e.DialedAddr) + } + + *labels = append(*labels, + errStr, + pb.DialResponse_ResponseStatus_name[int32(e.ResponseStatus)], + pb.DialStatus_name[int32(e.DialStatus)], + dialData, + ip, + transport, + ) + requestsCompleted.WithLabelValues(*labels...).Inc() +} + +func 
getIPOrDNSVersion(a ma.Multiaddr) string { + if a == nil { + return "" + } + res := "unknown" + ma.ForEach(a, func(c ma.Component) bool { + switch c.Protocol().Code { + case ma.P_IP4: + res = "ip4" + case ma.P_IP6: + res = "ip6" + case ma.P_DNS, ma.P_DNSADDR: + res = "dns" + case ma.P_DNS4: + res = "dns4" + case ma.P_DNS6: + res = "dns6" + } + return false + }) + return res +} + +func getErrString(e error) string { + var errStr string + switch e { + case nil: + errStr = "nil" + case errBadRequest, errDialDataRefused, errResourceLimitExceeded: + errStr = e.Error() + default: + errStr = "other" + } + return errStr +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/msg_reader.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/msg_reader.go new file mode 100644 index 00000000000..87849a55ac7 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/msg_reader.go @@ -0,0 +1,38 @@ +package autonatv2 + +import ( + "io" + + "github.com/multiformats/go-varint" +) + +// msgReader reads a varint prefixed message from R without any buffering +type msgReader struct { + R io.Reader + Buf []byte +} + +func (m *msgReader) ReadByte() (byte, error) { + buf := m.Buf[:1] + _, err := m.R.Read(buf) + return buf[0], err +} + +func (m *msgReader) ReadMsg() ([]byte, error) { + sz, err := varint.ReadUvarint(m) + if err != nil { + return nil, err + } + if sz > uint64(len(m.Buf)) { + return nil, io.ErrShortBuffer + } + n := 0 + for n < int(sz) { + nr, err := m.R.Read(m.Buf[n:sz]) + if err != nil { + return nil, err + } + n += nr + } + return m.Buf[:sz], nil +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/options.go new file mode 100644 index 00000000000..15268f62901 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/options.go @@ -0,0 +1,64 @@ +package autonatv2 + +import "time" + +// autoNATSettings is used to configure AutoNAT +type autoNATSettings struct { + allowPrivateAddrs bool + serverRPM int + serverPerPeerRPM int + serverDialDataRPM int + dataRequestPolicy dataRequestPolicyFunc + now func() time.Time + amplificatonAttackPreventionDialWait time.Duration + metricsTracer MetricsTracer +} + +func defaultSettings() *autoNATSettings { + return &autoNATSettings{ + allowPrivateAddrs: false, + serverRPM: 60, // 1 every second + serverPerPeerRPM: 12, // 1 every 5 seconds + serverDialDataRPM: 12, // 1 every 5 seconds + dataRequestPolicy: amplificationAttackPrevention, + amplificatonAttackPreventionDialWait: 3 * time.Second, + now: time.Now, + } +} + +type AutoNATOption func(s *autoNATSettings) error + +func WithServerRateLimit(rpm, perPeerRPM, dialDataRPM int) AutoNATOption { + return func(s *autoNATSettings) error { + s.serverRPM = rpm + s.serverPerPeerRPM = perPeerRPM + s.serverDialDataRPM = dialDataRPM + return nil + } +} + +func WithMetricsTracer(m MetricsTracer) AutoNATOption { + return func(s *autoNATSettings) error { + s.metricsTracer = m + return nil + } +} + +func withDataRequestPolicy(drp dataRequestPolicyFunc) AutoNATOption { + return func(s *autoNATSettings) error { + s.dataRequestPolicy = drp + return nil + } +} + +func allowPrivateAddrs(s *autoNATSettings) error { + s.allowPrivateAddrs = true + return nil +} + +func withAmplificationAttackPreventionDialWait(d time.Duration) AutoNATOption { + return func(s *autoNATSettings) error { + s.amplificatonAttackPreventionDialWait = d + return nil + } +} diff --git 
a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb/autonatv2.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb/autonatv2.pb.go new file mode 100644 index 00000000000..b4cb795939d --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb/autonatv2.pb.go @@ -0,0 +1,818 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.2 +// source: pb/autonatv2.proto + +package pb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type DialStatus int32 + +const ( + DialStatus_UNUSED DialStatus = 0 + DialStatus_E_DIAL_ERROR DialStatus = 100 + DialStatus_E_DIAL_BACK_ERROR DialStatus = 101 + DialStatus_OK DialStatus = 200 +) + +// Enum value maps for DialStatus. +var ( + DialStatus_name = map[int32]string{ + 0: "UNUSED", + 100: "E_DIAL_ERROR", + 101: "E_DIAL_BACK_ERROR", + 200: "OK", + } + DialStatus_value = map[string]int32{ + "UNUSED": 0, + "E_DIAL_ERROR": 100, + "E_DIAL_BACK_ERROR": 101, + "OK": 200, + } +) + +func (x DialStatus) Enum() *DialStatus { + p := new(DialStatus) + *p = x + return p +} + +func (x DialStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DialStatus) Descriptor() protoreflect.EnumDescriptor { + return file_pb_autonatv2_proto_enumTypes[0].Descriptor() +} + +func (DialStatus) Type() protoreflect.EnumType { + return &file_pb_autonatv2_proto_enumTypes[0] +} + +func (x DialStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DialStatus.Descriptor instead. +func (DialStatus) EnumDescriptor() ([]byte, []int) { + return file_pb_autonatv2_proto_rawDescGZIP(), []int{0} +} + +type DialResponse_ResponseStatus int32 + +const ( + DialResponse_E_INTERNAL_ERROR DialResponse_ResponseStatus = 0 + DialResponse_E_REQUEST_REJECTED DialResponse_ResponseStatus = 100 + DialResponse_E_DIAL_REFUSED DialResponse_ResponseStatus = 101 + DialResponse_OK DialResponse_ResponseStatus = 200 +) + +// Enum value maps for DialResponse_ResponseStatus. +var ( + DialResponse_ResponseStatus_name = map[int32]string{ + 0: "E_INTERNAL_ERROR", + 100: "E_REQUEST_REJECTED", + 101: "E_DIAL_REFUSED", + 200: "OK", + } + DialResponse_ResponseStatus_value = map[string]int32{ + "E_INTERNAL_ERROR": 0, + "E_REQUEST_REJECTED": 100, + "E_DIAL_REFUSED": 101, + "OK": 200, + } +) + +func (x DialResponse_ResponseStatus) Enum() *DialResponse_ResponseStatus { + p := new(DialResponse_ResponseStatus) + *p = x + return p +} + +func (x DialResponse_ResponseStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DialResponse_ResponseStatus) Descriptor() protoreflect.EnumDescriptor { + return file_pb_autonatv2_proto_enumTypes[1].Descriptor() +} + +func (DialResponse_ResponseStatus) Type() protoreflect.EnumType { + return &file_pb_autonatv2_proto_enumTypes[1] +} + +func (x DialResponse_ResponseStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DialResponse_ResponseStatus.Descriptor instead. 
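The two enums above report different things: ResponseStatus says whether the server accepted and processed the request at all, while DialStatus describes the outcome of the dial-back attempt itself. A simplified, illustrative mapping (not part of the patch, and omitting the dial-back address-consistency check the real client performs in newResult):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb"
)

// interpret is an illustrative helper, not part of this patch: it collapses the
// two-level status into a network.Reachability, ignoring the address
// consistency check that the real client additionally performs.
func interpret(rs pb.DialResponse_ResponseStatus, ds pb.DialStatus) network.Reachability {
	if rs != pb.DialResponse_OK {
		return network.ReachabilityUnknown // request rejected, refused or failed
	}
	switch ds {
	case pb.DialStatus_OK:
		return network.ReachabilityPublic
	case pb.DialStatus_E_DIAL_ERROR:
		return network.ReachabilityPrivate
	default:
		return network.ReachabilityUnknown
	}
}

func main() {
	fmt.Println(interpret(pb.DialResponse_OK, pb.DialStatus_OK))                 // Public
	fmt.Println(interpret(pb.DialResponse_OK, pb.DialStatus_E_DIAL_ERROR))       // Private
	fmt.Println(interpret(pb.DialResponse_E_DIAL_REFUSED, pb.DialStatus_UNUSED)) // Unknown
}

The remainder of this file is generated accessor and descriptor code for these messages.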
+func (DialResponse_ResponseStatus) EnumDescriptor() ([]byte, []int) { + return file_pb_autonatv2_proto_rawDescGZIP(), []int{3, 0} +} + +type DialBackResponse_DialBackStatus int32 + +const ( + DialBackResponse_OK DialBackResponse_DialBackStatus = 0 +) + +// Enum value maps for DialBackResponse_DialBackStatus. +var ( + DialBackResponse_DialBackStatus_name = map[int32]string{ + 0: "OK", + } + DialBackResponse_DialBackStatus_value = map[string]int32{ + "OK": 0, + } +) + +func (x DialBackResponse_DialBackStatus) Enum() *DialBackResponse_DialBackStatus { + p := new(DialBackResponse_DialBackStatus) + *p = x + return p +} + +func (x DialBackResponse_DialBackStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DialBackResponse_DialBackStatus) Descriptor() protoreflect.EnumDescriptor { + return file_pb_autonatv2_proto_enumTypes[2].Descriptor() +} + +func (DialBackResponse_DialBackStatus) Type() protoreflect.EnumType { + return &file_pb_autonatv2_proto_enumTypes[2] +} + +func (x DialBackResponse_DialBackStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DialBackResponse_DialBackStatus.Descriptor instead. +func (DialBackResponse_DialBackStatus) EnumDescriptor() ([]byte, []int) { + return file_pb_autonatv2_proto_rawDescGZIP(), []int{6, 0} +} + +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Msg: + // + // *Message_DialRequest + // *Message_DialResponse + // *Message_DialDataRequest + // *Message_DialDataResponse + Msg isMessage_Msg `protobuf_oneof:"msg"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_autonatv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonatv2_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. 
+func (*Message) Descriptor() ([]byte, []int) { + return file_pb_autonatv2_proto_rawDescGZIP(), []int{0} +} + +func (m *Message) GetMsg() isMessage_Msg { + if m != nil { + return m.Msg + } + return nil +} + +func (x *Message) GetDialRequest() *DialRequest { + if x, ok := x.GetMsg().(*Message_DialRequest); ok { + return x.DialRequest + } + return nil +} + +func (x *Message) GetDialResponse() *DialResponse { + if x, ok := x.GetMsg().(*Message_DialResponse); ok { + return x.DialResponse + } + return nil +} + +func (x *Message) GetDialDataRequest() *DialDataRequest { + if x, ok := x.GetMsg().(*Message_DialDataRequest); ok { + return x.DialDataRequest + } + return nil +} + +func (x *Message) GetDialDataResponse() *DialDataResponse { + if x, ok := x.GetMsg().(*Message_DialDataResponse); ok { + return x.DialDataResponse + } + return nil +} + +type isMessage_Msg interface { + isMessage_Msg() +} + +type Message_DialRequest struct { + DialRequest *DialRequest `protobuf:"bytes,1,opt,name=dialRequest,proto3,oneof"` +} + +type Message_DialResponse struct { + DialResponse *DialResponse `protobuf:"bytes,2,opt,name=dialResponse,proto3,oneof"` +} + +type Message_DialDataRequest struct { + DialDataRequest *DialDataRequest `protobuf:"bytes,3,opt,name=dialDataRequest,proto3,oneof"` +} + +type Message_DialDataResponse struct { + DialDataResponse *DialDataResponse `protobuf:"bytes,4,opt,name=dialDataResponse,proto3,oneof"` +} + +func (*Message_DialRequest) isMessage_Msg() {} + +func (*Message_DialResponse) isMessage_Msg() {} + +func (*Message_DialDataRequest) isMessage_Msg() {} + +func (*Message_DialDataResponse) isMessage_Msg() {} + +type DialRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Addrs [][]byte `protobuf:"bytes,1,rep,name=addrs,proto3" json:"addrs,omitempty"` + Nonce uint64 `protobuf:"fixed64,2,opt,name=nonce,proto3" json:"nonce,omitempty"` +} + +func (x *DialRequest) Reset() { + *x = DialRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_autonatv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DialRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DialRequest) ProtoMessage() {} + +func (x *DialRequest) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonatv2_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DialRequest.ProtoReflect.Descriptor instead. 
+func (*DialRequest) Descriptor() ([]byte, []int) { + return file_pb_autonatv2_proto_rawDescGZIP(), []int{1} +} + +func (x *DialRequest) GetAddrs() [][]byte { + if x != nil { + return x.Addrs + } + return nil +} + +func (x *DialRequest) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + +type DialDataRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AddrIdx uint32 `protobuf:"varint,1,opt,name=addrIdx,proto3" json:"addrIdx,omitempty"` + NumBytes uint64 `protobuf:"varint,2,opt,name=numBytes,proto3" json:"numBytes,omitempty"` +} + +func (x *DialDataRequest) Reset() { + *x = DialDataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_autonatv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DialDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DialDataRequest) ProtoMessage() {} + +func (x *DialDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonatv2_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DialDataRequest.ProtoReflect.Descriptor instead. +func (*DialDataRequest) Descriptor() ([]byte, []int) { + return file_pb_autonatv2_proto_rawDescGZIP(), []int{2} +} + +func (x *DialDataRequest) GetAddrIdx() uint32 { + if x != nil { + return x.AddrIdx + } + return 0 +} + +func (x *DialDataRequest) GetNumBytes() uint64 { + if x != nil { + return x.NumBytes + } + return 0 +} + +type DialResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status DialResponse_ResponseStatus `protobuf:"varint,1,opt,name=status,proto3,enum=autonatv2.pb.DialResponse_ResponseStatus" json:"status,omitempty"` + AddrIdx uint32 `protobuf:"varint,2,opt,name=addrIdx,proto3" json:"addrIdx,omitempty"` + DialStatus DialStatus `protobuf:"varint,3,opt,name=dialStatus,proto3,enum=autonatv2.pb.DialStatus" json:"dialStatus,omitempty"` +} + +func (x *DialResponse) Reset() { + *x = DialResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_autonatv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DialResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DialResponse) ProtoMessage() {} + +func (x *DialResponse) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonatv2_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DialResponse.ProtoReflect.Descriptor instead. 
+func (*DialResponse) Descriptor() ([]byte, []int) { + return file_pb_autonatv2_proto_rawDescGZIP(), []int{3} +} + +func (x *DialResponse) GetStatus() DialResponse_ResponseStatus { + if x != nil { + return x.Status + } + return DialResponse_E_INTERNAL_ERROR +} + +func (x *DialResponse) GetAddrIdx() uint32 { + if x != nil { + return x.AddrIdx + } + return 0 +} + +func (x *DialResponse) GetDialStatus() DialStatus { + if x != nil { + return x.DialStatus + } + return DialStatus_UNUSED +} + +type DialDataResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *DialDataResponse) Reset() { + *x = DialDataResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_autonatv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DialDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DialDataResponse) ProtoMessage() {} + +func (x *DialDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonatv2_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DialDataResponse.ProtoReflect.Descriptor instead. +func (*DialDataResponse) Descriptor() ([]byte, []int) { + return file_pb_autonatv2_proto_rawDescGZIP(), []int{4} +} + +func (x *DialDataResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type DialBack struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nonce uint64 `protobuf:"fixed64,1,opt,name=nonce,proto3" json:"nonce,omitempty"` +} + +func (x *DialBack) Reset() { + *x = DialBack{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_autonatv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DialBack) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DialBack) ProtoMessage() {} + +func (x *DialBack) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonatv2_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DialBack.ProtoReflect.Descriptor instead. 
+func (*DialBack) Descriptor() ([]byte, []int) { + return file_pb_autonatv2_proto_rawDescGZIP(), []int{5} +} + +func (x *DialBack) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + +type DialBackResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status DialBackResponse_DialBackStatus `protobuf:"varint,1,opt,name=status,proto3,enum=autonatv2.pb.DialBackResponse_DialBackStatus" json:"status,omitempty"` +} + +func (x *DialBackResponse) Reset() { + *x = DialBackResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_autonatv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DialBackResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DialBackResponse) ProtoMessage() {} + +func (x *DialBackResponse) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonatv2_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DialBackResponse.ProtoReflect.Descriptor instead. +func (*DialBackResponse) Descriptor() ([]byte, []int) { + return file_pb_autonatv2_proto_rawDescGZIP(), []int{6} +} + +func (x *DialBackResponse) GetStatus() DialBackResponse_DialBackStatus { + if x != nil { + return x.Status + } + return DialBackResponse_OK +} + +var File_pb_autonatv2_proto protoreflect.FileDescriptor + +var file_pb_autonatv2_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x70, 0x62, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x76, 0x32, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x76, 0x32, 0x2e, + 0x70, 0x62, 0x22, 0xaa, 0x02, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, + 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x76, 0x32, 0x2e, + 0x70, 0x62, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, + 0x52, 0x0b, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, + 0x0c, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x76, 0x32, 0x2e, + 0x70, 0x62, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, + 0x00, 0x52, 0x0c, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x49, 0x0a, 0x0f, 0x64, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, + 0x61, 0x74, 0x76, 0x32, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x64, 0x69, 0x61, 0x6c, 0x44, + 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4c, 0x0a, 0x10, 0x64, 0x69, + 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x76, 0x32, + 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x10, 
0x64, 0x69, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x05, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x22, + 0x39, 0x0a, 0x0b, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x61, + 0x64, 0x64, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x06, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x47, 0x0a, 0x0f, 0x44, 0x69, + 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x49, 0x64, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x49, 0x64, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x22, 0x82, 0x02, 0x0a, 0x0c, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x76, 0x32, + 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x49, + 0x64, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x49, 0x64, + 0x78, 0x12, 0x38, 0x0a, 0x0a, 0x64, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x76, + 0x32, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0a, 0x64, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x5b, 0x0a, 0x0e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, + 0x10, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, + 0x5f, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x64, 0x12, 0x12, 0x0a, 0x0e, 0x45, + 0x5f, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x52, 0x45, 0x46, 0x55, 0x53, 0x45, 0x44, 0x10, 0x65, 0x12, + 0x07, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0xc8, 0x01, 0x22, 0x26, 0x0a, 0x10, 0x44, 0x69, 0x61, 0x6c, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x20, 0x0a, 0x08, 0x44, 0x69, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, + 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x05, 0x6e, 0x6f, 0x6e, + 0x63, 0x65, 0x22, 0x73, 0x0a, 0x10, 0x44, 0x69, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, + 0x76, 0x32, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 
0x73, 0x22, 0x18, 0x0a, + 0x0e, 0x44, 0x69, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x06, 0x0a, 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x2a, 0x4a, 0x0a, 0x0a, 0x44, 0x69, 0x61, 0x6c, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x55, 0x53, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x5f, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x10, 0x64, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x5f, 0x44, 0x49, 0x41, 0x4c, 0x5f, 0x42, 0x41, + 0x43, 0x4b, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x65, 0x12, 0x07, 0x0a, 0x02, 0x4f, 0x4b, + 0x10, 0xc8, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pb_autonatv2_proto_rawDescOnce sync.Once + file_pb_autonatv2_proto_rawDescData = file_pb_autonatv2_proto_rawDesc +) + +func file_pb_autonatv2_proto_rawDescGZIP() []byte { + file_pb_autonatv2_proto_rawDescOnce.Do(func() { + file_pb_autonatv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_autonatv2_proto_rawDescData) + }) + return file_pb_autonatv2_proto_rawDescData +} + +var file_pb_autonatv2_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_pb_autonatv2_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_pb_autonatv2_proto_goTypes = []any{ + (DialStatus)(0), // 0: autonatv2.pb.DialStatus + (DialResponse_ResponseStatus)(0), // 1: autonatv2.pb.DialResponse.ResponseStatus + (DialBackResponse_DialBackStatus)(0), // 2: autonatv2.pb.DialBackResponse.DialBackStatus + (*Message)(nil), // 3: autonatv2.pb.Message + (*DialRequest)(nil), // 4: autonatv2.pb.DialRequest + (*DialDataRequest)(nil), // 5: autonatv2.pb.DialDataRequest + (*DialResponse)(nil), // 6: autonatv2.pb.DialResponse + (*DialDataResponse)(nil), // 7: autonatv2.pb.DialDataResponse + (*DialBack)(nil), // 8: autonatv2.pb.DialBack + (*DialBackResponse)(nil), // 9: autonatv2.pb.DialBackResponse +} +var file_pb_autonatv2_proto_depIdxs = []int32{ + 4, // 0: autonatv2.pb.Message.dialRequest:type_name -> autonatv2.pb.DialRequest + 6, // 1: autonatv2.pb.Message.dialResponse:type_name -> autonatv2.pb.DialResponse + 5, // 2: autonatv2.pb.Message.dialDataRequest:type_name -> autonatv2.pb.DialDataRequest + 7, // 3: autonatv2.pb.Message.dialDataResponse:type_name -> autonatv2.pb.DialDataResponse + 1, // 4: autonatv2.pb.DialResponse.status:type_name -> autonatv2.pb.DialResponse.ResponseStatus + 0, // 5: autonatv2.pb.DialResponse.dialStatus:type_name -> autonatv2.pb.DialStatus + 2, // 6: autonatv2.pb.DialBackResponse.status:type_name -> autonatv2.pb.DialBackResponse.DialBackStatus + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_pb_autonatv2_proto_init() } +func file_pb_autonatv2_proto_init() { + if File_pb_autonatv2_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pb_autonatv2_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_autonatv2_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*DialRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_autonatv2_proto_msgTypes[2].Exporter = func(v any, i int) any { 
+ switch v := v.(*DialDataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_autonatv2_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*DialResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_autonatv2_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*DialDataResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_autonatv2_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*DialBack); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_autonatv2_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*DialBackResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_pb_autonatv2_proto_msgTypes[0].OneofWrappers = []any{ + (*Message_DialRequest)(nil), + (*Message_DialResponse)(nil), + (*Message_DialDataRequest)(nil), + (*Message_DialDataResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_autonatv2_proto_rawDesc, + NumEnums: 3, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_autonatv2_proto_goTypes, + DependencyIndexes: file_pb_autonatv2_proto_depIdxs, + EnumInfos: file_pb_autonatv2_proto_enumTypes, + MessageInfos: file_pb_autonatv2_proto_msgTypes, + }.Build() + File_pb_autonatv2_proto = out.File + file_pb_autonatv2_proto_rawDesc = nil + file_pb_autonatv2_proto_goTypes = nil + file_pb_autonatv2_proto_depIdxs = nil +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb/autonatv2.proto b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb/autonatv2.proto new file mode 100644 index 00000000000..64dca1138ff --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb/autonatv2.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package autonatv2.pb; + +message Message { + oneof msg { + DialRequest dialRequest = 1; + DialResponse dialResponse = 2; + DialDataRequest dialDataRequest = 3; + DialDataResponse dialDataResponse = 4; + } +} + +message DialRequest { + repeated bytes addrs = 1; + fixed64 nonce = 2; +} + + +message DialDataRequest { + uint32 addrIdx = 1; + uint64 numBytes = 2; +} + + +enum DialStatus { + UNUSED = 0; + E_DIAL_ERROR = 100; + E_DIAL_BACK_ERROR = 101; + OK = 200; +} + + +message DialResponse { + enum ResponseStatus { + E_INTERNAL_ERROR = 0; + E_REQUEST_REJECTED = 100; + E_DIAL_REFUSED = 101; + OK = 200; + } + + ResponseStatus status = 1; + uint32 addrIdx = 2; + DialStatus dialStatus = 3; +} + + +message DialDataResponse { + bytes data = 1; +} + + +message DialBack { + fixed64 nonce = 1; +} + + +message DialBackResponse { + enum DialBackStatus { + OK = 0; + } + + DialBackStatus status = 1; +} \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/server.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/server.go new file mode 100644 index 00000000000..c5dd2dce3eb --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/server.go @@ -0,0 +1,509 @@ 
+package autonatv2 + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + "time" + + pool "github.com/libp2p/go-buffer-pool" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb" + "github.com/libp2p/go-msgio/pbio" + + "math/rand" + + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" +) + +var ( + errResourceLimitExceeded = errors.New("resource limit exceeded") + errBadRequest = errors.New("bad request") + errDialDataRefused = errors.New("dial data refused") +) + +type dataRequestPolicyFunc = func(s network.Stream, dialAddr ma.Multiaddr) bool + +type EventDialRequestCompleted struct { + Error error + ResponseStatus pb.DialResponse_ResponseStatus + DialStatus pb.DialStatus + DialDataRequired bool + DialedAddr ma.Multiaddr +} + +// server implements the AutoNATv2 server. +// It can ask client to provide dial data before attempting the requested dial. +// It rate limits requests on a global level, per peer level and on whether the request requires dial data. +type server struct { + host host.Host + dialerHost host.Host + limiter *rateLimiter + + // dialDataRequestPolicy is used to determine whether dialing the address requires receiving + // dial data. It is set to amplification attack prevention by default. + dialDataRequestPolicy dataRequestPolicyFunc + amplificatonAttackPreventionDialWait time.Duration + metricsTracer MetricsTracer + + // for tests + now func() time.Time + allowPrivateAddrs bool +} + +func newServer(host, dialer host.Host, s *autoNATSettings) *server { + return &server{ + dialerHost: dialer, + host: host, + dialDataRequestPolicy: s.dataRequestPolicy, + amplificatonAttackPreventionDialWait: s.amplificatonAttackPreventionDialWait, + allowPrivateAddrs: s.allowPrivateAddrs, + limiter: &rateLimiter{ + RPM: s.serverRPM, + PerPeerRPM: s.serverPerPeerRPM, + DialDataRPM: s.serverDialDataRPM, + now: s.now, + }, + now: s.now, + metricsTracer: s.metricsTracer, + } +} + +// Enable attaches the stream handler to the host. 
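For reference, a sketch of enabling the service with the server-side knobs this file consumes (not part of the patch): WithServerRateLimit overrides the default sliding-window limits of 60/12/12 requests per minute from defaultSettings, and WithMetricsTracer wires in the Prometheus collector from metrics.go. The numeric limits below are arbitrary examples.

package main

import (
	"log"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/p2p/protocol/autonatv2"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	h, err := libp2p.New()
	if err != nil {
		log.Fatal(err)
	}
	dialer, err := libp2p.New(libp2p.NoListenAddrs)
	if err != nil {
		log.Fatal(err)
	}

	// Tighten the global, per-peer and dial-data rate limits and export
	// request metrics to the default Prometheus registerer.
	an, err := autonatv2.New(h, dialer,
		autonatv2.WithServerRateLimit(30, 6, 6),
		autonatv2.WithMetricsTracer(autonatv2.NewMetricsTracer(prometheus.DefaultRegisterer)),
	)
	if err != nil {
		log.Fatal(err)
	}
	if err := an.Start(); err != nil {
		log.Fatal(err)
	}
	defer an.Close()
}

Start, just below, is what actually attaches the dial-request handler to the host.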
+func (as *server) Start() { + as.host.SetStreamHandler(DialProtocol, as.handleDialRequest) +} + +func (as *server) Close() { + as.host.RemoveStreamHandler(DialProtocol) + as.dialerHost.Close() + as.limiter.Close() +} + +// handleDialRequest is the dial-request protocol stream handler +func (as *server) handleDialRequest(s network.Stream) { + evt := as.serveDialRequest(s) + log.Debugf("completed dial-request from %s, response status: %s, dial status: %s, err: %s", + s.Conn().RemotePeer(), evt.ResponseStatus, evt.DialStatus, evt.Error) + if as.metricsTracer != nil { + as.metricsTracer.CompletedRequest(evt) + } +} + +func (as *server) serveDialRequest(s network.Stream) EventDialRequestCompleted { + if err := s.Scope().SetService(ServiceName); err != nil { + s.Reset() + log.Debugf("failed to attach stream to %s service: %w", ServiceName, err) + return EventDialRequestCompleted{ + Error: errors.New("failed to attach stream to autonat-v2"), + } + } + + if err := s.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil { + s.Reset() + log.Debugf("failed to reserve memory for stream %s: %w", DialProtocol, err) + return EventDialRequestCompleted{Error: errResourceLimitExceeded} + } + defer s.Scope().ReleaseMemory(maxMsgSize) + + deadline := as.now().Add(streamTimeout) + ctx, cancel := context.WithDeadline(context.Background(), deadline) + defer cancel() + s.SetDeadline(as.now().Add(streamTimeout)) + defer s.Close() + + p := s.Conn().RemotePeer() + + var msg pb.Message + w := pbio.NewDelimitedWriter(s) + // Check for rate limit before parsing the request + if !as.limiter.Accept(p) { + msg = pb.Message{ + Msg: &pb.Message_DialResponse{ + DialResponse: &pb.DialResponse{ + Status: pb.DialResponse_E_REQUEST_REJECTED, + }, + }, + } + if err := w.WriteMsg(&msg); err != nil { + s.Reset() + log.Debugf("failed to write request rejected response to %s: %s", p, err) + return EventDialRequestCompleted{ + ResponseStatus: pb.DialResponse_E_REQUEST_REJECTED, + Error: fmt.Errorf("write failed: %w", err), + } + } + log.Debugf("rejected request from %s: rate limit exceeded", p) + return EventDialRequestCompleted{ResponseStatus: pb.DialResponse_E_REQUEST_REJECTED} + } + defer as.limiter.CompleteRequest(p) + + r := pbio.NewDelimitedReader(s, maxMsgSize) + if err := r.ReadMsg(&msg); err != nil { + s.Reset() + log.Debugf("failed to read request from %s: %s", p, err) + return EventDialRequestCompleted{Error: fmt.Errorf("read failed: %w", err)} + } + if msg.GetDialRequest() == nil { + s.Reset() + log.Debugf("invalid message type from %s: %T expected: DialRequest", p, msg.Msg) + return EventDialRequestCompleted{Error: errBadRequest} + } + + // parse peer's addresses + var dialAddr ma.Multiaddr + var addrIdx int + for i, ab := range msg.GetDialRequest().GetAddrs() { + if i >= maxPeerAddresses { + break + } + a, err := ma.NewMultiaddrBytes(ab) + if err != nil { + continue + } + if !as.allowPrivateAddrs && !manet.IsPublicAddr(a) { + continue + } + if !as.dialerHost.Network().CanDial(p, a) { + continue + } + dialAddr = a + addrIdx = i + break + } + // No dialable address + if dialAddr == nil { + msg = pb.Message{ + Msg: &pb.Message_DialResponse{ + DialResponse: &pb.DialResponse{ + Status: pb.DialResponse_E_DIAL_REFUSED, + }, + }, + } + if err := w.WriteMsg(&msg); err != nil { + s.Reset() + log.Debugf("failed to write dial refused response to %s: %s", p, err) + return EventDialRequestCompleted{ + ResponseStatus: pb.DialResponse_E_DIAL_REFUSED, + Error: fmt.Errorf("write failed: %w", err), + } + } + 
return EventDialRequestCompleted{ + ResponseStatus: pb.DialResponse_E_DIAL_REFUSED, + } + } + + nonce := msg.GetDialRequest().Nonce + + isDialDataRequired := as.dialDataRequestPolicy(s, dialAddr) + if isDialDataRequired && !as.limiter.AcceptDialDataRequest(p) { + msg = pb.Message{ + Msg: &pb.Message_DialResponse{ + DialResponse: &pb.DialResponse{ + Status: pb.DialResponse_E_REQUEST_REJECTED, + }, + }, + } + if err := w.WriteMsg(&msg); err != nil { + s.Reset() + log.Debugf("failed to write request rejected response to %s: %s", p, err) + return EventDialRequestCompleted{ + ResponseStatus: pb.DialResponse_E_REQUEST_REJECTED, + Error: fmt.Errorf("write failed: %w", err), + DialDataRequired: true, + } + } + log.Debugf("rejected request from %s: rate limit exceeded", p) + return EventDialRequestCompleted{ + ResponseStatus: pb.DialResponse_E_REQUEST_REJECTED, + DialDataRequired: true, + } + } + + if isDialDataRequired { + if err := getDialData(w, s, &msg, addrIdx); err != nil { + s.Reset() + log.Debugf("%s refused dial data request: %s", p, err) + return EventDialRequestCompleted{ + Error: errDialDataRefused, + DialDataRequired: true, + DialedAddr: dialAddr, + } + } + // wait for a bit to prevent thundering herd style attacks on a victim + waitTime := time.Duration(rand.Intn(int(as.amplificatonAttackPreventionDialWait) + 1)) // the range is [0, n) + t := time.NewTimer(waitTime) + defer t.Stop() + select { + case <-ctx.Done(): + s.Reset() + log.Debugf("rejecting request without dialing: %s %p ", p, ctx.Err()) + return EventDialRequestCompleted{Error: ctx.Err(), DialDataRequired: true, DialedAddr: dialAddr} + case <-t.C: + } + } + + dialStatus := as.dialBack(ctx, s.Conn().RemotePeer(), dialAddr, nonce) + msg = pb.Message{ + Msg: &pb.Message_DialResponse{ + DialResponse: &pb.DialResponse{ + Status: pb.DialResponse_OK, + DialStatus: dialStatus, + AddrIdx: uint32(addrIdx), + }, + }, + } + if err := w.WriteMsg(&msg); err != nil { + s.Reset() + log.Debugf("failed to write response to %s: %s", p, err) + return EventDialRequestCompleted{ + ResponseStatus: pb.DialResponse_OK, + DialStatus: dialStatus, + Error: fmt.Errorf("write failed: %w", err), + DialDataRequired: isDialDataRequired, + DialedAddr: dialAddr, + } + } + return EventDialRequestCompleted{ + ResponseStatus: pb.DialResponse_OK, + DialStatus: dialStatus, + Error: nil, + DialDataRequired: isDialDataRequired, + DialedAddr: dialAddr, + } +} + +// getDialData gets data from the client for dialing the address +func getDialData(w pbio.Writer, s network.Stream, msg *pb.Message, addrIdx int) error { + numBytes := minHandshakeSizeBytes + rand.Intn(maxHandshakeSizeBytes-minHandshakeSizeBytes) + *msg = pb.Message{ + Msg: &pb.Message_DialDataRequest{ + DialDataRequest: &pb.DialDataRequest{ + AddrIdx: uint32(addrIdx), + NumBytes: uint64(numBytes), + }, + }, + } + if err := w.WriteMsg(msg); err != nil { + return fmt.Errorf("dial data write: %w", err) + } + // pbio.Reader that we used so far on this stream is buffered. But at this point + // there is nothing unread on the stream. So it is safe to use the raw stream to + // read, reducing allocations. 
+ return readDialData(numBytes, s) +} + +func readDialData(numBytes int, r io.Reader) error { + mr := &msgReader{R: r, Buf: pool.Get(maxMsgSize)} + defer pool.Put(mr.Buf) + for remain := numBytes; remain > 0; { + msg, err := mr.ReadMsg() + if err != nil { + return fmt.Errorf("dial data read: %w", err) + } + // protobuf format is: + // (oneof dialDataResponse:)(dial data:) + bytesLen := len(msg) + bytesLen -= 2 // fieldTag + varint first byte + if bytesLen > 127 { + bytesLen -= 1 // varint second byte + } + bytesLen -= 2 // second fieldTag + varint first byte + if bytesLen > 127 { + bytesLen -= 1 // varint second byte + } + if bytesLen > 0 { + remain -= bytesLen + } + // Check if the peer is not sending too little data forcing us to just do a lot of compute + if bytesLen < 100 && remain > 0 { + return fmt.Errorf("dial data msg too small: %d", bytesLen) + } + } + return nil +} + +func (as *server) dialBack(ctx context.Context, p peer.ID, addr ma.Multiaddr, nonce uint64) pb.DialStatus { + ctx, cancel := context.WithTimeout(ctx, dialBackDialTimeout) + ctx = network.WithForceDirectDial(ctx, "autonatv2") + as.dialerHost.Peerstore().AddAddr(p, addr, peerstore.TempAddrTTL) + defer func() { + cancel() + as.dialerHost.Network().ClosePeer(p) + as.dialerHost.Peerstore().ClearAddrs(p) + as.dialerHost.Peerstore().RemovePeer(p) + }() + + err := as.dialerHost.Connect(ctx, peer.AddrInfo{ID: p}) + if err != nil { + return pb.DialStatus_E_DIAL_ERROR + } + + s, err := as.dialerHost.NewStream(ctx, p, DialBackProtocol) + if err != nil { + return pb.DialStatus_E_DIAL_BACK_ERROR + } + + defer s.Close() + s.SetDeadline(as.now().Add(dialBackStreamTimeout)) + + w := pbio.NewDelimitedWriter(s) + if err := w.WriteMsg(&pb.DialBack{Nonce: nonce}); err != nil { + s.Reset() + return pb.DialStatus_E_DIAL_BACK_ERROR + } + + // Since the underlying connection is on a separate dialer, it'll be closed after this + // function returns. Connection close will drop all the queued writes. To ensure message + // delivery, do a CloseWrite and read a byte from the stream. The peer actually sends a + // response of type DialBackResponse but we only care about the fact that the DialBack + // message has reached the peer. So we ignore that message on the read side. + s.CloseWrite() + s.SetDeadline(as.now().Add(5 * time.Second)) // 5 is a magic number + b := make([]byte, 1) // Read 1 byte here because 0 len reads are free to return (0, nil) immediately + s.Read(b) + + return pb.DialStatus_OK +} + +// rateLimiter implements a sliding window rate limit of requests per minute. It allows 1 concurrent request +// per peer. It rate limits requests globally, at a peer level and depending on whether it requires dial data. +type rateLimiter struct { + // PerPeerRPM is the rate limit per peer + PerPeerRPM int + // RPM is the global rate limit + RPM int + // DialDataRPM is the rate limit for requests that require dial data + DialDataRPM int + + mu sync.Mutex + closed bool + reqs []entry + peerReqs map[peer.ID][]time.Time + dialDataReqs []time.Time + // ongoingReqs tracks in progress requests. This is used to disallow multiple concurrent requests by the + // same peer + // TODO: Should we allow a few concurrent requests per peer? 
+ ongoingReqs map[peer.ID]struct{} + + now func() time.Time // for tests +} + +type entry struct { + PeerID peer.ID + Time time.Time +} + +func (r *rateLimiter) Accept(p peer.ID) bool { + r.mu.Lock() + defer r.mu.Unlock() + if r.closed { + return false + } + if r.peerReqs == nil { + r.peerReqs = make(map[peer.ID][]time.Time) + r.ongoingReqs = make(map[peer.ID]struct{}) + } + + nw := r.now() + r.cleanup(nw) + + if _, ok := r.ongoingReqs[p]; ok { + return false + } + if len(r.reqs) >= r.RPM || len(r.peerReqs[p]) >= r.PerPeerRPM { + return false + } + + r.ongoingReqs[p] = struct{}{} + r.reqs = append(r.reqs, entry{PeerID: p, Time: nw}) + r.peerReqs[p] = append(r.peerReqs[p], nw) + return true +} + +func (r *rateLimiter) AcceptDialDataRequest(p peer.ID) bool { + r.mu.Lock() + defer r.mu.Unlock() + if r.closed { + return false + } + if r.peerReqs == nil { + r.peerReqs = make(map[peer.ID][]time.Time) + r.ongoingReqs = make(map[peer.ID]struct{}) + } + nw := r.now() + r.cleanup(nw) + if len(r.dialDataReqs) >= r.DialDataRPM { + return false + } + r.dialDataReqs = append(r.dialDataReqs, nw) + return true +} + +// cleanup removes stale requests. +// +// This is fast enough in rate limited cases and the state is small enough to +// clean up quickly when blocking requests. +func (r *rateLimiter) cleanup(now time.Time) { + idx := len(r.reqs) + for i, e := range r.reqs { + if now.Sub(e.Time) >= time.Minute { + pi := len(r.peerReqs[e.PeerID]) + for j, t := range r.peerReqs[e.PeerID] { + if now.Sub(t) < time.Minute { + pi = j + break + } + } + r.peerReqs[e.PeerID] = r.peerReqs[e.PeerID][pi:] + if len(r.peerReqs[e.PeerID]) == 0 { + delete(r.peerReqs, e.PeerID) + } + } else { + idx = i + break + } + } + r.reqs = r.reqs[idx:] + + idx = len(r.dialDataReqs) + for i, t := range r.dialDataReqs { + if now.Sub(t) < time.Minute { + idx = i + break + } + } + r.dialDataReqs = r.dialDataReqs[idx:] +} + +func (r *rateLimiter) CompleteRequest(p peer.ID) { + r.mu.Lock() + defer r.mu.Unlock() + delete(r.ongoingReqs, p) +} + +func (r *rateLimiter) Close() { + r.mu.Lock() + defer r.mu.Unlock() + r.closed = true + r.peerReqs = nil + r.ongoingReqs = nil + r.dialDataReqs = nil +} + +// amplificationAttackPrevention is a dialDataRequestPolicy which requests data when the peer's observed +// IP address is different from the dial back IP address +func amplificationAttackPrevention(s network.Stream, dialAddr ma.Multiaddr) bool { + connIP, err := manet.ToIP(s.Conn().RemoteMultiaddr()) + if err != nil { + return true + } + dialIP, _ := manet.ToIP(s.Conn().LocalMultiaddr()) // must be an IP multiaddr + return !connIP.Equal(dialIP) +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go index d4d285a300b..afe93577ca0 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: pb/circuit.proto package pb @@ -606,7 +606,7 @@ func file_pb_circuit_proto_rawDescGZIP() []byte { var file_pb_circuit_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_pb_circuit_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_pb_circuit_proto_goTypes = []interface{}{ +var file_pb_circuit_proto_goTypes = []any{ (Status)(0), // 0: circuit.pb.Status (HopMessage_Type)(0), // 1: circuit.pb.HopMessage.Type (StopMessage_Type)(0), // 2: circuit.pb.StopMessage.Type @@ -639,7 +639,7 @@ func file_pb_circuit_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_pb_circuit_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pb_circuit_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*HopMessage); i { case 0: return &v.state @@ -651,7 +651,7 @@ func file_pb_circuit_proto_init() { return nil } } - file_pb_circuit_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_pb_circuit_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*StopMessage); i { case 0: return &v.state @@ -663,7 +663,7 @@ func file_pb_circuit_proto_init() { return nil } } - file_pb_circuit_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_pb_circuit_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Peer); i { case 0: return &v.state @@ -675,7 +675,7 @@ func file_pb_circuit_proto_init() { return nil } } - file_pb_circuit_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_pb_circuit_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Reservation); i { case 0: return &v.state @@ -687,7 +687,7 @@ func file_pb_circuit_proto_init() { return nil } } - file_pb_circuit_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_pb_circuit_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Limit); i { case 0: return &v.state @@ -700,11 +700,11 @@ func file_pb_circuit_proto_init() { } } } - file_pb_circuit_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_pb_circuit_proto_msgTypes[1].OneofWrappers = []interface{}{} - file_pb_circuit_proto_msgTypes[2].OneofWrappers = []interface{}{} - file_pb_circuit_proto_msgTypes[3].OneofWrappers = []interface{}{} - file_pb_circuit_proto_msgTypes[4].OneofWrappers = []interface{}{} + file_pb_circuit_proto_msgTypes[0].OneofWrappers = []any{} + file_pb_circuit_proto_msgTypes[1].OneofWrappers = []any{} + file_pb_circuit_proto_msgTypes[2].OneofWrappers = []any{} + file_pb_circuit_proto_msgTypes[3].OneofWrappers = []any{} + file_pb_circuit_proto_msgTypes[4].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go index 917b5370223..6b02cdac640 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: pb/voucher.proto package pb @@ -115,7 +115,7 @@ func file_pb_voucher_proto_rawDescGZIP() []byte { } var file_pb_voucher_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_pb_voucher_proto_goTypes = []interface{}{ +var file_pb_voucher_proto_goTypes = []any{ (*ReservationVoucher)(nil), // 0: circuit.pb.ReservationVoucher } var file_pb_voucher_proto_depIdxs = []int32{ @@ -132,7 +132,7 @@ func file_pb_voucher_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_pb_voucher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pb_voucher_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*ReservationVoucher); i { case 0: return &v.state @@ -145,7 +145,7 @@ func file_pb_voucher_proto_init() { } } } - file_pb_voucher_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_pb_voucher_proto_msgTypes[0].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go index ca568580c8c..8478ab692b0 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: pb/holepunch.proto package pb @@ -161,7 +161,7 @@ func file_pb_holepunch_proto_rawDescGZIP() []byte { var file_pb_holepunch_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_pb_holepunch_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_pb_holepunch_proto_goTypes = []interface{}{ +var file_pb_holepunch_proto_goTypes = []any{ (HolePunch_Type)(0), // 0: holepunch.pb.HolePunch.Type (*HolePunch)(nil), // 1: holepunch.pb.HolePunch } @@ -180,7 +180,7 @@ func file_pb_holepunch_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_pb_holepunch_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pb_holepunch_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*HolePunch); i { case 0: return &v.state diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go index fc1c100c8e4..ffe60345e11 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go @@ -213,6 +213,48 @@ func (o *ObservedAddrManager) AddrsFor(addr ma.Multiaddr) (addrs []ma.Multiaddr) return res } +// appendInferredAddrs infers the external address of other transports that +// share the local thin waist with a transport that we have do observations for. +// +// e.g. If we have observations for a QUIC address on port 9000, and we are +// listening on the same interface and port 9000 for WebTransport, we can infer +// the external WebTransport address. 
+func (o *ObservedAddrManager) appendInferredAddrs(twToObserverSets map[string][]*observerSet, addrs []ma.Multiaddr) []ma.Multiaddr { + if twToObserverSets == nil { + twToObserverSets = make(map[string][]*observerSet) + for localTWStr := range o.externalAddrs { + twToObserverSets[localTWStr] = append(twToObserverSets[localTWStr], o.getTopExternalAddrs(localTWStr)...) + } + } + lAddrs, err := o.interfaceListenAddrs() + if err != nil { + log.Warnw("failed to get interface resolved listen addrs. Using just the listen addrs", "error", err) + lAddrs = nil + } + lAddrs = append(lAddrs, o.listenAddrs()...) + seenTWs := make(map[string]struct{}) + for _, a := range lAddrs { + if _, ok := o.localAddrs[string(a.Bytes())]; ok { + // We already have this address in the list + continue + } + if _, ok := seenTWs[string(a.Bytes())]; ok { + // We've already added this + continue + } + seenTWs[string(a.Bytes())] = struct{}{} + a = o.normalize(a) + t, err := thinWaistForm(a) + if err != nil { + continue + } + for _, s := range twToObserverSets[string(t.TW.Bytes())] { + addrs = append(addrs, s.cacheMultiaddr(t.Rest)) + } + } + return addrs +} + // Addrs return all activated observed addresses func (o *ObservedAddrManager) Addrs() []ma.Multiaddr { o.mu.RLock() @@ -228,6 +270,8 @@ func (o *ObservedAddrManager) Addrs() []ma.Multiaddr { addrs = append(addrs, s.cacheMultiaddr(t.Rest)) } } + + addrs = o.appendInferredAddrs(m, addrs) return addrs } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go index 1c93815d45a..078a7e17af5 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: pb/identify.proto package pb @@ -168,7 +168,7 @@ func file_pb_identify_proto_rawDescGZIP() []byte { } var file_pb_identify_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_pb_identify_proto_goTypes = []interface{}{ +var file_pb_identify_proto_goTypes = []any{ (*Identify)(nil), // 0: identify.pb.Identify } var file_pb_identify_proto_depIdxs = []int32{ @@ -185,7 +185,7 @@ func file_pb_identify_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_pb_identify_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pb_identify_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Identify); i { case 0: return &v.state diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/ping/ping.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/ping/ping.go index 9a677155932..a846f40499c 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/ping/ping.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/ping/ping.go @@ -20,8 +20,9 @@ import ( var log = logging.Logger("ping") const ( - PingSize = 32 - pingTimeout = time.Second * 60 + PingSize = 32 + pingTimeout = 10 * time.Second + pingDuration = 30 * time.Second ID = "/ipfs/ping/1.0.0" @@ -52,6 +53,8 @@ func (p *PingService) PingHandler(s network.Stream) { } defer s.Scope().ReleaseMemory(PingSize) + s.SetDeadline(time.Now().Add(pingDuration)) + buf := pool.Get(PingSize) defer pool.Put(buf) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go index 8e3a805a583..dc980dd1e04 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: pb/payload.proto package pb @@ -174,7 +174,7 @@ func file_pb_payload_proto_rawDescGZIP() []byte { } var file_pb_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_pb_payload_proto_goTypes = []interface{}{ +var file_pb_payload_proto_goTypes = []any{ (*NoiseExtensions)(nil), // 0: pb.NoiseExtensions (*NoiseHandshakePayload)(nil), // 1: pb.NoiseHandshakePayload } @@ -193,7 +193,7 @@ func file_pb_payload_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_pb_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_pb_payload_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*NoiseExtensions); i { case 0: return &v.state @@ -205,7 +205,7 @@ func file_pb_payload_proto_init() { return nil } } - file_pb_payload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_pb_payload_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*NoiseHandshakePayload); i { case 0: return &v.state diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go index 18d198bbea6..04b5e4d6fe8 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go @@ -26,6 +26,8 @@ import ( "github.com/quic-go/quic-go" ) +const ListenOrder = 1 + var log = logging.Logger("quic-transport") var ErrHolePunching = errors.New("hole punching attempted; no active dial") @@ -103,6 +105,10 @@ func NewTransport(key ic.PrivKey, connManager *quicreuse.ConnManager, psk pnet.P }, nil } +func (t *transport) ListenOrder() int { + return ListenOrder +} + // Dial dials a new QUIC connection func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (_c tpt.CapableConn, _err error) { if ok, isClient, _ := network.GetSimultaneousConnect(ctx); ok && !isClient { diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go index ec2b7b280e4..01e1bcda5ef 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go @@ -9,15 +9,19 @@ import ( ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" + "github.com/prometheus/client_golang/prometheus" "github.com/quic-go/quic-go" quiclogging "github.com/quic-go/quic-go/logging" + quicmetrics "github.com/quic-go/quic-go/metrics" ) type ConnManager struct { reuseUDP4 *reuse reuseUDP6 *reuse enableReuseport bool - enableMetrics bool + + enableMetrics bool + registerer prometheus.Registerer serverConfig *quic.Config clientConfig *quic.Config @@ -40,6 +44,7 @@ func NewConnManager(statelessResetKey quic.StatelessResetKey, tokenKey quic.Toke quicListeners: make(map[string]quicListenerEntry), srk: statelessResetKey, tokenKey: tokenKey, + registerer: prometheus.DefaultRegisterer, } for _, o := range opts { if err := o(cm); err != nil { @@ -48,14 +53,7 @@ func NewConnManager(statelessResetKey quic.StatelessResetKey, tokenKey quic.Toke } quicConf := quicConfig.Clone() - - quicConf.Tracer = func(ctx context.Context, p quiclogging.Perspective, ci quic.ConnectionID) *quiclogging.ConnectionTracer { - var tracer *quiclogging.ConnectionTracer - if qlogTracerDir != "" { - tracer = 
qloggerForDir(qlogTracerDir, p, ci) - } - return tracer - } + quicConf.Tracer = cm.getTracer() serverConfig := quicConf.Clone() cm.clientConfig = quicConf @@ -67,6 +65,31 @@ func NewConnManager(statelessResetKey quic.StatelessResetKey, tokenKey quic.Toke return cm, nil } +func (c *ConnManager) getTracer() func(context.Context, quiclogging.Perspective, quic.ConnectionID) *quiclogging.ConnectionTracer { + return func(ctx context.Context, p quiclogging.Perspective, ci quic.ConnectionID) *quiclogging.ConnectionTracer { + var promTracer *quiclogging.ConnectionTracer + if c.enableMetrics { + switch p { + case quiclogging.PerspectiveClient: + promTracer = quicmetrics.NewClientConnectionTracerWithRegisterer(c.registerer) + case quiclogging.PerspectiveServer: + promTracer = quicmetrics.NewServerConnectionTracerWithRegisterer(c.registerer) + default: + log.Error("invalid logging perspective: %s", p) + } + } + var tracer *quiclogging.ConnectionTracer + if qlogTracerDir != "" { + tracer = qloggerForDir(qlogTracerDir, p, ci) + if promTracer != nil { + tracer = quiclogging.NewMultiplexedConnectionTracer(promTracer, + tracer) + } + } + return tracer + } +} + func (c *ConnManager) getReuse(network string) (*reuse, error) { switch network { case "udp4": @@ -131,6 +154,28 @@ func (c *ConnManager) onListenerClosed(key string) { } } +func (c *ConnManager) SharedNonQUICPacketConn(network string, laddr *net.UDPAddr) (net.PacketConn, error) { + c.quicListenersMu.Lock() + defer c.quicListenersMu.Unlock() + key := laddr.String() + entry, ok := c.quicListeners[key] + if !ok { + return nil, errors.New("expected to be able to share with a QUIC listener, but no QUIC listener found. The QUIC listener should start first") + } + t := entry.ln.transport + if t, ok := t.(*refcountedTransport); ok { + t.IncreaseCount() + ctx, cancel := context.WithCancel(context.Background()) + return &nonQUICPacketConn{ + ctx: ctx, + ctxCancel: cancel, + owningTransport: t, + tr: &t.Transport, + }, nil + } + return nil, errors.New("expected to be able to share with a QUIC listener, but the QUIC listener is not using a refcountedTransport. `DisableReuseport` should not be set") +} + func (c *ConnManager) transportForListen(network string, laddr *net.UDPAddr) (refCountedQuicTransport, error) { if c.enableReuseport { reuse, err := c.getReuse(network) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/nonquic_packetconn.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/nonquic_packetconn.go new file mode 100644 index 00000000000..2f950e76a13 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/nonquic_packetconn.go @@ -0,0 +1,74 @@ +package quicreuse + +import ( + "context" + "net" + "time" + + "github.com/quic-go/quic-go" +) + +// nonQUICPacketConn is a net.PacketConn that can be used to read and write +// non-QUIC packets on a quic.Transport. This lets us reuse this UDP port for +// other transports like WebRTC. +type nonQUICPacketConn struct { + owningTransport refCountedQuicTransport + tr *quic.Transport + ctx context.Context + ctxCancel context.CancelFunc + readCtx context.Context + readCancel context.CancelFunc +} + +// Close implements net.PacketConn. +func (n *nonQUICPacketConn) Close() error { + n.ctxCancel() + + // Don't actually close the underlying transport since someone else might be using it. + // reuse has it's own gc to close unused transports. + n.owningTransport.DecreaseCount() + return nil +} + +// LocalAddr implements net.PacketConn. 
+func (n *nonQUICPacketConn) LocalAddr() net.Addr { + return n.tr.Conn.LocalAddr() +} + +// ReadFrom implements net.PacketConn. +func (n *nonQUICPacketConn) ReadFrom(p []byte) (int, net.Addr, error) { + ctx := n.readCtx + if ctx == nil { + ctx = n.ctx + } + return n.tr.ReadNonQUICPacket(ctx, p) +} + +// SetDeadline implements net.PacketConn. +func (n *nonQUICPacketConn) SetDeadline(t time.Time) error { + // Only used for reads. + return n.SetReadDeadline(t) +} + +// SetReadDeadline implements net.PacketConn. +func (n *nonQUICPacketConn) SetReadDeadline(t time.Time) error { + if t.IsZero() && n.readCtx != nil { + n.readCancel() + n.readCtx = nil + } + n.readCtx, n.readCancel = context.WithDeadline(n.ctx, t) + return nil +} + +// SetWriteDeadline implements net.PacketConn. +func (n *nonQUICPacketConn) SetWriteDeadline(t time.Time) error { + // Unused. quic-go doesn't support deadlines for writes. + return nil +} + +// WriteTo implements net.PacketConn. +func (n *nonQUICPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) { + return n.tr.WriteTo(p, addr) +} + +var _ net.PacketConn = &nonQUICPacketConn{} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/options.go index c3250692099..e87574df76a 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/options.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/options.go @@ -1,5 +1,7 @@ package quicreuse +import "github.com/prometheus/client_golang/prometheus" + type Option func(*ConnManager) error func DisableReuseport() Option { @@ -9,10 +11,14 @@ func DisableReuseport() Option { } } -// EnableMetrics enables Prometheus metrics collection. -func EnableMetrics() Option { +// EnableMetrics enables Prometheus metrics collection. If reg is nil, +// prometheus.DefaultRegisterer will be used as the registerer. 
+func EnableMetrics(reg prometheus.Registerer) Option { return func(m *ConnManager) error { m.enableMetrics = true + if reg != nil { + m.registerer = reg + } return nil } } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/connection.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/connection.go index cd28ec32035..2fba37a970e 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/connection.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/connection.go @@ -16,6 +16,7 @@ import ( ma "github.com/multiformats/go-multiaddr" "github.com/pion/datachannel" + "github.com/pion/sctp" "github.com/pion/webrtc/v3" ) @@ -31,6 +32,8 @@ func (errConnectionTimeout) Error() string { return "connection timeout" } func (errConnectionTimeout) Timeout() bool { return true } func (errConnectionTimeout) Temporary() bool { return false } +var errConnClosed = errors.New("connection closed") + type dataChannel struct { stream datachannel.ReadWriteCloser channel *webrtc.DataChannel @@ -74,6 +77,7 @@ func newConnection( remoteKey ic.PubKey, remoteMultiaddr ma.Multiaddr, incomingDataChannels chan dataChannel, + peerConnectionClosedCh chan struct{}, ) (*connection, error) { ctx, cancel := context.WithCancel(context.Background()) c := &connection{ @@ -102,6 +106,18 @@ func newConnection( } pc.OnConnectionStateChange(c.onConnectionStateChange) + pc.SCTP().OnClose(func(err error) { + if err != nil { + c.closeWithError(fmt.Errorf("%w: %w", errConnClosed, err)) + } + c.closeWithError(errConnClosed) + }) + select { + case <-peerConnectionClosedCh: + c.Close() + return nil, errConnClosed + default: + } return c, nil } @@ -112,27 +128,29 @@ func (c *connection) ConnState() network.ConnectionState { // Close closes the underlying peerconnection. func (c *connection) Close() error { - c.closeOnce.Do(func() { c.closeWithError(errors.New("connection closed")) }) + c.closeWithError(errConnClosed) return nil } // closeWithError is used to Close the connection when the underlying DTLS connection fails func (c *connection) closeWithError(err error) { - c.closeErr = err - // cancel must be called after closeErr is set. This ensures interested goroutines waiting on - // ctx.Done can read closeErr without holding the conn lock. - c.cancel() - // closing peerconnection will close the datachannels associated with the streams - c.pc.Close() - - c.m.Lock() - streams := c.streams - c.streams = nil - c.m.Unlock() - for _, s := range streams { - s.closeForShutdown(err) - } - c.scope.Done() + c.closeOnce.Do(func() { + c.closeErr = err + // cancel must be called after closeErr is set. This ensures interested goroutines waiting on + // ctx.Done can read closeErr without holding the conn lock. + c.cancel() + // closing peerconnection will close the datachannels associated with the streams + c.pc.Close() + + c.m.Lock() + streams := c.streams + c.streams = nil + c.m.Unlock() + for _, s := range streams { + s.closeForShutdown(err) + } + c.scope.Done() + }) } func (c *connection) IsClosed() bool { @@ -155,6 +173,12 @@ func (c *connection) OpenStream(ctx context.Context) (network.MuxedStream, error } rwc, err := c.detachChannel(ctx, dc) if err != nil { + // There's a race between webrtc.SCTP.OnClose callback and the underlying + // association closing. It's nicer to close the connection here. 
+ if errors.Is(err, sctp.ErrStreamClosed) { + c.closeWithError(errConnClosed) + return nil, c.closeErr + } dc.Close() return nil, fmt.Errorf("detach channel failed for stream(%d): %w", streamID, err) } @@ -209,9 +233,7 @@ func (c *connection) removeStream(id uint16) { func (c *connection) onConnectionStateChange(state webrtc.PeerConnectionState) { if state == webrtc.PeerConnectionStateFailed || state == webrtc.PeerConnectionStateClosed { - c.closeOnce.Do(func() { - c.closeWithError(errConnectionTimeout{}) - }) + c.closeWithError(errConnectionTimeout{}) } } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/listener.go index 3f465b34fcc..96174f3457b 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/listener.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/listener.go @@ -276,6 +276,7 @@ func (l *listener) setupConnection( remotePubKey, remoteMultiaddr, w.IncomingDataChannels, + w.PeerConnectionClosedCh, ) if err != nil { return nil, err diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/logger.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/logger.go index ac9fab7672f..ebe5fa309f5 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/logger.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/logger.go @@ -12,6 +12,8 @@ var pionLog = logging.Logger("webrtc-transport-pion") // pionLogger wraps the StandardLogger interface to provide a LeveledLogger interface // as expected by pion +// Pion logs are too noisy and have invalid log levels. pionLogger downgrades all the +// logs to debug type pionLogger struct { logging.StandardLogger } @@ -25,20 +27,32 @@ func (l pionLogger) Debug(s string) { } func (l pionLogger) Error(s string) { - l.StandardLogger.Error(s) + l.StandardLogger.Debug(s) +} + +func (l pionLogger) Errorf(s string, args ...interface{}) { + l.StandardLogger.Debugf(s, args...) } func (l pionLogger) Info(s string) { - l.StandardLogger.Info(s) + l.StandardLogger.Debug(s) +} + +func (l pionLogger) Infof(s string, args ...interface{}) { + l.StandardLogger.Debugf(s, args...) } + func (l pionLogger) Warn(s string) { - l.StandardLogger.Warn(s) + l.StandardLogger.Debug(s) +} + +func (l pionLogger) Warnf(s string, args ...interface{}) { + l.StandardLogger.Debugf(s, args...) } func (l pionLogger) Trace(s string) { l.StandardLogger.Debug(s) } - func (l pionLogger) Tracef(s string, args ...interface{}) { l.StandardLogger.Debugf(s, args...) } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb/message.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb/message.pb.go index 384bddd2891..44e787cdb7a 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb/message.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb/message.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.34.2 +// protoc v5.27.2 // source: message.proto package pb @@ -177,7 +177,7 @@ func file_message_proto_rawDescGZIP() []byte { var file_message_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_message_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_message_proto_goTypes = []interface{}{ +var file_message_proto_goTypes = []any{ (Message_Flag)(0), // 0: Message.Flag (*Message)(nil), // 1: Message } @@ -196,7 +196,7 @@ func file_message_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_message_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/transport.go index 68a2988c780..c4c16fd4025 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/transport.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/transport.go @@ -1,15 +1,5 @@ // Package libp2pwebrtc implements the WebRTC transport for go-libp2p, // as described in https://github.com/libp2p/specs/tree/master/webrtc. -// -// At this point, this package is EXPERIMENTAL, and the WebRTC transport is not enabled by default. -// While we're fairly confident that the implementation correctly implements the specification, -// we're not making any guarantees regarding its security (especially regarding resource exhaustion attacks). -// Fixes, even for security-related issues, will be conducted in the open. -// -// Experimentation is encouraged. Please open an issue if you encounter any problems with this transport. -// -// The udpmux subpackage contains the logic for multiplexing multiple WebRTC (ICE) -// connections over a single UDP socket. 
package libp2pwebrtc import ( @@ -36,6 +26,7 @@ import ( "github.com/libp2p/go-libp2p/core/sec" tpt "github.com/libp2p/go-libp2p/core/transport" "github.com/libp2p/go-libp2p/p2p/security/noise" + libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic" "github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb" "github.com/libp2p/go-msgio" @@ -88,6 +79,8 @@ type WebRTCTransport struct { noiseTpt *noise.Transport localPeerId peer.ID + listenUDP func(network string, laddr *net.UDPAddr) (net.PacketConn, error) + // timeouts peerConnectionTimeouts iceTimeouts @@ -105,7 +98,9 @@ type iceTimeouts struct { Keepalive time.Duration } -func New(privKey ic.PrivKey, psk pnet.PSK, gater connmgr.ConnectionGater, rcmgr network.ResourceManager, opts ...Option) (*WebRTCTransport, error) { +type ListenUDPFn func(network string, laddr *net.UDPAddr) (net.PacketConn, error) + +func New(privKey ic.PrivKey, psk pnet.PSK, gater connmgr.ConnectionGater, rcmgr network.ResourceManager, listenUDP ListenUDPFn, opts ...Option) (*WebRTCTransport, error) { if psk != nil { log.Error("WebRTC doesn't support private networks yet.") return nil, fmt.Errorf("WebRTC doesn't support private networks yet") @@ -151,6 +146,7 @@ func New(privKey ic.PrivKey, psk pnet.PSK, gater connmgr.ConnectionGater, rcmgr noiseTpt: noiseTpt, localPeerId: localPeerID, + listenUDP: listenUDP, peerConnectionTimeouts: iceTimeouts{ Disconnect: DefaultDisconnectedTimeout, Failed: DefaultFailedTimeout, @@ -167,6 +163,10 @@ func New(privKey ic.PrivKey, psk pnet.PSK, gater connmgr.ConnectionGater, rcmgr return transport, nil } +func (t *WebRTCTransport) ListenOrder() int { + return libp2pquic.ListenOrder + 1 // We want to listen after QUIC listens so we can possibly reuse the same port. +} + func (t *WebRTCTransport) Protocols() []int { return []int{ma.P_WEBRTC_DIRECT} } @@ -200,7 +200,7 @@ func (t *WebRTCTransport) Listen(addr ma.Multiaddr) (tpt.Listener, error) { return nil, fmt.Errorf("listener could not resolve udp address: %w", err) } - socket, err := net.ListenUDP(nw, udpAddr) + socket, err := t.listenUDP(nw, udpAddr) if err != nil { return nil, fmt.Errorf("listen on udp: %w", err) } @@ -213,7 +213,7 @@ func (t *WebRTCTransport) Listen(addr ma.Multiaddr) (tpt.Listener, error) { return listener, nil } -func (t *WebRTCTransport) listenSocket(socket *net.UDPConn) (tpt.Listener, error) { +func (t *WebRTCTransport) listenSocket(socket net.PacketConn) (tpt.Listener, error) { listenerMultiaddr, err := manet.FromNetAddr(socket.LocalAddr()) if err != nil { return nil, err @@ -269,6 +269,7 @@ func (t *WebRTCTransport) dial(ctx context.Context, scope network.ConnManagement } if tConn != nil { _ = tConn.Close() + tConn = nil } } }() @@ -399,6 +400,7 @@ func (t *WebRTCTransport) dial(ctx context.Context, scope network.ConnManagement remotePubKey, remoteMultiaddrWithoutCerthash, w.IncomingDataChannels, + w.PeerConnectionClosedCh, ) if err != nil { return nil, err @@ -572,9 +574,10 @@ func detachHandshakeDataChannel(ctx context.Context, dc *webrtc.DataChannel) (da // a small window of time where datachannels created by the peer may not surface to us and cause a // memory leak. 
type webRTCConnection struct { - PeerConnection *webrtc.PeerConnection - HandshakeDataChannel *webrtc.DataChannel - IncomingDataChannels chan dataChannel + PeerConnection *webrtc.PeerConnection + HandshakeDataChannel *webrtc.DataChannel + IncomingDataChannels chan dataChannel + PeerConnectionClosedCh chan struct{} } func newWebRTCConnection(settings webrtc.SettingEngine, config webrtc.Configuration) (webRTCConnection, error) { @@ -613,10 +616,20 @@ func newWebRTCConnection(settings webrtc.SettingEngine, config webrtc.Configurat } }) }) + + connectionClosedCh := make(chan struct{}, 1) + pc.SCTP().OnClose(func(err error) { + // We only need one message. Closing a connection is a problem as pion might invoke the callback more than once. + select { + case connectionClosedCh <- struct{}{}: + default: + } + }) return webRTCConnection{ - PeerConnection: pc, - HandshakeDataChannel: handshakeDataChannel, - IncomingDataChannels: incomingDataChannels, + PeerConnection: pc, + HandshakeDataChannel: handshakeDataChannel, + IncomingDataChannels: incomingDataChannels, + PeerConnectionClosedCh: connectionClosedCh, }, nil } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/udpmux/mux.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/udpmux/mux.go index 98e60e3f19a..85392c5d56e 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/udpmux/mux.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webrtc/udpmux/mux.go @@ -1,8 +1,11 @@ +// The udpmux package contains the logic for multiplexing multiple WebRTC (ICE) +// connections over a single UDP socket. package udpmux import ( "bytes" "context" + "errors" "fmt" "io" "net" @@ -141,7 +144,7 @@ func (mux *UDPMux) readLoop() { n, addr, err := mux.socket.ReadFrom(buf) if err != nil { - if strings.Contains(err.Error(), "use of closed network connection") { + if strings.Contains(err.Error(), "use of closed network connection") || errors.Is(err, context.Canceled) { log.Debugf("readLoop exiting: socket %s closed", mux.socket.LocalAddr()) } else { log.Errorf("error reading from socket %s: %v", mux.socket.LocalAddr(), err) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go index d7a1b885b89..3ff72830d10 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go @@ -2,10 +2,11 @@ package websocket import ( "crypto/tls" + "errors" "fmt" "net" "net/http" - "strings" + "sync" "github.com/libp2p/go-libp2p/core/transport" @@ -22,8 +23,11 @@ type listener struct { laddr ma.Multiaddr - closed chan struct{} incoming chan *Conn + + closeOnce sync.Once + closeErr error + closed chan struct{} } func (pwma *parsedWebsocketMultiaddr) toMultiaddr() ma.Multiaddr { @@ -122,7 +126,6 @@ func (l *listener) Accept() (manet.Conn, error) { c.Close() return nil, err } - return mnc, nil case <-l.closed: return nil, transport.ErrListenerClosed @@ -134,13 +137,13 @@ func (l *listener) Addr() net.Addr { } func (l *listener) Close() error { - l.server.Close() - err := l.nl.Close() - <-l.closed - if strings.Contains(err.Error(), "use of closed network connection") { - return transport.ErrListenerClosed - } - return err + l.closeOnce.Do(func() { + err1 := l.nl.Close() + err2 := l.server.Close() + <-l.closed + l.closeErr = errors.Join(err1, err2) + }) + return l.closeErr } func (l *listener) Multiaddr() ma.Multiaddr { diff --git 
a/vendor/github.com/libp2p/go-libp2p/version.json b/vendor/github.com/libp2p/go-libp2p/version.json index 808595258cf..53072426c19 100644 --- a/vendor/github.com/libp2p/go-libp2p/version.json +++ b/vendor/github.com/libp2p/go-libp2p/version.json @@ -1,3 +1,3 @@ { - "version": "v0.35.2" + "version": "v0.36.2" } diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index e57d86afecb..8d5a2a47893 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -83,6 +83,8 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://github.com/egbakou/domainverifier * https://github.com/semihalev/sdns * https://github.com/wintbiit/NineDNS +* https://linuxcontainers.org/incus/ +* https://ifconfig.es Send pull request if you want to be listed here. @@ -146,6 +148,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 3225 - DO bit (DNSSEC OK) * 340{1,2,3} - NAPTR record * 3445 - Limiting the scope of (DNS)KEY +* 3596 - AAAA record * 3597 - Unknown RRs * 4025 - A Method for Storing IPsec Keying Material in DNS * 403{3,4,5} - DNSSEC + validation functions @@ -186,6 +189,9 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 8777 - DNS Reverse IP Automatic Multicast Tunneling (AMT) Discovery * 8914 - Extended DNS Errors * 8976 - Message Digest for DNS Zones (ZONEMD RR) +* 9460 - Service Binding and Parameter Specification via the DNS +* 9461 - Service Binding Mapping for DNS Servers +* 9462 - Discovery of Designated Resolvers ## Loosely Based Upon diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index 02d9199a49b..68e766c689c 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -198,10 +198,12 @@ func IsDomainName(s string) (labels int, ok bool) { off int begin int wasDot bool + escape bool ) for i := 0; i < len(s); i++ { switch s[i] { case '\\': + escape = !escape if off+1 > lenmsg { return labels, false } @@ -217,6 +219,7 @@ func IsDomainName(s string) (labels int, ok bool) { wasDot = false case '.': + escape = false if i == 0 && len(s) > 1 { // leading dots are not legal except for the root zone return labels, false @@ -243,10 +246,13 @@ func IsDomainName(s string) (labels int, ok bool) { labels++ begin = i + 1 default: + escape = false wasDot = false } } - + if escape { + return labels, false + } return labels, true } diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 1b58e8f0aa9..c1bbdaae2e9 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -756,36 +756,48 @@ const ( ExtendedErrorCodeNoReachableAuthority ExtendedErrorCodeNetworkError ExtendedErrorCodeInvalidData + ExtendedErrorCodeSignatureExpiredBeforeValid + ExtendedErrorCodeTooEarly + ExtendedErrorCodeUnsupportedNSEC3IterValue + ExtendedErrorCodeUnableToConformToPolicy + ExtendedErrorCodeSynthesized + ExtendedErrorCodeInvalidQueryType ) // ExtendedErrorCodeToString maps extended error info codes to a human readable // description. 
var ExtendedErrorCodeToString = map[uint16]string{ - ExtendedErrorCodeOther: "Other", - ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm", - ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type", - ExtendedErrorCodeStaleAnswer: "Stale Answer", - ExtendedErrorCodeForgedAnswer: "Forged Answer", - ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate", - ExtendedErrorCodeDNSBogus: "DNSSEC Bogus", - ExtendedErrorCodeSignatureExpired: "Signature Expired", - ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid", - ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing", - ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing", - ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set", - ExtendedErrorCodeNSECMissing: "NSEC Missing", - ExtendedErrorCodeCachedError: "Cached Error", - ExtendedErrorCodeNotReady: "Not Ready", - ExtendedErrorCodeBlocked: "Blocked", - ExtendedErrorCodeCensored: "Censored", - ExtendedErrorCodeFiltered: "Filtered", - ExtendedErrorCodeProhibited: "Prohibited", - ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer", - ExtendedErrorCodeNotAuthoritative: "Not Authoritative", - ExtendedErrorCodeNotSupported: "Not Supported", - ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority", - ExtendedErrorCodeNetworkError: "Network Error", - ExtendedErrorCodeInvalidData: "Invalid Data", + ExtendedErrorCodeOther: "Other", + ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm", + ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type", + ExtendedErrorCodeStaleAnswer: "Stale Answer", + ExtendedErrorCodeForgedAnswer: "Forged Answer", + ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate", + ExtendedErrorCodeDNSBogus: "DNSSEC Bogus", + ExtendedErrorCodeSignatureExpired: "Signature Expired", + ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid", + ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing", + ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing", + ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set", + ExtendedErrorCodeNSECMissing: "NSEC Missing", + ExtendedErrorCodeCachedError: "Cached Error", + ExtendedErrorCodeNotReady: "Not Ready", + ExtendedErrorCodeBlocked: "Blocked", + ExtendedErrorCodeCensored: "Censored", + ExtendedErrorCodeFiltered: "Filtered", + ExtendedErrorCodeProhibited: "Prohibited", + ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer", + ExtendedErrorCodeNotAuthoritative: "Not Authoritative", + ExtendedErrorCodeNotSupported: "Not Supported", + ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority", + ExtendedErrorCodeNetworkError: "Network Error", + ExtendedErrorCodeInvalidData: "Invalid Data", + ExtendedErrorCodeSignatureExpiredBeforeValid: "Signature Expired Before Valid", + ExtendedErrorCodeTooEarly: "Too Early", + ExtendedErrorCodeUnsupportedNSEC3IterValue: "Unsupported NSEC3 Iterations Value", + ExtendedErrorCodeUnableToConformToPolicy: "Unable To Conform To Policy", + ExtendedErrorCodeSynthesized: "Synthesized", + ExtendedErrorCodeInvalidQueryType: "Invalid Query Type", } // StringToExtendedErrorCode is a map from human readable descriptions to diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index 8294d03958a..5fa7f9e8334 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -714,7 +714,7 @@ func (h *MsgHdr) String() string { return s } -// Pack packs a Msg: it is converted to to wire format. +// Pack packs a Msg: it is converted to wire format. 
// If the dns.Compress is true the message will be in compressed wire format. func (dns *Msg) Pack() (msg []byte, err error) { return dns.PackBuffer(nil) diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index 1f92ae42165..e26e8027a40 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -101,12 +101,13 @@ type ttlState struct { isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive } -// NewRR reads the RR contained in the string s. Only the first RR is returned. +// NewRR reads a string s and returns the first RR. // If s contains no records, NewRR will return nil with no error. // -// The class defaults to IN and TTL defaults to 3600. The full zone file syntax -// like $TTL, $ORIGIN, etc. is supported. All fields of the returned RR are -// set, except RR.Header().Rdlength which is set to 0. +// The class defaults to IN, TTL defaults to 3600, and +// origin for resolving relative domain names defaults to the DNS root (.). +// Full zone file syntax is supported, including directives like $TTL and $ORIGIN. +// All fields of the returned RR are set from the read data, except RR.Header().Rdlength which is set to 0. func NewRR(s string) (RR, error) { if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline return ReadRR(strings.NewReader(s+"\n"), "") @@ -1282,7 +1283,7 @@ func stringToCm(token string) (e, m uint8, ok bool) { cmeters *= 10 } } - // This slighly ugly condition will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm). + // This slightly ugly condition will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm). if !hasCM || mStr != "" { meters, err = strconv.Atoi(mStr) // RFC1876 states the max value is 90000000.00. The latter two conditions enforce it. diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index 1a90c61f8d4..c1a76995e7b 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -51,25 +51,24 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { switch l.value { case zString: empty = false - if len(l.token) > 255 { - // split up tokens that are larger than 255 into 255-chunks - sx := []string{} - p, i := 0, 255 - for { - if i <= len(l.token) { - sx = append(sx, l.token[p:i]) - } else { - sx = append(sx, l.token[p:]) - break - - } - p, i = p+255, i+255 + // split up tokens that are larger than 255 into 255-chunks + sx := []string{} + p := 0 + for { + i, ok := escapedStringOffset(l.token[p:], 255) + if !ok { + return nil, &ParseError{err: errstr, lex: l} } - s = append(s, sx...) - break - } + if i != -1 && p+i != len(l.token) { + sx = append(sx, l.token[p:p+i]) + } else { + sx = append(sx, l.token[p:]) + break - s = append(s, l.token) + } + p += i + } + s = append(s, sx...) case zBlank: if quote { // zBlank can only be seen in between txt parts. @@ -1920,3 +1919,39 @@ func (rr *APL) parse(c *zlexer, o string) *ParseError { rr.Prefixes = prefixes return nil } + +// escapedStringOffset finds the offset within a string (which may contain escape +// sequences) that corresponds to a certain byte offset. If the input offset is +// out of bounds, -1 is returned (which is *not* considered an error). 
+func escapedStringOffset(s string, desiredByteOffset int) (int, bool) { + if desiredByteOffset == 0 { + return 0, true + } + + currentByteOffset, i := 0, 0 + + for i < len(s) { + currentByteOffset += 1 + + // Skip escape sequences + if s[i] != '\\' { + // Single plain byte, not an escape sequence. + i++ + } else if isDDD(s[i+1:]) { + // Skip backslash and DDD. + i += 4 + } else if len(s[i+1:]) < 1 { + // No character following the backslash; that's an error. + return 0, false + } else { + // Skip backslash and following byte. + i += 2 + } + + if currentByteOffset >= desiredByteOffset { + return i, true + } + } + + return -1, true +} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 0207d6da225..81580d1e5fd 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -188,6 +188,14 @@ type DecorateReader func(Reader) Reader // Implementations should never return a nil Writer. type DecorateWriter func(Writer) Writer +// MsgInvalidFunc is a listener hook for observing incoming messages that were discarded +// because they could not be parsed. +// Every message that is read by a Reader will eventually be provided to the Handler, +// rejected (or ignored) by the MsgAcceptFunc, or passed to this function. +type MsgInvalidFunc func(m []byte, err error) + +func DefaultMsgInvalidFunc(m []byte, err error) {} + // A Server defines parameters for running an DNS server. type Server struct { // Address to listen on, ":dns" if empty. @@ -233,6 +241,8 @@ type Server struct { // AcceptMsgFunc will check the incoming message and will reject it early in the process. // By default DefaultMsgAcceptFunc will be used. MsgAcceptFunc MsgAcceptFunc + // MsgInvalidFunc is optional, will be called if a message is received but cannot be parsed. + MsgInvalidFunc MsgInvalidFunc // Shutdown handling lock sync.RWMutex @@ -277,6 +287,9 @@ func (srv *Server) init() { if srv.MsgAcceptFunc == nil { srv.MsgAcceptFunc = DefaultMsgAcceptFunc } + if srv.MsgInvalidFunc == nil { + srv.MsgInvalidFunc = DefaultMsgInvalidFunc + } if srv.Handler == nil { srv.Handler = DefaultServeMux } @@ -531,6 +544,7 @@ func (srv *Server) serveUDP(l net.PacketConn) error { if cap(m) == srv.UDPSize { srv.udpPool.Put(m[:srv.UDPSize]) } + srv.MsgInvalidFunc(m, ErrShortRead) continue } wg.Add(1) @@ -611,6 +625,7 @@ func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u net.PacketConn func (srv *Server) serveDNS(m []byte, w *response) { dh, off, err := unpackMsgHdr(m, 0) if err != nil { + srv.MsgInvalidFunc(m, err) // Let client hang, they are sending crap; any reply can be used to amplify. return } @@ -620,10 +635,12 @@ func (srv *Server) serveDNS(m []byte, w *response) { switch action := srv.MsgAcceptFunc(dh); action { case MsgAccept: - if req.unpack(dh, m, off) == nil { + err := req.unpack(dh, m, off) + if err == nil { break } + srv.MsgInvalidFunc(m, err) fallthrough case MsgReject, MsgRejectNotImplemented: opcode := req.Opcode diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index c1a740b6840..310c7d11f5a 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -14,7 +14,7 @@ import ( // SVCBKey is the type of the keys used in the SVCB RR. type SVCBKey uint16 -// Keys defined in draft-ietf-dnsop-svcb-https-08 Section 14.3.2. 
+// Keys defined in rfc9460 const ( SVCB_MANDATORY SVCBKey = iota SVCB_ALPN @@ -23,7 +23,8 @@ const ( SVCB_IPV4HINT SVCB_ECHCONFIG SVCB_IPV6HINT - SVCB_DOHPATH // draft-ietf-add-svcb-dns-02 Section 9 + SVCB_DOHPATH // rfc9461 Section 5 + SVCB_OHTTP // rfc9540 Section 8 svcb_RESERVED SVCBKey = 65535 ) @@ -37,6 +38,7 @@ var svcbKeyToStringMap = map[SVCBKey]string{ SVCB_ECHCONFIG: "ech", SVCB_IPV6HINT: "ipv6hint", SVCB_DOHPATH: "dohpath", + SVCB_OHTTP: "ohttp", } var svcbStringToKeyMap = reverseSVCBKeyMap(svcbKeyToStringMap) @@ -201,6 +203,8 @@ func makeSVCBKeyValue(key SVCBKey) SVCBKeyValue { return new(SVCBIPv6Hint) case SVCB_DOHPATH: return new(SVCBDoHPath) + case SVCB_OHTTP: + return new(SVCBOhttp) case svcb_RESERVED: return nil default: @@ -771,8 +775,8 @@ func (s *SVCBIPv6Hint) copy() SVCBKeyValue { // SVCBDoHPath pair is used to indicate the URI template that the // clients may use to construct a DNS over HTTPS URI. // -// See RFC xxxx (https://datatracker.ietf.org/doc/html/draft-ietf-add-svcb-dns-02) -// and RFC yyyy (https://datatracker.ietf.org/doc/html/draft-ietf-add-ddr-06). +// See RFC 9461 (https://datatracker.ietf.org/doc/html/rfc9461) +// and RFC 9462 (https://datatracker.ietf.org/doc/html/rfc9462). // // A basic example of using the dohpath option together with the alpn // option to indicate support for DNS over HTTPS on a certain path: @@ -816,6 +820,44 @@ func (s *SVCBDoHPath) copy() SVCBKeyValue { } } +// The "ohttp" SvcParamKey is used to indicate that a service described in a SVCB RR +// can be accessed as a target using an associated gateway. +// Both the presentation and wire-format values for the "ohttp" parameter MUST be empty. +// +// See RFC 9460 (https://datatracker.ietf.org/doc/html/rfc9460/) +// and RFC 9230 (https://datatracker.ietf.org/doc/html/rfc9230/) +// +// A basic example of using the dohpath option together with the alpn +// option to indicate support for DNS over HTTPS on a certain path: +// +// s := new(dns.SVCB) +// s.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeSVCB, Class: dns.ClassINET} +// e := new(dns.SVCBAlpn) +// e.Alpn = []string{"h2", "h3"} +// p := new(dns.SVCBOhttp) +// s.Value = append(s.Value, e, p) +type SVCBOhttp struct{} + +func (*SVCBOhttp) Key() SVCBKey { return SVCB_OHTTP } +func (*SVCBOhttp) copy() SVCBKeyValue { return &SVCBOhttp{} } +func (*SVCBOhttp) pack() ([]byte, error) { return []byte{}, nil } +func (*SVCBOhttp) String() string { return "" } +func (*SVCBOhttp) len() int { return 0 } + +func (*SVCBOhttp) unpack(b []byte) error { + if len(b) != 0 { + return errors.New("dns: svcbotthp: svcbotthp must have no value") + } + return nil +} + +func (*SVCBOhttp) parse(b string) error { + if b != "" { + return errors.New("dns: svcbotthp: svcbotthp must have no value") + } + return nil +} + // SVCBLocal pair is intended for experimental/private use. The key is recommended // to be in the range [SVCB_PRIVATE_LOWER, SVCB_PRIVATE_UPPER]. 
// Basic use pattern for creating a keyNNNNN option: diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index 8e3129cbd25..7a34c14ca0a 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -96,6 +96,7 @@ const ( TypeLP uint16 = 107 TypeEUI48 uint16 = 108 TypeEUI64 uint16 = 109 + TypeNXNAME uint16 = 128 TypeURI uint16 = 256 TypeCAA uint16 = 257 TypeAVC uint16 = 258 @@ -294,6 +295,19 @@ func (*NULL) parse(c *zlexer, origin string) *ParseError { return &ParseError{err: "NULL records do not have a presentation format"} } +// NXNAME is a meta record. See https://www.iana.org/go/draft-ietf-dnsop-compact-denial-of-existence-04 +// Reference: https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml +type NXNAME struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *NXNAME) String() string { return rr.Hdr.String() } + +func (*NXNAME) parse(c *zlexer, origin string) *ParseError { + return &ParseError{err: "NXNAME records do not have a presentation format"} +} + // CNAME RR. See RFC 1034. type CNAME struct { Hdr RR_Header diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index dc34e5902be..00c8629f278 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 58} +var Version = v{1, 1, 62} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index 05b3c5addeb..5cfbb516af5 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -1,6 +1,7 @@ package dns import ( + "crypto/tls" "fmt" "time" ) @@ -20,6 +21,7 @@ type Transfer struct { TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations. TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) tsigTimersOnly bool + TLS *tls.Config // TLS config. 
If Xfr over TLS will be attempted } func (t *Transfer) tsigProvider() TsigProvider { @@ -57,7 +59,11 @@ func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { } if t.Conn == nil { - t.Conn, err = DialTimeout("tcp", a, timeout) + if t.TLS != nil { + t.Conn, err = DialTimeoutWithTLS("tcp-tls", a, t.TLS, timeout) + } else { + t.Conn, err = DialTimeout("tcp", a, timeout) + } if err != nil { return nil, err } @@ -182,7 +188,7 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { if v, ok := rr.(*SOA); ok { if v.Serial == serial { n++ - // quit if it's a full axfr or the the servers' SOA is repeated the third time + // quit if it's a full axfr or the servers' SOA is repeated the third time if axfr && n == 2 || n == 3 { c <- &Envelope{in.Answer, nil} return @@ -203,6 +209,7 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { // ch := make(chan *dns.Envelope) // tr := new(dns.Transfer) // var wg sync.WaitGroup +// wg.Add(1) // go func() { // tr.Out(w, r, ch) // wg.Done() diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index 03029fb3ebb..330c05395f3 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -886,6 +886,15 @@ func (r1 *NULL) isDuplicate(_r2 RR) bool { return true } +func (r1 *NXNAME) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NXNAME) + if !ok { + return false + } + _ = r2 + return true +} + func (r1 *NXT) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*NXT) if !ok { diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index 39b3bc8102e..5a6cf4c6ad5 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -706,6 +706,10 @@ func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress b return off, nil } +func (rr *NXNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + return off, nil +} + func (rr *NXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packDomainName(rr.NextDomain, msg, off, compression, false) if err != nil { @@ -2266,6 +2270,13 @@ func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *NXNAME) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + return off, nil +} + func (rr *NXT) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 2c70fc44d6f..11f13ecf9c2 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -60,6 +60,7 @@ var TypeToRR = map[uint16]func() RR{ TypeNSEC3: func() RR { return new(NSEC3) }, TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, TypeNULL: func() RR { return new(NULL) }, + TypeNXNAME: func() RR { return new(NXNAME) }, TypeNXT: func() RR { return new(NXT) }, TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, TypeOPT: func() RR { return new(OPT) }, @@ -146,6 +147,7 @@ var TypeToString = map[uint16]string{ TypeNSEC3: "NSEC3", TypeNSEC3PARAM: "NSEC3PARAM", TypeNULL: "NULL", + TypeNXNAME: "NXNAME", TypeNXT: "NXT", TypeNone: "None", TypeOPENPGPKEY: "OPENPGPKEY", @@ -230,6 +232,7 @@ func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } func (rr *NULL) Header() *RR_Header { return 
&rr.Hdr } +func (rr *NXNAME) Header() *RR_Header { return &rr.Hdr } func (rr *NXT) Header() *RR_Header { return &rr.Hdr } func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } func (rr *OPT) Header() *RR_Header { return &rr.Hdr } @@ -594,6 +597,11 @@ func (rr *NULL) len(off int, compression map[string]struct{}) int { return l } +func (rr *NXNAME) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + return l +} + func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) @@ -1107,6 +1115,10 @@ func (rr *NULL) copy() RR { return &NULL{rr.Hdr, rr.Data} } +func (rr *NXNAME) copy() RR { + return &NXNAME{rr.Hdr} +} + func (rr *NXT) copy() RR { return &NXT{*rr.NSEC.copy().(*NSEC)} } diff --git a/vendor/github.com/multiformats/go-multiaddr/protocols.go b/vendor/github.com/multiformats/go-multiaddr/protocols.go index b01e6cb8b04..d3117b24efb 100644 --- a/vendor/github.com/multiformats/go-multiaddr/protocols.go +++ b/vendor/github.com/multiformats/go-multiaddr/protocols.go @@ -26,6 +26,7 @@ const ( P_P2P = 421 P_IPFS = P_P2P // alias for backwards compatibility P_HTTP = 480 + P_HTTP_PATH = 481 P_HTTPS = 443 // deprecated alias for /tls/http P_ONION = 444 // also for backwards compatibility P_ONION3 = 445 @@ -206,6 +207,13 @@ var ( Code: P_HTTP, VCode: CodeToVarint(P_HTTP), } + protoHTTPPath = Protocol{ + Name: "http-path", + Code: P_HTTP_PATH, + VCode: CodeToVarint(P_HTTP_PATH), + Size: LengthPrefixedVarSize, + Transcoder: TranscoderHTTPPath, + } protoHTTPS = Protocol{ Name: "https", Code: P_HTTPS, @@ -301,6 +309,7 @@ func init() { protoWEBTRANSPORT, protoCERTHASH, protoHTTP, + protoHTTPPath, protoHTTPS, protoP2P, protoUNIX, diff --git a/vendor/github.com/multiformats/go-multiaddr/transcoders.go b/vendor/github.com/multiformats/go-multiaddr/transcoders.go index 538774016cc..0d0327e58fe 100644 --- a/vendor/github.com/multiformats/go-multiaddr/transcoders.go +++ b/vendor/github.com/multiformats/go-multiaddr/transcoders.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "net" + "net/url" "strconv" "strings" @@ -454,3 +455,30 @@ func validateCertHash(b []byte) error { _, err := mh.Decode(b) return err } + +var TranscoderHTTPPath = NewTranscoderFromFunctions(httpPathStB, httpPathBtS, validateHTTPPath) + +func httpPathStB(s string) ([]byte, error) { + unescaped, err := url.QueryUnescape(s) + if err != nil { + return nil, err + } + if len(unescaped) == 0 { + return nil, fmt.Errorf("empty http path is not allowed") + } + return []byte(unescaped), err +} + +func httpPathBtS(b []byte) (string, error) { + if len(b) == 0 { + return "", fmt.Errorf("empty http path is not allowed") + } + return url.QueryEscape(string(b)), nil +} + +func validateHTTPPath(b []byte) error { + if len(b) == 0 { + return fmt.Errorf("empty http path is not allowed") + } + return nil // We can represent any byte slice when we escape it. 
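The new /http-path component and TranscoderHTTPPath in this hunk can be exercised roughly as follows; an illustrative sketch only, with a made-up address. The percent-escaped slash keeps the path inside a single multiaddr segment.

package main

import (
	"fmt"
	"log"

	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	addr, err := ma.NewMultiaddr("/dns4/example.com/tcp/443/tls/http/http-path/api%2Fv1")
	if err != nil {
		log.Fatal(err)
	}
	// ValueForProtocol returns the percent-escaped form ("api%2Fv1" here);
	// the transcoder stores the unescaped bytes internally.
	path, err := addr.ValueForProtocol(ma.P_HTTP_PATH)
	fmt.Println(path, err)
}
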
+} diff --git a/vendor/github.com/multiformats/go-multiaddr/version.json b/vendor/github.com/multiformats/go-multiaddr/version.json index 03cac9ee937..d3f796811f0 100644 --- a/vendor/github.com/multiformats/go-multiaddr/version.json +++ b/vendor/github.com/multiformats/go-multiaddr/version.json @@ -1,3 +1,3 @@ { - "version": "v0.12.4" + "version": "v0.13.0" } diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE new file mode 100644 index 00000000000..bbc7b897cbc --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/LICENSE @@ -0,0 +1,31 @@ +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile new file mode 100644 index 00000000000..e33ee173036 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/Makefile @@ -0,0 +1,13 @@ +include $(GOROOT)/src/Make.inc + +TARG=bitbucket.org/ww/goautoneg +GOFILES=autoneg.go + +include $(GOROOT)/src/Make.pkg + +format: + gofmt -w *.go + +docs: + gomake clean + godoc ${TARG} > README.txt diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt similarity index 100% rename from vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt rename to vendor/github.com/munnerz/goautoneg/README.txt diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go similarity index 52% rename from vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go rename to vendor/github.com/munnerz/goautoneg/autoneg.go index a21b9d15dd8..1dd1cad6466 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/munnerz/goautoneg/autoneg.go @@ -1,28 +1,28 @@ /* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - HTTP Content-Type Autonegotiation. 
The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -36,6 +36,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + package goautoneg import ( @@ -51,16 +52,14 @@ type Accept struct { Params map[string]string } -// For internal use, so that we can use the sort interface -type accept_slice []Accept +// acceptSlice is defined to implement sort interface. 
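The goautoneg rewrite below keeps the public ParseAccept/Negotiate behaviour while dropping the per-part allocations; a small sketch of how a caller might use them, assuming the vendored github.com/munnerz/goautoneg import path and an illustrative Accept header:

package main

import (
	"fmt"

	"github.com/munnerz/goautoneg"
)

func main() {
	header := "text/html;q=0.9, application/json"

	// ParseAccept still yields clauses sorted by quality, highest first.
	for _, a := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%.1f\n", a.Type, a.SubType, a.Q)
	}

	// Negotiate picks the best match among the server-supported types.
	best := goautoneg.Negotiate(header, []string{"text/html", "application/json"})
	fmt.Println(best) // application/json wins with its implicit q=1.0
}
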
+type acceptSlice []Accept -func (accept accept_slice) Len() int { - slice := []Accept(accept) +func (slice acceptSlice) Len() int { return len(slice) } -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) +func (slice acceptSlice) Less(i, j int) bool { ai, aj := slice[i], slice[j] if ai.Q > aj.Q { return true @@ -74,63 +73,93 @@ func (accept accept_slice) Less(i, j int) bool { return false } -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) +func (slice acceptSlice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +func nextSplitElement(s, sep string) (item string, remaining string) { + if index := strings.Index(s, sep); index != -1 { + return s[:index], s[index+1:] + } + return s, "" +} + // Parse an Accept Header string returning a sorted list // of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") +func ParseAccept(header string) acceptSlice { + partsCount := 0 + remaining := header + for len(remaining) > 0 { + partsCount++ + _, remaining = nextSplitElement(remaining, ",") + } + accept := make(acceptSlice, 0, partsCount) - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 + remaining = header + var part string + for len(remaining) > 0 { + part, remaining = nextSplitElement(remaining, ",") + part = strings.TrimFunc(part, stringTrimSpaceCutset) - mrp := strings.Split(part, ";") + a := Accept{ + Q: 1.0, + } + + sp, remainingPart := nextSplitElement(part, ";") - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") + sp0, spRemaining := nextSplitElement(sp, "/") + a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") + case len(spRemaining) == 0: + if a.Type == "*" { + a.SubType = "*" + } else { + continue + } default: - continue + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "/") + if len(spRemaining) > 0 { + continue + } + a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) } - if len(mrp) == 1 { + if len(remainingPart) == 0 { accept = append(accept, a) continue } - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { + a.Params = make(map[string]string) + for len(remainingPart) > 0 { + sp, remainingPart = nextSplitElement(remainingPart, ";") + sp0, spRemaining = nextSplitElement(sp, "=") + if len(spRemaining) == 0 { + continue + } + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "=") + if len(spRemaining) != 0 { continue } - token := strings.Trim(sp[0], " ") + token := strings.TrimFunc(sp0, stringTrimSpaceCutset) if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) + a.Q, _ = strconv.ParseFloat(sp1, 32) } else { - a.Params[token] = strings.Trim(sp[1], " ") + a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) } } accept = append(accept, a) } - slice := accept_slice(accept) - sort.Sort(slice) - - return + sort.Sort(accept) + return accept } // Negotiate the most appropriate content_type given the accept header diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go index 73aff0b7a18..b2dc59be66f 100644 --- 
a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -7,7 +7,7 @@ import ( "os" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index be01dec979d..cf3b7cb6d6d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -10,7 +10,7 @@ import ( "strings" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" @@ -174,6 +174,7 @@ func moduleName(modRoot string) string { if err != nil { return "" } + defer modFile.Close() mod := make([]byte, 128) _, err = modFile.Read(mod) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go index 86da7340d17..48827cc5efc 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -25,6 +25,18 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite return suite } + if len(goFlagsConfig.O) > 0 { + userDefinedPath, err := filepath.Abs(goFlagsConfig.O) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path %s:\n%s", goFlagsConfig.O, err.Error()) + return suite + } + path = userDefinedPath + } + + goFlagsConfig.O = path + ginkgoInvocationPath, _ := os.Getwd() ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) packagePath := suite.AbsPath() @@ -34,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) return suite } - args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath) if err != nil { suite.State = TestSuiteStateFailedToCompile suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go new file mode 100644 index 00000000000..3c5079ff4c7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -0,0 +1,129 @@ +// Copyright (c) 2015, Wade Simmons +// All rights reserved. + +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+ +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gocovmerge takes the results from multiple `go test -coverprofile` +// runs and merges them into one profile + +// this file was originally taken from the gocovmerge project +// see also: https://go.shabbyrobe.org/gocovmerge +package internal + +import ( + "fmt" + "io" + "sort" + + "golang.org/x/tools/cover" +) + +func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { + i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) + if i < len(profiles) && profiles[i].FileName == p.FileName { + MergeCoverProfiles(profiles[i], p) + } else { + profiles = append(profiles, nil) + copy(profiles[i+1:], profiles[i:]) + profiles[i] = p + } + return profiles +} + +func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { + if len(profiles) == 0 { + return nil + } + if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { + return err + } + for _, p := range profiles { + for _, b := range p.Blocks { + if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { + return err + } + } + } + return nil +} + +func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { + if into.Mode != merge.Mode { + return fmt.Errorf("cannot merge profiles with different modes") + } + // Since the blocks are sorted, we can keep track of where the last block + // was inserted and only look at the blocks after that as targets for merge + startIndex := 0 + for _, b := range merge.Blocks { + var err error + startIndex, err = mergeProfileBlock(into, b, startIndex) + if err != nil { + return err + } + } + return nil +} + +func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { + sortFunc := func(i int) bool { + pi := p.Blocks[i+startIndex] + return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) + } + + i := 0 + if sortFunc(i) != true { + i = sort.Search(len(p.Blocks)-startIndex, sortFunc) + } + + i += startIndex + if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { + if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { + return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) + } + switch p.Mode { + case "set": + p.Blocks[i].Count |= pb.Count + case "count", "atomic": + p.Blocks[i].Count += pb.Count + default: + return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) + } + + } else { + if i > 0 { + pa := p.Blocks[i-1] + if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { + return i, 
fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) + } + } + if i < len(p.Blocks)-1 { + pa := p.Blocks[i+1] + if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { + return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) + } + } + p.Blocks = append(p.Blocks, cover.ProfileBlock{}) + copy(p.Blocks[i+1:], p.Blocks[i:]) + p.Blocks[i] = pb + } + + return i + 1, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index 26de28b5707..8e16d2bb034 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -1,7 +1,6 @@ package internal import ( - "bytes" "fmt" "os" "os/exec" @@ -12,6 +11,7 @@ import ( "github.com/google/pprof/profile" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/cover" ) func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { @@ -144,38 +144,27 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC return messages, nil } -// loads each profile, combines them, deletes them, stores them in destination +// loads each profile, merges them, deletes them, stores them in destination func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { - combined := &bytes.Buffer{} - modeRegex := regexp.MustCompile(`^mode: .*\n`) - for i, profile := range profiles { - contents, err := os.ReadFile(profile) + var merged []*cover.Profile + for _, file := range profiles { + parsedProfiles, err := cover.ParseProfiles(file) if err != nil { - return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + return err } - os.Remove(profile) - - // remove the cover mode line from every file - // except the first one - if i > 0 { - contents = modeRegex.ReplaceAll(contents, []byte{}) - } - - _, err = combined.Write(contents) - - // Add a newline to the end of every file if missing. 
- if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { - _, err = combined.Write([]byte("\n")) - } - - if err != nil { - return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + os.Remove(file) + for _, p := range parsedProfiles { + merged = AddCoverProfile(merged, p) } } - - err := os.WriteFile(destination, combined.Bytes(), 0666) + dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + defer dst.Close() + err = DumpCoverProfiles(merged, dst) if err != nil { - return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + return err } return nil } @@ -208,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error { return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) } prof, err := profile.Parse(proFile) + _ = proFile.Close() if err != nil { return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index 17d052bdc3c..0e6ae1f2903 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strings" "time" ) @@ -79,6 +80,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } + if isHiddenFile(info) { + continue + } + if goTestRegExp.MatchString(info.Name()) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { @@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 56b7be75879..480730486af 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,10 +182,31 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel + //should we completely omit this spec? 
+ if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips { + return + } + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { header = fmt.Sprintf("[%s]", report.LeafNodeType) @@ -262,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { } } - // If we have no content to show, jsut emit the header and return + // If we have no content to show, just emit the header and return if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) + if r.conf.ForceNewlines { + r.emit("\n") + } return } @@ -283,26 +307,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) } if showSeparateVisibilityAlwaysReportsSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) } if showTimeline { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) - r.emitTimeline(1, report, timeline) - r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) } // Emit Failure Message @@ -405,7 +426,15 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if r.conf.GithubOutput { + level := "error" + if state.Is(types.SpecStateSkipped) { + level = "notice" + } + r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 43244a9bd51..562e0f62ba2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -177,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit 
{"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, @@ -324,6 +325,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) continue } err = xml.NewDecoder(f).Decode(&report) + _ = f.Close() if err != nil { messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) continue diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index c88fc85a75f..97a049e0c16 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -25,6 +25,7 @@ type SuiteConfig struct { SkipFiles []string LabelFilter string FailOnPending bool + FailOnEmpty bool FailFast bool FlakeAttempts int MustPassRepeatedly int @@ -89,6 +90,9 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool + GithubOutput bool + SilenceSkips bool + ForceNewlines bool JSONReport string JUnitReport string @@ -198,6 +202,7 @@ type GoFlagsConfig struct { A bool ASMFlags string BuildMode string + BuildVCS bool Compiler string GCCGoFlags string GCFlags string @@ -215,6 +220,7 @@ type GoFlagsConfig struct { ToolExec string Work bool X bool + O string } func NewDefaultGoFlagsConfig() GoFlagsConfig { @@ -264,7 +270,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite."}, + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, @@ -274,6 +280,8 @@ var SuiteConfigFlags = GinkgoFlags{ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", + Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. 
Best paired with -v."}, @@ -331,6 +339,12 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, + {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", + Usage: "If set, default reporter will not print out skipped tests."}, + {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", + Usage: "If set, default reporter will ensure a newline appears after each test."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, @@ -499,7 +513,7 @@ var GinkgoCLIWatchFlags = GinkgoFlags{ // GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI var GoBuildFlags = GinkgoFlags{ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", - Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."}, + Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", @@ -515,6 +529,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "arguments to pass on each go tool asm invocation."}, {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", Usage: "build mode to use. See 'go help buildmode' for more."}, + {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build", + Usage: "adds version control information."}, {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", @@ -549,6 +565,8 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, + {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", + Usage: "output binary path (including name)."}, } // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. 
These are passed to the compiled test binary by the ginkgo CLI @@ -602,7 +620,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { @@ -625,7 +643,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") } - args := []string{"test", "-c", "-o", destination, packageToBuild} + args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, map[string]interface{}{ diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 9186ae873d0..de69f3022de 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,7 +24,8 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string + ExportAs string + AlwaysExport bool } type GinkgoFlags []GinkgoFlag @@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { result := []string{} for _, flag := range flags { @@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" { + if iface.(string) != "" || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 { + if iface.(int64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 { + if iface.(float64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 { + if iface.(int) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) { + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index b0d3b651e7e..7fdc8aa23f3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) || b(labels) } } +func labelSetFor(key string, labels []string) map[string]bool { + key = strings.ToLower(strings.TrimSpace(key)) + out := map[string]bool{} + for _, label := range labels { + components := strings.SplitN(label, ":", 2) + if len(components) < 2 { + continue + } + if key == strings.ToLower(strings.TrimSpace(components[0])) { + out[strings.ToLower(strings.TrimSpace(components[1]))] = true + } + } + + return out +} + +func isEmptyLabelSetAction(key string) LabelFilter { + return func(labels []string) bool { + return len(labelSetFor(key, labels)) == 0 + } +} + +func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if set[value] { + return true + } + } + return false + } +} + +func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + if len(set) != len(expectedValues) { + return false + } + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { + expectedSet := map[string]bool{} + for _, value := range expectedValues { + expectedSet[value] = true + } + return func(labels []string) bool { + set := labelSetFor(key, labels) + for value := range set { + if !expectedSet[value] { + return false + } + } + return true + } +} + type 
lfToken uint const ( @@ -58,6 +135,9 @@ const ( lfTokenOr lfTokenRegexp lfTokenLabel + lfTokenSetKey + lfTokenSetOperation + lfTokenSetArgument lfTokenEOF ) @@ -71,6 +151,8 @@ func (l lfToken) Precedence() int { return 2 case lfTokenNot: return 3 + case lfTokenSetOperation: + return 4 } return -1 } @@ -93,6 +175,12 @@ func (l lfToken) String() string { return "/regexp/" case lfTokenLabel: return "label" + case lfTokenSetKey: + return "set_key" + case lfTokenSetOperation: + return "set_operation" + case lfTokenSetArgument: + return "set_argument" case lfTokenEOF: return "EOF" } @@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) } return matchLabelRegexAction(re), nil + case lfTokenSetOperation: + tokenSetOperation := strings.ToLower(tn.value) + if tokenSetOperation == "isempty" { + return isEmptyLabelSetAction(tn.leftNode.value), nil + } + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) + } + + rawValues := strings.Split(tn.rightNode.value, ",") + values := make([]string, len(rawValues)) + for i := range rawValues { + values[i] = strings.ToLower(strings.TrimSpace(rawValues[i])) + if strings.ContainsAny(values[i], "&|!,()/") { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) + } else if values[i] == "" { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") + } + } + switch tokenSetOperation { + case "containsany": + return containsAnyLabelSetAction(tn.leftNode.value, values), nil + case "containsall": + return containsAllLabelSetAction(tn.leftNode.value, values), nil + case "consistsof": + return consistsOfLabelSetAction(tn.leftNode.value, values), nil + case "issubsetof": + return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil + } } if tn.rightNode == nil { @@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string { return out } +var validSetOperations = map[string]string{ + "containsany": "containsAny", + "containsall": "containsAll", + "consistsof": "consistsOf", + "issubsetof": "isSubsetOf", + "isempty": "isEmpty", +} + func tokenize(input string) func() (*treeNode, error) { + lastToken := lfTokenInvalid + lastValue := "" runes, i := []rune(input), 0 peekIs := func(r rune) bool { @@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) { } node := &treeNode{location: i} + defer func() { + lastToken = node.token + lastValue = node.value + }() + + if lastToken == lfTokenSetKey { + //we should get a valid set operation next + value, n := consumeUntil(" )") + if validSetOperations[strings.ToLower(value)] == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) + } + i += n + node.token, node.value = lfTokenSetOperation, value + return node, nil + } + if lastToken == lfTokenSetOperation { + //we should get an argument next, if we aren't isempty + var arg = "" + origI := i + if runes[i] == '{' { + i += 1 + value, n := consumeUntil("}") + if i+n >= len(runes) { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?") + } + i 
+= n + 1 + arg = value + } else { + value, n := consumeUntil("&|!,()/") + i += n + arg = strings.TrimSpace(value) + } + if strings.ToLower(lastValue) == "isempty" && arg != "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) + } + if arg == "" && strings.ToLower(lastValue) != "isempty" { + if i < len(runes) && runes[i] == '/' { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") + } else { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) + } + } + // note that we sent an empty SetArgument token if we are isempty + node.token, node.value = lfTokenSetArgument, arg + return node, nil + } + switch runes[i] { case '&': if !peekIs('&') { @@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) { i += n + 1 node.token, node.value = lfTokenRegexp, value default: - value, n := consumeUntil("&|!,()/") + value, n := consumeUntil("&|!,()/:") i += n + value = strings.TrimSpace(value) + + //are we the beginning of a set operation? + if i < len(runes) && runes[i] == ':' { + if peekIs(' ') { + if value == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") + } + i += 1 + //we are the beginning of a set operation + node.token, node.value = lfTokenSetKey, value + return node, nil + } + additionalValue, n := consumeUntil("&|!,()/") + additionalValue = strings.TrimSpace(additionalValue) + if additionalValue == ":" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") + } + i += n + value += additionalValue + } + + valueToCheckForSetOperation := strings.ToLower(value) + for setOperation := range validSetOperations { + idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) + if idx > 0 { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation])) + } + } + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) } return node, nil @@ -307,7 +511,7 @@ LOOP: switch node.token { case lfTokenEOF: break LOOP - case lfTokenLabel, lfTokenRegexp: + case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. 
You need an operator between them.") } @@ -326,6 +530,18 @@ LOOP: node.setLeftNode(nodeToStealFrom.rightNode) nodeToStealFrom.setRightNode(node) current = node + case lfTokenSetOperation: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) + } + node.setLeftNode(current.rightNode) + current.setRightNode(node) + current = node + case lfTokenSetArgument: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) + } + current.setRightNode(node) case lfTokenCloseGroup: firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() if firstUnmatchedOpenNode == nil { @@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { if strings.ContainsAny(out, "&|!,()/") { return "", GinkgoErrors.InvalidLabel(label, cl) } + if out[0] == ':' { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + if strings.Contains(out, ":") { + components := strings.SplitN(out, ":", 2) + if len(components) < 2 || components[1] == "" { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + } return out, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index ed934647454..c6af20b689a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.15.0" +const VERSION = "2.20.0" diff --git a/vendor/github.com/pion/datachannel/.golangci.yml b/vendor/github.com/pion/datachannel/.golangci.yml index 6dd80c805ca..e06de4d3c03 100644 --- a/vendor/github.com/pion/datachannel/.golangci.yml +++ b/vendor/github.com/pion/datachannel/.golangci.yml @@ -3,7 +3,8 @@ linters-settings: govet: - check-shadowing: true + enable: + - shadow misspell: locale: US exhaustive: @@ -110,6 +111,7 @@ linters: issues: exclude-use-default: false + exclude-dirs-use-default: false exclude-rules: # Allow complex tests and examples, better to be self contained - path: (examples|main\.go|_test\.go) @@ -121,6 +123,3 @@ issues: - path: cmd linters: - forbidigo - -run: - skip-dirs-use-default: false diff --git a/vendor/github.com/pion/datachannel/message.go b/vendor/github.com/pion/datachannel/message.go index 53f221b143d..f71ce7bc5dd 100644 --- a/vendor/github.com/pion/datachannel/message.go +++ b/vendor/github.com/pion/datachannel/message.go @@ -75,3 +75,18 @@ func parseExpectDataChannelOpen(raw []byte) (*channelOpen, error) { return msg, nil } + +// TryMarshalUnmarshal attempts to marshal and unmarshal a message. Added for fuzzing. 
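Returning to the ginkgo label-filter changes above, a hedged sketch of how the new key:value labels and set operations might be used together; the suite name, labels, and filter strings are illustrative:

package widgets_test // sketch only; suite bootstrap elided

import (
	. "github.com/onsi/ginkgo/v2"
)

// Specs can carry key:value labels...
var _ = Describe("query layer", Label("db:postgres", "db:mysql", "region:us-east"), func() {
	It("fans out", func() {})
})

// ...and the new set operations select on them from the CLI, e.g.:
//
//	ginkgo --label-filter='db: containsAny {postgres, sqlite}'  // matches the spec above
//	ginkgo --label-filter='db: consistsOf {postgres, mysql}'    // matches
//	ginkgo --label-filter='region: isEmpty'                     // matches only specs without a region: label
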
+func TryMarshalUnmarshal(msg []byte) int { + message, err := parse(msg) + if err != nil { + return 0 + } + + _, err = message.Marshal() + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/pion/datachannel/message_channel_open.go b/vendor/github.com/pion/datachannel/message_channel_open.go index e7052f4a739..dac58ff6558 100644 --- a/vendor/github.com/pion/datachannel/message_channel_open.go +++ b/vendor/github.com/pion/datachannel/message_channel_open.go @@ -133,7 +133,7 @@ func (c *channelOpen) Unmarshal(raw []byte) error { labelLength := binary.BigEndian.Uint16(raw[8:]) protocolLength := binary.BigEndian.Uint16(raw[10:]) - if expectedLen := int(channelOpenHeaderLength + labelLength + protocolLength); len(raw) != expectedLen { + if expectedLen := channelOpenHeaderLength + int(labelLength) + int(protocolLength); len(raw) != expectedLen { return fmt.Errorf("%w expected(%d) actual(%d)", ErrExpectedAndActualLengthMismatch, expectedLen, len(raw)) } diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/srtp_protection_profile.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/srtp_protection_profile.go index 2966966dd62..75a8c2ee31e 100644 --- a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/srtp_protection_profile.go +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/srtp_protection_profile.go @@ -10,6 +10,10 @@ type SRTPProtectionProfile uint16 const ( SRTP_AES128_CM_HMAC_SHA1_80 SRTPProtectionProfile = 0x0001 // nolint SRTP_AES128_CM_HMAC_SHA1_32 SRTPProtectionProfile = 0x0002 // nolint + SRTP_AES256_CM_SHA1_80 SRTPProtectionProfile = 0x0003 // nolint + SRTP_AES256_CM_SHA1_32 SRTPProtectionProfile = 0x0004 // nolint + SRTP_NULL_HMAC_SHA1_80 SRTPProtectionProfile = 0x0005 // nolint + SRTP_NULL_HMAC_SHA1_32 SRTPProtectionProfile = 0x0006 // nolint SRTP_AEAD_AES_128_GCM SRTPProtectionProfile = 0x0007 // nolint SRTP_AEAD_AES_256_GCM SRTPProtectionProfile = 0x0008 // nolint ) @@ -18,6 +22,10 @@ func srtpProtectionProfiles() map[SRTPProtectionProfile]bool { return map[SRTPProtectionProfile]bool{ SRTP_AES128_CM_HMAC_SHA1_80: true, SRTP_AES128_CM_HMAC_SHA1_32: true, + SRTP_AES256_CM_SHA1_80: true, + SRTP_AES256_CM_SHA1_32: true, + SRTP_NULL_HMAC_SHA1_80: true, + SRTP_NULL_HMAC_SHA1_32: true, SRTP_AEAD_AES_128_GCM: true, SRTP_AEAD_AES_256_GCM: true, } diff --git a/vendor/github.com/pion/dtls/v2/srtp_protection_profile.go b/vendor/github.com/pion/dtls/v2/srtp_protection_profile.go index e306e9e6aa3..8b7e471f3d1 100644 --- a/vendor/github.com/pion/dtls/v2/srtp_protection_profile.go +++ b/vendor/github.com/pion/dtls/v2/srtp_protection_profile.go @@ -12,6 +12,10 @@ type SRTPProtectionProfile = extension.SRTPProtectionProfile const ( SRTP_AES128_CM_HMAC_SHA1_80 SRTPProtectionProfile = extension.SRTP_AES128_CM_HMAC_SHA1_80 // nolint:revive,stylecheck SRTP_AES128_CM_HMAC_SHA1_32 SRTPProtectionProfile = extension.SRTP_AES128_CM_HMAC_SHA1_32 // nolint:revive,stylecheck + SRTP_AES256_CM_SHA1_80 SRTPProtectionProfile = extension.SRTP_AES256_CM_SHA1_80 // nolint:revive,stylecheck + SRTP_AES256_CM_SHA1_32 SRTPProtectionProfile = extension.SRTP_AES256_CM_SHA1_32 // nolint:revive,stylecheck + SRTP_NULL_HMAC_SHA1_80 SRTPProtectionProfile = extension.SRTP_NULL_HMAC_SHA1_80 // nolint:revive,stylecheck + SRTP_NULL_HMAC_SHA1_32 SRTPProtectionProfile = extension.SRTP_NULL_HMAC_SHA1_32 // nolint:revive,stylecheck SRTP_AEAD_AES_128_GCM SRTPProtectionProfile = extension.SRTP_AEAD_AES_128_GCM // nolint:revive,stylecheck SRTP_AEAD_AES_256_GCM 
SRTPProtectionProfile = extension.SRTP_AEAD_AES_256_GCM // nolint:revive,stylecheck ) diff --git a/vendor/github.com/pion/ice/v2/agent.go b/vendor/github.com/pion/ice/v2/agent.go index 344200c21eb..82ee75bc388 100644 --- a/vendor/github.com/pion/ice/v2/agent.go +++ b/vendor/github.com/pion/ice/v2/agent.go @@ -7,6 +7,7 @@ package ice import ( "context" + "errors" "fmt" "math" "net" @@ -323,9 +324,9 @@ func NewAgent(config *AgentConfig) (*Agent, error) { //nolint:gocognit userBindingRequestHandler: config.BindingRequestHandler, } - a.connectionStateNotifier = &handlerNotifier{connectionStateFunc: a.onConnectionStateChange} - a.candidateNotifier = &handlerNotifier{candidateFunc: a.onCandidate} - a.selectedCandidatePairNotifier = &handlerNotifier{candidatePairFunc: a.onSelectedCandidatePairChange} + a.connectionStateNotifier = &handlerNotifier{connectionStateFunc: a.onConnectionStateChange, done: make(chan struct{})} + a.candidateNotifier = &handlerNotifier{candidateFunc: a.onCandidate, done: make(chan struct{})} + a.selectedCandidatePairNotifier = &handlerNotifier{candidatePairFunc: a.onSelectedCandidatePairChange, done: make(chan struct{})} if a.net == nil { a.net, err = stdnet.NewNet() @@ -915,7 +916,21 @@ func (a *Agent) removeUfragFromMux() { // Close cleans up the Agent func (a *Agent) Close() error { + return a.close(false) +} + +// GracefulClose cleans up the Agent and waits for any goroutines it started +// to complete. This is only safe to call outside of Agent callbacks or if in a callback, +// in its own goroutine. +func (a *Agent) GracefulClose() error { + return a.close(true) +} + +func (a *Agent) close(graceful bool) error { if err := a.ok(); err != nil { + if errors.Is(err, ErrClosed) { + return nil + } return err } @@ -930,7 +945,14 @@ func (a *Agent) Close() error { a.removeUfragFromMux() close(a.done) + // the loop is safe to wait on no matter what <-a.taskLoopDone + + // but we are in less control of the notifiers, so we will + // pass through `graceful`. + a.connectionStateNotifier.Close(graceful) + a.candidateNotifier.Close(graceful) + a.selectedCandidatePairNotifier.Close(graceful) return nil } diff --git a/vendor/github.com/pion/ice/v2/agent_handlers.go b/vendor/github.com/pion/ice/v2/agent_handlers.go index bb0c8d303a7..0c02277bdd8 100644 --- a/vendor/github.com/pion/ice/v2/agent_handlers.go +++ b/vendor/github.com/pion/ice/v2/agent_handlers.go @@ -45,7 +45,8 @@ func (a *Agent) onConnectionStateChange(s ConnectionState) { type handlerNotifier struct { sync.Mutex - running bool + running bool + notifiers sync.WaitGroup connectionStates []ConnectionState connectionStateFunc func(ConnectionState) @@ -55,13 +56,42 @@ type handlerNotifier struct { selectedCandidatePairs []*CandidatePair candidatePairFunc func(*CandidatePair) + + // State for closing + done chan struct{} +} + +func (h *handlerNotifier) Close(graceful bool) { + if graceful { + // if we were closed ungracefully before, we now + // want ot wait. 
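A minimal sketch (not part of the patch) of preferring the new Agent.GracefulClose during teardown so the notifier goroutines drain before returning; the teardown helper is hypothetical:

package main

import (
	"log"

	"github.com/pion/ice/v2"
)

// teardown closes the agent and waits for its handler goroutines. Safe to call
// outside agent callbacks; from inside a callback it must run in its own
// goroutine, as the GracefulClose doc comment above notes.
func teardown(a *ice.Agent) {
	if err := a.GracefulClose(); err != nil {
		log.Fatal(err)
	}
}

func main() {}
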
+ defer h.notifiers.Wait() + } + + h.Lock() + + select { + case <-h.done: + h.Unlock() + return + default: + } + close(h.done) + h.Unlock() } func (h *handlerNotifier) EnqueueConnectionState(s ConnectionState) { h.Lock() defer h.Unlock() + select { + case <-h.done: + return + default: + } + notify := func() { + defer h.notifiers.Done() for { h.Lock() if len(h.connectionStates) == 0 { @@ -79,6 +109,7 @@ func (h *handlerNotifier) EnqueueConnectionState(s ConnectionState) { h.connectionStates = append(h.connectionStates, s) if !h.running { h.running = true + h.notifiers.Add(1) go notify() } } @@ -87,7 +118,14 @@ func (h *handlerNotifier) EnqueueCandidate(c Candidate) { h.Lock() defer h.Unlock() + select { + case <-h.done: + return + default: + } + notify := func() { + defer h.notifiers.Done() for { h.Lock() if len(h.candidates) == 0 { @@ -105,6 +143,7 @@ func (h *handlerNotifier) EnqueueCandidate(c Candidate) { h.candidates = append(h.candidates, c) if !h.running { h.running = true + h.notifiers.Add(1) go notify() } } @@ -113,7 +152,14 @@ func (h *handlerNotifier) EnqueueSelectedCandidatePair(p *CandidatePair) { h.Lock() defer h.Unlock() + select { + case <-h.done: + return + default: + } + notify := func() { + defer h.notifiers.Done() for { h.Lock() if len(h.selectedCandidatePairs) == 0 { @@ -131,6 +177,7 @@ func (h *handlerNotifier) EnqueueSelectedCandidatePair(p *CandidatePair) { h.selectedCandidatePairs = append(h.selectedCandidatePairs, p) if !h.running { h.running = true + h.notifiers.Add(1) go notify() } } diff --git a/vendor/github.com/pion/ice/v2/candidate_base.go b/vendor/github.com/pion/ice/v2/candidate_base.go index 63199406259..f725985218a 100644 --- a/vendor/github.com/pion/ice/v2/candidate_base.go +++ b/vendor/github.com/pion/ice/v2/candidate_base.go @@ -447,6 +447,13 @@ func (c *candidateBase) copy() (Candidate, error) { return UnmarshalCandidate(c.Marshal()) } +func removeZoneIDFromAddress(addr string) string { + if i := strings.Index(addr, "%"); i != -1 { + return addr[:i] + } + return addr +} + // Marshal returns the string representation of the ICECandidate func (c *candidateBase) Marshal() string { val := c.Foundation() @@ -459,7 +466,7 @@ func (c *candidateBase) Marshal() string { c.Component(), c.NetworkType().NetworkShort(), c.Priority(), - c.Address(), + removeZoneIDFromAddress(c.Address()), c.Port(), c.Type()) @@ -509,7 +516,7 @@ func UnmarshalCandidate(raw string) (Candidate, error) { priority := uint32(priorityRaw) // Address - address := split[4] + address := removeZoneIDFromAddress(split[4]) // Port rawPort, err := strconv.ParseUint(split[5], 10, 16) diff --git a/vendor/github.com/pion/interceptor/attributes.go b/vendor/github.com/pion/interceptor/attributes.go index d7936d52643..8b6d0f5cf6b 100644 --- a/vendor/github.com/pion/interceptor/attributes.go +++ b/vendor/github.com/pion/interceptor/attributes.go @@ -33,7 +33,7 @@ func (a Attributes) Set(key interface{}, val interface{}) { } // GetRTPHeader gets the RTP header if present. If it is not present, it will be -// unmarshalled from the raw byte slice and stored in the attribtues. +// unmarshalled from the raw byte slice and stored in the attributes. func (a Attributes) GetRTPHeader(raw []byte) (*rtp.Header, error) { if val, ok := a[rtpHeaderKey]; ok { if header, ok := val.(*rtp.Header); ok { @@ -50,7 +50,7 @@ func (a Attributes) GetRTPHeader(raw []byte) (*rtp.Header, error) { } // GetRTCPPackets gets the RTCP packets if present. 
If the packet slice is not -// present, it will be unmarshaled from the raw byte slice and stored in the +// present, it will be unmarshalled from the raw byte slice and stored in the // attributes. func (a Attributes) GetRTCPPackets(raw []byte) ([]rtcp.Packet, error) { if val, ok := a[rtcpPacketsKey]; ok { diff --git a/vendor/github.com/pion/rtp/codecs/h264_packet.go b/vendor/github.com/pion/rtp/codecs/h264_packet.go index 3021fc0fcb2..973a831524d 100644 --- a/vendor/github.com/pion/rtp/codecs/h264_packet.go +++ b/vendor/github.com/pion/rtp/codecs/h264_packet.go @@ -231,7 +231,11 @@ func (p *H264Packet) parseBody(payload []byte) ([]byte, error) { currOffset := int(stapaHeaderSize) result := []byte{} for currOffset < len(payload) { - naluSize := int(binary.BigEndian.Uint16(payload[currOffset:])) + naluSizeBytes := payload[currOffset:] + if len(naluSizeBytes) < stapaNALULengthSize { + break + } + naluSize := int(binary.BigEndian.Uint16(naluSizeBytes)) currOffset += stapaNALULengthSize if len(payload) < currOffset+naluSize { diff --git a/vendor/github.com/pion/rtp/codecs/vp9/bits.go b/vendor/github.com/pion/rtp/codecs/vp9/bits.go new file mode 100644 index 00000000000..a6a3c1f4c8a --- /dev/null +++ b/vendor/github.com/pion/rtp/codecs/vp9/bits.go @@ -0,0 +1,65 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package vp9 + +import "errors" + +var errNotEnoughBits = errors.New("not enough bits") + +func hasSpace(buf []byte, pos int, n int) error { + if n > ((len(buf) * 8) - pos) { + return errNotEnoughBits + } + return nil +} + +func readFlag(buf []byte, pos *int) (bool, error) { + err := hasSpace(buf, *pos, 1) + if err != nil { + return false, err + } + + return readFlagUnsafe(buf, pos), nil +} + +func readFlagUnsafe(buf []byte, pos *int) bool { + b := (buf[*pos>>0x03] >> (7 - (*pos & 0x07))) & 0x01 + *pos++ + return b == 1 +} + +func readBits(buf []byte, pos *int, n int) (uint64, error) { + err := hasSpace(buf, *pos, n) + if err != nil { + return 0, err + } + + return readBitsUnsafe(buf, pos, n), nil +} + +func readBitsUnsafe(buf []byte, pos *int, n int) uint64 { + res := 8 - (*pos & 0x07) + if n < res { + v := uint64((buf[*pos>>0x03] >> (res - n)) & (1<>0x03] & (1<= 8 { + v = (v << 8) | uint64(buf[*pos>>0x03]) + *pos += 8 + n -= 8 + } + + if n > 0 { + v = (v << n) | uint64(buf[*pos>>0x03]>>(8-n)) + *pos += n + } + + return v +} diff --git a/vendor/github.com/pion/rtp/codecs/vp9/header.go b/vendor/github.com/pion/rtp/codecs/vp9/header.go new file mode 100644 index 00000000000..30e15bcf333 --- /dev/null +++ b/vendor/github.com/pion/rtp/codecs/vp9/header.go @@ -0,0 +1,221 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +// Package vp9 contains a VP9 header parser. +package vp9 + +import ( + "errors" +) + +var ( + errInvalidFrameMarker = errors.New("invalid frame marker") + errWrongFrameSyncByte0 = errors.New("wrong frame_sync_byte_0") + errWrongFrameSyncByte1 = errors.New("wrong frame_sync_byte_1") + errWrongFrameSyncByte2 = errors.New("wrong frame_sync_byte_2") +) + +// HeaderColorConfig is the color_config member of an header. 
+type HeaderColorConfig struct { + TenOrTwelveBit bool + BitDepth uint8 + ColorSpace uint8 + ColorRange bool + SubsamplingX bool + SubsamplingY bool +} + +func (c *HeaderColorConfig) unmarshal(profile uint8, buf []byte, pos *int) error { + if profile >= 2 { + var err error + c.TenOrTwelveBit, err = readFlag(buf, pos) + if err != nil { + return err + } + + if c.TenOrTwelveBit { + c.BitDepth = 12 + } else { + c.BitDepth = 10 + } + } else { + c.BitDepth = 8 + } + + tmp, err := readBits(buf, pos, 3) + if err != nil { + return err + } + c.ColorSpace = uint8(tmp) + + if c.ColorSpace != 7 { + var err error + c.ColorRange, err = readFlag(buf, pos) + if err != nil { + return err + } + + if profile == 1 || profile == 3 { + err := hasSpace(buf, *pos, 3) + if err != nil { + return err + } + + c.SubsamplingX = readFlagUnsafe(buf, pos) + c.SubsamplingY = readFlagUnsafe(buf, pos) + *pos++ + } else { + c.SubsamplingX = true + c.SubsamplingY = true + } + } else { + c.ColorRange = true + + if profile == 1 || profile == 3 { + c.SubsamplingX = false + c.SubsamplingY = false + + err := hasSpace(buf, *pos, 1) + if err != nil { + return err + } + *pos++ + } + } + + return nil +} + +// HeaderFrameSize is the frame_size member of an header. +type HeaderFrameSize struct { + FrameWidthMinus1 uint16 + FrameHeightMinus1 uint16 +} + +func (s *HeaderFrameSize) unmarshal(buf []byte, pos *int) error { + err := hasSpace(buf, *pos, 32) + if err != nil { + return err + } + + s.FrameWidthMinus1 = uint16(readBitsUnsafe(buf, pos, 16)) + s.FrameHeightMinus1 = uint16(readBitsUnsafe(buf, pos, 16)) + return nil +} + +// Header is a VP9 Frame header. +// Specification: +// https://storage.googleapis.com/downloads.webmproject.org/docs/vp9/vp9-bitstream-specification-v0.6-20160331-draft.pdf +type Header struct { + Profile uint8 + ShowExistingFrame bool + FrameToShowMapIdx uint8 + NonKeyFrame bool + ShowFrame bool + ErrorResilientMode bool + ColorConfig *HeaderColorConfig + FrameSize *HeaderFrameSize +} + +// Unmarshal decodes a Header. 
+func (h *Header) Unmarshal(buf []byte) error { + pos := 0 + + err := hasSpace(buf, pos, 4) + if err != nil { + return err + } + + frameMarker := readBitsUnsafe(buf, &pos, 2) + if frameMarker != 2 { + return errInvalidFrameMarker + } + + profileLowBit := uint8(readBitsUnsafe(buf, &pos, 1)) + profileHighBit := uint8(readBitsUnsafe(buf, &pos, 1)) + h.Profile = profileHighBit<<1 + profileLowBit + + if h.Profile == 3 { + err = hasSpace(buf, pos, 1) + if err != nil { + return err + } + pos++ + } + + h.ShowExistingFrame, err = readFlag(buf, &pos) + if err != nil { + return err + } + + if h.ShowExistingFrame { + var tmp uint64 + tmp, err = readBits(buf, &pos, 3) + if err != nil { + return err + } + h.FrameToShowMapIdx = uint8(tmp) + return nil + } + + err = hasSpace(buf, pos, 3) + if err != nil { + return err + } + + h.NonKeyFrame = readFlagUnsafe(buf, &pos) + h.ShowFrame = readFlagUnsafe(buf, &pos) + h.ErrorResilientMode = readFlagUnsafe(buf, &pos) + + if !h.NonKeyFrame { + err := hasSpace(buf, pos, 24) + if err != nil { + return err + } + + frameSyncByte0 := uint8(readBitsUnsafe(buf, &pos, 8)) + if frameSyncByte0 != 0x49 { + return errWrongFrameSyncByte0 + } + + frameSyncByte1 := uint8(readBitsUnsafe(buf, &pos, 8)) + if frameSyncByte1 != 0x83 { + return errWrongFrameSyncByte1 + } + + frameSyncByte2 := uint8(readBitsUnsafe(buf, &pos, 8)) + if frameSyncByte2 != 0x42 { + return errWrongFrameSyncByte2 + } + + h.ColorConfig = &HeaderColorConfig{} + err = h.ColorConfig.unmarshal(h.Profile, buf, &pos) + if err != nil { + return err + } + + h.FrameSize = &HeaderFrameSize{} + err = h.FrameSize.unmarshal(buf, &pos) + if err != nil { + return err + } + } + + return nil +} + +// Width returns the video width. +func (h Header) Width() uint16 { + if h.FrameSize == nil { + return 0 + } + return h.FrameSize.FrameWidthMinus1 + 1 +} + +// Height returns the video height. +func (h Header) Height() uint16 { + if h.FrameSize == nil { + return 0 + } + return h.FrameSize.FrameHeightMinus1 + 1 +} diff --git a/vendor/github.com/pion/rtp/codecs/vp9_packet.go b/vendor/github.com/pion/rtp/codecs/vp9_packet.go index 6a6e0b0de50..98920be028c 100644 --- a/vendor/github.com/pion/rtp/codecs/vp9_packet.go +++ b/vendor/github.com/pion/rtp/codecs/vp9_packet.go @@ -5,6 +5,7 @@ package codecs import ( "github.com/pion/randutil" + "github.com/pion/rtp/codecs/vp9" ) // Use global random generator to properly seed by crypto grade random. @@ -12,24 +13,50 @@ var globalMathRandomGenerator = randutil.NewMathRandomGenerator() // nolint:goch // VP9Payloader payloads VP9 packets type VP9Payloader struct { - pictureID uint16 - initialized bool + // whether to use flexible mode or non-flexible mode. + FlexibleMode bool // InitialPictureIDFn is a function that returns random initial picture ID. 
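The new vp9 package exposes the uncompressed frame header directly; a small sketch (the frame argument is assumed to hold one encoded VP9 frame) that parses it and reports the coded resolution on keyframes:

package vp9util

import (
	"log"

	"github.com/pion/rtp/codecs/vp9"
)

// logVP9Resolution parses the uncompressed VP9 header at the start of a frame
// and logs the coded resolution when the frame is a keyframe.
func logVP9Resolution(frame []byte) {
	var h vp9.Header
	if err := h.Unmarshal(frame); err != nil {
		return // not a parsable VP9 uncompressed header
	}
	if !h.NonKeyFrame && h.FrameSize != nil {
		log.Printf("VP9 keyframe %dx%d", h.Width(), h.Height())
	}
}
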
InitialPictureIDFn func() uint16 + + pictureID uint16 + initialized bool } const ( - vp9HeaderSize = 3 // Flexible mode 15 bit picture ID maxSpatialLayers = 5 maxVP9RefPics = 3 ) // Payload fragments an VP9 packet across one or more byte arrays func (p *VP9Payloader) Payload(mtu uint16, payload []byte) [][]byte { + if !p.initialized { + if p.InitialPictureIDFn == nil { + p.InitialPictureIDFn = func() uint16 { + return uint16(globalMathRandomGenerator.Intn(0x7FFF)) + } + } + p.pictureID = p.InitialPictureIDFn() & 0x7FFF + p.initialized = true + } + + var payloads [][]byte + if p.FlexibleMode { + payloads = p.payloadFlexible(mtu, payload) + } else { + payloads = p.payloadNonFlexible(mtu, payload) + } + + p.pictureID++ + if p.pictureID >= 0x8000 { + p.pictureID = 0 + } + + return payloads +} + +func (p *VP9Payloader) payloadFlexible(mtu uint16, payload []byte) [][]byte { /* - * https://www.ietf.org/id/draft-ietf-payload-vp9-13.txt - * * Flexible mode (F=1) * 0 1 2 3 4 5 6 7 * +-+-+-+-+-+-+-+-+ @@ -46,7 +73,45 @@ func (p *VP9Payloader) Payload(mtu uint16, payload []byte) [][]byte { * V: | SS | * | .. | * +-+-+-+-+-+-+-+-+ - * + */ + + headerSize := 3 + maxFragmentSize := int(mtu) - headerSize + payloadDataRemaining := len(payload) + payloadDataIndex := 0 + var payloads [][]byte + + if min(maxFragmentSize, payloadDataRemaining) <= 0 { + return [][]byte{} + } + + for payloadDataRemaining > 0 { + currentFragmentSize := min(maxFragmentSize, payloadDataRemaining) + out := make([]byte, headerSize+currentFragmentSize) + + out[0] = 0x90 // F=1, I=1 + if payloadDataIndex == 0 { + out[0] |= 0x08 // B=1 + } + if payloadDataRemaining == currentFragmentSize { + out[0] |= 0x04 // E=1 + } + + out[1] = byte(p.pictureID>>8) | 0x80 + out[2] = byte(p.pictureID) + + copy(out[headerSize:], payload[payloadDataIndex:payloadDataIndex+currentFragmentSize]) + payloads = append(payloads, out) + + payloadDataRemaining -= currentFragmentSize + payloadDataIndex += currentFragmentSize + } + + return payloads +} + +func (p *VP9Payloader) payloadNonFlexible(mtu uint16, payload []byte) [][]byte { + /* * Non-flexible mode (F=0) * 0 1 2 3 4 5 6 7 * +-+-+-+-+-+-+-+-+ @@ -65,51 +130,80 @@ func (p *VP9Payloader) Payload(mtu uint16, payload []byte) [][]byte { * +-+-+-+-+-+-+-+-+ */ - if !p.initialized { - if p.InitialPictureIDFn == nil { - p.InitialPictureIDFn = func() uint16 { - return uint16(globalMathRandomGenerator.Intn(0x7FFF)) - } - } - p.pictureID = p.InitialPictureIDFn() & 0x7FFF - p.initialized = true - } - if payload == nil { + var h vp9.Header + err := h.Unmarshal(payload) + if err != nil { return [][]byte{} } - maxFragmentSize := int(mtu) - vp9HeaderSize payloadDataRemaining := len(payload) payloadDataIndex := 0 - - if min(maxFragmentSize, payloadDataRemaining) <= 0 { - return [][]byte{} - } - var payloads [][]byte + for payloadDataRemaining > 0 { + var headerSize int + if !h.NonKeyFrame && payloadDataIndex == 0 { + headerSize = 3 + 8 + } else { + headerSize = 3 + } + + maxFragmentSize := int(mtu) - headerSize currentFragmentSize := min(maxFragmentSize, payloadDataRemaining) - out := make([]byte, vp9HeaderSize+currentFragmentSize) + if currentFragmentSize <= 0 { + return [][]byte{} + } + + out := make([]byte, headerSize+currentFragmentSize) + + out[0] = 0x80 | 0x01 // I=1, Z=1 - out[0] = 0x90 // F=1 I=1 + if h.NonKeyFrame { + out[0] |= 0x40 // P=1 + } if payloadDataIndex == 0 { out[0] |= 0x08 // B=1 } if payloadDataRemaining == currentFragmentSize { out[0] |= 0x04 // E=1 } + out[1] = byte(p.pictureID>>8) | 0x80 out[2] = 
byte(p.pictureID) - copy(out[vp9HeaderSize:], payload[payloadDataIndex:payloadDataIndex+currentFragmentSize]) + off := 3 + + if !h.NonKeyFrame && payloadDataIndex == 0 { + out[0] |= 0x02 // V=1 + out[off] = 0x10 | 0x08 // N_S=0, Y=1, G=1 + off++ + + width := h.Width() + out[off] = byte(width >> 8) + off++ + out[off] = byte(width & 0xFF) + off++ + + height := h.Height() + out[off] = byte(height >> 8) + off++ + out[off] = byte(height & 0xFF) + off++ + + out[off] = 0x01 // N_G=1 + off++ + + out[off] = 1<<4 | 1<<2 // TID=0, U=1, R=1 + off++ + + out[off] = 0x01 // P_DIFF=1 + } + + copy(out[headerSize:], payload[payloadDataIndex:payloadDataIndex+currentFragmentSize]) payloads = append(payloads, out) payloadDataRemaining -= currentFragmentSize payloadDataIndex += currentFragmentSize } - p.pictureID++ - if p.pictureID >= 0x8000 { - p.pictureID = 0 - } return payloads } diff --git a/vendor/github.com/pion/rtp/error.go b/vendor/github.com/pion/rtp/error.go index 4df5d2a2d27..ac3ece458a1 100644 --- a/vendor/github.com/pion/rtp/error.go +++ b/vendor/github.com/pion/rtp/error.go @@ -21,4 +21,6 @@ var ( errRFC8285TwoByteHeaderSize = errors.New("header extension payload must be 255bytes or less for RFC 5285 two byte extensions") errRFC3550HeaderIDRange = errors.New("header extension id must be 0 for non-RFC 5285 extensions") + + errInvalidRTPPadding = errors.New("invalid RTP padding") ) diff --git a/vendor/github.com/pion/rtp/packet.go b/vendor/github.com/pion/rtp/packet.go index af88af3e9cf..61d341d141d 100644 --- a/vendor/github.com/pion/rtp/packet.go +++ b/vendor/github.com/pion/rtp/packet.go @@ -215,6 +215,9 @@ func (p *Packet) Unmarshal(buf []byte) error { end := len(buf) if p.Header.Padding { + if end <= n { + return errTooSmall + } p.PaddingSize = buf[end-1] end -= int(p.PaddingSize) } @@ -475,6 +478,10 @@ func (p Packet) Marshal() (buf []byte, err error) { // MarshalTo serializes the packet and writes to the buffer. func (p *Packet) MarshalTo(buf []byte) (n int, err error) { + if p.Header.Padding && p.PaddingSize == 0 { + return 0, errInvalidRTPPadding + } + n, err = p.Header.MarshalTo(buf) if err != nil { return 0, err diff --git a/vendor/github.com/pion/rtp/sequencer.go b/vendor/github.com/pion/rtp/sequencer.go index 8ad2cfd7be0..7aa7daae917 100644 --- a/vendor/github.com/pion/rtp/sequencer.go +++ b/vendor/github.com/pion/rtp/sequencer.go @@ -4,7 +4,6 @@ package rtp import ( - "math" "sync" ) @@ -14,11 +13,18 @@ type Sequencer interface { RollOverCount() uint64 } +// maxInitialRandomSequenceNumber is the maximum value used for the initial sequence +// number when using NewRandomSequencer(). +// This uses only half the potential sequence number space to avoid issues decrypting +// SRTP when the sequence number starts near the rollover and there is packet loss. 
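Taken together, the payloader and sequencer changes above are used roughly as follows; a sketch that fragments one encoded VP9 frame (frame is an assumed input) into RTP-sized payloads, with picture IDs and sequence numbers both drawn from their randomized initial values:

package vp9send

import (
	"github.com/pion/rtp"
	"github.com/pion/rtp/codecs"
)

// fragmentFrame splits one encoded VP9 frame into RTP payloads and pairs each
// payload with a sequence number from the (now half-range) random sequencer.
func fragmentFrame(frame []byte, mtu uint16) ([][]byte, []uint16) {
	payloader := &codecs.VP9Payloader{FlexibleMode: true}
	sequencer := rtp.NewRandomSequencer()

	payloads := payloader.Payload(mtu, frame)
	seqNums := make([]uint16, 0, len(payloads))
	for range payloads {
		seqNums = append(seqNums, sequencer.NextSequenceNumber())
	}
	return payloads, seqNums
}
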
+// See https://webrtc-review.googlesource.com/c/src/+/358360 +const maxInitialRandomSequenceNumber = 1<<15 - 1 + // NewRandomSequencer returns a new sequencer starting from a random sequence // number func NewRandomSequencer() Sequencer { return &sequencer{ - sequenceNumber: uint16(globalMathRandomGenerator.Intn(math.MaxUint16)), + sequenceNumber: uint16(globalMathRandomGenerator.Intn(maxInitialRandomSequenceNumber)), } } diff --git a/vendor/github.com/pion/sctp/ack_timer.go b/vendor/github.com/pion/sctp/ack_timer.go index 879e86df734..b6008d18112 100644 --- a/vendor/github.com/pion/sctp/ack_timer.go +++ b/vendor/github.com/pion/sctp/ack_timer.go @@ -18,7 +18,7 @@ type ackTimerObserver interface { onAckTimeout() } -type ackTimerState int +type ackTimerState uint8 const ( ackTimerStopped ackTimerState = iota @@ -28,10 +28,11 @@ const ( // ackTimer provides the retnransmission timer conforms with RFC 4960 Sec 6.3.1 type ackTimer struct { + timer *time.Timer observer ackTimerObserver - mutex sync.RWMutex + mutex sync.Mutex state ackTimerState - timer *time.Timer + pending uint8 } // newAckTimer creates a new acknowledgement timer used to enable delayed ack. @@ -44,7 +45,7 @@ func newAckTimer(observer ackTimerObserver) *ackTimer { func (t *ackTimer) timeout() { t.mutex.Lock() - if t.state == ackTimerStarted { + if t.pending--; t.pending == 0 && t.state == ackTimerStarted { t.state = ackTimerStopped defer t.observer.onAckTimeout() } @@ -62,6 +63,7 @@ func (t *ackTimer) start() bool { } t.state = ackTimerStarted + t.pending++ t.timer.Reset(ackInterval) return true } @@ -73,7 +75,9 @@ func (t *ackTimer) stop() { defer t.mutex.Unlock() if t.state == ackTimerStarted { - t.timer.Stop() + if t.timer.Stop() { + t.pending-- + } t.state = ackTimerStopped } } @@ -84,8 +88,8 @@ func (t *ackTimer) close() { t.mutex.Lock() defer t.mutex.Unlock() - if t.state == ackTimerStarted { - t.timer.Stop() + if t.state == ackTimerStarted && t.timer.Stop() { + t.pending-- } t.state = ackTimerClosed } @@ -93,8 +97,8 @@ func (t *ackTimer) close() { // isRunning tests if the timer is running. 
// Debug purpose only func (t *ackTimer) isRunning() bool { - t.mutex.RLock() - defer t.mutex.RUnlock() + t.mutex.Lock() + defer t.mutex.Unlock() return t.state == ackTimerStarted } diff --git a/vendor/github.com/pion/sctp/association.go b/vendor/github.com/pion/sctp/association.go index 46881b1d98e..29e9978c92f 100644 --- a/vendor/github.com/pion/sctp/association.go +++ b/vendor/github.com/pion/sctp/association.go @@ -33,6 +33,7 @@ var ( ErrChunk = errors.New("abort chunk, with following errors") ErrShutdownNonEstablished = errors.New("shutdown called in non-established state") ErrAssociationClosedBeforeConn = errors.New("association closed before connecting") + ErrAssociationClosed = errors.New("association closed") ErrSilentlyDiscard = errors.New("silently discard") ErrInitNotStoredToSend = errors.New("the init not stored to send") ErrCookieEchoNotStoredToSend = errors.New("cookieEcho not stored to send") @@ -105,11 +106,11 @@ const ( avgChunkSize = 500 // minTSNOffset is the minimum offset over the cummulative TSN that we will enqueue // irrespective of the receive buffer size - // see Association.getMaxTSNOffset + // see getMaxTSNOffset minTSNOffset = 2000 // maxTSNOffset is the maximum offset over the cummulative TSN that we will enqueue // irrespective of the receive buffer size - // see Association.getMaxTSNOffset + // see getMaxTSNOffset maxTSNOffset = 40000 // maxReconfigRequests is the maximum number of reconfig requests we will keep outstanding maxReconfigRequests = 1000 @@ -166,7 +167,6 @@ type Association struct { state uint32 initialTSN uint32 myNextTSN uint32 // nextTSN - peerLastTSN uint32 // lastRcvdTSN minTSN2MeasureRTT uint32 // for RTT measurement willSendForwardTSN bool willRetransmitFast bool @@ -190,7 +190,7 @@ type Association struct { myMaxNumInboundStreams uint16 myMaxNumOutboundStreams uint16 myCookie *paramStateCookie - payloadQueue *payloadQueue + payloadQueue *receivePayloadQueue inflightQueue *payloadQueue pendingQueue *pendingQueue controlQueue *controlQueue @@ -333,7 +333,7 @@ func createAssociation(config Config) *Association { myMaxNumOutboundStreams: math.MaxUint16, myMaxNumInboundStreams: math.MaxUint16, - payloadQueue: newPayloadQueue(), + payloadQueue: newReceivePayloadQueue(getMaxTSNOffset(maxReceiveBufferSize)), inflightQueue: newPayloadQueue(), pendingQueue: newPendingQueue(), controlQueue: newControlQueue(), @@ -573,6 +573,7 @@ func (a *Association) readLoop() { a.closeWriteLoopOnce.Do(func() { close(a.closeWriteLoopCh) }) a.lock.Lock() + a.setState(closed) for _, s := range a.streams { a.unregisterStream(s, closeErr) } @@ -1071,6 +1072,11 @@ func min32(a, b uint32) uint32 { return b } +// peerLastTSN return last received cumulative TSN +func (a *Association) peerLastTSN() uint32 { + return a.payloadQueue.getcumulativeTSN() +} + // setState atomically sets the state of the Association. // The caller should hold the lock. func (a *Association) setState(newState uint32) { @@ -1127,13 +1133,11 @@ func (a *Association) SRTT() float64 { } // getMaxTSNOffset returns the maximum offset over the current cummulative TSN that -// we are willing to enqueue. Limiting the maximum offset limits the number of -// tsns we have in the payloadQueue map. This ensures that we don't use too much space in -// the map itself. This also ensures that we keep the bytes utilized in the receive +// we are willing to enqueue. This ensures that we keep the bytes utilized in the receive // buffer within a small multiple of the user provided max receive buffer size. 
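As a concrete check of the clamp described above, here is a standalone mirror of getMaxTSNOffset using the constants defined in this file (avgChunkSize=500, minTSNOffset=2000, maxTSNOffset=40000) and assuming the symmetric upper clamp implied by the maxTSNOffset constant; the 1 MiB buffer in the trailing comment is only an example value:

package sctpcalc

func maxTSNOffsetFor(maxReceiveBufferSize uint32) uint32 {
	offset := (maxReceiveBufferSize * 4) / 500 // avgChunkSize
	if offset < 2000 {                         // minTSNOffset
		offset = 2000
	}
	if offset > 40000 { // maxTSNOffset
		offset = 40000
	}
	return offset
}

// maxTSNOffsetFor(1 << 20) == 8388: a 1 MiB receive buffer keeps at most
// roughly 8.4k TSNs queued ahead of the cumulative TSN.
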
-func (a *Association) getMaxTSNOffset() uint32 { +func getMaxTSNOffset(maxReceiveBufferSize uint32) uint32 { // 4 is a magic number here. There is no theory behind this. - offset := (a.maxReceiveBufferSize * 4) / avgChunkSize + offset := (maxReceiveBufferSize * 4) / avgChunkSize if offset < minTSNOffset { offset = minTSNOffset } @@ -1186,7 +1190,7 @@ func (a *Association) handleInit(p *packet, i *chunkInit) ([]*packet, error) { // is set initially by taking the peer's initial TSN, // received in the INIT or INIT ACK chunk, and // subtracting one from it. - a.peerLastTSN = i.initialTSN - 1 + a.payloadQueue.init(i.initialTSN - 1) for _, param := range i.params { switch v := param.(type) { // nolint:gocritic @@ -1260,7 +1264,7 @@ func (a *Association) handleInitAck(p *packet, i *chunkInitAck) error { a.myMaxNumInboundStreams = min16(i.numInboundStreams, a.myMaxNumInboundStreams) a.myMaxNumOutboundStreams = min16(i.numOutboundStreams, a.myMaxNumOutboundStreams) a.peerVerificationTag = i.initiateTag - a.peerLastTSN = i.initialTSN - 1 + a.payloadQueue.init(i.initialTSN - 1) if a.sourcePort != p.destinationPort || a.destinationPort != p.sourcePort { a.log.Warnf("[%s] handleInitAck: port mismatch", a.name) @@ -1372,8 +1376,9 @@ func (a *Association) handleCookieEcho(c *chunkCookieEcho) []*packet { a.storedCookieEcho = nil a.setState(established) - // Note: This is a future place where the user could be notified (COMMUNICATION UP) - a.handshakeCompletedCh <- nil + if !a.completeHandshake(nil) { + return nil + } } p := &packet{ @@ -1401,8 +1406,7 @@ func (a *Association) handleCookieAck() { a.storedCookieEcho = nil a.setState(established) - // Note: This is a future place where the user could be notified (COMMUNICATION UP) - a.handshakeCompletedCh <- nil + a.completeHandshake(nil) } // The caller should hold the lock. @@ -1411,7 +1415,7 @@ func (a *Association) handleData(d *chunkPayloadData) []*packet { a.name, d.tsn, d.immediateSack, len(d.userData)) a.stats.incDATAs() - canPush := a.payloadQueue.canPush(d, a.peerLastTSN, a.getMaxTSNOffset()) + canPush := a.payloadQueue.canPush(d.tsn) if canPush { s := a.getOrCreateStream(d.streamIdentifier, true, PayloadTypeUnknown) if s == nil { @@ -1423,14 +1427,14 @@ func (a *Association) handleData(d *chunkPayloadData) []*packet { if a.getMyReceiverWindowCredit() > 0 { // Pass the new chunk to stream level as soon as it arrives - a.payloadQueue.push(d, a.peerLastTSN) + a.payloadQueue.push(d.tsn) s.handleData(d) } else { // Receive buffer is full lastTSN, ok := a.payloadQueue.getLastTSNReceived() if ok && sna32LT(d.tsn, lastTSN) { a.log.Debugf("[%s] receive buffer full, but accepted as this is a missing chunk with tsn=%d ssn=%d", a.name, d.tsn, d.streamSequenceNumber) - a.payloadQueue.push(d, a.peerLastTSN) + a.payloadQueue.push(d.tsn) s.handleData(d) } else { a.log.Debugf("[%s] receive buffer full. dropping DATA with tsn=%d ssn=%d", a.name, d.tsn, d.streamSequenceNumber) @@ -1454,10 +1458,9 @@ func (a *Association) handlePeerLastTSNAndAcknowledgement(sackImmediately bool) // Meaning, if peerLastTSN+1 points to a chunk that is received, // advance peerLastTSN until peerLastTSN+1 points to unreceived chunk. 
for { - if _, popOk := a.payloadQueue.pop(a.peerLastTSN + 1); !popOk { + if popOk := a.payloadQueue.pop(false); !popOk { break } - a.peerLastTSN++ for _, rstReq := range a.reconfigRequests { resp := a.resetStreamsIfAny(rstReq) @@ -1470,7 +1473,7 @@ func (a *Association) handlePeerLastTSNAndAcknowledgement(sackImmediately bool) hasPacketLoss := (a.payloadQueue.size() > 0) if hasPacketLoss { - a.log.Tracef("[%s] packetloss: %s", a.name, a.payloadQueue.getGapAckBlocksString(a.peerLastTSN)) + a.log.Tracef("[%s] packetloss: %s", a.name, a.payloadQueue.getGapAckBlocksString()) } if (a.ackState != ackStateImmediate && !sackImmediately && !hasPacketLoss && a.ackMode == ackModeNormal) || a.ackMode == ackModeAlwaysDelay { @@ -1504,6 +1507,11 @@ func (a *Association) OpenStream(streamIdentifier uint16, defaultPayloadType Pay a.lock.Lock() defer a.lock.Unlock() + switch a.getState() { + case shutdownAckSent, shutdownPending, shutdownReceived, shutdownSent, closed: + return nil, ErrAssociationClosed + } + return a.getOrCreateStream(streamIdentifier, false, defaultPayloadType), nil } @@ -2068,8 +2076,8 @@ func (a *Association) handleForwardTSN(c *chunkForwardTSN) []*packet { // duplicate may indicate the previous SACK was lost in the network. a.log.Tracef("[%s] should send ack? newCumTSN=%d peerLastTSN=%d", - a.name, c.newCumulativeTSN, a.peerLastTSN) - if sna32LTE(c.newCumulativeTSN, a.peerLastTSN) { + a.name, c.newCumulativeTSN, a.peerLastTSN()) + if sna32LTE(c.newCumulativeTSN, a.peerLastTSN()) { a.log.Tracef("[%s] sending ack on Forward TSN", a.name) a.ackState = ackStateImmediate a.ackTimer.stop() @@ -2088,9 +2096,8 @@ func (a *Association) handleForwardTSN(c *chunkForwardTSN) []*packet { // chunk, // Advance peerLastTSN - for sna32LT(a.peerLastTSN, c.newCumulativeTSN) { - a.payloadQueue.pop(a.peerLastTSN + 1) // may not exist - a.peerLastTSN++ + for sna32LT(a.peerLastTSN(), c.newCumulativeTSN) { + a.payloadQueue.pop(true) // may not exist } // Report new peerLastTSN value and abandoned largest SSN value to @@ -2143,7 +2150,7 @@ func (a *Association) handleReconfigParam(raw param) (*packet, error) { switch p := raw.(type) { case *paramOutgoingResetRequest: a.log.Tracef("[%s] handleReconfigParam (OutgoingResetRequest)", a.name) - if a.peerLastTSN < p.senderLastTSN && len(a.reconfigRequests) >= maxReconfigRequests { + if a.peerLastTSN() < p.senderLastTSN && len(a.reconfigRequests) >= maxReconfigRequests { // We have too many reconfig requests outstanding. Drop the request and let // the peer retransmit. A well behaved peer should only have 1 outstanding // reconfig request. @@ -2189,9 +2196,9 @@ func (a *Association) handleReconfigParam(raw param) (*packet, error) { // The caller should hold the lock. 
func (a *Association) resetStreamsIfAny(p *paramOutgoingResetRequest) *packet { result := reconfigResultSuccessPerformed - if sna32LTE(p.senderLastTSN, a.peerLastTSN) { + if sna32LTE(p.senderLastTSN, a.peerLastTSN()) { a.log.Debugf("[%s] resetStream(): senderLastTSN=%d <= peerLastTSN=%d", - a.name, p.senderLastTSN, a.peerLastTSN) + a.name, p.senderLastTSN, a.peerLastTSN()) for _, id := range p.streamIdentifiers { s, ok := a.streams[id] if !ok { @@ -2206,7 +2213,7 @@ func (a *Association) resetStreamsIfAny(p *paramOutgoingResetRequest) *packet { delete(a.reconfigRequests, p.reconfigRequestSequenceNumber) } else { a.log.Debugf("[%s] resetStream(): senderLastTSN=%d > peerLastTSN=%d", - a.name, p.senderLastTSN, a.peerLastTSN) + a.name, p.senderLastTSN, a.peerLastTSN()) result = reconfigResultInProgress } @@ -2280,7 +2287,7 @@ func (a *Association) popPendingDataChunksToSend() ([]*chunkPayloadData, []uint1 break // would exceeds cwnd } - if dataLen > a.rwnd { + if dataLen > a.RWND() { break // no more rwnd } @@ -2389,7 +2396,8 @@ func (a *Association) checkPartialReliabilityStatus(c *chunkPayloadData) { } s.lock.RUnlock() } else { - a.log.Errorf("[%s] stream %d not found)", a.name, c.streamIdentifier) + // Remote has reset its send side of the stream, we can still send data. + a.log.Tracef("[%s] stream %d not found, remote reset", a.name, c.streamIdentifier) } } @@ -2454,10 +2462,10 @@ func (a *Association) generateNextRSN() uint32 { func (a *Association) createSelectiveAckChunk() *chunkSelectiveAck { sack := &chunkSelectiveAck{} - sack.cumulativeTSNAck = a.peerLastTSN + sack.cumulativeTSNAck = a.peerLastTSN() sack.advertisedReceiverWindowCredit = a.getMyReceiverWindowCredit() sack.duplicateTSN = a.payloadQueue.popDuplicates() - sack.gapAckBlocks = a.payloadQueue.getGapAckBlocks(a.peerLastTSN) + sack.gapAckBlocks = a.payloadQueue.getGapAckBlocks() return sack } @@ -2691,13 +2699,13 @@ func (a *Association) onRetransmissionFailure(id int) { if id == timerT1Init { a.log.Errorf("[%s] retransmission failure: T1-init", a.name) - a.handshakeCompletedCh <- ErrHandshakeInitAck + a.completeHandshake(ErrHandshakeInitAck) return } if id == timerT1Cookie { a.log.Errorf("[%s] retransmission failure: T1-cookie", a.name) - a.handshakeCompletedCh <- ErrHandshakeCookieEcho + a.completeHandshake(ErrHandshakeCookieEcho) return } @@ -2745,3 +2753,17 @@ func (a *Association) MaxMessageSize() uint32 { func (a *Association) SetMaxMessageSize(maxMsgSize uint32) { atomic.StoreUint32(&a.maxMessageSize, maxMsgSize) } + +// completeHandshake sends the given error to handshakeCompletedCh unless the read/write +// side of the association closes before that can happen. It returns whether it was able +// to send on the channel or not. 
+func (a *Association) completeHandshake(handshakeErr error) bool { + select { + // Note: This is a future place where the user could be notified (COMMUNICATION UP) + case a.handshakeCompletedCh <- handshakeErr: + return true + case <-a.closeWriteLoopCh: // check the read/write sides for closure + case <-a.readLoopCloseCh: + } + return false +} diff --git a/vendor/github.com/pion/sctp/chunk_payload_data.go b/vendor/github.com/pion/sctp/chunk_payload_data.go index a6e50db93c9..a5a00064c7f 100644 --- a/vendor/github.com/pion/sctp/chunk_payload_data.go +++ b/vendor/github.com/pion/sctp/chunk_payload_data.go @@ -132,7 +132,7 @@ func (p *chunkPayloadData) unmarshal(raw []byte) error { p.beginningFragment = p.flags&payloadDataBeginingFragmentBitmask != 0 p.endingFragment = p.flags&payloadDataEndingFragmentBitmask != 0 - if len(raw) < payloadDataHeaderSize { + if len(p.raw) < payloadDataHeaderSize { return ErrChunkPayloadSmall } p.tsn = binary.BigEndian.Uint32(p.raw[0:]) diff --git a/vendor/github.com/pion/sctp/error_cause_header.go b/vendor/github.com/pion/sctp/error_cause_header.go index 84ff209d29c..98c530fb467 100644 --- a/vendor/github.com/pion/sctp/error_cause_header.go +++ b/vendor/github.com/pion/sctp/error_cause_header.go @@ -5,6 +5,7 @@ package sctp import ( "encoding/binary" + "errors" ) // errorCauseHeader represents the shared header that is shared by all error causes @@ -18,6 +19,9 @@ const ( errorCauseHeaderLength = 4 ) +// ErrInvalidSCTPChunk is returned when an SCTP chunk is invalid +var ErrInvalidSCTPChunk = errors.New("invalid SCTP chunk") + func (e *errorCauseHeader) marshal() ([]byte, error) { e.len = uint16(len(e.raw)) + uint16(errorCauseHeaderLength) raw := make([]byte, e.len) @@ -31,6 +35,9 @@ func (e *errorCauseHeader) marshal() ([]byte, error) { func (e *errorCauseHeader) unmarshal(raw []byte) error { e.code = errorCauseCode(binary.BigEndian.Uint16(raw[0:])) e.len = binary.BigEndian.Uint16(raw[2:]) + if e.len < errorCauseHeaderLength || int(e.len) > len(raw) { + return ErrInvalidSCTPChunk + } valueLength := e.len - errorCauseHeaderLength e.raw = raw[errorCauseHeaderLength : errorCauseHeaderLength+valueLength] return nil diff --git a/vendor/github.com/pion/sctp/packet.go b/vendor/github.com/pion/sctp/packet.go index 72660f3e9df..a2ab8e14cbf 100644 --- a/vendor/github.com/pion/sctp/packet.go +++ b/vendor/github.com/pion/sctp/packet.go @@ -209,3 +209,19 @@ func (p *packet) String() string { } return res } + +// TryMarshalUnmarshal attempts to marshal and unmarshal a message. Added for fuzzing. +func TryMarshalUnmarshal(msg []byte) int { + p := &packet{} + err := p.unmarshal(false, msg) + if err != nil { + return 0 + } + + _, err = p.marshal(false) + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/pion/sctp/param_requested_hmac_algorithm.go b/vendor/github.com/pion/sctp/param_requested_hmac_algorithm.go index ca9b0147006..3e98ea714d3 100644 --- a/vendor/github.com/pion/sctp/param_requested_hmac_algorithm.go +++ b/vendor/github.com/pion/sctp/param_requested_hmac_algorithm.go @@ -13,7 +13,7 @@ type hmacAlgorithm uint16 const ( hmacResv1 hmacAlgorithm = 0 - hmacSHA128 = 1 + hmacSHA128 hmacAlgorithm = 1 hmacResv2 hmacAlgorithm = 2 hmacSHA256 hmacAlgorithm = 3 ) @@ -21,6 +21,9 @@ const ( // ErrInvalidAlgorithmType is returned if unknown auth algorithm is specified. var ErrInvalidAlgorithmType = errors.New("invalid algorithm type") +// ErrInvalidChunkLength is returned if the chunk length is invalid. 
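TryMarshalUnmarshal above is exported specifically as a fuzzing entry point; an assumed go-fuzz style harness (not part of this change) would simply forward the input bytes to it:

package sctpfuzz

import "github.com/pion/sctp"

// Fuzz feeds arbitrary bytes through the packet unmarshal/marshal round trip.
func Fuzz(data []byte) int {
	return sctp.TryMarshalUnmarshal(data)
}
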
+var ErrInvalidChunkLength = errors.New("invalid chunk length") + func (c hmacAlgorithm) String() string { switch c { case hmacResv1: @@ -58,6 +61,9 @@ func (r *paramRequestedHMACAlgorithm) unmarshal(raw []byte) (param, error) { if err != nil { return nil, err } + if len(r.raw)%2 == 1 { + return nil, ErrInvalidChunkLength + } i := 0 for i < len(r.raw) { diff --git a/vendor/github.com/pion/sctp/payload_queue.go b/vendor/github.com/pion/sctp/payload_queue.go index a0b1b26f320..92eacecd0a1 100644 --- a/vendor/github.com/pion/sctp/payload_queue.go +++ b/vendor/github.com/pion/sctp/payload_queue.go @@ -3,81 +3,26 @@ package sctp -import ( - "fmt" - "sort" -) - type payloadQueue struct { - chunkMap map[uint32]*chunkPayloadData - sorted []uint32 - dupTSN []uint32 - nBytes int + chunks *queue[*chunkPayloadData] + nBytes int } func newPayloadQueue() *payloadQueue { - return &payloadQueue{chunkMap: map[uint32]*chunkPayloadData{}} -} - -func (q *payloadQueue) updateSortedKeys() { - if q.sorted != nil { - return - } - - q.sorted = make([]uint32, len(q.chunkMap)) - i := 0 - for k := range q.chunkMap { - q.sorted[i] = k - i++ - } - - sort.Slice(q.sorted, func(i, j int) bool { - return sna32LT(q.sorted[i], q.sorted[j]) - }) -} - -func (q *payloadQueue) canPush(p *chunkPayloadData, cumulativeTSN uint32, maxTSNOffset uint32) bool { - _, ok := q.chunkMap[p.tsn] - if ok || sna32LTE(p.tsn, cumulativeTSN) || sna32GTE(p.tsn, cumulativeTSN+maxTSNOffset) { - return false - } - return true + return &payloadQueue{chunks: newQueue[*chunkPayloadData](128)} } func (q *payloadQueue) pushNoCheck(p *chunkPayloadData) { - q.chunkMap[p.tsn] = p + q.chunks.PushBack(p) q.nBytes += len(p.userData) - q.sorted = nil -} - -// push pushes a payload data. If the payload data is already in our queue or -// older than our cumulativeTSN marker, it will be recored as duplications, -// which can later be retrieved using popDuplicates. -func (q *payloadQueue) push(p *chunkPayloadData, cumulativeTSN uint32) bool { - _, ok := q.chunkMap[p.tsn] - if ok || sna32LTE(p.tsn, cumulativeTSN) { - // Found the packet, log in dups - q.dupTSN = append(q.dupTSN, p.tsn) - return false - } - - q.chunkMap[p.tsn] = p - q.nBytes += len(p.userData) - q.sorted = nil - return true } // pop pops only if the oldest chunk's TSN matches the given TSN. func (q *payloadQueue) pop(tsn uint32) (*chunkPayloadData, bool) { - q.updateSortedKeys() - - if len(q.chunkMap) > 0 && tsn == q.sorted[0] { - q.sorted = q.sorted[1:] - if c, ok := q.chunkMap[tsn]; ok { - delete(q.chunkMap, tsn) - q.nBytes -= len(c.userData) - return c, true - } + if q.chunks.Len() > 0 && tsn == q.chunks.Front().tsn { + c := q.chunks.PopFront() + q.nBytes -= len(c.userData) + return c, true } return nil, false @@ -85,65 +30,20 @@ func (q *payloadQueue) pop(tsn uint32) (*chunkPayloadData, bool) { // get returns reference to chunkPayloadData with the given TSN value. func (q *payloadQueue) get(tsn uint32) (*chunkPayloadData, bool) { - c, ok := q.chunkMap[tsn] - return c, ok -} - -// popDuplicates returns an array of TSN values that were found duplicate. 
-func (q *payloadQueue) popDuplicates() []uint32 { - dups := q.dupTSN - q.dupTSN = []uint32{} - return dups -} - -func (q *payloadQueue) getGapAckBlocks(cumulativeTSN uint32) (gapAckBlocks []gapAckBlock) { - var b gapAckBlock - - if len(q.chunkMap) == 0 { - return []gapAckBlock{} - } - - q.updateSortedKeys() - - for i, tsn := range q.sorted { - if i == 0 { - b.start = uint16(tsn - cumulativeTSN) - b.end = b.start - continue - } - diff := uint16(tsn - cumulativeTSN) - if b.end+1 == diff { - b.end++ - } else { - gapAckBlocks = append(gapAckBlocks, gapAckBlock{ - start: b.start, - end: b.end, - }) - b.start = diff - b.end = diff - } + length := q.chunks.Len() + if length == 0 { + return nil, false } - - gapAckBlocks = append(gapAckBlocks, gapAckBlock{ - start: b.start, - end: b.end, - }) - - return gapAckBlocks -} - -func (q *payloadQueue) getGapAckBlocksString(cumulativeTSN uint32) string { - gapAckBlocks := q.getGapAckBlocks(cumulativeTSN) - str := fmt.Sprintf("cumTSN=%d", cumulativeTSN) - for _, b := range gapAckBlocks { - str += fmt.Sprintf(",%d-%d", b.start, b.end) + head := q.chunks.Front().tsn + if tsn < head || int(tsn-head) >= length { + return nil, false } - return str + return q.chunks.At(int(tsn - head)), true } func (q *payloadQueue) markAsAcked(tsn uint32) int { var nBytesAcked int - if c, ok := q.chunkMap[tsn]; ok { + if c, ok := q.get(tsn); ok { c.acked = true c.retransmit = false nBytesAcked = len(c.userData) @@ -154,18 +54,9 @@ func (q *payloadQueue) markAsAcked(tsn uint32) int { return nBytesAcked } -func (q *payloadQueue) getLastTSNReceived() (uint32, bool) { - q.updateSortedKeys() - - qlen := len(q.sorted) - if qlen == 0 { - return 0, false - } - return q.sorted[qlen-1], true -} - func (q *payloadQueue) markAllToRetrasmit() { - for _, c := range q.chunkMap { + for i := 0; i < q.chunks.Len(); i++ { + c := q.chunks.At(i) if c.acked || c.abandoned() { continue } @@ -178,5 +69,5 @@ func (q *payloadQueue) getNumBytes() int { } func (q *payloadQueue) size() int { - return len(q.chunkMap) + return q.chunks.Len() } diff --git a/vendor/github.com/pion/sctp/queue.go b/vendor/github.com/pion/sctp/queue.go new file mode 100644 index 00000000000..a6945caf596 --- /dev/null +++ b/vendor/github.com/pion/sctp/queue.go @@ -0,0 +1,70 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package sctp + +type queue[T any] struct { + buf []T + head int + tail int + count int +} + +const minCap = 16 + +func newQueue[T any](capacity int) *queue[T] { + queueCap := minCap + for queueCap < capacity { + queueCap <<= 1 + } + + return &queue[T]{ + buf: make([]T, queueCap), + } +} + +func (q *queue[T]) Len() int { + return q.count +} + +func (q *queue[T]) PushBack(ele T) { + q.growIfFull() + q.buf[q.tail] = ele + q.tail = (q.tail + 1) % len(q.buf) + q.count++ +} + +func (q *queue[T]) PopFront() T { + ele := q.buf[q.head] + var zeroVal T + q.buf[q.head] = zeroVal + q.head = (q.head + 1) % len(q.buf) + q.count-- + return ele +} + +func (q *queue[T]) Front() T { + return q.buf[q.head] +} + +func (q *queue[T]) At(i int) T { + return q.buf[(q.head+i)%(len(q.buf))] +} + +func (q *queue[T]) growIfFull() { + if q.count < len(q.buf) { + return + } + + newBuf := make([]T, q.count<<1) + if q.tail > q.head { + copy(newBuf, q.buf[q.head:q.tail]) + } else { + n := copy(newBuf, q.buf[q.head:]) + copy(newBuf[n:], q.buf[:q.tail]) + } + + q.head = 0 + q.tail = q.count + q.buf = newBuf +} diff --git a/vendor/github.com/pion/sctp/receive_payload_queue.go 
b/vendor/github.com/pion/sctp/receive_payload_queue.go new file mode 100644 index 00000000000..2a4f2bbf75e --- /dev/null +++ b/vendor/github.com/pion/sctp/receive_payload_queue.go @@ -0,0 +1,186 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package sctp + +import ( + "fmt" + "math/bits" +) + +type receivePayloadQueue struct { + tailTSN uint32 + chunkSize int + tsnBitmask []uint64 + dupTSN []uint32 + maxTSNOffset uint32 + + cumulativeTSN uint32 +} + +func newReceivePayloadQueue(maxTSNOffset uint32) *receivePayloadQueue { + maxTSNOffset = ((maxTSNOffset + 63) / 64) * 64 + return &receivePayloadQueue{ + tsnBitmask: make([]uint64, maxTSNOffset/64), + maxTSNOffset: maxTSNOffset, + } +} + +func (q *receivePayloadQueue) init(cumulativeTSN uint32) { + q.cumulativeTSN = cumulativeTSN + q.tailTSN = cumulativeTSN + q.chunkSize = 0 + for i := range q.tsnBitmask { + q.tsnBitmask[i] = 0 + } + q.dupTSN = q.dupTSN[:0] +} + +func (q *receivePayloadQueue) hasChunk(tsn uint32) bool { + if q.chunkSize == 0 || sna32LTE(tsn, q.cumulativeTSN) || sna32GT(tsn, q.tailTSN) { + return false + } + + index, offset := int(tsn/64)%len(q.tsnBitmask), tsn%64 + return q.tsnBitmask[index]&(1<> uint64(start)) + return i + start, i+start < end +} + +func getFirstZeroBit(val uint64, start, end int) (int, bool) { + return getFirstNonZeroBit(^val, start, end) +} diff --git a/vendor/github.com/pion/sctp/rtx_timer.go b/vendor/github.com/pion/sctp/rtx_timer.go index 354825b51c8..1fea39319ff 100644 --- a/vendor/github.com/pion/sctp/rtx_timer.go +++ b/vendor/github.com/pion/sctp/rtx_timer.go @@ -118,19 +118,28 @@ type rtxTimerObserver interface { onRetransmissionFailure(timerID int) } +type rtxTimerState uint8 + +const ( + rtxTimerStopped rtxTimerState = iota + rtxTimerStarted + rtxTimerClosed +) + // rtxTimer provides the retnransmission timer conforms with RFC 4960 Sec 6.3.1 type rtxTimer struct { - id int + timer *time.Timer observer rtxTimerObserver + id int maxRetrans uint - stopFunc stopTimerLoop - closed bool - mutex sync.RWMutex rtoMax float64 + mutex sync.Mutex + rto float64 + nRtos uint + state rtxTimerState + pending uint8 } -type stopTimerLoop func() - // newRTXTimer creates a new retransmission timer. // if maxRetrans is set to 0, it will keep retransmitting until stop() is called. // (it will never make onRetransmissionFailure() callback. @@ -146,21 +155,38 @@ func newRTXTimer(id int, observer rtxTimerObserver, maxRetrans uint, if timer.rtoMax == 0 { timer.rtoMax = defaultRTOMax } + timer.timer = time.AfterFunc(math.MaxInt64, timer.timeout) + timer.timer.Stop() return &timer } +func (t *rtxTimer) calculateNextTimeout() time.Duration { + timeout := calculateNextTimeout(t.rto, t.nRtos, t.rtoMax) + return time.Duration(timeout) * time.Millisecond +} + +func (t *rtxTimer) timeout() { + t.mutex.Lock() + if t.pending--; t.pending == 0 && t.state == rtxTimerStarted { + if t.nRtos++; t.maxRetrans == 0 || t.nRtos <= t.maxRetrans { + t.timer.Reset(t.calculateNextTimeout()) + t.pending++ + defer t.observer.onRetransmissionTimeout(t.id, t.nRtos) + } else { + t.state = rtxTimerStopped + defer t.observer.onRetransmissionFailure(t.id) + } + } + t.mutex.Unlock() +} + // start starts the timer. 
func (t *rtxTimer) start(rto float64) bool { t.mutex.Lock() defer t.mutex.Unlock() - // this timer is already closed - if t.closed { - return false - } - - // this is a noop if the timer is always running - if t.stopFunc != nil { + // this timer is already closed or aleady running + if t.state != rtxTimerStopped { return false } @@ -168,40 +194,11 @@ func (t *rtxTimer) start(rto float64) bool { // fast timeout for the tests. Non-test code should pass in the // rto generated by rtoManager getRTO() method which caps the // value at RTO.Min or at RTO.Max. - var nRtos uint - - cancelCh := make(chan struct{}) - - go func() { - canceling := false - - timer := time.NewTimer(math.MaxInt64) - timer.Stop() - - for !canceling { - timeout := calculateNextTimeout(rto, nRtos, t.rtoMax) - timer.Reset(time.Duration(timeout) * time.Millisecond) - - select { - case <-timer.C: - nRtos++ - if t.maxRetrans == 0 || nRtos <= t.maxRetrans { - t.observer.onRetransmissionTimeout(t.id, nRtos) - } else { - t.stop() - t.observer.onRetransmissionFailure(t.id) - } - case <-cancelCh: - canceling = true - timer.Stop() - } - } - }() - - t.stopFunc = func() { - close(cancelCh) - } - + t.rto = rto + t.nRtos = 0 + t.state = rtxTimerStarted + t.pending++ + t.timer.Reset(t.calculateNextTimeout()) return true } @@ -210,9 +207,11 @@ func (t *rtxTimer) stop() { t.mutex.Lock() defer t.mutex.Unlock() - if t.stopFunc != nil { - t.stopFunc() - t.stopFunc = nil + if t.state == rtxTimerStarted { + if t.timer.Stop() { + t.pending-- + } + t.state = rtxTimerStopped } } @@ -222,21 +221,19 @@ func (t *rtxTimer) close() { t.mutex.Lock() defer t.mutex.Unlock() - if t.stopFunc != nil { - t.stopFunc() - t.stopFunc = nil + if t.state == rtxTimerStarted && t.timer.Stop() { + t.pending-- } - - t.closed = true + t.state = rtxTimerClosed } // isRunning tests if the timer is running. // Debug purpose only func (t *rtxTimer) isRunning() bool { - t.mutex.RLock() - defer t.mutex.RUnlock() + t.mutex.Lock() + defer t.mutex.Unlock() - return (t.stopFunc != nil) + return t.state == rtxTimerStarted } func calculateNextTimeout(rto float64, nRtos uint, rtoMax float64) float64 { diff --git a/vendor/github.com/pion/srtp/v2/context.go b/vendor/github.com/pion/srtp/v2/context.go index 27da02cfdd4..ffed38fabba 100644 --- a/vendor/github.com/pion/srtp/v2/context.go +++ b/vendor/github.com/pion/srtp/v2/context.go @@ -4,6 +4,7 @@ package srtp import ( + "bytes" "fmt" "github.com/pion/transport/v2/replaydetector" @@ -56,6 +57,14 @@ type Context struct { newSRTCPReplayDetector func() replaydetector.ReplayDetector newSRTPReplayDetector func() replaydetector.ReplayDetector + + profile ProtectionProfile + + sendMKI []byte // Master Key Identifier used for encrypting RTP/RTCP packets. Set to nil if MKI is not enabled. + mkis map[string]srtpCipher // Master Key Identifier to cipher mapping. Used for decrypting packets. Empty if MKI is not enabled. + + encryptSRTP bool + encryptSRTCP bool } // CreateContext creates a new SRTP Context. 
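Both the ackTimer and rtxTimer rewrites above follow the same shape: one time.Timer created via time.AfterFunc, a small state enum, and a pending counter so that a callback which fires after Stop is ignored instead of spawning a goroutine per start. A generic sketch of that pattern (the names here are illustrative, not the library's):

package timerpattern

import (
	"math"
	"sync"
	"time"
)

type restartableTimer struct {
	mu      sync.Mutex
	timer   *time.Timer
	running bool
	pending uint8
	onFire  func()
}

func newRestartableTimer(onFire func()) *restartableTimer {
	t := &restartableTimer{onFire: onFire}
	t.timer = time.AfterFunc(math.MaxInt64, t.fire)
	t.timer.Stop()
	return t
}

func (t *restartableTimer) fire() {
	t.mu.Lock()
	// Deliver the callback only if this firing was not cancelled by Stop.
	if t.pending--; t.pending == 0 && t.running {
		t.running = false
		defer t.onFire() // runs after the mutex is released
	}
	t.mu.Unlock()
}

func (t *restartableTimer) Start(d time.Duration) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.running {
		return false
	}
	t.running = true
	t.pending++
	t.timer.Reset(d)
	return true
}

func (t *restartableTimer) Stop() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.running {
		if t.timer.Stop() {
			t.pending--
		}
		t.running = false
	}
}
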
@@ -66,43 +75,19 @@ type Context struct { // // decCtx, err := srtp.CreateContext(key, salt, profile, srtp.SRTPReplayProtection(256)) func CreateContext(masterKey, masterSalt []byte, profile ProtectionProfile, opts ...ContextOption) (c *Context, err error) { - keyLen, err := profile.keyLen() - if err != nil { - return nil, err - } - - saltLen, err := profile.saltLen() - if err != nil { - return nil, err - } - - if masterKeyLen := len(masterKey); masterKeyLen != keyLen { - return c, fmt.Errorf("%w expected(%d) actual(%d)", errShortSrtpMasterKey, masterKey, keyLen) - } else if masterSaltLen := len(masterSalt); masterSaltLen != saltLen { - return c, fmt.Errorf("%w expected(%d) actual(%d)", errShortSrtpMasterSalt, saltLen, masterSaltLen) - } - c = &Context{ srtpSSRCStates: map[uint32]*srtpSSRCState{}, srtcpSSRCStates: map[uint32]*srtcpSSRCState{}, - } - - switch profile { - case ProtectionProfileAeadAes128Gcm, ProtectionProfileAeadAes256Gcm: - c.cipher, err = newSrtpCipherAeadAesGcm(profile, masterKey, masterSalt) - case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80: - c.cipher, err = newSrtpCipherAesCmHmacSha1(profile, masterKey, masterSalt) - default: - return nil, fmt.Errorf("%w: %#v", errNoSuchSRTPProfile, profile) - } - if err != nil { - return nil, err + profile: profile, + mkis: map[string]srtpCipher{}, } for _, o := range append( []ContextOption{ // Default options SRTPNoReplayProtection(), SRTCPNoReplayProtection(), + SRTPEncryption(), + SRTCPEncryption(), }, opts..., // User specified options ) { @@ -111,9 +96,93 @@ func CreateContext(masterKey, masterSalt []byte, profile ProtectionProfile, opts } } + c.cipher, err = c.createCipher(c.sendMKI, masterKey, masterSalt, c.encryptSRTP, c.encryptSRTCP) + if err != nil { + return nil, err + } + if len(c.sendMKI) != 0 { + c.mkis[string(c.sendMKI)] = c.cipher + } + return c, nil } +// AddCipherForMKI adds new MKI with associated masker key and salt. Context must be created with MasterKeyIndicator option +// to enable MKI support. MKI must be unique and have the same length as the one used for creating Context. +// Operation is not thread-safe, you need to provide synchronization with decrypting packets. 
+func (c *Context) AddCipherForMKI(mki, masterKey, masterSalt []byte) error { + if len(c.mkis) == 0 { + return errMKIIsNotEnabled + } + if len(mki) == 0 || len(mki) != len(c.sendMKI) { + return errInvalidMKILength + } + if _, ok := c.mkis[string(mki)]; ok { + return errMKIAlreadyInUse + } + + cipher, err := c.createCipher(mki, masterKey, masterSalt, c.encryptSRTP, c.encryptSRTCP) + if err != nil { + return err + } + c.mkis[string(mki)] = cipher + return nil +} + +func (c *Context) createCipher(mki, masterKey, masterSalt []byte, encryptSRTP, encryptSRTCP bool) (srtpCipher, error) { + keyLen, err := c.profile.KeyLen() + if err != nil { + return nil, err + } + + saltLen, err := c.profile.SaltLen() + if err != nil { + return nil, err + } + + if masterKeyLen := len(masterKey); masterKeyLen != keyLen { + return nil, fmt.Errorf("%w expected(%d) actual(%d)", errShortSrtpMasterKey, masterKey, keyLen) + } else if masterSaltLen := len(masterSalt); masterSaltLen != saltLen { + return nil, fmt.Errorf("%w expected(%d) actual(%d)", errShortSrtpMasterSalt, saltLen, masterSaltLen) + } + + switch c.profile { + case ProtectionProfileAeadAes128Gcm, ProtectionProfileAeadAes256Gcm: + return newSrtpCipherAeadAesGcm(c.profile, masterKey, masterSalt, mki, encryptSRTP, encryptSRTCP) + case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80, ProtectionProfileAes256CmHmacSha1_32, ProtectionProfileAes256CmHmacSha1_80: + return newSrtpCipherAesCmHmacSha1(c.profile, masterKey, masterSalt, mki, encryptSRTP, encryptSRTCP) + case ProtectionProfileNullHmacSha1_32, ProtectionProfileNullHmacSha1_80: + return newSrtpCipherAesCmHmacSha1(c.profile, masterKey, masterSalt, mki, false, false) + default: + return nil, fmt.Errorf("%w: %#v", errNoSuchSRTPProfile, c.profile) + } +} + +// RemoveMKI removes one of MKIs. You cannot remove last MKI and one used for encrypting RTP/RTCP packets. +// Operation is not thread-safe, you need to provide synchronization with decrypting packets. +func (c *Context) RemoveMKI(mki []byte) error { + if _, ok := c.mkis[string(mki)]; !ok { + return ErrMKINotFound + } + if bytes.Equal(mki, c.sendMKI) { + return errMKIAlreadyInUse + } + delete(c.mkis, string(mki)) + return nil +} + +// SetSendMKI switches MKI and cipher used for encrypting RTP/RTCP packets. +// Operation is not thread-safe, you need to provide synchronization with encrypting packets. 
+func (c *Context) SetSendMKI(mki []byte) error { + cipher, ok := c.mkis[string(mki)] + if !ok { + return ErrMKINotFound + } + c.sendMKI = mki + c.cipher = cipher + return nil +} + // https://tools.ietf.org/html/rfc3550#appendix-A.1 func (s *srtpSSRCState) nextRolloverCount(sequenceNumber uint16) (roc uint32, diff int32, overflow bool) { seq := int32(sequenceNumber) diff --git a/vendor/github.com/pion/srtp/v2/errors.go b/vendor/github.com/pion/srtp/v2/errors.go index 5b1751d30f0..c22653f0bb9 100644 --- a/vendor/github.com/pion/srtp/v2/errors.go +++ b/vendor/github.com/pion/srtp/v2/errors.go @@ -9,6 +9,11 @@ import ( ) var ( + // ErrFailedToVerifyAuthTag is returned when decryption fails due to invalid authentication tag + ErrFailedToVerifyAuthTag = errors.New("failed to verify auth tag") + // ErrMKINotFound is returned when decryption fails due to unknown MKI value in packet + ErrMKINotFound = errors.New("MKI not found") + errDuplicated = errors.New("duplicated packet") errShortSrtpMasterKey = errors.New("SRTP master key is not long enough") errShortSrtpMasterSalt = errors.New("SRTP master salt is not long enough") @@ -17,12 +22,15 @@ var ( errExporterWrongLabel = errors.New("exporter called with wrong label") errNoConfig = errors.New("no config provided") errNoConn = errors.New("no conn provided") - errFailedToVerifyAuthTag = errors.New("failed to verify auth tag") - errTooShortRTCP = errors.New("packet is too short to be rtcp packet") + errTooShortRTP = errors.New("packet is too short to be RTP packet") + errTooShortRTCP = errors.New("packet is too short to be RTCP packet") errPayloadDiffers = errors.New("payload differs") errStartedChannelUsedIncorrectly = errors.New("started channel used incorrectly, should only be closed") errBadIVLength = errors.New("bad iv length in xorBytesCTR") errExceededMaxPackets = errors.New("exceeded the maximum number of packets") + errMKIAlreadyInUse = errors.New("MKI already in use") + errMKIIsNotEnabled = errors.New("MKI is not enabled") + errInvalidMKILength = errors.New("invalid MKI length") errStreamNotInited = errors.New("stream has not been inited, unable to close") errStreamAlreadyClosed = errors.New("stream is already closed") diff --git a/vendor/github.com/pion/srtp/v2/key_derivation.go b/vendor/github.com/pion/srtp/v2/key_derivation.go index 05f0f29fae2..f192fafcc7b 100644 --- a/vendor/github.com/pion/srtp/v2/key_derivation.go +++ b/vendor/github.com/pion/srtp/v2/key_derivation.go @@ -19,7 +19,6 @@ func aesCmKeyDerivation(label byte, masterKey, masterSalt []byte, indexOverKdr i // concatenation of the encryption key label 0x00 with (index DIV kdr), // - index is 'rollover count' and DIV is 'divided by' - nMasterKey := len(masterKey) nMasterSalt := len(masterSalt) prfIn := make([]byte, 16) @@ -33,11 +32,12 @@ func aesCmKeyDerivation(label byte, masterKey, masterSalt []byte, indexOverKdr i return nil, err } - out := make([]byte, ((outLen+nMasterKey)/nMasterKey)*nMasterKey) + nBlockSize := block.BlockSize() + out := make([]byte, ((outLen+nBlockSize-1)/nBlockSize)*nBlockSize) var i uint16 - for n := 0; n < outLen; n += block.BlockSize() { + for n := 0; n < outLen; n += nBlockSize { binary.BigEndian.PutUint16(prfIn[len(prfIn)-2:], i) - block.Encrypt(out[n:n+nMasterKey], prfIn) + block.Encrypt(out[n:n+nBlockSize], prfIn) i++ } return out[:outLen], nil diff --git a/vendor/github.com/pion/srtp/v2/keying.go b/vendor/github.com/pion/srtp/v2/keying.go index c5977c398cd..617f4d71c2e 100644 --- a/vendor/github.com/pion/srtp/v2/keying.go +++ 
b/vendor/github.com/pion/srtp/v2/keying.go @@ -14,12 +14,12 @@ type KeyingMaterialExporter interface { // extracting them from DTLS. This behavior is defined in RFC5764: // https://tools.ietf.org/html/rfc5764 func (c *Config) ExtractSessionKeysFromDTLS(exporter KeyingMaterialExporter, isClient bool) error { - keyLen, err := c.Profile.keyLen() + keyLen, err := c.Profile.KeyLen() if err != nil { return err } - saltLen, err := c.Profile.saltLen() + saltLen, err := c.Profile.SaltLen() if err != nil { return err } diff --git a/vendor/github.com/pion/srtp/v2/option.go b/vendor/github.com/pion/srtp/v2/option.go index 0c75096a2e1..708274fae04 100644 --- a/vendor/github.com/pion/srtp/v2/option.go +++ b/vendor/github.com/pion/srtp/v2/option.go @@ -71,3 +71,50 @@ type nopReplayDetector struct{} func (s *nopReplayDetector) Check(uint64) (func(), bool) { return func() {}, true } + +// MasterKeyIndicator sets RTP/RTCP MKI for the initial master key. Array passed as an argument will be +// copied as-is to encrypted SRTP/SRTCP packets, so it must be of proper length and in Big Endian format. +// All MKIs added later using Context.AddCipherForMKI must have the same length as the one used here. +func MasterKeyIndicator(mki []byte) ContextOption { + return func(c *Context) error { + if len(mki) > 0 { + c.sendMKI = make([]byte, len(mki)) + copy(c.sendMKI, mki) + } + return nil + } +} + +// SRTPEncryption enables SRTP encryption. +func SRTPEncryption() ContextOption { // nolint:revive + return func(c *Context) error { + c.encryptSRTP = true + return nil + } +} + +// SRTPNoEncryption disables SRTP encryption. This option is useful when you want to use NullCipher for SRTP and keep authentication only. +// It simplifies debugging and testing, but it is not recommended for production use. +func SRTPNoEncryption() ContextOption { // nolint:revive + return func(c *Context) error { + c.encryptSRTP = false + return nil + } +} + +// SRTCPEncryption enables SRTCP encryption. +func SRTCPEncryption() ContextOption { + return func(c *Context) error { + c.encryptSRTCP = true + return nil + } +} + +// SRTCPNoEncryption disables SRTCP encryption. This option is useful when you want to use NullCipher for SRTCP and keep authentication only. +// It simplifies debugging and testing, but it is not recommended for production use. +func SRTCPNoEncryption() ContextOption { + return func(c *Context) error { + c.encryptSRTCP = false + return nil + } +} diff --git a/vendor/github.com/pion/srtp/v2/protection_profile.go b/vendor/github.com/pion/srtp/v2/protection_profile.go index c9b0fce3a25..9384bf8f924 100644 --- a/vendor/github.com/pion/srtp/v2/protection_profile.go +++ b/vendor/github.com/pion/srtp/v2/protection_profile.go @@ -10,27 +10,41 @@ type ProtectionProfile uint16 // Supported protection profiles // See https://www.iana.org/assignments/srtp-protection/srtp-protection.xhtml +// +// AES128_CM_HMAC_SHA1_80 and AES128_CM_HMAC_SHA1_32 are valid SRTP profiles, but they do not have an DTLS-SRTP Protection Profiles ID assigned +// in RFC 5764. They were in earlier draft of this RFC: https://datatracker.ietf.org/doc/html/draft-ietf-avt-dtls-srtp-03#section-4.1.2 +// Their IDs are now marked as reserved in the IANA registry. Despite this Chrome supports them: +// https://chromium.googlesource.com/chromium/deps/libsrtp/+/84122798bb16927b1e676bd4f938a6e48e5bf2fe/srtp/include/srtp.h#694 +// +// Null profiles disable encryption, they are used for debugging and testing. They are not recommended for production use. 
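Putting the new context options and MKI methods together, a sketch of creating a context that tags outgoing packets with an MKI and later rotating to a second master key (the key, salt, and MKI byte values are placeholders):

package srtpmki

import (
	"github.com/pion/srtp/v2"
)

// newRotatingContext builds an SRTP context whose packets carry MKI 0x01 and
// pre-registers a second master key under MKI 0x02 for later rotation.
func newRotatingContext(key1, salt1, key2, salt2 []byte) (*srtp.Context, error) {
	// AES128_CM_HMAC_SHA1_80 expects a 16-byte master key and a 14-byte salt.
	ctx, err := srtp.CreateContext(key1, salt1,
		srtp.ProtectionProfileAes128CmHmacSha1_80,
		srtp.MasterKeyIndicator([]byte{0x01}))
	if err != nil {
		return nil, err
	}

	if err := ctx.AddCipherForMKI([]byte{0x02}, key2, salt2); err != nil {
		return nil, err
	}
	return ctx, nil
}

// rotate switches encryption over to the master key registered under MKI 0x02.
func rotate(ctx *srtp.Context) error {
	return ctx.SetSendMKI([]byte{0x02})
}
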
+// Use of them is equivalent to using ProtectionProfileAes128CmHmacSha1_NN profile with SRTPNoEncryption and SRTCPNoEncryption options. const ( ProtectionProfileAes128CmHmacSha1_80 ProtectionProfile = 0x0001 ProtectionProfileAes128CmHmacSha1_32 ProtectionProfile = 0x0002 + ProtectionProfileAes256CmHmacSha1_80 ProtectionProfile = 0x0003 + ProtectionProfileAes256CmHmacSha1_32 ProtectionProfile = 0x0004 + ProtectionProfileNullHmacSha1_80 ProtectionProfile = 0x0005 + ProtectionProfileNullHmacSha1_32 ProtectionProfile = 0x0006 ProtectionProfileAeadAes128Gcm ProtectionProfile = 0x0007 ProtectionProfileAeadAes256Gcm ProtectionProfile = 0x0008 ) -func (p ProtectionProfile) keyLen() (int, error) { +// KeyLen returns length of encryption key in bytes. For all profiles except NullHmacSha1_32 and NullHmacSha1_80 is is also the length of the session key. +func (p ProtectionProfile) KeyLen() (int, error) { switch p { - case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80, ProtectionProfileAeadAes128Gcm: + case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80, ProtectionProfileAeadAes128Gcm, ProtectionProfileNullHmacSha1_32, ProtectionProfileNullHmacSha1_80: return 16, nil - case ProtectionProfileAeadAes256Gcm: + case ProtectionProfileAeadAes256Gcm, ProtectionProfileAes256CmHmacSha1_32, ProtectionProfileAes256CmHmacSha1_80: return 32, nil default: return 0, fmt.Errorf("%w: %#v", errNoSuchSRTPProfile, p) } } -func (p ProtectionProfile) saltLen() (int, error) { +// SaltLen returns length of salt key in bytes. For all profiles except NullHmacSha1_32 and NullHmacSha1_80 is is also the length of the session salt. +func (p ProtectionProfile) SaltLen() (int, error) { switch p { - case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80: + case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80, ProtectionProfileAes256CmHmacSha1_32, ProtectionProfileAes256CmHmacSha1_80, ProtectionProfileNullHmacSha1_32, ProtectionProfileNullHmacSha1_80: return 14, nil case ProtectionProfileAeadAes128Gcm, ProtectionProfileAeadAes256Gcm: return 12, nil @@ -39,11 +53,12 @@ func (p ProtectionProfile) saltLen() (int, error) { } } -func (p ProtectionProfile) rtpAuthTagLen() (int, error) { +// AuthTagRTPLen returns length of RTP authentication tag in bytes for AES protection profiles. For AEAD ones it returns zero. +func (p ProtectionProfile) AuthTagRTPLen() (int, error) { switch p { - case ProtectionProfileAes128CmHmacSha1_80: + case ProtectionProfileAes128CmHmacSha1_80, ProtectionProfileAes256CmHmacSha1_80, ProtectionProfileNullHmacSha1_80: return 10, nil - case ProtectionProfileAes128CmHmacSha1_32: + case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes256CmHmacSha1_32, ProtectionProfileNullHmacSha1_32: return 4, nil case ProtectionProfileAeadAes128Gcm, ProtectionProfileAeadAes256Gcm: return 0, nil @@ -52,9 +67,10 @@ func (p ProtectionProfile) rtpAuthTagLen() (int, error) { } } -func (p ProtectionProfile) rtcpAuthTagLen() (int, error) { +// AuthTagRTCPLen returns length of RTCP authentication tag in bytes for AES protection profiles. For AEAD ones it returns zero. 
+func (p ProtectionProfile) AuthTagRTCPLen() (int, error) { switch p { - case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80: + case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80, ProtectionProfileAes256CmHmacSha1_32, ProtectionProfileAes256CmHmacSha1_80, ProtectionProfileNullHmacSha1_32, ProtectionProfileNullHmacSha1_80: return 10, nil case ProtectionProfileAeadAes128Gcm, ProtectionProfileAeadAes256Gcm: return 0, nil @@ -63,9 +79,10 @@ func (p ProtectionProfile) rtcpAuthTagLen() (int, error) { } } -func (p ProtectionProfile) aeadAuthTagLen() (int, error) { +// AEADAuthTagLen returns length of authentication tag in bytes for AEAD protection profiles. For AES ones it returns zero. +func (p ProtectionProfile) AEADAuthTagLen() (int, error) { switch p { - case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80: + case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80, ProtectionProfileAes256CmHmacSha1_32, ProtectionProfileAes256CmHmacSha1_80, ProtectionProfileNullHmacSha1_32, ProtectionProfileNullHmacSha1_80: return 0, nil case ProtectionProfileAeadAes128Gcm, ProtectionProfileAeadAes256Gcm: return 16, nil @@ -74,9 +91,10 @@ func (p ProtectionProfile) aeadAuthTagLen() (int, error) { } } -func (p ProtectionProfile) authKeyLen() (int, error) { +// AuthKeyLen returns length of authentication key in bytes for AES protection profiles. For AEAD ones it returns zero. +func (p ProtectionProfile) AuthKeyLen() (int, error) { switch p { - case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80: + case ProtectionProfileAes128CmHmacSha1_32, ProtectionProfileAes128CmHmacSha1_80, ProtectionProfileAes256CmHmacSha1_32, ProtectionProfileAes256CmHmacSha1_80, ProtectionProfileNullHmacSha1_32, ProtectionProfileNullHmacSha1_80: return 20, nil case ProtectionProfileAeadAes128Gcm, ProtectionProfileAeadAes256Gcm: return 0, nil @@ -84,3 +102,27 @@ func (p ProtectionProfile) authKeyLen() (int, error) { return 0, fmt.Errorf("%w: %#v", errNoSuchSRTPProfile, p) } } + +// String returns the name of the protection profile. 
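Because the length helpers on ProtectionProfile are exported now, callers can size key material and log the negotiated profile without duplicating these tables; a small sketch using the values defined above (String() is added just below):

package main

import (
	"log"

	"github.com/pion/srtp/v2"
)

func main() {
	profile := srtp.ProtectionProfileAes128CmHmacSha1_80

	keyLen, _ := profile.KeyLen()          // 16
	saltLen, _ := profile.SaltLen()        // 14
	rtpTag, _ := profile.AuthTagRTPLen()   // 10
	rtcpTag, _ := profile.AuthTagRTCPLen() // 10

	log.Printf("%s: key=%d salt=%d rtp-tag=%d rtcp-tag=%d",
		profile, keyLen, saltLen, rtpTag, rtcpTag)
}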
+func (p ProtectionProfile) String() string { + switch p { + case ProtectionProfileAes128CmHmacSha1_80: + return "SRTP_AES128_CM_HMAC_SHA1_80" + case ProtectionProfileAes128CmHmacSha1_32: + return "SRTP_AES128_CM_HMAC_SHA1_32" + case ProtectionProfileAes256CmHmacSha1_80: + return "SRTP_AES256_CM_HMAC_SHA1_80" + case ProtectionProfileAes256CmHmacSha1_32: + return "SRTP_AES256_CM_HMAC_SHA1_32" + case ProtectionProfileAeadAes128Gcm: + return "SRTP_AEAD_AES_128_GCM" + case ProtectionProfileAeadAes256Gcm: + return "SRTP_AEAD_AES_256_GCM" + case ProtectionProfileNullHmacSha1_80: + return "SRTP_NULL_HMAC_SHA1_80" + case ProtectionProfileNullHmacSha1_32: + return "SRTP_NULL_HMAC_SHA1_32" + default: + return fmt.Sprintf("Unknown SRTP profile: %#v", p) + } +} diff --git a/vendor/github.com/pion/srtp/v2/srtcp.go b/vendor/github.com/pion/srtp/v2/srtcp.go index 7fd0746bd4c..6d1a1c1d99b 100644 --- a/vendor/github.com/pion/srtp/v2/srtcp.go +++ b/vendor/github.com/pion/srtp/v2/srtcp.go @@ -12,23 +12,22 @@ import ( const maxSRTCPIndex = 0x7FFFFFFF -func (c *Context) decryptRTCP(dst, encrypted []byte) ([]byte, error) { - out := allocateIfMismatch(dst, encrypted) +const srtcpHeaderSize = 8 - authTagLen, err := c.cipher.rtcpAuthTagLen() +func (c *Context) decryptRTCP(dst, encrypted []byte) ([]byte, error) { + authTagLen, err := c.cipher.AuthTagRTCPLen() if err != nil { return nil, err } - aeadAuthTagLen, err := c.cipher.aeadAuthTagLen() + aeadAuthTagLen, err := c.cipher.AEADAuthTagLen() if err != nil { return nil, err } - tailOffset := len(encrypted) - (authTagLen + srtcpIndexSize) + mkiLen := len(c.sendMKI) - if tailOffset < aeadAuthTagLen { + // Verify that encrypted packet is long enough + if len(encrypted) < (srtcpHeaderSize + aeadAuthTagLen + srtcpIndexSize + mkiLen + authTagLen) { return nil, fmt.Errorf("%w: %d", errTooShortRTCP, len(encrypted)) - } else if isEncrypted := encrypted[tailOffset] >> 7; isEncrypted == 0 { - return out, nil } index := c.cipher.getRTCPIndex(encrypted) @@ -40,7 +39,19 @@ func (c *Context) decryptRTCP(dst, encrypted []byte) ([]byte, error) { return nil, &duplicatedError{Proto: "srtcp", SSRC: ssrc, Index: index} } - out, err = c.cipher.decryptRTCP(out, encrypted, index, ssrc) + cipher := c.cipher + if len(c.mkis) > 0 { + // Find cipher for MKI + actualMKI := c.cipher.getMKI(encrypted, false) + cipher, ok = c.mkis[string(actualMKI)] + if !ok { + return nil, ErrMKINotFound + } + } + + out := allocateIfMismatch(dst, encrypted) + + out, err = cipher.decryptRTCP(out, encrypted, index, ssrc) if err != nil { return nil, err } @@ -63,6 +74,10 @@ func (c *Context) DecryptRTCP(dst, encrypted []byte, header *rtcp.Header) ([]byt } func (c *Context) encryptRTCP(dst, decrypted []byte) ([]byte, error) { + if len(decrypted) < srtcpHeaderSize { + return nil, fmt.Errorf("%w: %d", errTooShortRTCP, len(decrypted)) + } + ssrc := binary.BigEndian.Uint32(decrypted[4:]) s := c.getSRTCPSSRCState(ssrc) diff --git a/vendor/github.com/pion/srtp/v2/srtp.go b/vendor/github.com/pion/srtp/v2/srtp.go index 42c71be01d6..56828bcf4a7 100644 --- a/vendor/github.com/pion/srtp/v2/srtp.go +++ b/vendor/github.com/pion/srtp/v2/srtp.go @@ -5,10 +5,27 @@ package srtp import ( + "fmt" + "github.com/pion/rtp" ) func (c *Context) decryptRTP(dst, ciphertext []byte, header *rtp.Header, headerLen int) ([]byte, error) { + authTagLen, err := c.cipher.AuthTagRTPLen() + if err != nil { + return nil, err + } + aeadAuthTagLen, err := c.cipher.AEADAuthTagLen() + if err != nil { + return nil, err + } + mkiLen := len(c.sendMKI) + + // 
Verify that encrypted packet is long enough + if len(ciphertext) < (headerLen + aeadAuthTagLen + mkiLen + authTagLen) { + return nil, fmt.Errorf("%w: %d", errTooShortRTP, len(ciphertext)) + } + s := c.getSRTPSSRCState(header.SSRC) roc, diff, _ := s.nextRolloverCount(header.SequenceNumber) @@ -21,13 +38,19 @@ func (c *Context) decryptRTP(dst, ciphertext []byte, header *rtp.Header, headerL } } - authTagLen, err := c.cipher.rtpAuthTagLen() - if err != nil { - return nil, err + cipher := c.cipher + if len(c.mkis) > 0 { + // Find cipher for MKI + actualMKI := c.cipher.getMKI(ciphertext, true) + cipher, ok = c.mkis[string(actualMKI)] + if !ok { + return nil, ErrMKINotFound + } } - dst = growBufferSize(dst, len(ciphertext)-authTagLen) - dst, err = c.cipher.decryptRTP(dst, ciphertext, header, headerLen, roc) + dst = growBufferSize(dst, len(ciphertext)-authTagLen-len(c.sendMKI)) + + dst, err = cipher.decryptRTP(dst, ciphertext, header, headerLen, roc) if err != nil { return nil, err } diff --git a/vendor/github.com/pion/srtp/v2/srtp_cipher.go b/vendor/github.com/pion/srtp/v2/srtp_cipher.go index db501472f23..da745e7cd19 100644 --- a/vendor/github.com/pion/srtp/v2/srtp_cipher.go +++ b/vendor/github.com/pion/srtp/v2/srtp_cipher.go @@ -8,14 +8,15 @@ import "github.com/pion/rtp" // cipher represents a implementation of one // of the SRTP Specific ciphers type srtpCipher interface { - // authTagLen returns auth key length of the cipher. + // AuthTagRTPLen/AuthTagRTCPLen return auth key length of the cipher. // See the note below. - rtpAuthTagLen() (int, error) - rtcpAuthTagLen() (int, error) - // aeadAuthTagLen returns AEAD auth key length of the cipher. + AuthTagRTPLen() (int, error) + AuthTagRTCPLen() (int, error) + // AEADAuthTagLen returns AEAD auth key length of the cipher. // See the note below. 
- aeadAuthTagLen() (int, error) + AEADAuthTagLen() (int, error) getRTCPIndex([]byte) uint32 + getMKI([]byte, bool) []byte encryptRTP([]byte, *rtp.Header, []byte, uint32) ([]byte, error) encryptRTCP([]byte, []byte, uint32, uint32) ([]byte, error) diff --git a/vendor/github.com/pion/srtp/v2/srtp_cipher_aead_aes_gcm.go b/vendor/github.com/pion/srtp/v2/srtp_cipher_aead_aes_gcm.go index 90643d92d29..ac50c39bb45 100644 --- a/vendor/github.com/pion/srtp/v2/srtp_cipher_aead_aes_gcm.go +++ b/vendor/github.com/pion/srtp/v2/srtp_cipher_aead_aes_gcm.go @@ -7,6 +7,7 @@ import ( "crypto/aes" "crypto/cipher" "encoding/binary" + "fmt" "github.com/pion/rtp" ) @@ -21,10 +22,18 @@ type srtpCipherAeadAesGcm struct { srtpCipher, srtcpCipher cipher.AEAD srtpSessionSalt, srtcpSessionSalt []byte + + mki []byte + + srtpEncrypted, srtcpEncrypted bool } -func newSrtpCipherAeadAesGcm(profile ProtectionProfile, masterKey, masterSalt []byte) (*srtpCipherAeadAesGcm, error) { - s := &srtpCipherAeadAesGcm{ProtectionProfile: profile} +func newSrtpCipherAeadAesGcm(profile ProtectionProfile, masterKey, masterSalt, mki []byte, encryptSRTP, encryptSRTCP bool) (*srtpCipherAeadAesGcm, error) { + s := &srtpCipherAeadAesGcm{ + ProtectionProfile: profile, + srtpEncrypted: encryptSRTP, + srtcpEncrypted: encryptSRTCP, + } srtpSessionKey, err := aesCmKeyDerivation(labelSRTPEncryption, masterKey, masterSalt, 0, len(masterKey)) if err != nil { @@ -62,16 +71,22 @@ func newSrtpCipherAeadAesGcm(profile ProtectionProfile, masterKey, masterSalt [] return nil, err } + mkiLen := len(mki) + if mkiLen > 0 { + s.mki = make([]byte, mkiLen) + copy(s.mki, mki) + } + return s, nil } func (s *srtpCipherAeadAesGcm) encryptRTP(dst []byte, header *rtp.Header, payload []byte, roc uint32) (ciphertext []byte, err error) { // Grow the given buffer to fit the output. - authTagLen, err := s.aeadAuthTagLen() + authTagLen, err := s.AEADAuthTagLen() if err != nil { return nil, err } - dst = growBufferSize(dst, header.MarshalSize()+len(payload)+authTagLen) + dst = growBufferSize(dst, header.MarshalSize()+len(payload)+authTagLen+len(s.mki)) n, err := header.MarshalTo(dst) if err != nil { @@ -79,29 +94,52 @@ func (s *srtpCipherAeadAesGcm) encryptRTP(dst []byte, header *rtp.Header, payloa } iv := s.rtpInitializationVector(header, roc) - s.srtpCipher.Seal(dst[n:n], iv[:], payload, dst[:n]) + if s.srtpEncrypted { + s.srtpCipher.Seal(dst[n:n], iv[:], payload, dst[:n]) + } else { + clearLen := n + len(payload) + copy(dst[n:], payload) + s.srtpCipher.Seal(dst[clearLen:clearLen], iv[:], nil, dst[:clearLen]) + } + + // Add MKI after the encrypted payload + if len(s.mki) > 0 { + copy(dst[len(dst)-len(s.mki):], s.mki) + } + return dst, nil } func (s *srtpCipherAeadAesGcm) decryptRTP(dst, ciphertext []byte, header *rtp.Header, headerLen int, roc uint32) ([]byte, error) { // Grow the given buffer to fit the output. - authTagLen, err := s.aeadAuthTagLen() + authTagLen, err := s.AEADAuthTagLen() if err != nil { return nil, err } - nDst := len(ciphertext) - authTagLen - if nDst < 0 { + nDst := len(ciphertext) - authTagLen - len(s.mki) + if nDst < headerLen { // Size of ciphertext is shorter than AEAD auth tag len. 
- return nil, errFailedToVerifyAuthTag + return nil, ErrFailedToVerifyAuthTag } dst = growBufferSize(dst, nDst) iv := s.rtpInitializationVector(header, roc) - if _, err := s.srtpCipher.Open( - dst[headerLen:headerLen], iv[:], ciphertext[headerLen:], ciphertext[:headerLen], - ); err != nil { - return nil, err + nEnd := len(ciphertext) - len(s.mki) + if s.srtpEncrypted { + if _, err := s.srtpCipher.Open( + dst[headerLen:headerLen], iv[:], ciphertext[headerLen:nEnd], ciphertext[:headerLen], + ); err != nil { + return nil, fmt.Errorf("%s: %s", ErrFailedToVerifyAuthTag, err) + } + } else { + nDataEnd := nEnd - authTagLen + if _, err := s.srtpCipher.Open( + nil, iv[:], ciphertext[nDataEnd:nEnd], ciphertext[:nDataEnd], + ); err != nil { + return nil, fmt.Errorf("%s: %w", ErrFailedToVerifyAuthTag, err) + } + copy(dst[headerLen:], ciphertext[headerLen:nDataEnd]) } copy(dst[:headerLen], ciphertext[:headerLen]) @@ -109,43 +147,71 @@ func (s *srtpCipherAeadAesGcm) decryptRTP(dst, ciphertext []byte, header *rtp.He } func (s *srtpCipherAeadAesGcm) encryptRTCP(dst, decrypted []byte, srtcpIndex uint32, ssrc uint32) ([]byte, error) { - authTagLen, err := s.aeadAuthTagLen() + authTagLen, err := s.AEADAuthTagLen() if err != nil { return nil, err } aadPos := len(decrypted) + authTagLen // Grow the given buffer to fit the output. - dst = growBufferSize(dst, aadPos+srtcpIndexSize) + dst = growBufferSize(dst, aadPos+srtcpIndexSize+len(s.mki)) iv := s.rtcpInitializationVector(srtcpIndex, ssrc) - aad := s.rtcpAdditionalAuthenticatedData(decrypted, srtcpIndex) - - s.srtcpCipher.Seal(dst[8:8], iv[:], decrypted[8:], aad[:]) + if s.srtcpEncrypted { + aad := s.rtcpAdditionalAuthenticatedData(decrypted, srtcpIndex) + copy(dst[:8], decrypted[:8]) + copy(dst[aadPos:aadPos+4], aad[8:12]) + s.srtcpCipher.Seal(dst[8:8], iv[:], decrypted[8:], aad[:]) + } else { + // Copy the packet unencrypted. + copy(dst, decrypted) + // Append the SRTCP index to the end of the packet - this will form the AAD. + binary.BigEndian.PutUint32(dst[len(decrypted):], srtcpIndex) + // Generate the authentication tag. + tag := make([]byte, authTagLen) + s.srtcpCipher.Seal(tag[0:0], iv[:], nil, dst[:len(decrypted)+4]) + // Copy index to the proper place. + copy(dst[aadPos:], dst[len(decrypted):len(decrypted)+4]) + // Copy the auth tag after RTCP payload. + copy(dst[len(decrypted):], tag) + } - copy(dst[:8], decrypted[:8]) - copy(dst[aadPos:aadPos+4], aad[8:12]) + copy(dst[aadPos+4:], s.mki) return dst, nil } func (s *srtpCipherAeadAesGcm) decryptRTCP(dst, encrypted []byte, srtcpIndex, ssrc uint32) ([]byte, error) { - aadPos := len(encrypted) - srtcpIndexSize + aadPos := len(encrypted) - srtcpIndexSize - len(s.mki) // Grow the given buffer to fit the output. - authTagLen, err := s.aeadAuthTagLen() + authTagLen, err := s.AEADAuthTagLen() if err != nil { return nil, err } nDst := aadPos - authTagLen if nDst < 0 { // Size of ciphertext is shorter than AEAD auth tag len. 
- return nil, errFailedToVerifyAuthTag + return nil, ErrFailedToVerifyAuthTag } dst = growBufferSize(dst, nDst) + isEncrypted := encrypted[aadPos]>>7 != 0 iv := s.rtcpInitializationVector(srtcpIndex, ssrc) - aad := s.rtcpAdditionalAuthenticatedData(encrypted, srtcpIndex) - - if _, err := s.srtcpCipher.Open(dst[8:8], iv[:], encrypted[8:aadPos], aad[:]); err != nil { - return nil, err + if isEncrypted { + aad := s.rtcpAdditionalAuthenticatedData(encrypted, srtcpIndex) + if _, err := s.srtcpCipher.Open(dst[8:8], iv[:], encrypted[8:aadPos], aad[:]); err != nil { + return nil, fmt.Errorf("%s: %w", ErrFailedToVerifyAuthTag, err) + } + } else { + // Prepare AAD for received packet. + dataEnd := aadPos - authTagLen + aad := make([]byte, dataEnd+4) + copy(aad, encrypted[:dataEnd]) + copy(aad[dataEnd:], encrypted[aadPos:aadPos+4]) + // Verify the auth tag. + if _, err := s.srtcpCipher.Open(nil, iv[:], encrypted[dataEnd:aadPos], aad); err != nil { + return nil, fmt.Errorf("%s: %w", ErrFailedToVerifyAuthTag, err) + } + // Copy the unencrypted payload. + copy(dst[8:], encrypted[8:dataEnd]) } copy(dst[:8], encrypted[:8]) @@ -205,5 +271,15 @@ func (s *srtpCipherAeadAesGcm) rtcpAdditionalAuthenticatedData(rtcpPacket []byte } func (s *srtpCipherAeadAesGcm) getRTCPIndex(in []byte) uint32 { - return binary.BigEndian.Uint32(in[len(in)-4:]) &^ (rtcpEncryptionFlag << 24) + return binary.BigEndian.Uint32(in[len(in)-len(s.mki)-4:]) &^ (rtcpEncryptionFlag << 24) +} + +func (s *srtpCipherAeadAesGcm) getMKI(in []byte, _ bool) []byte { + mkiLen := len(s.mki) + if mkiLen == 0 { + return nil + } + + tailOffset := len(in) - mkiLen + return in[tailOffset:] } diff --git a/vendor/github.com/pion/srtp/v2/srtp_cipher_aes_cm_hmac_sha1.go b/vendor/github.com/pion/srtp/v2/srtp_cipher_aes_cm_hmac_sha1.go index d56e6afb18f..aa673fa94b4 100644 --- a/vendor/github.com/pion/srtp/v2/srtp_cipher_aes_cm_hmac_sha1.go +++ b/vendor/github.com/pion/srtp/v2/srtp_cipher_aes_cm_hmac_sha1.go @@ -21,14 +21,28 @@ type srtpCipherAesCmHmacSha1 struct { srtpSessionSalt []byte srtpSessionAuth hash.Hash srtpBlock cipher.Block + srtpEncrypted bool srtcpSessionSalt []byte srtcpSessionAuth hash.Hash srtcpBlock cipher.Block + srtcpEncrypted bool + + mki []byte } -func newSrtpCipherAesCmHmacSha1(profile ProtectionProfile, masterKey, masterSalt []byte) (*srtpCipherAesCmHmacSha1, error) { - s := &srtpCipherAesCmHmacSha1{ProtectionProfile: profile} +func newSrtpCipherAesCmHmacSha1(profile ProtectionProfile, masterKey, masterSalt, mki []byte, encryptSRTP, encryptSRTCP bool) (*srtpCipherAesCmHmacSha1, error) { + if profile == ProtectionProfileNullHmacSha1_80 || profile == ProtectionProfileNullHmacSha1_32 { + encryptSRTP = false + encryptSRTCP = false + } + + s := &srtpCipherAesCmHmacSha1{ + ProtectionProfile: profile, + srtpEncrypted: encryptSRTP, + srtcpEncrypted: encryptSRTCP, + } + srtpSessionKey, err := aesCmKeyDerivation(labelSRTPEncryption, masterKey, masterSalt, 0, len(masterKey)) if err != nil { return nil, err @@ -49,7 +63,7 @@ func newSrtpCipherAesCmHmacSha1(profile ProtectionProfile, masterKey, masterSalt return nil, err } - authKeyLen, err := profile.authKeyLen() + authKeyLen, err := profile.AuthKeyLen() if err != nil { return nil, err } @@ -66,16 +80,23 @@ func newSrtpCipherAesCmHmacSha1(profile ProtectionProfile, masterKey, masterSalt s.srtcpSessionAuth = hmac.New(sha1.New, srtcpSessionAuthTag) s.srtpSessionAuth = hmac.New(sha1.New, srtpSessionAuthTag) + + mkiLen := len(mki) + if mkiLen > 0 { + s.mki = make([]byte, mkiLen) + copy(s.mki, mki) + } 
+ return s, nil } func (s *srtpCipherAesCmHmacSha1) encryptRTP(dst []byte, header *rtp.Header, payload []byte, roc uint32) (ciphertext []byte, err error) { // Grow the given buffer to fit the output. - authTagLen, err := s.rtpAuthTagLen() + authTagLen, err := s.AuthTagRTPLen() if err != nil { return nil, err } - dst = growBufferSize(dst, header.MarshalSize()+len(payload)+authTagLen) + dst = growBufferSize(dst, header.MarshalSize()+len(payload)+len(s.mki)+authTagLen) // Copy the header unencrypted. n, err := header.MarshalTo(dst) @@ -84,9 +105,13 @@ func (s *srtpCipherAesCmHmacSha1) encryptRTP(dst []byte, header *rtp.Header, pay } // Encrypt the payload - counter := generateCounter(header.SequenceNumber, roc, header.SSRC, s.srtpSessionSalt) - if err = xorBytesCTR(s.srtpBlock, counter[:], dst[n:], payload); err != nil { - return nil, err + if s.srtpEncrypted { + counter := generateCounter(header.SequenceNumber, roc, header.SSRC, s.srtpSessionSalt) + if err = xorBytesCTR(s.srtpBlock, counter[:], dst[n:], payload); err != nil { + return nil, err + } + } else { + copy(dst[n:], payload) } n += len(payload) @@ -96,6 +121,12 @@ func (s *srtpCipherAesCmHmacSha1) encryptRTP(dst []byte, header *rtp.Header, pay return nil, err } + // Append the MKI (if used) + if len(s.mki) > 0 { + copy(dst[n:], s.mki) + n += len(s.mki) + } + // Write the auth tag to the dest. copy(dst[n:], authTag) @@ -104,12 +135,14 @@ func (s *srtpCipherAesCmHmacSha1) encryptRTP(dst []byte, header *rtp.Header, pay func (s *srtpCipherAesCmHmacSha1) decryptRTP(dst, ciphertext []byte, header *rtp.Header, headerLen int, roc uint32) ([]byte, error) { // Split the auth tag and the cipher text into two parts. - authTagLen, err := s.rtpAuthTagLen() + authTagLen, err := s.AuthTagRTPLen() if err != nil { return nil, err } + + // Split the auth tag and the cipher text into two parts. actualTag := ciphertext[len(ciphertext)-authTagLen:] - ciphertext = ciphertext[:len(ciphertext)-authTagLen] + ciphertext = ciphertext[:len(ciphertext)-len(s.mki)-authTagLen] // Generate the auth tag we expect to see from the ciphertext. expectedTag, err := s.generateSrtpAuthTag(ciphertext, roc) @@ -120,61 +153,93 @@ func (s *srtpCipherAesCmHmacSha1) decryptRTP(dst, ciphertext []byte, header *rtp // See if the auth tag actually matches. // We use a constant time comparison to prevent timing attacks. if subtle.ConstantTimeCompare(actualTag, expectedTag) != 1 { - return nil, errFailedToVerifyAuthTag + return nil, ErrFailedToVerifyAuthTag } // Write the plaintext header to the destination buffer. copy(dst, ciphertext[:headerLen]) // Decrypt the ciphertext for the payload. 
- counter := generateCounter(header.SequenceNumber, roc, header.SSRC, s.srtpSessionSalt) - err = xorBytesCTR( - s.srtpBlock, counter[:], dst[headerLen:], ciphertext[headerLen:], - ) - return dst, err + if s.srtpEncrypted { + counter := generateCounter(header.SequenceNumber, roc, header.SSRC, s.srtpSessionSalt) + err = xorBytesCTR( + s.srtpBlock, counter[:], dst[headerLen:], ciphertext[headerLen:], + ) + if err != nil { + return nil, err + } + } else { + copy(dst[headerLen:], ciphertext[headerLen:]) + } + return dst, nil } func (s *srtpCipherAesCmHmacSha1) encryptRTCP(dst, decrypted []byte, srtcpIndex uint32, ssrc uint32) ([]byte, error) { dst = allocateIfMismatch(dst, decrypted) // Encrypt everything after header - counter := generateCounter(uint16(srtcpIndex&0xffff), srtcpIndex>>16, ssrc, s.srtcpSessionSalt) - if err := xorBytesCTR(s.srtcpBlock, counter[:], dst[8:], dst[8:]); err != nil { - return nil, err + if s.srtcpEncrypted { + counter := generateCounter(uint16(srtcpIndex&0xffff), srtcpIndex>>16, ssrc, s.srtcpSessionSalt) + if err := xorBytesCTR(s.srtcpBlock, counter[:], dst[8:], dst[8:]); err != nil { + return nil, err + } + + // Add SRTCP Index and set Encryption bit + dst = append(dst, make([]byte, 4)...) + binary.BigEndian.PutUint32(dst[len(dst)-4:], srtcpIndex) + dst[len(dst)-4] |= 0x80 + } else { + // Copy the decrypted payload as is + copy(dst[8:], decrypted[8:]) + + // Add SRTCP Index with Encryption bit cleared + dst = append(dst, make([]byte, 4)...) + binary.BigEndian.PutUint32(dst[len(dst)-4:], srtcpIndex) } - // Add SRTCP Index and set Encryption bit - dst = append(dst, make([]byte, 4)...) - binary.BigEndian.PutUint32(dst[len(dst)-4:], srtcpIndex) - dst[len(dst)-4] |= 0x80 - + // Generate the authentication tag authTag, err := s.generateSrtcpAuthTag(dst) if err != nil { return nil, err } + + // Include the MKI if provided + if len(s.mki) > 0 { + dst = append(dst, s.mki...) 
+ } + + // Append the auth tag at the end of the buffer return append(dst, authTag...), nil } func (s *srtpCipherAesCmHmacSha1) decryptRTCP(out, encrypted []byte, index, ssrc uint32) ([]byte, error) { - authTagLen, err := s.rtcpAuthTagLen() + authTagLen, err := s.AuthTagRTCPLen() if err != nil { return nil, err } - tailOffset := len(encrypted) - (authTagLen + srtcpIndexSize) + tailOffset := len(encrypted) - (authTagLen + len(s.mki) + srtcpIndexSize) + if tailOffset < 8 { + return nil, errTooShortRTCP + } out = out[0:tailOffset] - expectedTag, err := s.generateSrtcpAuthTag(encrypted[:len(encrypted)-authTagLen]) + expectedTag, err := s.generateSrtcpAuthTag(encrypted[:len(encrypted)-len(s.mki)-authTagLen]) if err != nil { return nil, err } actualTag := encrypted[len(encrypted)-authTagLen:] if subtle.ConstantTimeCompare(actualTag, expectedTag) != 1 { - return nil, errFailedToVerifyAuthTag + return nil, ErrFailedToVerifyAuthTag } - counter := generateCounter(uint16(index&0xffff), index>>16, ssrc, s.srtcpSessionSalt) - err = xorBytesCTR(s.srtcpBlock, counter[:], out[8:], out[8:]) + isEncrypted := encrypted[tailOffset]>>7 != 0 + if isEncrypted { + counter := generateCounter(uint16(index&0xffff), index>>16, ssrc, s.srtcpSessionSalt) + err = xorBytesCTR(s.srtcpBlock, counter[:], out[8:], out[8:]) + } else { + copy(out[8:], encrypted[8:]) + } return out, err } @@ -210,7 +275,7 @@ func (s *srtpCipherAesCmHmacSha1) generateSrtpAuthTag(buf []byte, roc uint32) ([ } // Truncate the hash to the size indicated by the profile - authTagLen, err := s.rtpAuthTagLen() + authTagLen, err := s.AuthTagRTPLen() if err != nil { return nil, err } @@ -234,7 +299,7 @@ func (s *srtpCipherAesCmHmacSha1) generateSrtcpAuthTag(buf []byte) ([]byte, erro if _, err := s.srtcpSessionAuth.Write(buf); err != nil { return nil, err } - authTagLen, err := s.rtcpAuthTagLen() + authTagLen, err := s.AuthTagRTCPLen() if err != nil { return nil, err } @@ -243,8 +308,24 @@ func (s *srtpCipherAesCmHmacSha1) generateSrtcpAuthTag(buf []byte) ([]byte, erro } func (s *srtpCipherAesCmHmacSha1) getRTCPIndex(in []byte) uint32 { - authTagLen, _ := s.rtcpAuthTagLen() - tailOffset := len(in) - (authTagLen + srtcpIndexSize) + authTagLen, _ := s.AuthTagRTCPLen() + tailOffset := len(in) - (authTagLen + srtcpIndexSize + len(s.mki)) srtcpIndexBuffer := in[tailOffset : tailOffset+srtcpIndexSize] return binary.BigEndian.Uint32(srtcpIndexBuffer) &^ (1 << 31) } + +func (s *srtpCipherAesCmHmacSha1) getMKI(in []byte, rtp bool) []byte { + mkiLen := len(s.mki) + if mkiLen == 0 { + return nil + } + + var authTagLen int + if rtp { + authTagLen, _ = s.AuthTagRTPLen() + } else { + authTagLen, _ = s.AuthTagRTCPLen() + } + tailOffset := len(in) - (authTagLen + mkiLen) + return in[tailOffset : tailOffset+mkiLen] +} diff --git a/vendor/github.com/pion/transport/v2/packetio/buffer.go b/vendor/github.com/pion/transport/v2/packetio/buffer.go index b487457bec0..9f91b047515 100644 --- a/vendor/github.com/pion/transport/v2/packetio/buffer.go +++ b/vendor/github.com/pion/transport/v2/packetio/buffer.go @@ -173,17 +173,11 @@ func (b *Buffer) Write(packet []byte) (int, error) { } b.count++ - waiting := b.waiting - b.waiting = false - - b.mutex.Unlock() - - if waiting { - select { - case b.notify <- struct{}{}: - default: - } + select { + case b.notify <- struct{}{}: + default: } + b.mutex.Unlock() return len(packet), nil } @@ -245,7 +239,6 @@ func (b *Buffer) Read(packet []byte) (n int, err error) { //nolint:gocognit } b.count-- - b.waiting = false b.mutex.Unlock() if copied < 
count { @@ -258,8 +251,6 @@ func (b *Buffer) Read(packet []byte) (n int, err error) { //nolint:gocognit b.mutex.Unlock() return 0, io.EOF } - - b.waiting = true b.mutex.Unlock() select { @@ -280,19 +271,11 @@ func (b *Buffer) Close() (err error) { return nil } - waiting := b.waiting b.waiting = false b.closed = true - + close(b.notify) b.mutex.Unlock() - if waiting { - select { - case b.notify <- struct{}{}: - default: - } - } - return nil } diff --git a/vendor/github.com/pion/transport/v2/stdnet/net.go b/vendor/github.com/pion/transport/v2/stdnet/net.go index fa4753b51ab..a2cf96438df 100644 --- a/vendor/github.com/pion/transport/v2/stdnet/net.go +++ b/vendor/github.com/pion/transport/v2/stdnet/net.go @@ -10,6 +10,7 @@ import ( "net" "github.com/pion/transport/v2" + "github.com/wlynxg/anet" ) const ( @@ -38,15 +39,15 @@ var _ transport.Net = &Net{} func (n *Net) UpdateInterfaces() error { ifs := []*transport.Interface{} - oifs, err := net.Interfaces() + oifs, err := anet.Interfaces() if err != nil { return err } - for _, oif := range oifs { - ifc := transport.NewInterface(oif) + for i := range oifs { + ifc := transport.NewInterface(oifs[i]) - addrs, err := oif.Addrs() + addrs, err := anet.InterfaceAddrsByInterface(&oifs[i]) if err != nil { return err } diff --git a/vendor/github.com/pion/webrtc/v3/datachannel.go b/vendor/github.com/pion/webrtc/v3/datachannel.go index f7c9511b350..c3ce10b9e32 100644 --- a/vendor/github.com/pion/webrtc/v3/datachannel.go +++ b/vendor/github.com/pion/webrtc/v3/datachannel.go @@ -40,6 +40,8 @@ type DataChannel struct { readyState atomic.Value // DataChannelState bufferedAmountLowThreshold uint64 detachCalled bool + readLoopActive chan struct{} + isGracefulClosed bool // The binaryType represents attribute MUST, on getting, return the value to // which it was last set. On setting, if the new value is either the string @@ -225,6 +227,10 @@ func (d *DataChannel) OnOpen(f func()) { func (d *DataChannel) onOpen() { d.mu.RLock() handler := d.onOpenHandler + if d.isGracefulClosed { + d.mu.RUnlock() + return + } d.mu.RUnlock() if handler != nil { @@ -252,6 +258,10 @@ func (d *DataChannel) OnDial(f func()) { func (d *DataChannel) onDial() { d.mu.RLock() handler := d.onDialHandler + if d.isGracefulClosed { + d.mu.RUnlock() + return + } d.mu.RUnlock() if handler != nil { @@ -261,6 +271,10 @@ func (d *DataChannel) onDial() { // OnClose sets an event handler which is invoked when // the underlying data transport has been closed. +// Note: Due to backwards compatibility, there is a chance that +// OnClose can be called, even if the GracefulClose is used. +// If this is the case for you, you can deregister OnClose +// prior to GracefulClose. 
func (d *DataChannel) OnClose(f func()) { d.mu.Lock() defer d.mu.Unlock() @@ -292,6 +306,10 @@ func (d *DataChannel) OnMessage(f func(msg DataChannelMessage)) { func (d *DataChannel) onMessage(msg DataChannelMessage) { d.mu.RLock() handler := d.onMessageHandler + if d.isGracefulClosed { + d.mu.RUnlock() + return + } d.mu.RUnlock() if handler == nil { @@ -302,6 +320,10 @@ func (d *DataChannel) onMessage(msg DataChannelMessage) { func (d *DataChannel) handleOpen(dc *datachannel.DataChannel, isRemote, isAlreadyNegotiated bool) { d.mu.Lock() + if d.isGracefulClosed { + d.mu.Unlock() + return + } d.dataChannel = dc bufferedAmountLowThreshold := d.bufferedAmountLowThreshold onBufferedAmountLow := d.onBufferedAmountLow @@ -326,7 +348,12 @@ func (d *DataChannel) handleOpen(dc *datachannel.DataChannel, isRemote, isAlread d.mu.Lock() defer d.mu.Unlock() + if d.isGracefulClosed { + return + } + if !d.api.settingEngine.detach.DataChannels { + d.readLoopActive = make(chan struct{}) go d.readLoop() } } @@ -342,6 +369,10 @@ func (d *DataChannel) OnError(f func(err error)) { func (d *DataChannel) onError(err error) { d.mu.RLock() handler := d.onErrorHandler + if d.isGracefulClosed { + d.mu.RUnlock() + return + } d.mu.RUnlock() if handler != nil { @@ -356,6 +387,12 @@ var rlBufPool = sync.Pool{New: func() interface{} { }} func (d *DataChannel) readLoop() { + defer func() { + d.mu.Lock() + readLoopActive := d.readLoopActive + d.mu.Unlock() + defer close(readLoopActive) + }() for { buffer := rlBufPool.Get().([]byte) //nolint:forcetypeassert n, isString, err := d.dataChannel.ReadDataChannel(buffer) @@ -438,7 +475,32 @@ func (d *DataChannel) Detach() (datachannel.ReadWriteCloser, error) { // Close Closes the DataChannel. It may be called regardless of whether // the DataChannel object was created by this peer or the remote peer. func (d *DataChannel) Close() error { + return d.close(false) +} + +// GracefulClose Closes the DataChannel. It may be called regardless of whether +// the DataChannel object was created by this peer or the remote peer. It also waits +// for any goroutines it started to complete. This is only safe to call outside of +// DataChannel callbacks or if in a callback, in its own goroutine. +func (d *DataChannel) GracefulClose() error { + return d.close(true) +} + +// Normally, close only stops writes from happening, so graceful=true +// will wait for reads to be finished based on underlying SCTP association +// closure or a SCTP reset stream from the other side. This is safe to call +// with graceful=true after tearing down a PeerConnection but not +// necessarily before. For example, if you used a vnet and dropped all packets +// right before closing the DataChannel, you'd need never see a reset stream. 
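As the comments above spell out, GracefulClose waits for the read loop but the legacy OnClose callback may still fire; a sketch of a shutdown helper that follows the advice to deregister it first, assuming the patched pion/webrtc API:

package main

import "github.com/pion/webrtc/v3"

// closeChannel deregisters the legacy OnClose handler and then waits for the
// channel's read loop to drain. Call it outside DataChannel callbacks, or from
// its own goroutine when inside one.
func closeChannel(dc *webrtc.DataChannel) error {
	dc.OnClose(nil)
	return dc.GracefulClose()
}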
+func (d *DataChannel) close(shouldGracefullyClose bool) error { d.mu.Lock() + d.isGracefulClosed = true + readLoopActive := d.readLoopActive + if shouldGracefullyClose && readLoopActive != nil { + defer func() { + <-readLoopActive + }() + } haveSctpTransport := d.dataChannel != nil d.mu.Unlock() diff --git a/vendor/github.com/pion/webrtc/v3/dtlstransport.go b/vendor/github.com/pion/webrtc/v3/dtlstransport.go index 0267ca591dc..beaa7b28c14 100644 --- a/vendor/github.com/pion/webrtc/v3/dtlstransport.go +++ b/vendor/github.com/pion/webrtc/v3/dtlstransport.go @@ -50,7 +50,7 @@ type DTLSTransport struct { srtpSession, srtcpSession atomic.Value srtpEndpoint, srtcpEndpoint *mux.Endpoint - simulcastStreams []*srtp.ReadStreamSRTP + simulcastStreams []simulcastStreamPair srtpReady chan struct{} dtlsMatcher mux.MatchFunc @@ -59,6 +59,11 @@ type DTLSTransport struct { log logging.LeveledLogger } +type simulcastStreamPair struct { + srtp *srtp.ReadStreamSRTP + srtcp *srtp.ReadStreamSRTCP +} + // NewDTLSTransport creates a new DTLSTransport. // This constructor is part of the ORTC API. It is not // meant to be used together with the basic WebRTC API. @@ -374,6 +379,8 @@ func (t *DTLSTransport) Start(remoteParameters DTLSParameters) error { t.srtpProtectionProfile = srtp.ProtectionProfileAeadAes256Gcm case dtls.SRTP_AES128_CM_HMAC_SHA1_80: t.srtpProtectionProfile = srtp.ProtectionProfileAes128CmHmacSha1_80 + case dtls.SRTP_NULL_HMAC_SHA1_80: + t.srtpProtectionProfile = srtp.ProtectionProfileNullHmacSha1_80 default: t.onStateChange(DTLSTransportStateFailed) return ErrNoSRTPProtectionProfile @@ -431,7 +438,8 @@ func (t *DTLSTransport) Stop() error { } for i := range t.simulcastStreams { - closeErrs = append(closeErrs, t.simulcastStreams[i].Close()) + closeErrs = append(closeErrs, t.simulcastStreams[i].srtp.Close()) + closeErrs = append(closeErrs, t.simulcastStreams[i].srtcp.Close()) } if t.conn != nil { @@ -472,11 +480,11 @@ func (t *DTLSTransport) ensureICEConn() error { return nil } -func (t *DTLSTransport) storeSimulcastStream(s *srtp.ReadStreamSRTP) { +func (t *DTLSTransport) storeSimulcastStream(srtpReadStream *srtp.ReadStreamSRTP, srtcpReadStream *srtp.ReadStreamSRTCP) { t.lock.Lock() defer t.lock.Unlock() - t.simulcastStreams = append(t.simulcastStreams, s) + t.simulcastStreams = append(t.simulcastStreams, simulcastStreamPair{srtpReadStream, srtcpReadStream}) } func (t *DTLSTransport) streamsForSSRC(ssrc SSRC, streamInfo interceptor.StreamInfo) (*srtp.ReadStreamSRTP, interceptor.RTPReader, *srtp.ReadStreamSRTCP, interceptor.RTCPReader, error) { diff --git a/vendor/github.com/pion/webrtc/v3/icegatherer.go b/vendor/github.com/pion/webrtc/v3/icegatherer.go index d01ecc1344b..cd0d8672b82 100644 --- a/vendor/github.com/pion/webrtc/v3/icegatherer.go +++ b/vendor/github.com/pion/webrtc/v3/icegatherer.go @@ -188,13 +188,31 @@ func (g *ICEGatherer) Gather() error { // Close prunes all local candidates, and closes the ports. func (g *ICEGatherer) Close() error { + return g.close(false /* shouldGracefullyClose */) +} + +// GracefulClose prunes all local candidates, and closes the ports. It also waits +// for any goroutines it started to complete. This is only safe to call outside of +// ICEGatherer callbacks or if in a callback, in its own goroutine. 
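The DTLS transport above now maps dtls.SRTP_NULL_HMAC_SHA1_80 onto srtp.ProtectionProfileNullHmacSha1_80. A sketch of offering that profile for debugging sessions; SettingEngine.SetSRTPProtectionProfiles is assumed from the existing pion/webrtc API and is not part of this patch:

package main

import (
	"github.com/pion/dtls/v2"
	"github.com/pion/webrtc/v3"
)

func newDebugAPI() *webrtc.API {
	s := webrtc.SettingEngine{}
	// Offer the NULL profile first (authentication only, payloads readable in
	// captures), falling back to the regular AES-128 profile.
	s.SetSRTPProtectionProfiles(dtls.SRTP_NULL_HMAC_SHA1_80, dtls.SRTP_AES128_CM_HMAC_SHA1_80)
	return webrtc.NewAPI(webrtc.WithSettingEngine(s))
}

Both peers have to support and select the NULL profile for this to take effect; as the profile comments note, it is not intended for production traffic.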
+func (g *ICEGatherer) GracefulClose() error { + return g.close(true /* shouldGracefullyClose */) +} + +func (g *ICEGatherer) close(shouldGracefullyClose bool) error { g.lock.Lock() defer g.lock.Unlock() if g.agent == nil { return nil - } else if err := g.agent.Close(); err != nil { - return err + } + if shouldGracefullyClose { + if err := g.agent.GracefulClose(); err != nil { + return err + } + } else { + if err := g.agent.Close(); err != nil { + return err + } } g.agent = nil diff --git a/vendor/github.com/pion/webrtc/v3/icetransport.go b/vendor/github.com/pion/webrtc/v3/icetransport.go index 469aafbd43f..cb9aa22de88 100644 --- a/vendor/github.com/pion/webrtc/v3/icetransport.go +++ b/vendor/github.com/pion/webrtc/v3/icetransport.go @@ -16,6 +16,7 @@ import ( "github.com/pion/ice/v2" "github.com/pion/logging" "github.com/pion/webrtc/v3/internal/mux" + "github.com/pion/webrtc/v3/internal/util" ) // ICETransport allows an application access to information about the ICE @@ -187,6 +188,17 @@ func (t *ICETransport) restart() error { // Stop irreversibly stops the ICETransport. func (t *ICETransport) Stop() error { + return t.stop(false /* shouldGracefullyClose */) +} + +// GracefulStop irreversibly stops the ICETransport. It also waits +// for any goroutines it started to complete. This is only safe to call outside of +// ICETransport callbacks or if in a callback, in its own goroutine. +func (t *ICETransport) GracefulStop() error { + return t.stop(true /* shouldGracefullyClose */) +} + +func (t *ICETransport) stop(shouldGracefullyClose bool) error { t.lock.Lock() defer t.lock.Unlock() @@ -197,8 +209,18 @@ func (t *ICETransport) Stop() error { } if t.mux != nil { - return t.mux.Close() + var closeErrs []error + if shouldGracefullyClose && t.gatherer != nil { + // we can't access icegatherer/icetransport.Close via + // mux's net.Conn Close so we call it earlier here. 
+ closeErrs = append(closeErrs, t.gatherer.GracefulClose()) + } + closeErrs = append(closeErrs, t.mux.Close()) + return util.FlattenErrs(closeErrs) } else if t.gatherer != nil { + if shouldGracefullyClose { + return t.gatherer.GracefulClose() + } return t.gatherer.Close() } return nil diff --git a/vendor/github.com/pion/webrtc/v3/interceptor.go b/vendor/github.com/pion/webrtc/v3/interceptor.go index 85232f35d36..7a5b4a72403 100644 --- a/vendor/github.com/pion/webrtc/v3/interceptor.go +++ b/vendor/github.com/pion/webrtc/v3/interceptor.go @@ -109,6 +109,19 @@ func ConfigureTWCCSender(mediaEngine *MediaEngine, interceptorRegistry *intercep return nil } +// ConfigureSimulcastExtensionHeaders enables the RTP Extension Headers needed for Simulcast +func ConfigureSimulcastExtensionHeaders(mediaEngine *MediaEngine) error { + if err := mediaEngine.RegisterHeaderExtension(RTPHeaderExtensionCapability{URI: sdp.SDESMidURI}, RTPCodecTypeVideo); err != nil { + return err + } + + if err := mediaEngine.RegisterHeaderExtension(RTPHeaderExtensionCapability{URI: sdp.SDESRTPStreamIDURI}, RTPCodecTypeVideo); err != nil { + return err + } + + return mediaEngine.RegisterHeaderExtension(RTPHeaderExtensionCapability{URI: sdesRepairRTPStreamIDURI}, RTPCodecTypeVideo) +} + type interceptorToTrackLocalWriter struct{ interceptor atomic.Value } // interceptor.RTPWriter } func (i *interceptorToTrackLocalWriter) WriteRTP(header *rtp.Header, payload []byte) (int, error) { diff --git a/vendor/github.com/pion/webrtc/v3/internal/fmtp/av1.go b/vendor/github.com/pion/webrtc/v3/internal/fmtp/av1.go new file mode 100644 index 00000000000..29eccd114d0 --- /dev/null +++ b/vendor/github.com/pion/webrtc/v3/internal/fmtp/av1.go @@ -0,0 +1,41 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package fmtp + +type av1FMTP struct { + parameters map[string]string +} + +func (h *av1FMTP) MimeType() string { + return "video/av1" +} + +func (h *av1FMTP) Match(b FMTP) bool { + c, ok := b.(*av1FMTP) + if !ok { + return false + } + + // RTP Payload Format For AV1 (v1.0) + // https://aomediacodec.github.io/av1-rtp-spec/ + // If the profile parameter is not present, it MUST be inferred to be 0 (“Main” profile). 
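ConfigureSimulcastExtensionHeaders, added to interceptor.go above, registers the SDES MID, RTP stream ID and repair stream ID extensions that simulcast negotiation relies on; a minimal wiring sketch:

package main

import "github.com/pion/webrtc/v3"

func newSimulcastAPI() (*webrtc.API, error) {
	m := &webrtc.MediaEngine{}
	if err := m.RegisterDefaultCodecs(); err != nil {
		return nil, err
	}
	// Enables the three RTP header extensions required for receiving simulcast video.
	if err := webrtc.ConfigureSimulcastExtensionHeaders(m); err != nil {
		return nil, err
	}
	return webrtc.NewAPI(webrtc.WithMediaEngine(m)), nil
}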
+ hProfile, ok := h.parameters["profile"] + if !ok { + hProfile = "0" + } + cProfile, ok := c.parameters["profile"] + if !ok { + cProfile = "0" + } + if hProfile != cProfile { + return false + } + + return true +} + +func (h *av1FMTP) Parameter(key string) (string, bool) { + v, ok := h.parameters[key] + return v, ok +} diff --git a/vendor/github.com/pion/webrtc/v3/internal/fmtp/fmtp.go b/vendor/github.com/pion/webrtc/v3/internal/fmtp/fmtp.go index 5461c019bd2..f515a648d01 100644 --- a/vendor/github.com/pion/webrtc/v3/internal/fmtp/fmtp.go +++ b/vendor/github.com/pion/webrtc/v3/internal/fmtp/fmtp.go @@ -8,6 +8,22 @@ import ( "strings" ) +func parseParameters(line string) map[string]string { + parameters := make(map[string]string) + + for _, p := range strings.Split(line, ";") { + pp := strings.SplitN(strings.TrimSpace(p), "=", 2) + key := strings.ToLower(pp[0]) + var value string + if len(pp) > 1 { + value = pp[1] + } + parameters[key] = value + } + + return parameters +} + // FMTP interface for implementing custom // FMTP parsers based on MimeType type FMTP interface { @@ -23,29 +39,30 @@ type FMTP interface { } // Parse parses an fmtp string based on the MimeType -func Parse(mimetype, line string) FMTP { +func Parse(mimeType, line string) FMTP { var f FMTP - parameters := make(map[string]string) - - for _, p := range strings.Split(line, ";") { - pp := strings.SplitN(strings.TrimSpace(p), "=", 2) - key := strings.ToLower(pp[0]) - var value string - if len(pp) > 1 { - value = pp[1] - } - parameters[key] = value - } + parameters := parseParameters(line) switch { - case strings.EqualFold(mimetype, "video/h264"): + case strings.EqualFold(mimeType, "video/h264"): f = &h264FMTP{ parameters: parameters, } + + case strings.EqualFold(mimeType, "video/vp9"): + f = &vp9FMTP{ + parameters: parameters, + } + + case strings.EqualFold(mimeType, "video/av1"): + f = &av1FMTP{ + parameters: parameters, + } + default: f = &genericFMTP{ - mimeType: mimetype, + mimeType: mimeType, parameters: parameters, } } diff --git a/vendor/github.com/pion/webrtc/v3/internal/fmtp/vp9.go b/vendor/github.com/pion/webrtc/v3/internal/fmtp/vp9.go new file mode 100644 index 00000000000..7fc618bccd8 --- /dev/null +++ b/vendor/github.com/pion/webrtc/v3/internal/fmtp/vp9.go @@ -0,0 +1,41 @@ +// SPDX-FileCopyrightText: 2023 The Pion community +// SPDX-License-Identifier: MIT + +package fmtp + +type vp9FMTP struct { + parameters map[string]string +} + +func (h *vp9FMTP) MimeType() string { + return "video/vp9" +} + +func (h *vp9FMTP) Match(b FMTP) bool { + c, ok := b.(*vp9FMTP) + if !ok { + return false + } + + // RTP Payload Format for VP9 Video - draft-ietf-payload-vp9-16 + // https://datatracker.ietf.org/doc/html/draft-ietf-payload-vp9-16 + // If no profile-id is present, Profile 0 MUST be inferred + hProfileID, ok := h.parameters["profile-id"] + if !ok { + hProfileID = "0" + } + cProfileID, ok := c.parameters["profile-id"] + if !ok { + cProfileID = "0" + } + if hProfileID != cProfileID { + return false + } + + return true +} + +func (h *vp9FMTP) Parameter(key string) (string, bool) { + v, ok := h.parameters[key] + return v, ok +} diff --git a/vendor/github.com/pion/webrtc/v3/internal/mux/mux.go b/vendor/github.com/pion/webrtc/v3/internal/mux/mux.go index 1e167b89784..d57b9c3cf3b 100644 --- a/vendor/github.com/pion/webrtc/v3/internal/mux/mux.go +++ b/vendor/github.com/pion/webrtc/v3/internal/mux/mux.go @@ -120,6 +120,10 @@ func (m *Mux) readLoop() { } if err = m.dispatch(buf[:n]); err != nil { + if errors.Is(err, io.ErrClosedPipe) 
{ + // if the buffer was closed, that's not an error we care to report + return + } m.log.Errorf("mux: ending readLoop dispatch error %s", err.Error()) return } diff --git a/vendor/github.com/pion/webrtc/v3/mediaengine.go b/vendor/github.com/pion/webrtc/v3/mediaengine.go index 7abb8c3a798..a7cfa882639 100644 --- a/vendor/github.com/pion/webrtc/v3/mediaengine.go +++ b/vendor/github.com/pion/webrtc/v3/mediaengine.go @@ -456,6 +456,30 @@ func (m *MediaEngine) matchRemoteCodec(remoteCodec RTPCodecParameters, typ RTPCo return matchType, nil } +// Update header extensions from a remote media section +func (m *MediaEngine) updateHeaderExtensionFromMediaSection(media *sdp.MediaDescription) error { + var typ RTPCodecType + switch { + case strings.EqualFold(media.MediaName.Media, "audio"): + typ = RTPCodecTypeAudio + case strings.EqualFold(media.MediaName.Media, "video"): + typ = RTPCodecTypeVideo + default: + return nil + } + extensions, err := rtpExtensionsFromMediaDescription(media) + if err != nil { + return err + } + + for extension, id := range extensions { + if err = m.updateHeaderExtension(id, extension, typ); err != nil { + return err + } + } + return nil +} + // Look up a header extension and enable if it exists func (m *MediaEngine) updateHeaderExtension(id int, extension string, typ RTPCodecType) error { if m.negotiatedHeaderExtensions == nil { @@ -499,14 +523,27 @@ func (m *MediaEngine) updateFromRemoteDescription(desc sdp.SessionDescription) e for _, media := range desc.MediaDescriptions { var typ RTPCodecType + switch { - case !m.negotiatedAudio && strings.EqualFold(media.MediaName.Media, "audio"): - m.negotiatedAudio = true + case strings.EqualFold(media.MediaName.Media, "audio"): typ = RTPCodecTypeAudio - case !m.negotiatedVideo && strings.EqualFold(media.MediaName.Media, "video"): - m.negotiatedVideo = true + case strings.EqualFold(media.MediaName.Media, "video"): typ = RTPCodecTypeVideo + } + + switch { + case !m.negotiatedAudio && typ == RTPCodecTypeAudio: + m.negotiatedAudio = true + case !m.negotiatedVideo && typ == RTPCodecTypeVideo: + m.negotiatedVideo = true default: + // update header extesions from remote sdp if codec is negotiated, Firefox + // would send updated header extension in renegotiation. + // e.g. publish first track without simucalst ->negotiated-> publish second track with simucalst + // then the two media secontions have different rtp header extensions in offer + if err := m.updateHeaderExtensionFromMediaSection(media); err != nil { + return err + } continue } @@ -542,16 +579,9 @@ func (m *MediaEngine) updateFromRemoteDescription(desc sdp.SessionDescription) e continue } - extensions, err := rtpExtensionsFromMediaDescription(media) - if err != nil { + if err := m.updateHeaderExtensionFromMediaSection(media); err != nil { return err } - - for extension, id := range extensions { - if err = m.updateHeaderExtension(id, extension, typ); err != nil { - return err - } - } } return nil } diff --git a/vendor/github.com/pion/webrtc/v3/operations.go b/vendor/github.com/pion/webrtc/v3/operations.go index d9dca4a8e48..67d24eebecc 100644 --- a/vendor/github.com/pion/webrtc/v3/operations.go +++ b/vendor/github.com/pion/webrtc/v3/operations.go @@ -13,33 +13,55 @@ type operation func() // Operations is a task executor. 
type operations struct { - mu sync.Mutex - busy bool - ops *list.List + mu sync.Mutex + busyCh chan struct{} + ops *list.List + + updateNegotiationNeededFlagOnEmptyChain *atomicBool + onNegotiationNeeded func() + isClosed bool } -func newOperations() *operations { +func newOperations( + updateNegotiationNeededFlagOnEmptyChain *atomicBool, + onNegotiationNeeded func(), +) *operations { return &operations{ - ops: list.New(), + ops: list.New(), + updateNegotiationNeededFlagOnEmptyChain: updateNegotiationNeededFlagOnEmptyChain, + onNegotiationNeeded: onNegotiationNeeded, } } // Enqueue adds a new action to be executed. If there are no actions scheduled, -// the execution will start immediately in a new goroutine. +// the execution will start immediately in a new goroutine. If the queue has been +// closed, the operation will be dropped. The queue is only deliberately closed +// by a user. func (o *operations) Enqueue(op operation) { + o.mu.Lock() + defer o.mu.Unlock() + _ = o.tryEnqueue(op) +} + +// tryEnqueue attempts to enqueue the given operation. It returns false +// if the op is invalid or the queue is closed. mu must be locked by +// tryEnqueue's caller. +func (o *operations) tryEnqueue(op operation) bool { if op == nil { - return + return false } - o.mu.Lock() - running := o.busy + if o.isClosed { + return false + } o.ops.PushBack(op) - o.busy = true - o.mu.Unlock() - if !running { + if o.busyCh == nil { + o.busyCh = make(chan struct{}) go o.start() } + + return true } // IsEmpty checks if there are tasks in the queue @@ -54,12 +76,38 @@ func (o *operations) IsEmpty() bool { func (o *operations) Done() { var wg sync.WaitGroup wg.Add(1) - o.Enqueue(func() { + o.mu.Lock() + enqueued := o.tryEnqueue(func() { wg.Done() }) + o.mu.Unlock() + if !enqueued { + return + } wg.Wait() } +// GracefulClose waits for the operations queue to be cleared and forbids +// new operations from being enqueued. +func (o *operations) GracefulClose() { + o.mu.Lock() + if o.isClosed { + o.mu.Unlock() + return + } + // do not enqueue anymore ops from here on + // o.isClosed=true will also not allow a new busyCh + // to be created. 
+ o.isClosed = true + + busyCh := o.busyCh + o.mu.Unlock() + if busyCh == nil { + return + } + <-busyCh +} + func (o *operations) pop() func() { o.mu.Lock() defer o.mu.Unlock() @@ -79,12 +127,17 @@ func (o *operations) start() { defer func() { o.mu.Lock() defer o.mu.Unlock() - if o.ops.Len() == 0 { - o.busy = false + // this wil lbe the most recent busy chan + close(o.busyCh) + + if o.ops.Len() == 0 || o.isClosed { + o.busyCh = nil return } + // either a new operation was enqueued while we // were busy, or an operation panicked + o.busyCh = make(chan struct{}) go o.start() }() @@ -93,4 +146,9 @@ func (o *operations) start() { fn() fn = o.pop() } + if !o.updateNegotiationNeededFlagOnEmptyChain.get() { + return + } + o.updateNegotiationNeededFlagOnEmptyChain.set(false) + o.onNegotiationNeeded() } diff --git a/vendor/github.com/pion/webrtc/v3/peerconnection.go b/vendor/github.com/pion/webrtc/v3/peerconnection.go index 1f0a7d1591b..3b40dafecb6 100644 --- a/vendor/github.com/pion/webrtc/v3/peerconnection.go +++ b/vendor/github.com/pion/webrtc/v3/peerconnection.go @@ -55,9 +55,11 @@ type PeerConnection struct { idpLoginURL *string - isClosed *atomicBool - isNegotiationNeeded *atomicBool - negotiationNeededState negotiationNeededState + isClosed *atomicBool + isGracefulClosed *atomicBool + isGracefulClosedDone chan struct{} + isNegotiationNeeded *atomicBool + updateNegotiationNeededFlagOnEmptyChain *atomicBool lastOffer string lastAnswer string @@ -68,7 +70,8 @@ type PeerConnection struct { // should be defined (see JSEP 3.4.1). greaterMid int - rtpTransceivers []*RTPTransceiver + rtpTransceivers []*RTPTransceiver + nonMediaBandwidthProbe atomic.Value // RTPReceiver onSignalingStateChangeHandler func(SignalingState) onICEConnectionStateChangeHandler atomic.Value // func(ICEConnectionState) @@ -115,6 +118,7 @@ func (api *API) NewPeerConnection(configuration Configuration) (*PeerConnection, // https://w3c.github.io/webrtc-pc/#constructor (Step #2) // Some variables defined explicitly despite their implicit zero values to // allow better readability to understand what is happening. 
+ pc := &PeerConnection{ statsID: fmt.Sprintf("PeerConnection-%d", time.Now().UnixNano()), configuration: Configuration{ @@ -125,18 +129,21 @@ func (api *API) NewPeerConnection(configuration Configuration) (*PeerConnection, Certificates: []Certificate{}, ICECandidatePoolSize: 0, }, - ops: newOperations(), - isClosed: &atomicBool{}, - isNegotiationNeeded: &atomicBool{}, - negotiationNeededState: negotiationNeededStateEmpty, - lastOffer: "", - lastAnswer: "", - greaterMid: -1, - signalingState: SignalingStateStable, + isClosed: &atomicBool{}, + isGracefulClosed: &atomicBool{}, + isGracefulClosedDone: make(chan struct{}), + isNegotiationNeeded: &atomicBool{}, + updateNegotiationNeededFlagOnEmptyChain: &atomicBool{}, + lastOffer: "", + lastAnswer: "", + greaterMid: -1, + signalingState: SignalingStateStable, api: api, log: api.settingEngine.LoggerFactory.NewLogger("pc"), } + pc.ops = newOperations(pc.updateNegotiationNeededFlagOnEmptyChain, pc.onNegotiationNeeded) + pc.iceConnectionState.Store(ICEConnectionStateNew) pc.connectionState.Store(PeerConnectionStateNew) @@ -293,66 +300,54 @@ func (pc *PeerConnection) OnNegotiationNeeded(f func()) { // onNegotiationNeeded enqueues negotiationNeededOp if necessary // caller of this method should hold `pc.mu` lock +// https://www.w3.org/TR/webrtc/#dfn-update-the-negotiation-needed-flag func (pc *PeerConnection) onNegotiationNeeded() { - // https://w3c.github.io/webrtc-pc/#updating-the-negotiation-needed-flag - // non-canon step 1 - if pc.negotiationNeededState == negotiationNeededStateRun { - pc.negotiationNeededState = negotiationNeededStateQueue - return - } else if pc.negotiationNeededState == negotiationNeededStateQueue { + // 4.7.3.1 If the length of connection.[[Operations]] is not 0, then set + // connection.[[UpdateNegotiationNeededFlagOnEmptyChain]] to true, and abort these steps. + if !pc.ops.IsEmpty() { + pc.updateNegotiationNeededFlagOnEmptyChain.set(true) return } - pc.negotiationNeededState = negotiationNeededStateRun pc.ops.Enqueue(pc.negotiationNeededOp) } +// https://www.w3.org/TR/webrtc/#dfn-update-the-negotiation-needed-flag func (pc *PeerConnection) negotiationNeededOp() { - // Don't run NegotiatedNeeded checks if OnNegotiationNeeded is not set - if handler, ok := pc.onNegotiationNeededHandler.Load().(func()); !ok || handler == nil { - return - } - - // https://www.w3.org/TR/webrtc/#updating-the-negotiation-needed-flag - // Step 2.1 + // 4.7.3.2.1 If connection.[[IsClosed]] is true, abort these steps. if pc.isClosed.get() { return } - // non-canon step 2.2 + + // 4.7.3.2.2 If the length of connection.[[Operations]] is not 0, + // then set connection.[[UpdateNegotiationNeededFlagOnEmptyChain]] to + // true, and abort these steps. if !pc.ops.IsEmpty() { - pc.ops.Enqueue(pc.negotiationNeededOp) + pc.updateNegotiationNeededFlagOnEmptyChain.set(true) return } - // non-canon, run again if there was a request - defer func() { - pc.mu.Lock() - defer pc.mu.Unlock() - if pc.negotiationNeededState == negotiationNeededStateQueue { - defer pc.onNegotiationNeeded() - } - pc.negotiationNeededState = negotiationNeededStateEmpty - }() - - // Step 2.3 + // 4.7.3.2.3 If connection's signaling state is not "stable", abort these steps. if pc.SignalingState() != SignalingStateStable { return } - // Step 2.4 + // 4.7.3.2.4 If the result of checking if negotiation is needed is false, + // clear the negotiation-needed flag by setting connection.[[NegotiationNeeded]] + // to false, and abort these steps. 
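With the flag handling rewritten to follow the W3C steps quoted above, negotiationneeded now fires only once the operations chain is empty; the handler an application registers is unchanged. A generic sketch, signaling transport omitted and error handling abbreviated:

package main

import "github.com/pion/webrtc/v3"

func wireNegotiation(pc *webrtc.PeerConnection) {
	pc.OnNegotiationNeeded(func() {
		offer, err := pc.CreateOffer(nil)
		if err != nil {
			return
		}
		if err := pc.SetLocalDescription(offer); err != nil {
			return
		}
		// Forward pc.LocalDescription() to the remote peer over the
		// application's signaling channel (not shown).
	})
}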
if !pc.checkNegotiationNeeded() { pc.isNegotiationNeeded.set(false) return } - // Step 2.5 + // 4.7.3.2.5 If connection.[[NegotiationNeeded]] is already true, abort these steps. if pc.isNegotiationNeeded.get() { return } - // Step 2.6 + // 4.7.3.2.6 Set connection.[[NegotiationNeeded]] to true. pc.isNegotiationNeeded.set(true) - // Step 2.7 + // 4.7.3.2.7 Fire an event named negotiationneeded at connection. if handler, ok := pc.onNegotiationNeededHandler.Load().(func()); ok && handler != nil { handler() } @@ -1534,6 +1529,32 @@ func (pc *PeerConnection) handleUndeclaredSSRC(ssrc SSRC, remoteDescription *Ses return true, nil } +// Chrome sends probing traffic on SSRC 0. This reads the packets to ensure that we properly +// generate TWCC reports for it. Since this isn't actually media we don't pass this to the user +func (pc *PeerConnection) handleNonMediaBandwidthProbe() { + nonMediaBandwidthProbe, err := pc.api.NewRTPReceiver(RTPCodecTypeVideo, pc.dtlsTransport) + if err != nil { + pc.log.Errorf("handleNonMediaBandwidthProbe failed to create RTPReceiver: %v", err) + return + } + + if err = nonMediaBandwidthProbe.Receive(RTPReceiveParameters{ + Encodings: []RTPDecodingParameters{{RTPCodingParameters: RTPCodingParameters{}}}, + }); err != nil { + pc.log.Errorf("handleNonMediaBandwidthProbe failed to start RTPReceiver: %v", err) + return + } + + pc.nonMediaBandwidthProbe.Store(nonMediaBandwidthProbe) + b := make([]byte, pc.api.settingEngine.getReceiveMTU()) + for { + if _, _, err = nonMediaBandwidthProbe.readRTP(b, nonMediaBandwidthProbe.Track()); err != nil { + pc.log.Tracef("handleNonMediaBandwidthProbe read exiting: %v", err) + return + } + } +} + func (pc *PeerConnection) handleIncomingSSRC(rtpStream io.Reader, ssrc SSRC) error { //nolint:gocognit remoteDescription := pc.RemoteDescription() if remoteDescription == nil { @@ -1653,20 +1674,41 @@ func (pc *PeerConnection) undeclaredRTPMediaProcessor() { return } - stream, ssrc, err := srtpSession.AcceptStream() + srtcpSession, err := pc.dtlsTransport.getSRTCPSession() + if err != nil { + pc.log.Warnf("undeclaredMediaProcessor failed to open SrtcpSession: %v", err) + return + } + + srtpReadStream, ssrc, err := srtpSession.AcceptStream() if err != nil { pc.log.Warnf("Failed to accept RTP %v", err) return } + // open accompanying srtcp stream + srtcpReadStream, err := srtcpSession.OpenReadStream(ssrc) + if err != nil { + pc.log.Warnf("Failed to open RTCP stream for %d: %v", ssrc, err) + return + } + if pc.isClosed.get() { - if err = stream.Close(); err != nil { + if err = srtpReadStream.Close(); err != nil { pc.log.Warnf("Failed to close RTP stream %v", err) } + if err = srtcpReadStream.Close(); err != nil { + pc.log.Warnf("Failed to close RTCP stream %v", err) + } continue } - pc.dtlsTransport.storeSimulcastStream(stream) + pc.dtlsTransport.storeSimulcastStream(srtpReadStream, srtcpReadStream) + + if ssrc == 0 { + go pc.handleNonMediaBandwidthProbe() + continue + } if atomic.AddUint64(&simulcastRoutineCount, 1) >= simulcastMaxProbeRoutines { atomic.AddUint64(&simulcastRoutineCount, ^uint64(0)) @@ -1679,7 +1721,7 @@ func (pc *PeerConnection) undeclaredRTPMediaProcessor() { pc.log.Errorf(incomingUnhandledRTPSsrc, ssrc, err) } atomic.AddUint64(&simulcastRoutineCount, ^uint64(0)) - }(stream, SSRC(ssrc)) + }(srtpReadStream, SSRC(ssrc)) } } @@ -2044,13 +2086,34 @@ func (pc *PeerConnection) writeRTCP(pkts []rtcp.Packet, _ interceptor.Attributes return pc.dtlsTransport.WriteRTCP(pkts) } -// Close ends the PeerConnection +// Close ends the 
PeerConnection. func (pc *PeerConnection) Close() error { + return pc.close(false /* shouldGracefullyClose */) +} + +// GracefulClose ends the PeerConnection. It also waits +// for any goroutines it started to complete. This is only safe to call outside of +// PeerConnection callbacks or if in a callback, in its own goroutine. +func (pc *PeerConnection) GracefulClose() error { + return pc.close(true /* shouldGracefullyClose */) +} + +func (pc *PeerConnection) close(shouldGracefullyClose bool) error { // https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #1) // https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #2) + alreadyGracefullyClosed := shouldGracefullyClose && pc.isGracefulClosed.swap(true) if pc.isClosed.swap(true) { + if alreadyGracefullyClosed { + // similar but distinct condition where we may be waiting for some + // other graceful close to finish. Incorrectly using isClosed may + // leak a goroutine. + <-pc.isGracefulClosedDone + } return nil } + if shouldGracefullyClose && !alreadyGracefullyClosed { + defer close(pc.isGracefulClosedDone) + } // https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #3) pc.signalingState.Set(SignalingStateClosed) @@ -2072,6 +2135,9 @@ func (pc *PeerConnection) Close() error { closeErrs = append(closeErrs, t.Stop()) } } + if nonMediaBandwidthProbe, ok := pc.nonMediaBandwidthProbe.Load().(*RTPReceiver); ok { + closeErrs = append(closeErrs, nonMediaBandwidthProbe.Stop()) + } pc.mu.Unlock() // https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #5) @@ -2091,12 +2157,28 @@ func (pc *PeerConnection) Close() error { // https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #8, #9, #10) if pc.iceTransport != nil { - closeErrs = append(closeErrs, pc.iceTransport.Stop()) + if shouldGracefullyClose { + // note that it isn't canon to stop gracefully + closeErrs = append(closeErrs, pc.iceTransport.GracefulStop()) + } else { + closeErrs = append(closeErrs, pc.iceTransport.Stop()) + } } // https://www.w3.org/TR/webrtc/#dom-rtcpeerconnection-close (step #11) pc.updateConnectionState(pc.ICEConnectionState(), pc.dtlsTransport.State()) + if shouldGracefullyClose { + pc.ops.GracefulClose() + + // note that it isn't canon to stop gracefully + pc.sctpTransport.lock.Lock() + for _, d := range pc.sctpTransport.dataChannels { + closeErrs = append(closeErrs, d.GracefulClose()) + } + pc.sctpTransport.lock.Unlock() + } + return util.FlattenErrs(closeErrs) } @@ -2114,10 +2196,11 @@ func (pc *PeerConnection) addRTPTransceiver(t *RTPTransceiver) { // by the ICEAgent since the offer or answer was created. func (pc *PeerConnection) CurrentLocalDescription() *SessionDescription { pc.mu.Lock() + defer pc.mu.Unlock() + localDescription := pc.currentLocalDescription iceGather := pc.iceGatherer iceGatheringState := pc.ICEGatheringState() - pc.mu.Unlock() return populateLocalCandidates(localDescription, iceGather, iceGatheringState) } @@ -2127,10 +2210,11 @@ func (pc *PeerConnection) CurrentLocalDescription() *SessionDescription { // PeerConnection is in the stable state, the value is null. 
func (pc *PeerConnection) PendingLocalDescription() *SessionDescription { pc.mu.Lock() + defer pc.mu.Unlock() + localDescription := pc.pendingLocalDescription iceGather := pc.iceGatherer iceGatheringState := pc.ICEGatheringState() - pc.mu.Unlock() return populateLocalCandidates(localDescription, iceGather, iceGatheringState) } @@ -2455,7 +2539,9 @@ func (pc *PeerConnection) generateMatchedSDP(transceivers []*RTPTransceiver, use sender.setNegotiated() } mediaTransceivers := []*RTPTransceiver{t} - mediaSections = append(mediaSections, mediaSection{id: midValue, transceivers: mediaTransceivers, ridMap: getRids(media)}) + + extensions, _ := rtpExtensionsFromMediaDescription(media) + mediaSections = append(mediaSections, mediaSection{id: midValue, transceivers: mediaTransceivers, matchExtensions: extensions, rids: getRids(media)}) } } diff --git a/vendor/github.com/pion/webrtc/v3/peerconnectionstate.go b/vendor/github.com/pion/webrtc/v3/peerconnectionstate.go index 1f1456882d8..b626a31e32d 100644 --- a/vendor/github.com/pion/webrtc/v3/peerconnectionstate.go +++ b/vendor/github.com/pion/webrtc/v3/peerconnectionstate.go @@ -84,14 +84,3 @@ func (t PeerConnectionState) String() string { return ErrUnknownType.Error() } } - -type negotiationNeededState int - -const ( - // NegotiationNeededStateEmpty not running and queue is empty - negotiationNeededStateEmpty = iota - // NegotiationNeededStateEmpty running and queue is empty - negotiationNeededStateRun - // NegotiationNeededStateEmpty running and queue - negotiationNeededStateQueue -) diff --git a/vendor/github.com/pion/webrtc/v3/rtpreceiver.go b/vendor/github.com/pion/webrtc/v3/rtpreceiver.go index ec7c79bce2e..28d72246da3 100644 --- a/vendor/github.com/pion/webrtc/v3/rtpreceiver.go +++ b/vendor/github.com/pion/webrtc/v3/rtpreceiver.go @@ -201,7 +201,7 @@ func (r *RTPReceiver) startReceive(parameters RTPReceiveParameters) error { var t *trackStreams for idx, ts := range r.tracks { - if ts.track != nil && parameters.Encodings[i].SSRC != 0 && ts.track.SSRC() == parameters.Encodings[i].SSRC { + if ts.track != nil && ts.track.SSRC() == parameters.Encodings[i].SSRC { t = &r.tracks[idx] break } @@ -210,12 +210,10 @@ func (r *RTPReceiver) startReceive(parameters RTPReceiveParameters) error { return fmt.Errorf("%w: %d", errRTPReceiverWithSSRCTrackStreamNotFound, parameters.Encodings[i].SSRC) } - if parameters.Encodings[i].SSRC != 0 { - t.streamInfo = createStreamInfo("", parameters.Encodings[i].SSRC, 0, codec, globalParams.HeaderExtensions) - var err error - if t.rtpReadStream, t.rtpInterceptor, t.rtcpReadStream, t.rtcpInterceptor, err = r.transport.streamsForSSRC(parameters.Encodings[i].SSRC, *t.streamInfo); err != nil { - return err - } + t.streamInfo = createStreamInfo("", parameters.Encodings[i].SSRC, 0, codec, globalParams.HeaderExtensions) + var err error + if t.rtpReadStream, t.rtpInterceptor, t.rtcpReadStream, t.rtcpInterceptor, err = r.transport.streamsForSSRC(parameters.Encodings[i].SSRC, *t.streamInfo); err != nil { + return err } if rtxSsrc := parameters.Encodings[i].RTX.SSRC; rtxSsrc != 0 { @@ -426,7 +424,7 @@ func (r *RTPReceiver) receiveForRtx(ssrc SSRC, rsid string, streamInfo *intercep track.repairInterceptor = rtpInterceptor track.repairRtcpReadStream = rtcpReadStream track.repairRtcpInterceptor = rtcpInterceptor - track.repairStreamChannel = make(chan rtxPacketWithAttributes) + track.repairStreamChannel = make(chan rtxPacketWithAttributes, 50) go func() { for { @@ -476,6 +474,8 @@ func (r *RTPReceiver) receiveForRtx(ssrc SSRC, rsid 
string, streamInfo *intercep r.rtxPool.Put(b) // nolint:staticcheck return case track.repairStreamChannel <- rtxPacketWithAttributes{pkt: b[:i-2], attributes: attributes, pool: &r.rtxPool}: + default: + // skip the RTX packet if the repair stream channel is full, could be blocked in the application's read loop } } }() diff --git a/vendor/github.com/pion/webrtc/v3/sctptransport.go b/vendor/github.com/pion/webrtc/v3/sctptransport.go index a468cbc98d6..8167423b7ef 100644 --- a/vendor/github.com/pion/webrtc/v3/sctptransport.go +++ b/vendor/github.com/pion/webrtc/v3/sctptransport.go @@ -45,6 +45,7 @@ type SCTPTransport struct { // OnStateChange func() onErrorHandler func(error) + onCloseHandler func(error) sctpAssociation *sctp.Association onDataChannelHandler func(*DataChannel) @@ -174,6 +175,7 @@ func (r *SCTPTransport) acceptDataChannels(a *sctp.Association) { dataChannels = append(dataChannels, dc.dataChannel) } r.lock.RUnlock() + ACCEPT: for { dc, err := datachannel.Accept(a, &datachannel.Config{ @@ -183,6 +185,9 @@ ACCEPT: if !errors.Is(err, io.EOF) { r.log.Errorf("Failed to accept data channel: %v", err) r.onError(err) + r.onClose(err) + } else { + r.onClose(nil) } return } @@ -230,9 +235,14 @@ ACCEPT: MaxRetransmits: maxRetransmits, }, r, r.api.settingEngine.LoggerFactory.NewLogger("ortc")) if err != nil { + // This data channel is invalid. Close it and log an error. + if err1 := dc.Close(); err1 != nil { + r.log.Errorf("Failed to close invalid data channel: %v", err1) + } r.log.Errorf("Failed to accept data channel: %v", err) r.onError(err) - return + // We've received a datachannel with invalid configuration. We can still receive other datachannels. + continue ACCEPT } <-r.onDataChannel(rtcDC) @@ -249,8 +259,7 @@ ACCEPT: } } -// OnError sets an event handler which is invoked when -// the SCTP connection error occurs. +// OnError sets an event handler which is invoked when the SCTP Association errors. func (r *SCTPTransport) OnError(f func(err error)) { r.lock.Lock() defer r.lock.Unlock() @@ -267,6 +276,23 @@ func (r *SCTPTransport) onError(err error) { } } +// OnClose sets an event handler which is invoked when the SCTP Association closes. +func (r *SCTPTransport) OnClose(f func(err error)) { + r.lock.Lock() + defer r.lock.Unlock() + r.onCloseHandler = f +} + +func (r *SCTPTransport) onClose(err error) { + r.lock.RLock() + handler := r.onCloseHandler + r.lock.RUnlock() + + if handler != nil { + go handler(err) + } +} + // OnDataChannel sets an event handler which is invoked when a data // channel message arrives from a remote peer. 
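The new OnClose hook above fires once the SCTP association ends; err is nil on a clean shutdown (io.EOF) and non-nil otherwise. A hedged sketch of wiring it up through the PeerConnection, assuming the usual pc.SCTP() accessor:

package main

import (
	"log"

	"github.com/pion/webrtc/v3"
)

func main() {
	pc, err := webrtc.NewPeerConnection(webrtc.Configuration{})
	if err != nil {
		log.Fatal(err)
	}

	// OnClose is invoked when the SCTP association shuts down.
	pc.SCTP().OnClose(func(err error) {
		if err != nil {
			log.Printf("SCTP association closed with error: %v", err)
			return
		}
		log.Println("SCTP association closed cleanly")
	})

	// ... create data channels, signal, etc. ...
}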
func (r *SCTPTransport) OnDataChannel(f func(*DataChannel)) { diff --git a/vendor/github.com/pion/webrtc/v3/sdp.go b/vendor/github.com/pion/webrtc/v3/sdp.go index 560fc788432..18f05e884e8 100644 --- a/vendor/github.com/pion/webrtc/v3/sdp.go +++ b/vendor/github.com/pion/webrtc/v3/sdp.go @@ -202,8 +202,8 @@ func trackDetailsFromSDP(log logging.LeveledLogger, s *sdp.SessionDescription) ( id: trackID, rids: []string{}, } - for rid := range rids { - simulcastTrack.rids = append(simulcastTrack.rids, rid) + for _, rid := range rids { + simulcastTrack.rids = append(simulcastTrack.rids, rid.id) } tracksInMediaSection = []trackDetails{simulcastTrack} @@ -238,13 +238,13 @@ func trackDetailsToRTPReceiveParameters(t *trackDetails) RTPReceiveParameters { return RTPReceiveParameters{Encodings: encodings} } -func getRids(media *sdp.MediaDescription) map[string]*simulcastRid { - rids := map[string]*simulcastRid{} +func getRids(media *sdp.MediaDescription) []*simulcastRid { + rids := []*simulcastRid{} var simulcastAttr string for _, attr := range media.Attributes { if attr.Key == sdpAttributeRid { split := strings.Split(attr.Value, " ") - rids[split[0]] = &simulcastRid{attrValue: attr.Value} + rids = append(rids, &simulcastRid{id: split[0], attrValue: attr.Value}) } else if attr.Key == sdpAttributeSimulcast { simulcastAttr = attr.Value } @@ -257,9 +257,12 @@ func getRids(media *sdp.MediaDescription) map[string]*simulcastRid { ridStates := strings.Split(simulcastAttr, ";") for _, ridState := range ridStates { if ridState[:1] == "~" { - rid := ridState[1:] - if r, ok := rids[rid]; ok { - r.paused = true + ridID := ridState[1:] + for _, rid := range rids { + if rid.id == ridID { + rid.paused = true + break + } } } } @@ -487,6 +490,11 @@ func addTransceiverSDP( parameters := mediaEngine.getRTPParametersByKind(t.kind, directions) for _, rtpExtension := range parameters.HeaderExtensions { + if mediaSection.matchExtensions != nil { + if _, enabled := mediaSection.matchExtensions[rtpExtension.URI]; !enabled { + continue + } + } extURL, err := url.Parse(rtpExtension.URI) if err != nil { return false, err @@ -494,15 +502,16 @@ func addTransceiverSDP( media.WithExtMap(sdp.ExtMap{Value: rtpExtension.ID, URI: extURL}) } - if len(mediaSection.ridMap) > 0 { - recvRids := make([]string, 0, len(mediaSection.ridMap)) + if len(mediaSection.rids) > 0 { + recvRids := make([]string, 0, len(mediaSection.rids)) - for rid := range mediaSection.ridMap { - media.WithValueAttribute(sdpAttributeRid, rid+" recv") - if mediaSection.ridMap[rid].paused { - rid = "~" + rid + for _, rid := range mediaSection.rids { + ridID := rid.id + media.WithValueAttribute(sdpAttributeRid, ridID+" recv") + if rid.paused { + ridID = "~" + ridID } - recvRids = append(recvRids, rid) + recvRids = append(recvRids, ridID) } // Simulcast media.WithValueAttribute(sdpAttributeSimulcast, "recv "+strings.Join(recvRids, ";")) @@ -528,15 +537,17 @@ func addTransceiverSDP( } type simulcastRid struct { + id string attrValue string paused bool } type mediaSection struct { - id string - transceivers []*RTPTransceiver - data bool - ridMap map[string]*simulcastRid + id string + transceivers []*RTPTransceiver + data bool + matchExtensions map[string]int + rids []*simulcastRid } func bundleMatchFromRemote(matchBundleGroup *string) func(mid string) bool { diff --git a/vendor/github.com/pion/webrtc/v3/track_remote.go b/vendor/github.com/pion/webrtc/v3/track_remote.go index e2d8d70df2e..7e448dd9895 100644 --- a/vendor/github.com/pion/webrtc/v3/track_remote.go +++ 
b/vendor/github.com/pion/webrtc/v3/track_remote.go @@ -153,7 +153,8 @@ func (t *TrackRemote) checkAndUpdateTrack(b []byte) error { return errRTPTooShort } - if payloadType := PayloadType(b[1] & rtpPayloadTypeBitmask); payloadType != t.PayloadType() { + payloadType := PayloadType(b[1] & rtpPayloadTypeBitmask) + if payloadType != t.PayloadType() || len(t.params.Codecs) == 0 { t.mu.Lock() defer t.mu.Unlock() diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30ee9..b9cc55abbb0 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 00000000000..65d761bc9f2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go new file mode 100644 index 00000000000..8547c8dfd18 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// AcceptSpec describes an Accept* header. +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. +func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 00000000000..2e45780b74b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. 
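To make the q-value handling in the vendored header.ParseAccept concrete, here is a small self-contained sketch (a simplified re-implementation for illustration only, not the vendored parser) of how an Accept-Encoding line such as "gzip;q=0.8, zstd, identity;q=0" resolves against the handler's offers:

package main

import "fmt"

// acceptSpec mirrors the vendored header.AcceptSpec: a value plus its q weight.
type acceptSpec struct {
	value string
	q     float64
}

func main() {
	// Parsed form of: Accept-Encoding: gzip;q=0.8, zstd, identity;q=0
	specs := []acceptSpec{{"gzip", 0.8}, {"zstd", 1.0}, {"identity", 0.0}}
	offers := []string{"zstd", "gzip", "identity"}

	// Same selection rule as NegotiateContentEncoding: highest q wins,
	// earlier offers win ties, and q == 0 means "not acceptable".
	bestOffer, bestQ := "identity", -1.0
	for _, offer := range offers {
		for _, spec := range specs {
			if spec.q > bestQ && (spec.value == "*" || spec.value == offer) {
				bestQ = spec.q
				bestOffer = offer
			}
		}
	}
	if bestQ == 0 {
		bestOffer = ""
	}
	fmt.Println(bestOffer) // zstd
}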
If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5e0d..520cbd7d418 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. 
Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64f43..51174641729 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s", desc, origName) +} + // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. 
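Net effect of the defaultGoCollectorOptions change above: a plain Go collector now also exposes the recommended runtime/metrics series (roughly go_gc_gogc_percent, go_gc_gomemlimit_bytes and go_sched_gomaxprocs_threads) without any opt-in. A hedged sketch using the public collectors package, which wraps these internal options:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// With this vendor bump the default Go collector already includes the
	// recommended runtime/metrics series (GOGC, GOMEMLIMIT, GOMAXPROCS);
	// no extra runtime-metrics option is needed for them.
	reg.MustRegister(collectors.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":2112", nil)
}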
- panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb395a..8d35f2d8ae9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -440,7 +440,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +473,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +743,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). - resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. 
func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -821,6 +843,15 @@ func (h *histogram) Write(out *dto.Metric) error { Length: proto.Uint32(0), }} } + + // If exemplars are not configured, the cap will be 0. + // So append is not needed in this case. + if cap(h.nativeExemplars.exemplars) > 0 { + h.nativeExemplars.Lock() + his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) + h.nativeExemplars.Unlock() + } + } addAndResetCounts(hotCounts, coldCounts) return nil @@ -1091,8 +1122,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) { deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. +// If histogram is native, the exemplar will be cached into nativeExemplars, +// which has a limit, and will remove one exemplar when limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -1102,6 +1135,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -1336,6 +1373,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} + type buckSort []*dto.Bucket func (s buckSort) Len() int { @@ -1575,3 +1654,142 @@ func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) } + +type nativeExemplars struct { + sync.Mutex + + ttl time.Duration + exemplars []*dto.Exemplar +} + +func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { + if ttl == 0 { + ttl = 5 * time.Minute + } + + if maxCount == 0 { + maxCount = 10 + } + + if maxCount < 0 { + maxCount = 0 + } + + return nativeExemplars{ + ttl: ttl, + exemplars: make([]*dto.Exemplar, 0, maxCount), + } +} + +func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { + if cap(n.exemplars) == 0 { + return + } + + n.Lock() + defer n.Unlock() + + // The index where to insert the new exemplar. + var nIdx int = -1 + + // When the number of exemplars has not yet exceeded or + // is equal to cap(n.exemplars), then + // insert the new exemplar directly. + if len(n.exemplars) < cap(n.exemplars) { + for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { + if *e.Value < *n.exemplars[nIdx].Value { + break + } + } + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...) + return + } + + // When the number of exemplars exceeds the limit, remove one exemplar. + var ( + rIdx int // The index where to remove the old exemplar. + + ot = time.Now() // Oldest timestamp seen. + otIdx = -1 // Index of the exemplar with the oldest timestamp. + + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. + mdIdx = -1 // Index of the older exemplar within the closest pair. + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. + ) + + for i, exemplar := range n.exemplars { + // Find the exemplar with the oldest timestamp. + if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) { + ot = exemplar.Timestamp.AsTime() + otIdx = i + } + + // Find the index at which to insert new the exemplar. + if *e.Value <= *exemplar.Value && nIdx == -1 { + nIdx = i + } + + // Find the two closest exemplars and pick the one the with older timestamp. + pLog = cLog + cLog = math.Log(exemplar.GetValue()) + if i == 0 { + continue + } + diff := math.Abs(cLog - pLog) + if md == -1 || diff < md { + md = diff + if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { + mdIdx = i + } else { + mdIdx = i - 1 + } + } + + } + + // If all existing exemplar are smaller than new exemplar, + // then the exemplar should be inserted at the end. + if nIdx == -1 { + nIdx = len(n.exemplars) + } + + if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + rIdx = otIdx + } else { + // In the previous for loop, when calculating the closest pair of exemplars, + // we did not take into account the newly inserted exemplar. + // So we need to calculate with the newly inserted exemplar again. 
+ elog := math.Log(e.GetValue()) + if nIdx > 0 { + diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) + if diff < md { + md = diff + mdIdx = nIdx + if n.exemplars[nIdx-1].Timestamp.AsTime().Before(e.Timestamp.AsTime()) { + mdIdx = nIdx - 1 + } + } + } + if nIdx < len(n.exemplars) { + diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) + if diff < md { + mdIdx = nIdx + if n.exemplars[nIdx].Timestamp.AsTime().Before(e.Timestamp.AsTime()) { + mdIdx = nIdx + } + } + } + rIdx = mdIdx + } + + // Adjust the slice according to rIdx and nIdx. + switch { + case rIdx == nIdx: + n.exemplars[nIdx] = e + case rIdx < nIdx: + n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) + case rIdx > nIdx: + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d6444..a4fa6eabd78 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e57237d..9d9b81ab448 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18ed5..efbc3ea8fbf 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,15 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { diff --git 
a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 8c1136ceea3..14d56d2d068 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } else { c.reportError(ch, nil, err) } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9819917b83b..315eab5f179 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } +// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, +// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. +func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + type ( closeNotifierDelegator struct{ *responseWriterDelegator } flusherDelegator struct{ *responseWriterDelegator } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 09b8d2fbead..2e0b9a86486 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -38,12 +38,13 @@ import ( "io" "net/http" "strconv" - "strings" "sync" "time" + "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" ) @@ -54,6 +55,18 @@ const ( processStartTimeHeader = "Process-Start-Time-Unix" ) +// Compression represents the content encodings handlers support for the HTTP +// responses. +type Compression string + +const ( + Identity Compression = "identity" + Gzip Compression = "gzip" + Zstd Compression = "zstd" +) + +var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} + var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) @@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } } + // Select compression formats to offer based on default or user choice. 
+ var compressions []string + if !opts.DisableCompression { + offers := defaultCompressionFormats + if len(opts.OfferedCompressions) > 0 { + offers = opts.OfferedCompressions + } + for _, comp := range offers { + compressions = append(compressions, string(comp)) + } + } + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if !opts.ProcessStartTime.IsZero() { rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) @@ -165,20 +190,20 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } else { contentType = expfmt.Negotiate(req.Header) } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) + rsp.Header().Set(contentTypeHeader, string(contentType)) - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) + w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions) + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error getting writer", err) + } + w = io.Writer(rsp) + encodingHeader = string(Identity) + } - gz.Reset(w) - defer gz.Close() + defer closeWriter() - w = gz - } + rsp.Header().Set(contentEncodingHeader, encodingHeader) enc := expfmt.NewEncoder(w, contentType) @@ -343,9 +368,19 @@ type HandlerOpts struct { // no effect on the HTTP status code because ErrorHandling is set to // ContinueOnError. Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. + // DisableCompression disables the response encoding (compression) and + // encoding negotiation. If true, the handler will + // never compress the response, even if requested + // by the client and the OfferedCompressions field is set. DisableCompression bool + // OfferedCompressions is a set of encodings (compressions) handler will + // try to offer when negotiating with the client. This defaults to identity, gzip + // and zstd. + // NOTE: If handler can't agree with the client on the encodings or + // unsupported or empty encodings are set in OfferedCompressions, + // handler always fallbacks to no compression (identity), for + // compatibility reasons. In such cases ErrorLog will be used if set. + OfferedCompressions []Compression // The number of concurrent HTTP requests is limited to // MaxRequestsInFlight. Additional requests are responded to with 503 // Service Unavailable and a suitable message in the body. If @@ -381,19 +416,6 @@ type HandlerOpts struct { ProcessStartTime time.Time } -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - // httpError removes any content-encoding header and then calls http.Error with // the provided error and http.StatusInternalServerError. Error contents is // supposed to be uncompressed plain text. 
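With the HandlerOpts additions above, gzip/zstd negotiation is on by default; the allow-list can also be narrowed explicitly. A short sketch:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Offer only gzip (plus uncompressed); zstd is left out even though
		// the handler supports it. Unsupported or empty entries make the
		// handler fall back to identity and report through ErrorLog, if set.
		OfferedCompressions: []promhttp.Compression{
			promhttp.Identity,
			promhttp.Gzip,
		},
	})

	http.Handle("/metrics", handler)
	_ = http.ListenAndServe(":2112", nil)
}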
Same as with a plain http.Error, this @@ -406,3 +428,38 @@ func httpError(rsp http.ResponseWriter, err error) { http.StatusInternalServerError, ) } + +// negotiateEncodingWriter reads the Accept-Encoding header from a request and +// selects the right compression based on an allow-list of supported +// compressions. It returns a writer implementing the compression and an the +// correct value that the caller can set in the response header. +func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { + if len(compressions) == 0 { + return rw, string(Identity), func() {}, nil + } + + // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented. + selected := httputil.NegotiateContentEncoding(r, compressions) + + switch selected { + case "zstd": + // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. + z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) + if err != nil { + return nil, "", func() {}, err + } + + z.Reset(rw) + return z, selected, func() { _ = z.Close() }, nil + case "gzip": + gz := gzipPool.Get().(*gzip.Writer) + gz.Reset(rw) + return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil + case "identity": + // This means the content is not compressed. + return rw, selected, func() {}, nil + default: + // The content encoding was not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 5e2ced25a02..c6fd2f58b74 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash + continue + } + + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } + continue } + newDimHashesByName[desc.fqName] = desc.dimHash } // A Collector yielding no Desc at all is considered unchecked. 
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1462704446c..1ab0e479655 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -783,3 +783,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59f83..2c808eece0a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index b2b89b017e6..25cfaa21643 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -75,14 +75,14 @@ func ResponseFormat(h http.Header) Format { func NewDecoder(r io.Reader, format Format) Decoder { switch format.FormatType() { case TypeProtoDelim: - return &protoDecoder{r: r} + return &protoDecoder{r: bufio.NewReader(r)} } return &textDecoder{r: r} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { - r io.Reader + r protodelim.Reader } // Decode implements the Decoder interface. 
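The *_WithCreatedTimestamp constructors above are aimed at custom collectors that synthesize metrics from an external source and know when the underlying series started. A hedged sketch using the summary variant; the metric name and values are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"batch_job_duration_seconds",
		"Duration of finished batch jobs.",
		nil, nil,
	)

	// created: when the series actually started, e.g. taken from the
	// external system being scraped.
	created := time.Now().Add(-30 * time.Minute)

	s, err := prometheus.NewConstSummaryWithCreatedTimestamp(
		desc,
		42,     // count
		1234.5, // sum
		map[float64]float64{0.5: 12.3, 0.99: 98.7}, // quantiles
		created,
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Desc())
}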
@@ -90,7 +90,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { opts := protodelim.UnmarshalOptions{ MaxSize: -1, } - if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil { + if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 8fd80618472..ff5ef7a9d92 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -21,9 +21,10 @@ import ( "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" "github.com/prometheus/common/model" + "github.com/munnerz/goautoneg" + dto "github.com/prometheus/client_model/go" ) @@ -139,7 +140,13 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { // interface is kept for backwards compatibility. // In cases where the Format does not allow for UTF-8 names, the global // NameEscapingScheme will be applied. -func NewEncoder(w io.Writer, format Format) Encoder { +// +// NewEncoder can be called with additional options to customize the OpenMetrics text output. +// For example: +// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines()) +// +// Extra options are ignored for all other formats. +func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { escapingScheme := format.ToEscapingScheme() switch format.FormatType() { @@ -178,7 +185,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { case TypeOpenMetrics: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme)) + _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...) return err }, close: func() error { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 6fc9555e3ff..051b38cd178 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,6 +15,7 @@ package expfmt import ( + "fmt" "strings" "github.com/prometheus/common/model" @@ -63,7 +64,7 @@ const ( type FormatType int const ( - TypeUnknown = iota + TypeUnknown FormatType = iota TypeProtoCompact TypeProtoDelim TypeProtoText @@ -73,7 +74,8 @@ const ( // NewFormat generates a new Format from the type provided. Mostly used for // tests, most Formats should be generated as part of content negotiation in -// encode.go. +// encode.go. If a type has more than one version, the latest version will be +// returned. func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: @@ -91,13 +93,21 @@ func NewFormat(t FormatType) Format { } } +// NewOpenMetricsFormat generates a new OpenMetrics format matching the +// specified version number. +func NewOpenMetricsFormat(version string) (Format, error) { + if version == OpenMetricsVersion_0_0_1 { + return fmtOpenMetrics_0_0_1, nil + } + if version == OpenMetricsVersion_1_0_0 { + return fmtOpenMetrics_1_0_0, nil + } + return fmtUnknown, fmt.Errorf("unknown open metrics version string") +} + // FormatType deduces an overall FormatType for the given format. 
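Putting the new expfmt pieces above together, one way to pick an OpenMetrics version explicitly and pass encoder options through (a sketch; error handling trimmed):

package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	format, err := expfmt.NewOpenMetricsFormat(expfmt.OpenMetricsVersion_1_0_0)
	if err != nil {
		panic(err)
	}

	// Extra options are honored only for the OpenMetrics format.
	enc := expfmt.NewEncoder(os.Stdout, format, expfmt.WithCreatedLines(), expfmt.WithUnit())

	// Placeholder family; a real one comes from a Gatherer and must at
	// least carry a name for Encode to succeed.
	var mf dto.MetricFamily
	_ = enc.Encode(&mf)
}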
func (f Format) FormatType() FormatType { toks := strings.Split(string(f), ";") - if len(toks) < 2 { - return TypeUnknown - } - params := make(map[string]string) for i, t := range toks { if i == 0 { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 5622578ed6c..353c5e93f92 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,47 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) +type encoderOption struct { + withCreatedLines bool + withUnit bool +} + +type EncoderOption func(*encoderOption) + +// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder +// to include _created lines (See +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). +// Created timestamps can improve the accuracy of series reset detection, but +// come with a bandwidth cost. +// +// At the time of writing, created timestamp ingestion is still experimental in +// Prometheus and need to be enabled with the feature-flag +// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are +// still possible. Therefore, it is recommended to use this feature with caution. +func WithCreatedLines() EncoderOption { + return func(t *encoderOption) { + t.withCreatedLines = true + } +} + +// WithUnit is an EncoderOption enabling a set unit to be written to the output +// and to be added to the metric name, if it's not there already, as a suffix. +// Without opting in this way, the unit will not be added to the metric name and, +// on top of that, the unit will not be passed onto the output, even if it +// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit !=nil. +func WithUnit() EncoderOption { + return func(t *encoderOption) { + t.withUnit = true + } +} + // MetricFamilyToOpenMetrics converts a MetricFamily proto message into the // OpenMetrics text format and writes the resulting lines to 'out'. It returns // the number of bytes written and any error encountered. The output will have @@ -59,20 +95,34 @@ import ( // Prometheus to OpenMetrics or vice versa: // // - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, +// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT` +// lines. A counter with a missing `_total` suffix is not an error. However, // its type will be set to `unknown` in that case to avoid invalid OpenMetrics // output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - According to the OM specs, the `# UNIT` line is optional, but if populated, +// the unit has to be present in the metric name as its suffix: +// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). 
+// However, in order to accommodate any potential scenario where such a change in the +// metric name is not desirable, the users are here given the choice of either explicitly +// opt in, in case they wish for the unit to be included in the output AND in the metric name +// as a suffix (see the description of the WithUnit function above), +// or not to opt in, in case they don't want for any of that to happen. +// +// - No support for the following (optional) features: info type, +// stateset type, gaugehistogram type. // // - The size of exemplar labels is not checked (i.e. it's possible to create // exemplars that are larger than allowed by the OpenMetrics specification). // // - The value of Counters is not checked. (OpenMetrics doesn't allow counters // with a `NaN` value.) -func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) { + toOM := encoderOption{} + for _, option := range options { + option(&toOM) + } + name := in.GetName() if name == "" { return 0, fmt.Errorf("MetricFamily has no name: %s", in) @@ -95,12 +145,15 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } var ( - n int - metricType = in.GetType() - shortName = name + n int + metricType = in.GetType() + compliantName = name ) - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { - shortName = name[:len(name)-6] + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { + compliantName = name[:len(name)-6] + } + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { + compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) } // Comments, first HELP, then TYPE. @@ -110,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = writeName(w, shortName) + n, err = writeName(w, compliantName) written += n if err != nil { return @@ -136,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = writeName(w, shortName) + n, err = writeName(w, compliantName) written += n if err != nil { return @@ -163,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } + if toOM.withUnit && in.Unit != nil { + n, err = w.WriteString("# UNIT ") + written += n + if err != nil { + return + } + n, err = writeName(w, compliantName) + written += n + if err != nil { + return + } + + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Unit, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + + var createdTsBytesWritten int // Finally the samples, one line for each. + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + compliantName = compliantName + "_total" + } for _, metric := range in.Metric { switch metricType { case dto.MetricType_COUNTER: if metric.Counter == nil { return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, + "expected counter in metric %s %s", compliantName, metric, ) } - // Note that we have ensured above that either the name - // ends on `_total` or that the rendered type is - // `unknown`. Therefore, no `_total` must be added here. 
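Reviewer note (not part of the patch): a small sketch of calling the option-aware MetricFamilyToOpenMetrics directly, to make the unit/`_total` handling in the surrounding hunks concrete. The metric family below is hand-built and hypothetical; the expected output is inferred from the logic above and below this point.

package main

import (
	"os"

	"google.golang.org/protobuf/proto"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// A counter family with a declared unit that is not yet part of the name.
	mf := &dto.MetricFamily{
		Name: proto.String("disk_usage_total"),
		Help: proto.String("Disk usage."),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("bytes"),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(3)}},
		},
	}

	// With WithUnit(), a "# UNIT" line is written and the unit is appended
	// to the name, so the sample should render as disk_usage_bytes_total.
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf, expfmt.WithUnit()); err != nil {
		panic(err)
	}
}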
n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Counter.GetValue(), 0, false, metric.Counter.Exemplar, ) + if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_GAUGE: if metric.Gauge == nil { return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, + "expected gauge in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Gauge.GetValue(), 0, false, nil, ) case dto.MetricType_UNTYPED: if metric.Untyped == nil { return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, + "expected untyped in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Untyped.GetValue(), 0, false, nil, ) case dto.MetricType_SUMMARY: if metric.Summary == nil { return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, + "expected summary in metric %s %s", compliantName, metric, ) } for _, q := range metric.Summary.Quantile { n, err = writeOpenMetricsSample( - w, name, "", metric, + w, compliantName, "", metric, model.QuantileLabel, q.GetQuantile(), q.GetValue(), 0, false, nil, @@ -222,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Summary.GetSampleSum(), 0, false, nil, ) @@ -231,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Summary.GetSampleCount(), true, nil, ) + if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, + "expected histogram in metric %s %s", compliantName, metric, ) } infSeen := false for _, b := range metric.Histogram.Bucket { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), 0, b.GetCumulativeCount(), true, b.Exemplar, @@ -259,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } if !infSeen { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, math.Inf(+1), 0, metric.Histogram.GetSampleCount(), true, nil, @@ -270,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Histogram.GetSampleSum(), 0, false, nil, ) @@ -279,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Histogram.GetSampleCount(), true, 
nil, ) + if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp()) + n += createdTsBytesWritten + } default: return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, + "unexpected type in metric %s %s", compliantName, metric, ) } written += n @@ -350,7 +445,7 @@ func writeOpenMetricsSample( return written, err } } - if exemplar != nil { + if exemplar != nil && len(exemplar.Label) > 0 { n, err = writeExemplar(w, exemplar) written += n if err != nil { @@ -473,6 +568,49 @@ func writeOpenMetricsNameAndLabelPairs( return written, nil } +// writeOpenMetricsCreated writes the created timestamp for a single time series +// following OpenMetrics text format to w, given the metric name, the metric proto +// message itself, optionally a suffix to be removed, e.g. '_total' for counters, +// an additional label name with a float64 value (use empty string as label name if +// not required) and the timestamp that represents the created timestamp. +// The function returns the number of bytes written and any error encountered. +func writeOpenMetricsCreated(w enhancedWriter, + name, suffixToTrim string, metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + createdTimestamp *timestamppb.Timestamp, +) (int, error) { + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + // writeExemplar writes the provided exemplar in OpenMetrics format to w. The // function returns the number of bytes written and any error encountered. func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 178fdbaf615..80d1fe944ea 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -75,7 +75,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool { // Status returns the status of the alert. func (a *Alert) Status() AlertStatus { - if a.Resolved() { + return a.StatusAt(time.Now()) +} + +// StatusAt returns the status of the alert at the given timestamp. +func (a *Alert) StatusAt(ts time.Time) AlertStatus { + if a.ResolvedAt(ts) { return AlertResolved } return AlertFiring @@ -127,6 +132,17 @@ func (as Alerts) HasFiring() bool { return false } +// HasFiringAt returns true iff one of the alerts is not resolved +// at the time ts. +func (as Alerts) HasFiringAt(ts time.Time) bool { + for _, a := range as { + if !a.ResolvedAt(ts) { + return true + } + } + return false +} + // Status returns StatusFiring iff at least one of the alerts is firing. 
func (as Alerts) Status() AlertStatus { if as.HasFiring() { @@ -134,3 +150,12 @@ func (as Alerts) Status() AlertStatus { } return AlertResolved } + +// StatusAt returns StatusFiring iff at least one of the alerts is firing +// at the time ts. +func (as Alerts) StatusAt(ts time.Time) AlertStatus { + if as.HasFiringAt(ts) { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index 6eda08a7395..d0ad88da334 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -17,7 +17,6 @@ import ( "encoding/json" "fmt" "sort" - "strings" ) // A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet @@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet { return result } -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - // Fingerprint returns the LabelSet's fingerprint. func (ls LabelSet) Fingerprint() Fingerprint { return labelSetToFingerprint(ls) diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go new file mode 100644 index 00000000000..481c47b46e5 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -0,0 +1,45 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.21 + +package model + +import ( + "bytes" + "slices" + "strconv" +) + +// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically. +func (l LabelSet) String() string { + var lna [32]string // On stack to avoid memory allocation for sorting names. + labelNames := lna[:0] + for name := range l { + labelNames = append(labelNames, string(name)) + } + slices.Sort(labelNames) + var bytea [1024]byte // On stack to avoid memory allocation while building the output. + b := bytes.NewBuffer(bytea[:0]) + b.WriteByte('{') + for i, name := range labelNames { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(name) + b.WriteByte('=') + b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)]))) + } + b.WriteByte('}') + return b.String() +} diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go new file mode 100644 index 00000000000..c4212685e71 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset_string_go120.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
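Reviewer note (not part of the patch): the model/alert.go hunk above adds StatusAt, HasFiringAt and Alerts.StatusAt so alert state can be evaluated at an arbitrary instant rather than time.Now(). A small hypothetical sketch; the alert values are made up.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	alert := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighLatency"},
		StartsAt: now.Add(-10 * time.Minute),
		EndsAt:   now.Add(-1 * time.Minute), // already resolved relative to now
	}

	// Status() uses time.Now(); StatusAt() lets us ask about the past.
	fmt.Println(alert.Status())                             // resolved
	fmt.Println(alert.StatusAt(now.Add(-5 * time.Minute)))  // firing

	alerts := model.Alerts{alert}
	fmt.Println(alerts.StatusAt(now.Add(-5 * time.Minute))) // firing
}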
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.21 + +package model + +import ( + "fmt" + "sort" + "strings" +) + +// String was optimized using functions not available for go 1.20 +// or lower. We keep the old implementation for compatibility with client_golang. +// Once client golang drops support for go 1.20 (scheduled for August 2024), this +// file can be removed. +func (l LabelSet) String() string { + labelNames := make([]string, 0, len(l)) + for name := range l { + labelNames = append(labelNames, string(name)) + } + sort.Strings(labelNames) + lstrs := make([]string, 0, len(l)) + for _, name := range labelNames { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) + } + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 0bd29b3a3f6..eb865e5a59c 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -204,6 +204,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF out := &dto.MetricFamily{ Help: v.Help, Type: v.Type, + Unit: v.Unit, } // If the name is nil, copy as-is, don't try to escape. diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index c24864a9273..126df9e67ac 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,9 +1,16 @@ --- linters: enable: + - errcheck - godot + - gosimple + - govet + - ineffassign - misspell - revive + - staticcheck + - testifylint + - unused linter-settings: godot: diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md index 56ba67d3e31..e00f3b365b6 100644 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -1,2 +1,3 @@ * Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier +* Paul Gier @pgier +* Ben Kochie @SuperQ diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 062a2818563..16172923506 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -49,23 +49,23 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell command -v gotestsum > /dev/null),) +ifneq ($(shell command -v gotestsum 2> /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.15.0 +PROMU_VERSION ?= 0.17.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.54.2 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +GOLANGCI_LINT_VERSION ?= v1.59.0 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. 
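Reviewer note (not part of the patch): the two LabelSet.String implementations above (go1.21 allocation-free version and the pre-1.21 fallback) are meant to produce identical output. A tiny sketch of that output with made-up labels:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ls := model.LabelSet{
		"job":      "api",
		"instance": "host-1:9090",
	}
	// Both implementations sort names and quote values:
	// {instance="host-1:9090", job="api"}
	fmt.Println(ls)
}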
# windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. ifneq (,$(SKIP_GOLANGCI_LINT)) @@ -169,16 +169,20 @@ common-vet: common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. - $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif +.PHONY: common-lint-fix +common-lint-fix: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint fix" + $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) +endif + .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell command -v yamllint > /dev/null)) +ifeq (, $(shell command -v yamllint 2> /dev/null)) @echo "yamllint not installed so skipping" else yamllint . @@ -204,6 +208,10 @@ common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) +.PHONY: common-docker-repo-name +common-docker-repo-name: + @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 28783e2ddc6..cdcc8a7ccc4 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -55,7 +55,7 @@ type ARPEntry struct { func (fs FS) GatherARPEntries() ([]ARPEntry, error) { data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) + return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) } else { - return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) + return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index 4a173636c96..83807500908 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") + node := strings.TrimSuffix(parts[1], ",") + zone := strings.TrimSuffix(parts[3], 
",") arraySize := len(parts[4:]) if bucketCount == -1 { @@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) + return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index f4f5501c68b..f0950bb4953 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) match, err := regexp.MatchString("^[Pp]rocessor", firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) } field := strings.SplitN(firstLine, ": ", 2) @@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 9a73e263932..5f2a37a78b3 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err) + return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err) } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err) } return crypto, nil @@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) { kv := strings.Split(text, ":") if len(kv) != 2 { - return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text) } k := strings.TrimSpace(kv[0]) diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index f560a8db301..cf2e3eaa03c 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err) + return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) + return fmt.Errorf("%w: Expected %d, but got 
%d: %w", ErrFileParse, len(setFields), len(fields), err) } for i := range setFields { diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 5a145bbfe1f..bc3a20c932d 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) { case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) + return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err) + return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { return nil, 0, - fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err) + fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 59465c5bbcb..332e76c17f5 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) + return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index fdd4b95445b..67a9d2b4486 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -23,7 +23,7 @@ import ( var ( statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) - recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`) recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) @@ -50,6 +50,8 @@ type MDStat struct { BlocksTotal int64 // Number of blocks on the device that are in sync. BlocksSynced int64 + // Number of blocks on the device that need to be synced. 
+ BlocksToBeSynced int64 // progress percentage of current sync BlocksSyncedPct float64 // estimated finishing time for current sync (in minutes) @@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) { } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line) + return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive @@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) + return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size + blocksSynced := size + blocksToBeSynced := size speed := float64(0) finish := float64(0) pct := float64(0) @@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Handle case when resync=PENDING or resync=DELAYED. if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { - syncedBlocks = 0 + blocksSynced = 0 } else { - syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) + blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) + return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { DisksSpare: spare, DisksTotal: total, BlocksTotal: size, - BlocksSynced: syncedBlocks, + BlocksSynced: blocksSynced, + BlocksToBeSynced: blocksToBeSynced, BlocksSyncedPct: pct, BlocksSyncedFinishTime: finish, BlocksSyncedSpeed: speed, @@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in matches := statusLineRE.FindStringSubmatch(statusLine) if 
len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) return active, total, down, size, nil } -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { +func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err) } - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + blocks := strings.Split(matches[1], "/") + blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err) + } + + blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64) + if err != nil { + return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. 
finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } - return syncedBlocks, pct, finish, speed, nil + return blocksSynced, blocksToBeSynced, pct, finish, speed, nil } func evalComponentDevices(deviceFields []string) []string { diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index eaf00e22482..4b2c4050a3d 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -126,6 +126,7 @@ type Meminfo struct { VmallocUsed *uint64 // largest contiguous block of vmalloc area which is free VmallocChunk *uint64 + Percpu *uint64 HardwareCorrupted *uint64 AnonHugePages *uint64 ShmemHugePages *uint64 @@ -140,6 +141,55 @@ type Meminfo struct { DirectMap4k *uint64 DirectMap2M *uint64 DirectMap1G *uint64 + + // The struct fields below are the byte-normalized counterparts to the + // existing struct fields. Values are normalized using the optional + // unit field in the meminfo line. + MemTotalBytes *uint64 + MemFreeBytes *uint64 + MemAvailableBytes *uint64 + BuffersBytes *uint64 + CachedBytes *uint64 + SwapCachedBytes *uint64 + ActiveBytes *uint64 + InactiveBytes *uint64 + ActiveAnonBytes *uint64 + InactiveAnonBytes *uint64 + ActiveFileBytes *uint64 + InactiveFileBytes *uint64 + UnevictableBytes *uint64 + MlockedBytes *uint64 + SwapTotalBytes *uint64 + SwapFreeBytes *uint64 + DirtyBytes *uint64 + WritebackBytes *uint64 + AnonPagesBytes *uint64 + MappedBytes *uint64 + ShmemBytes *uint64 + SlabBytes *uint64 + SReclaimableBytes *uint64 + SUnreclaimBytes *uint64 + KernelStackBytes *uint64 + PageTablesBytes *uint64 + NFSUnstableBytes *uint64 + BounceBytes *uint64 + WritebackTmpBytes *uint64 + CommitLimitBytes *uint64 + CommittedASBytes *uint64 + VmallocTotalBytes *uint64 + VmallocUsedBytes *uint64 + VmallocChunkBytes *uint64 + PercpuBytes *uint64 + HardwareCorruptedBytes *uint64 + AnonHugePagesBytes *uint64 + ShmemHugePagesBytes *uint64 + ShmemPmdMappedBytes *uint64 + CmaTotalBytes *uint64 + CmaFreeBytes *uint64 + HugepagesizeBytes *uint64 + DirectMap4kBytes *uint64 + DirectMap2MBytes *uint64 + DirectMap1GBytes *uint64 } // Meminfo returns an information about current kernel/system memory statistics. 
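Reviewer note (not part of the patch): the Meminfo struct above gains byte-normalized counterparts (MemTotalBytes, PercpuBytes, ...) derived from the optional kB unit in /proc/meminfo. A minimal sketch, assuming a Linux host with /proc mounted at the default location:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	mi, err := fs.Meminfo()
	if err != nil {
		panic(err)
	}
	// MemTotal keeps the raw value from /proc/meminfo (usually in kB);
	// MemTotalBytes is the new unit-normalized counterpart.
	if mi.MemTotal != nil && mi.MemTotalBytes != nil {
		fmt.Printf("MemTotal: %d (raw), %d bytes\n", *mi.MemTotal, *mi.MemTotalBytes)
	}
}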
@@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) { m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err) + return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err) } return *m, nil @@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { var m Meminfo s := bufio.NewScanner(r) for s.Scan() { - // Each line has at least a name and value; we ignore the unit. fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) - } + var val, valBytes uint64 - v, err := strconv.ParseUint(fields[1], 0, 64) + val, err := strconv.ParseUint(fields[1], 0, 64) if err != nil { return nil, err } + switch len(fields) { + case 2: + // No unit present, use the parsed the value as bytes directly. + valBytes = val + case 3: + // Unit present in optional 3rd field, convert it to + // bytes. The only unit supported within the Linux + // kernel is `kB`. + if fields[2] != "kB" { + return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2]) + } + + valBytes = 1024 * val + + default: + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) + } + switch fields[0] { case "MemTotal:": - m.MemTotal = &v + m.MemTotal = &val + m.MemTotalBytes = &valBytes case "MemFree:": - m.MemFree = &v + m.MemFree = &val + m.MemFreeBytes = &valBytes case "MemAvailable:": - m.MemAvailable = &v + m.MemAvailable = &val + m.MemAvailableBytes = &valBytes case "Buffers:": - m.Buffers = &v + m.Buffers = &val + m.BuffersBytes = &valBytes case "Cached:": - m.Cached = &v + m.Cached = &val + m.CachedBytes = &valBytes case "SwapCached:": - m.SwapCached = &v + m.SwapCached = &val + m.SwapCachedBytes = &valBytes case "Active:": - m.Active = &v + m.Active = &val + m.ActiveBytes = &valBytes case "Inactive:": - m.Inactive = &v + m.Inactive = &val + m.InactiveBytes = &valBytes case "Active(anon):": - m.ActiveAnon = &v + m.ActiveAnon = &val + m.ActiveAnonBytes = &valBytes case "Inactive(anon):": - m.InactiveAnon = &v + m.InactiveAnon = &val + m.InactiveAnonBytes = &valBytes case "Active(file):": - m.ActiveFile = &v + m.ActiveFile = &val + m.ActiveFileBytes = &valBytes case "Inactive(file):": - m.InactiveFile = &v + m.InactiveFile = &val + m.InactiveFileBytes = &valBytes case "Unevictable:": - m.Unevictable = &v + m.Unevictable = &val + m.UnevictableBytes = &valBytes case "Mlocked:": - m.Mlocked = &v + m.Mlocked = &val + m.MlockedBytes = &valBytes case "SwapTotal:": - m.SwapTotal = &v + m.SwapTotal = &val + m.SwapTotalBytes = &valBytes case "SwapFree:": - m.SwapFree = &v + m.SwapFree = &val + m.SwapFreeBytes = &valBytes case "Dirty:": - m.Dirty = &v + m.Dirty = &val + m.DirtyBytes = &valBytes case "Writeback:": - m.Writeback = &v + m.Writeback = &val + m.WritebackBytes = &valBytes case "AnonPages:": - m.AnonPages = &v + m.AnonPages = &val + m.AnonPagesBytes = &valBytes case "Mapped:": - m.Mapped = &v + m.Mapped = &val + m.MappedBytes = &valBytes case "Shmem:": - m.Shmem = &v + m.Shmem = &val + m.ShmemBytes = &valBytes case "Slab:": - m.Slab = &v + m.Slab = &val + m.SlabBytes = &valBytes case "SReclaimable:": - m.SReclaimable = &v + m.SReclaimable = &val + m.SReclaimableBytes = &valBytes case "SUnreclaim:": - m.SUnreclaim = &v + m.SUnreclaim = &val + m.SUnreclaimBytes = &valBytes case "KernelStack:": - m.KernelStack = &v + m.KernelStack = &val + m.KernelStackBytes = &valBytes case "PageTables:": - m.PageTables = &v + m.PageTables = &val + 
m.PageTablesBytes = &valBytes case "NFS_Unstable:": - m.NFSUnstable = &v + m.NFSUnstable = &val + m.NFSUnstableBytes = &valBytes case "Bounce:": - m.Bounce = &v + m.Bounce = &val + m.BounceBytes = &valBytes case "WritebackTmp:": - m.WritebackTmp = &v + m.WritebackTmp = &val + m.WritebackTmpBytes = &valBytes case "CommitLimit:": - m.CommitLimit = &v + m.CommitLimit = &val + m.CommitLimitBytes = &valBytes case "Committed_AS:": - m.CommittedAS = &v + m.CommittedAS = &val + m.CommittedASBytes = &valBytes case "VmallocTotal:": - m.VmallocTotal = &v + m.VmallocTotal = &val + m.VmallocTotalBytes = &valBytes case "VmallocUsed:": - m.VmallocUsed = &v + m.VmallocUsed = &val + m.VmallocUsedBytes = &valBytes case "VmallocChunk:": - m.VmallocChunk = &v + m.VmallocChunk = &val + m.VmallocChunkBytes = &valBytes + case "Percpu:": + m.Percpu = &val + m.PercpuBytes = &valBytes case "HardwareCorrupted:": - m.HardwareCorrupted = &v + m.HardwareCorrupted = &val + m.HardwareCorruptedBytes = &valBytes case "AnonHugePages:": - m.AnonHugePages = &v + m.AnonHugePages = &val + m.AnonHugePagesBytes = &valBytes case "ShmemHugePages:": - m.ShmemHugePages = &v + m.ShmemHugePages = &val + m.ShmemHugePagesBytes = &valBytes case "ShmemPmdMapped:": - m.ShmemPmdMapped = &v + m.ShmemPmdMapped = &val + m.ShmemPmdMappedBytes = &valBytes case "CmaTotal:": - m.CmaTotal = &v + m.CmaTotal = &val + m.CmaTotalBytes = &valBytes case "CmaFree:": - m.CmaFree = &v + m.CmaFree = &val + m.CmaFreeBytes = &valBytes case "HugePages_Total:": - m.HugePagesTotal = &v + m.HugePagesTotal = &val case "HugePages_Free:": - m.HugePagesFree = &v + m.HugePagesFree = &val case "HugePages_Rsvd:": - m.HugePagesRsvd = &v + m.HugePagesRsvd = &val case "HugePages_Surp:": - m.HugePagesSurp = &v + m.HugePagesSurp = &val case "Hugepagesize:": - m.Hugepagesize = &v + m.Hugepagesize = &val + m.HugepagesizeBytes = &valBytes case "DirectMap4k:": - m.DirectMap4k = &v + m.DirectMap4k = &val + m.DirectMap4kBytes = &valBytes case "DirectMap2M:": - m.DirectMap2M = &v + m.DirectMap2M = &val + m.DirectMap2MBytes = &valBytes case "DirectMap1G:": - m.DirectMap1G = &v + m.DirectMap1G = &val + m.DirectMap1GBytes = &valBytes } } diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index 388ebf396d5..a704c5e735f 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { if mountInfo[6] != "" { mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) if err != nil { - return nil, fmt.Errorf("%s: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: %w", ErrFileParse, err) } } return mount, nil diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 9d8af6db742..75a3b6c810f 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -88,7 +88,7 @@ type MountStatsNFS struct { // Statistics broken down by filesystem operation. Operations []NFSOperationStats // Statistics about the NFS RPC transport. - Transport NFSTransportStats + Transport []NFSTransportStats } // mountStats implements MountStats. @@ -194,8 +194,6 @@ type NFSOperationStats struct { CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. 
CumulativeTotalRequestMilliseconds uint64 - // The average time from the point the client sends RPC requests until it receives the response. - AverageRTTMilliseconds float64 // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. Errors uint64 } @@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, err } - stats.Transport = *tstats + stats.Transport = append(stats.Transport, *tstats) } // When encountering "per-operation statistics", we must break this @@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], } - if ns[0] != 0 { - opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0]) - } if len(ns) > 8 { opStats.Errors = ns[8] @@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) + return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index fdfa4561197..316df5fbb74 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err) } return stat, nil @@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err) + return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err) } numEntries := len(entries) if numEntries < 16 || numEntries > 17 { diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 4da81ea577c..b70f1fc7a4a 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -50,10 +50,13 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 + // Drops shows the total number of dropped packets of all UPD sockets. + Drops *uint64 } // netIPSocketLine represents the fields parsed from a single line // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. 
netIPSocketLine struct { Sl uint64 @@ -66,6 +69,7 @@ type ( RxQueue uint64 UID uint64 Inode uint64 + Drops *uint64 } ) @@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) { defer f.Close() var netIPSocket NetIPSocket + isUDP := strings.Contains(file, "udp") lr := io.LimitReader(f, readLimit) s := bufio.NewScanner(lr) s.Scan() // skip first line with headers for s.Scan() { fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) + line, err := parseNetIPSocketLine(fields, isUDP) if err != nil { return nil, err } @@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { defer f.Close() var netIPSocketSummary NetIPSocketSummary + var udpPacketDrops uint64 + isUDP := strings.Contains(file, "udp") lr := io.LimitReader(f, readLimit) s := bufio.NewScanner(lr) s.Scan() // skip first line with headers for s.Scan() { fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) + line, err := parseNetIPSocketLine(fields, isUDP) if err != nil { return nil, err } netIPSocketSummary.TxQueueLength += line.TxQueue netIPSocketSummary.RxQueueLength += line.RxQueue netIPSocketSummary.UsedSockets++ + if isUDP { + udpPacketDrops += *line.Drops + netIPSocketSummary.Drops = &udpPacketDrops + } } if err := s.Err(); err != nil { return nil, err @@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) { var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) + return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -144,12 +155,12 @@ func parseIP(hexIP string) (net.IP, error) { } return i, nil default: - return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil) + return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil) } } // parseNetIPSocketLine parses a single line, represented by a list of fields. 
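Reviewer note (not part of the patch): the net_ip_socket.go changes above surface UDP packet drops both per line and in the summary (Drops stays nil for TCP). A hedged sketch, assuming the existing procfs NetUDPSummary accessor, which is built on the newNetIPSocketSummary helper changed here:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	summary, err := fs.NetUDPSummary()
	if err != nil {
		panic(err)
	}
	// Drops is only populated for UDP sockets; it stays nil for TCP.
	if summary.Drops != nil {
		fmt.Printf("udp sockets: %d, dropped packets: %d\n",
			summary.UsedSockets, *summary.Drops)
	}
}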
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { +func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) { line := &netIPSocketLine{} if len(fields) < 10 { return nil, fmt.Errorf( @@ -167,7 +178,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) + return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") @@ -178,7 +189,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) + return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address @@ -190,12 +201,12 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) + return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) + return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue @@ -208,20 +219,29 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) + return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) + return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) + return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + } + + // drops + if isUDP { + drops, err := strconv.ParseUint(fields[12], 0, 64) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err) + } + line.Drops = &drops } return line, nil diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index 360e36af7df..fae62b13d96 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := 
parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err) + return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) + return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index c7708529192..71c8059f4d7 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go new file mode 100644 index 00000000000..13994c1782f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go @@ -0,0 +1,119 @@ +// Copyright 2023 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// TLSStat struct represents data in /proc/net/tls_stat. +// See https://docs.kernel.org/networking/tls.html#statistics +type TLSStat struct { + // number of TX sessions currently installed where host handles cryptography + TLSCurrTxSw int + // number of RX sessions currently installed where host handles cryptography + TLSCurrRxSw int + // number of TX sessions currently installed where NIC handles cryptography + TLSCurrTxDevice int + // number of RX sessions currently installed where NIC handles cryptography + TLSCurrRxDevice int + //number of TX sessions opened with host cryptography + TLSTxSw int + //number of RX sessions opened with host cryptography + TLSRxSw int + // number of TX sessions opened with NIC cryptography + TLSTxDevice int + // number of RX sessions opened with NIC cryptography + TLSRxDevice int + // record decryption failed (e.g. due to incorrect authentication tag) + TLSDecryptError int + // number of RX resyncs sent to NICs handling cryptography + TLSRxDeviceResync int + // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records. + TLSDecryptRetry int + // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. 
+ TLSRxNoPadViolation int +} + +// NewTLSStat reads the tls_stat statistics. +func NewTLSStat() (TLSStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return TLSStat{}, err + } + + return fs.NewTLSStat() +} + +// NewTLSStat reads the tls_stat statistics. +func (fs FS) NewTLSStat() (TLSStat, error) { + file, err := os.Open(fs.proc.Path("net/tls_stat")) + if err != nil { + return TLSStat{}, err + } + defer file.Close() + + var ( + tlsstat = TLSStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return TLSStat{}, err + } + + switch name { + case "TlsCurrTxSw": + tlsstat.TLSCurrTxSw = value + case "TlsCurrRxSw": + tlsstat.TLSCurrRxSw = value + case "TlsCurrTxDevice": + tlsstat.TLSCurrTxDevice = value + case "TlsCurrRxDevice": + tlsstat.TLSCurrRxDevice = value + case "TlsTxSw": + tlsstat.TLSTxSw = value + case "TlsRxSw": + tlsstat.TLSRxSw = value + case "TlsTxDevice": + tlsstat.TLSTxDevice = value + case "TlsRxDevice": + tlsstat.TLSRxDevice = value + case "TlsDecryptError": + tlsstat.TLSDecryptError = value + case "TlsRxDeviceResync": + tlsstat.TLSRxDeviceResync = value + case "TlsDecryptRetry": + tlsstat.TLSDecryptRetry = value + case "TlsRxNoPadViolation": + tlsstat.TLSRxNoPadViolation = value + } + + } + + return tlsstat, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index acbbc57eaba..d868cebdaae 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err) + return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) + return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err) + return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err) + return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("%s failed to parse inode %q: %w", 
ErrFileParse, fields[6], err) + return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err) } } diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go index 7443edca946..7c597bc8708 100644 --- a/vendor/github.com/prometheus/procfs/net_wireless.go +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) { m, err := parseWireless(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err) } return m, nil @@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) if err != nil { - return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) + return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) } qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) if err != nil { - return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) + return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) } qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) if err != nil { - return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) + return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) } dnwid, err := strconv.Atoi(stats[4]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) + return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) } dcrypt, err := strconv.Atoi(stats[5]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) + return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) } dfrag, err := strconv.Atoi(stats[6]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) + return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) } dretry, err := strconv.Atoi(stats[7]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) + return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) } dmisc, err := strconv.Atoi(stats[8]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) + return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) } mbeacon, err := strconv.Atoi(stats[9]) if err != nil { - return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) + return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) } w := &Wireless{ @@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) + return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) } return interfaces, nil diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index d1f71caa5d7..142796368fe 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ 
-111,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) + return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) { return []string{}, nil } - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil + return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil } // Wchan returns the wchan (wait channel) of a process. @@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err) + return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index c86d815d735..9530b14bc68 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) { } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err) + return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index c22666750f2..0f8f847f954 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err) + return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) { typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err) + return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index fe9dbb425f5..ccd35f153a0 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -61,7 +61,7 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) + return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } return parsePSIStats(bytes.NewReader(data)) diff --git 
a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index ad8785a407a..09060e82080 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } v := strings.TrimSpace(kv[1]) - v = strings.TrimRight(v, " kB") + v = strings.TrimSuffix(v, " kB") vKBytes, err := strconv.ParseUint(v, 10, 64) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 923e55005ba..06a8d931c98 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -110,6 +110,11 @@ type ProcStat struct { Policy uint // Aggregated block I/O delays, measured in clock ticks (centiseconds). DelayAcctBlkIOTicks uint64 + // Guest time of the process (time spent running a virtual CPU for a guest + // operating system), measured in clock ticks. + GuestTime int + // Guest time of the process's children, measured in clock ticks. + CGuestTime int proc FS } @@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) { &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, + &s.GuestTime, + &s.CGuestTime, ) if err != nil { return ProcStat{}, err diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 46307f5721e..a055197c63e 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,6 +15,7 @@ package procfs import ( "bytes" + "math/bits" "sort" "strconv" "strings" @@ -76,9 +77,9 @@ type ProcStatus struct { NonVoluntaryCtxtSwitches uint64 // UIDs of the process (Real, effective, saved set, and filesystem UIDs) - UIDs [4]string + UIDs [4]uint64 // GIDs of the process (Real, effective, saved set, and filesystem GIDs) - GIDs [4]string + GIDs [4]uint64 // CpusAllowedList: List of cpu cores processes are allowed to run on. CpusAllowedList []uint64 @@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) { // convert kB to B vBytes := vKBytes * 1024 - s.fillStatus(k, v, vKBytes, vBytes) + err = s.fillStatus(k, v, vKBytes, vBytes) + if err != nil { + return ProcStatus{}, err + } } return s, nil } -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error { switch k { case "Tgid": s.TGID = int(vUint) case "Name": s.Name = vString case "Uid": - copy(s.UIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "Gid": - copy(s.GIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "NSpid": s.NSpids = calcNSPidsList(vString) case "VmPeak": @@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.CpusAllowedList = calcCpusAllowedList(vString) } + return nil } // TotalCtxtSwitches returns the total context switch. 
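The procfs changes above are largely mechanical: error wrapping switches from %s to %w so the ErrFileParse and ErrFileRead sentinels survive wrapping and can be matched with errors.Is, and a few readers grow new data (UDP drops in the net/ip socket lines, a /proc/net/tls_stat parser, GuestTime/CGuestTime in ProcStat, and UIDs/GIDs as [4]uint64 instead of [4]string in ProcStatus). A minimal consumer-side sketch under those assumptions; the identifiers come from the hunks above, while the program structure and error handling are illustrative only:

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// Parse failures are now wrapped with %w, so the sentinel still matches
	// even though the underlying strconv/scanner error is chained behind it.
	if _, err := fs.NetSoftnetStat(); err != nil && errors.Is(err, procfs.ErrFileParse) {
		fmt.Println("softnet_stat present but malformed:", err)
	}

	// New in this bump: /proc/net/tls_stat counters (only present when the
	// kernel TLS module is loaded, so treat errors as non-fatal here).
	if tls, err := fs.NewTLSStat(); err == nil {
		fmt.Println("current RX sessions with host crypto:", tls.TLSCurrRxSw)
	}

	// ProcStat gains GuestTime/CGuestTime; ProcStatus.UIDs is now [4]uint64.
	self, err := fs.Self()
	if err != nil {
		log.Fatal(err)
	}
	if stat, err := self.Stat(); err == nil {
		fmt.Println("guest time (clock ticks):", stat.GuestTime)
	}
	if status, err := self.NewStatus(); err == nil {
		fmt.Println("real UID:", status.UIDs[0]) // uint64 now, no strconv needed
	}
}

Callers that previously compared formatted error strings, or parsed status.UIDs themselves, will want the errors.Is form and the integer fields shown here.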
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 12c5bf05b74..5eefbe2ef8b 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) { vp := util.NewValueParser(f) values[i] = vp.Int() if err := vp.Err(); err != nil { - return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) + return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) } } return values, nil diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index b8fad677dc6..28708e07459 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TIMER:": @@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_TX:": @@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_RX:": @@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "BLOCK:": @@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "IRQ_POLL:": @@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q 
(IRQ_POLL%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TASKLET:": @@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "SCHED:": @@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "HRTIMER:": @@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "RCU:": @@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) } } } } if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err) } return softirqs, scanner.Err() diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 34fc3ee21b6..e36b41c18a9 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return 
softIRQStat, total, nil @@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index fa00f555db7..65fec834bf4 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) { swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) + return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) + return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = 
strconv.Atoi(swapFields[4]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) + return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index df2215ece00..80e0e947be7 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err) + return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err) } t := Procs{} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index ce5fefa5b3e..e54d94b0903 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/vendor/github.com/quic-go/quic-go/.gitignore b/vendor/github.com/quic-go/quic-go/.gitignore index 3cc06f240fc..b454729d5b2 100644 --- a/vendor/github.com/quic-go/quic-go/.gitignore +++ b/vendor/github.com/quic-go/quic-go/.gitignore @@ -4,6 +4,7 @@ main mockgen_tmp.go *.qtr *.qlog +*.sqlog *.txt race.[0-9]* diff --git a/vendor/github.com/quic-go/quic-go/client.go b/vendor/github.com/quic-go/quic-go/client.go index 20ef74072b9..1c5654f6dca 100644 --- a/vendor/github.com/quic-go/quic-go/client.go +++ b/vendor/github.com/quic-go/quic-go/client.go @@ -191,6 +191,7 @@ func (c *client) dial(ctx context.Context) error { c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.sendConn.LocalAddr(), c.sendConn.RemoteAddr(), c.srcConnID, c.destConnID, c.version) c.conn = newClientConnection( + context.WithValue(context.WithoutCancel(ctx), ConnectionTracingKey, c.tracingID), c.sendConn, c.packetHandlers, c.destConnID, @@ -202,7 +203,6 @@ func (c *client) dial(ctx context.Context) error { c.use0RTT, c.hasNegotiatedVersion, c.tracer, - c.tracingID, c.logger, c.version, ) diff --git a/vendor/github.com/quic-go/quic-go/connection.go b/vendor/github.com/quic-go/quic-go/connection.go index 0404fb19586..f4a5ca93ea1 100644 --- a/vendor/github.com/quic-go/quic-go/connection.go +++ b/vendor/github.com/quic-go/quic-go/connection.go @@ -16,7 +16,6 @@ import ( "github.com/quic-go/quic-go/internal/ackhandler" "github.com/quic-go/quic-go/internal/flowcontrol" "github.com/quic-go/quic-go/internal/handshake" - "github.com/quic-go/quic-go/internal/logutils" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/qerr" 
"github.com/quic-go/quic-go/internal/utils" @@ -25,15 +24,10 @@ import ( ) type unpacker interface { - UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.Version) (*unpackedPacket, error) + UnpackLongHeader(hdr *wire.Header, data []byte) (*unpackedPacket, error) UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) } -type streamGetter interface { - GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error) - GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error) -} - type streamManager interface { GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error) GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error) @@ -52,13 +46,14 @@ type streamManager interface { } type cryptoStreamHandler interface { - StartHandshake() error + StartHandshake(context.Context) error ChangeConnectionID(protocol.ConnectionID) SetLargest1RTTAcked(protocol.PacketNumber) error SetHandshakeConfirmed() GetSessionTicket() ([]byte, error) NextEvent() handshake.Event DiscardInitialKeys() + HandleMessage([]byte, protocol.EncryptionLevel) error io.Closer ConnectionState() handshake.ConnectionState } @@ -144,8 +139,7 @@ type connection struct { sentPacketHandler ackhandler.SentPacketHandler receivedPacketHandler ackhandler.ReceivedPacketHandler retransmissionQueue *retransmissionQueue - framer framer - windowUpdateQueue *windowUpdateQueue + framer *framer connFlowController flowcontrol.ConnectionFlowController tokenStoreKey string // only set for the client tokenGenerator *handshake.TokenGenerator // only set for the server @@ -157,9 +151,9 @@ type connection struct { maxPayloadSizeEstimate atomic.Uint32 - initialStream cryptoStream - handshakeStream cryptoStream - oneRTTStream cryptoStream // only set for the server + initialStream *cryptoStream + handshakeStream *cryptoStream + oneRTTStream *cryptoStream // only set for the server cryptoStreamHandler cryptoStreamHandler receivedPackets chan receivedPacket @@ -169,10 +163,9 @@ type connection struct { // closeChan is used to notify the run loop that it should terminate closeChan chan closeError - ctx context.Context - ctxCancel context.CancelCauseFunc - handshakeCtx context.Context - handshakeCtxCancel context.CancelFunc + ctx context.Context + ctxCancel context.CancelCauseFunc + handshakeCompleteChan chan struct{} undecryptablePackets []receivedPacket // undecryptable packets, waiting for a change in encryption level undecryptablePacketsToProcess []receivedPacket @@ -222,6 +215,8 @@ var ( ) var newConnection = func( + ctx context.Context, + ctxCancel context.CancelCauseFunc, conn sendConn, runner connRunner, origDestConnID protocol.ConnectionID, @@ -236,11 +231,12 @@ var newConnection = func( tokenGenerator *handshake.TokenGenerator, clientAddressValidated bool, tracer *logging.ConnectionTracer, - tracingID ConnectionTracingID, logger utils.Logger, v protocol.Version, ) quicConn { s := &connection{ + ctx: ctx, + ctxCancel: ctxCancel, conn: conn, config: conf, handshakeDestConnID: destConnID, @@ -274,7 +270,6 @@ var newConnection = func( s.queueControlFrame, connIDGenerator, ) - s.ctx, s.ctxCancel = context.WithCancelCause(context.WithValue(context.Background(), ConnectionTracingKey, tracingID)) s.preSetup() s.sentPacketHandler, s.receivedPacketHandler = ackhandler.NewAckHandler( 0, @@ -333,12 +328,13 @@ var newConnection = func( s.cryptoStreamHandler = cs s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, 
s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective) s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen) - s.cryptoStreamManager = newCryptoStreamManager(cs, s.initialStream, s.handshakeStream, s.oneRTTStream) + s.cryptoStreamManager = newCryptoStreamManager(s.initialStream, s.handshakeStream, s.oneRTTStream) return s } // declare this as a variable, such that we can it mock it in the tests var newClientConnection = func( + ctx context.Context, conn sendConn, runner connRunner, destConnID protocol.ConnectionID, @@ -350,7 +346,6 @@ var newClientConnection = func( enable0RTT bool, hasNegotiatedVersion bool, tracer *logging.ConnectionTracer, - tracingID ConnectionTracingID, logger utils.Logger, v protocol.Version, ) quicConn { @@ -384,7 +379,7 @@ var newClientConnection = func( s.queueControlFrame, connIDGenerator, ) - s.ctx, s.ctxCancel = context.WithCancelCause(context.WithValue(context.Background(), ConnectionTracingKey, tracingID)) + s.ctx, s.ctxCancel = context.WithCancelCause(ctx) s.preSetup() s.sentPacketHandler, s.receivedPacketHandler = ackhandler.NewAckHandler( initialPacketNumber, @@ -437,7 +432,7 @@ var newClientConnection = func( s.version, ) s.cryptoStreamHandler = cs - s.cryptoStreamManager = newCryptoStreamManager(cs, s.initialStream, s.handshakeStream, oneRTTStream) + s.cryptoStreamManager = newCryptoStreamManager(s.initialStream, s.handshakeStream, oneRTTStream) s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen) s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, s.initialStream, s.handshakeStream, s.sentPacketHandler, s.retransmissionQueue, cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective) if len(tlsConf.ServerName) > 0 { @@ -463,7 +458,6 @@ func (s *connection) preSetup() { s.connFlowController = flowcontrol.NewConnectionFlowController( protocol.ByteCount(s.config.InitialConnectionReceiveWindow), protocol.ByteCount(s.config.MaxConnectionReceiveWindow), - s.onHasConnectionWindowUpdate, func(size protocol.ByteCount) bool { if s.config.AllowConnectionWindowIncrease == nil { return true @@ -477,22 +471,22 @@ func (s *connection) preSetup() { s.streamsMap = newStreamsMap( s.ctx, s, + s.queueControlFrame, s.newFlowController, uint64(s.config.MaxIncomingStreams), uint64(s.config.MaxIncomingUniStreams), s.perspective, ) - s.framer = newFramer(s.streamsMap) + s.framer = newFramer() s.receivedPackets = make(chan receivedPacket, protocol.MaxConnUnprocessedPackets) s.closeChan = make(chan closeError, 1) s.sendingScheduled = make(chan struct{}, 1) - s.handshakeCtx, s.handshakeCtxCancel = context.WithCancel(context.Background()) + s.handshakeCompleteChan = make(chan struct{}) now := time.Now() s.lastPacketReceivedTime = now s.creationTime = now - s.windowUpdateQueue = newWindowUpdateQueue(s.streamsMap, s.connFlowController, s.framer.QueueControlFrame) s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger) s.connState.Version = s.version } @@ -500,13 +494,11 @@ func (s *connection) preSetup() { // run the connection main loop func (s *connection) run() error { var closeErr closeError - defer func() { - s.ctxCancel(closeErr.err) - }() + defer func() { s.ctxCancel(closeErr.err) }() s.timer = *newTimer() - if err := s.cryptoStreamHandler.StartHandshake(); err != nil { + if err := s.cryptoStreamHandler.StartHandshake(s.ctx); err != nil { return err } if err := s.handleHandshakeEvents(); err != nil { @@ -667,7 +659,7 @@ func (s *connection) earlyConnReady() <-chan struct{} { } 
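In these connection changes, the dedicated handshakeCtx/handshakeCtxCancel pair is replaced by a handshakeCompleteChan that handleHandshakeComplete closes exactly once (see the HandshakeComplete hunk just below), and the connection's lifetime context is now handed in by the caller, with the tracing ID attached via context.WithValue in dial. A small sketch of how a caller can wait on the resulting channel, assuming a quic.Connection obtained from the public API; the helper name is invented for illustration:

package quicexample

import (
	"context"

	"github.com/quic-go/quic-go"
)

// waitForHandshake blocks until the handshake completes, the connection dies,
// or the caller's context expires. HandshakeComplete returns a channel that is
// closed exactly once, so it is safe to select on from any number of callers.
func waitForHandshake(ctx context.Context, conn quic.Connection) error {
	select {
	case <-conn.HandshakeComplete():
		return nil
	case <-conn.Context().Done():
		// The connection context is cancelled with a cause when the connection closes.
		return context.Cause(conn.Context())
	case <-ctx.Done():
		return ctx.Err()
	}
}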
func (s *connection) HandshakeComplete() <-chan struct{} { - return s.handshakeCtx.Done() + return s.handshakeCompleteChan } func (s *connection) Context() context.Context { @@ -707,10 +699,10 @@ func (s *connection) nextKeepAliveTime() time.Time { func (s *connection) maybeResetTimer() { var deadline time.Time if !s.handshakeComplete { - deadline = utils.MinTime( - s.creationTime.Add(s.config.handshakeTimeout()), - s.idleTimeoutStartTime().Add(s.config.HandshakeIdleTimeout), - ) + deadline = s.creationTime.Add(s.config.handshakeTimeout()) + if t := s.idleTimeoutStartTime().Add(s.config.HandshakeIdleTimeout); t.Before(deadline) { + deadline = t + } } else { if keepAliveTime := s.nextKeepAliveTime(); !keepAliveTime.IsZero() { deadline = keepAliveTime @@ -728,11 +720,15 @@ func (s *connection) maybeResetTimer() { } func (s *connection) idleTimeoutStartTime() time.Time { - return utils.MaxTime(s.lastPacketReceivedTime, s.firstAckElicitingPacketAfterIdleSentTime) + startTime := s.lastPacketReceivedTime + if t := s.firstAckElicitingPacketAfterIdleSentTime; t.After(startTime) { + startTime = t + } + return startTime } func (s *connection) handleHandshakeComplete() error { - defer s.handshakeCtxCancel() + defer close(s.handshakeCompleteChan) // Once the handshake completes, we have derived 1-RTT keys. // There's no point in queueing undecryptable packets for later decryption anymore. s.undecryptablePackets = nil @@ -804,13 +800,11 @@ func (s *connection) handlePacketImpl(rp receivedPacket) bool { data := rp.data p := rp for len(data) > 0 { - var destConnID protocol.ConnectionID if counter > 0 { p = *(p.Clone()) p.data = data - var err error - destConnID, err = wire.ParseConnectionID(p.data, s.srcConnIDLen) + destConnID, err := wire.ParseConnectionID(p.data, s.srcConnIDLen) if err != nil { if s.tracer != nil && s.tracer.DroppedPacket != nil { s.tracer.DroppedPacket(logging.PacketTypeNotDetermined, protocol.InvalidPacketNumber, protocol.ByteCount(len(data)), logging.PacketDropHeaderParseError) @@ -870,7 +864,7 @@ func (s *connection) handlePacketImpl(rp receivedPacket) bool { if counter > 0 { p.buffer.Split() } - processed = s.handleShortHeaderPacket(p, destConnID) + processed = s.handleShortHeaderPacket(p) break } } @@ -879,7 +873,7 @@ func (s *connection) handlePacketImpl(rp receivedPacket) bool { return processed } -func (s *connection) handleShortHeaderPacket(p receivedPacket, destConnID protocol.ConnectionID) bool { +func (s *connection) handleShortHeaderPacket(p receivedPacket) bool { var wasQueued bool defer func() { @@ -889,6 +883,11 @@ func (s *connection) handleShortHeaderPacket(p receivedPacket, destConnID protoc } }() + destConnID, err := wire.ParseConnectionID(p.data, s.srcConnIDLen) + if err != nil { + s.tracer.DroppedPacket(logging.PacketType1RTT, protocol.InvalidPacketNumber, protocol.ByteCount(len(p.data)), logging.PacketDropHeaderParseError) + return false + } pn, pnLen, keyPhase, data, err := s.unpacker.UnpackShortHeader(p.rcvTime, p.data) if err != nil { wasQueued = s.handleUnpackError(err, p, logging.PacketType1RTT) @@ -962,7 +961,7 @@ func (s *connection) handleLongHeaderPacket(p receivedPacket, hdr *wire.Header) return false } - packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data, s.version) + packet, err := s.unpacker.UnpackLongHeader(hdr, p.data) if err != nil { wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr)) return false @@ -1262,7 +1261,7 @@ func (s *connection) handleFrames( isAckEliciting = true } if log != nil { - frames = 
append(frames, logutils.ConvertFrame(frame)) + frames = append(frames, toLoggingFrame(frame)) } // An error occurred handling a previous frame. // Don't handle the current frame. @@ -1379,6 +1378,15 @@ func (s *connection) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protoco if err := s.cryptoStreamManager.HandleCryptoFrame(frame, encLevel); err != nil { return err } + for { + data := s.cryptoStreamManager.GetCryptoData(encLevel) + if data == nil { + break + } + if err := s.cryptoStreamHandler.HandleMessage(data, encLevel); err != nil { + return err + } + } return s.handleHandshakeEvents() } @@ -1669,10 +1677,8 @@ func (s *connection) dropEncryptionLevel(encLevel protocol.EncryptionLevel) erro s.cryptoStreamHandler.DiscardInitialKeys() case protocol.Encryption0RTT: s.streamsMap.ResetFor0RTT() - if err := s.connFlowController.Reset(); err != nil { - return err - } - return s.framer.Handle0RTTRejection() + s.framer.Handle0RTTRejection() + return s.connFlowController.Reset() } return s.cryptoStreamManager.Drop(encLevel) } @@ -1759,7 +1765,10 @@ func (s *connection) checkTransportParameters(params *wire.TransportParameters) func (s *connection) applyTransportParameters() { params := s.peerParams // Our local idle timeout will always be > 0. - s.idleTimeout = utils.MinNonZeroDuration(s.config.MaxIdleTimeout, params.MaxIdleTimeout) + s.idleTimeout = s.config.MaxIdleTimeout + if s.idleTimeout > 0 && params.MaxIdleTimeout < s.idleTimeout { + s.idleTimeout = params.MaxIdleTimeout + } s.keepAliveInterval = min(s.config.KeepAlivePeriod, min(s.idleTimeout/2, protocol.MaxKeepAliveInterval)) s.streamsMap.UpdateLimits(params) s.frameParser.SetAckDelayExponent(params.AckDelayExponent) @@ -1867,7 +1876,9 @@ func (s *connection) sendPackets(now time.Time) error { if isBlocked, offset := s.connFlowController.IsNewlyBlocked(); isBlocked { s.framer.QueueControlFrame(&wire.DataBlockedFrame{MaximumData: offset}) } - s.windowUpdateQueue.QueueAll() + if offset := s.connFlowController.GetWindowUpdate(); offset > 0 { + s.framer.QueueControlFrame(&wire.MaxDataFrame{MaximumData: offset}) + } if cf := s.cryptoStreamManager.GetPostHandshakeData(protocol.MaxPostHandshakeCryptoFrameSize); cf != nil { s.queueControlFrame(cf) } @@ -2158,128 +2169,6 @@ func (s *connection) maxPacketSize() protocol.ByteCount { return s.mtuDiscoverer.CurrentSize() } -func (s *connection) logLongHeaderPacket(p *longHeaderPacket, ecn protocol.ECN) { - // quic-go logging - if s.logger.Debug() { - p.header.Log(s.logger) - if p.ack != nil { - wire.LogFrame(s.logger, p.ack, true) - } - for _, frame := range p.frames { - wire.LogFrame(s.logger, frame.Frame, true) - } - for _, frame := range p.streamFrames { - wire.LogFrame(s.logger, frame.Frame, true) - } - } - - // tracing - if s.tracer != nil && s.tracer.SentLongHeaderPacket != nil { - frames := make([]logging.Frame, 0, len(p.frames)) - for _, f := range p.frames { - frames = append(frames, logutils.ConvertFrame(f.Frame)) - } - for _, f := range p.streamFrames { - frames = append(frames, logutils.ConvertFrame(f.Frame)) - } - var ack *logging.AckFrame - if p.ack != nil { - ack = logutils.ConvertAckFrame(p.ack) - } - s.tracer.SentLongHeaderPacket(p.header, p.length, ecn, ack, frames) - } -} - -func (s *connection) logShortHeaderPacket( - destConnID protocol.ConnectionID, - ackFrame *wire.AckFrame, - frames []ackhandler.Frame, - streamFrames []ackhandler.StreamFrame, - pn protocol.PacketNumber, - pnLen protocol.PacketNumberLen, - kp protocol.KeyPhaseBit, - ecn protocol.ECN, - size 
protocol.ByteCount, - isCoalesced bool, -) { - if s.logger.Debug() && !isCoalesced { - s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT (ECN: %s)", pn, size, s.logID, ecn) - } - // quic-go logging - if s.logger.Debug() { - wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp) - if ackFrame != nil { - wire.LogFrame(s.logger, ackFrame, true) - } - for _, f := range frames { - wire.LogFrame(s.logger, f.Frame, true) - } - for _, f := range streamFrames { - wire.LogFrame(s.logger, f.Frame, true) - } - } - - // tracing - if s.tracer != nil && s.tracer.SentShortHeaderPacket != nil { - fs := make([]logging.Frame, 0, len(frames)+len(streamFrames)) - for _, f := range frames { - fs = append(fs, logutils.ConvertFrame(f.Frame)) - } - for _, f := range streamFrames { - fs = append(fs, logutils.ConvertFrame(f.Frame)) - } - var ack *logging.AckFrame - if ackFrame != nil { - ack = logutils.ConvertAckFrame(ackFrame) - } - s.tracer.SentShortHeaderPacket( - &logging.ShortHeader{ - DestConnectionID: destConnID, - PacketNumber: pn, - PacketNumberLen: pnLen, - KeyPhase: kp, - }, - size, - ecn, - ack, - fs, - ) - } -} - -func (s *connection) logCoalescedPacket(packet *coalescedPacket, ecn protocol.ECN) { - if s.logger.Debug() { - // There's a short period between dropping both Initial and Handshake keys and completion of the handshake, - // during which we might call PackCoalescedPacket but just pack a short header packet. - if len(packet.longHdrPackets) == 0 && packet.shortHdrPacket != nil { - s.logShortHeaderPacket( - packet.shortHdrPacket.DestConnID, - packet.shortHdrPacket.Ack, - packet.shortHdrPacket.Frames, - packet.shortHdrPacket.StreamFrames, - packet.shortHdrPacket.PacketNumber, - packet.shortHdrPacket.PacketNumberLen, - packet.shortHdrPacket.KeyPhase, - ecn, - packet.shortHdrPacket.Length, - false, - ) - return - } - if len(packet.longHdrPackets) > 1 { - s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID) - } else { - s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel()) - } - } - for _, p := range packet.longHdrPackets { - s.logLongHeaderPacket(p, ecn) - } - if p := packet.shortHdrPacket; p != nil { - s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, p.Length, true) - } -} - // AcceptStream returns the next stream openend by the peer func (s *connection) AcceptStream(ctx context.Context) (Stream, error) { return s.streamsMap.AcceptStream(ctx) @@ -2321,7 +2210,6 @@ func (s *connection) newFlowController(id protocol.StreamID) flowcontrol.StreamF protocol.ByteCount(s.config.InitialStreamReceiveWindow), protocol.ByteCount(s.config.MaxStreamReceiveWindow), initialSendWindow, - s.onHasStreamWindowUpdate, s.rttStats, s.logger, ) @@ -2360,18 +2248,13 @@ func (s *connection) queueControlFrame(f wire.Frame) { s.scheduleSending() } -func (s *connection) onHasStreamWindowUpdate(id protocol.StreamID) { - s.windowUpdateQueue.AddStream(id) - s.scheduleSending() -} - -func (s *connection) onHasConnectionWindowUpdate() { - s.windowUpdateQueue.AddConnection() +func (s *connection) onHasStreamData(id protocol.StreamID, str sendStreamI) { + s.framer.AddActiveStream(id, str) s.scheduleSending() } -func (s *connection) onHasStreamData(id protocol.StreamID) { - s.framer.AddActiveStream(id) +func (s 
*connection) onHasStreamControlFrame(id protocol.StreamID, str streamControlFrameGetter) { + s.framer.AddStreamWithControlFrames(id, str) s.scheduleSending() } @@ -2379,6 +2262,7 @@ func (s *connection) onStreamCompleted(id protocol.StreamID) { if err := s.streamsMap.DeleteStream(id); err != nil { s.closeLocal(err) } + s.framer.RemoveActiveStream(id) } func (s *connection) onMTUIncreased(mtu protocol.ByteCount) { @@ -2425,10 +2309,17 @@ func (s *connection) GetVersion() protocol.Version { return s.version } -func (s *connection) NextConnection() Connection { - <-s.HandshakeComplete() - s.streamsMap.UseResetMaps() - return s +func (s *connection) NextConnection(ctx context.Context) (Connection, error) { + // The handshake might fail after the server rejected 0-RTT. + // This could happen if the Finished message is malformed or never received. + select { + case <-ctx.Done(): + return nil, context.Cause(ctx) + case <-s.Context().Done(): + case <-s.HandshakeComplete(): + s.streamsMap.UseResetMaps() + } + return s, nil } // estimateMaxPayloadSize estimates the maximum payload size for short header packets. diff --git a/vendor/github.com/quic-go/quic-go/connection_logging.go b/vendor/github.com/quic-go/quic-go/connection_logging.go new file mode 100644 index 00000000000..f75b39f6f52 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/connection_logging.go @@ -0,0 +1,173 @@ +package quic + +import ( + "slices" + + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" +) + +// ConvertFrame converts a wire.Frame into a logging.Frame. +// This makes it possible for external packages to access the frames. +// Furthermore, it removes the data slices from CRYPTO and STREAM frames. +func toLoggingFrame(frame wire.Frame) logging.Frame { + switch f := frame.(type) { + case *wire.AckFrame: + // We use a pool for ACK frames. + // Implementations of the tracer interface may hold on to frames, so we need to make a copy here. 
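The NextConnection hunk above also changes the public flow for servers that accept 0-RTT: the method now takes a context and returns an error instead of blocking until HandshakeComplete fires. A hedged sketch of the adjusted call site, assuming the exported EarlyConnection interface carries the same signature as the method shown here; the timeout and helper are illustrative:

package quicexample

import (
	"context"
	"time"

	"github.com/quic-go/quic-go"
)

// confirmedConn waits for the 1-RTT connection behind a 0-RTT (early) one.
// Because NextConnection now accepts a context and returns an error, the wait
// can be bounded instead of hanging forever when the handshake is never
// confirmed (for example after the server rejects 0-RTT and the Finished
// message never arrives).
func confirmedConn(ec quic.EarlyConnection) (quic.Connection, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return ec.NextConnection(ctx)
}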
+ return toLoggingAckFrame(f) + case *wire.CryptoFrame: + return &logging.CryptoFrame{ + Offset: f.Offset, + Length: protocol.ByteCount(len(f.Data)), + } + case *wire.StreamFrame: + return &logging.StreamFrame{ + StreamID: f.StreamID, + Offset: f.Offset, + Length: f.DataLen(), + Fin: f.Fin, + } + case *wire.DatagramFrame: + return &logging.DatagramFrame{ + Length: logging.ByteCount(len(f.Data)), + } + default: + return logging.Frame(frame) + } +} + +func toLoggingAckFrame(f *wire.AckFrame) *logging.AckFrame { + ack := &logging.AckFrame{ + AckRanges: slices.Clone(f.AckRanges), + DelayTime: f.DelayTime, + ECNCE: f.ECNCE, + ECT0: f.ECT0, + ECT1: f.ECT1, + } + return ack +} + +func (s *connection) logLongHeaderPacket(p *longHeaderPacket, ecn protocol.ECN) { + // quic-go logging + if s.logger.Debug() { + p.header.Log(s.logger) + if p.ack != nil { + wire.LogFrame(s.logger, p.ack, true) + } + for _, frame := range p.frames { + wire.LogFrame(s.logger, frame.Frame, true) + } + for _, frame := range p.streamFrames { + wire.LogFrame(s.logger, frame.Frame, true) + } + } + + // tracing + if s.tracer != nil && s.tracer.SentLongHeaderPacket != nil { + frames := make([]logging.Frame, 0, len(p.frames)) + for _, f := range p.frames { + frames = append(frames, toLoggingFrame(f.Frame)) + } + for _, f := range p.streamFrames { + frames = append(frames, toLoggingFrame(f.Frame)) + } + var ack *logging.AckFrame + if p.ack != nil { + ack = toLoggingAckFrame(p.ack) + } + s.tracer.SentLongHeaderPacket(p.header, p.length, ecn, ack, frames) + } +} + +func (s *connection) logShortHeaderPacket( + destConnID protocol.ConnectionID, + ackFrame *wire.AckFrame, + frames []ackhandler.Frame, + streamFrames []ackhandler.StreamFrame, + pn protocol.PacketNumber, + pnLen protocol.PacketNumberLen, + kp protocol.KeyPhaseBit, + ecn protocol.ECN, + size protocol.ByteCount, + isCoalesced bool, +) { + if s.logger.Debug() && !isCoalesced { + s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT (ECN: %s)", pn, size, s.logID, ecn) + } + // quic-go logging + if s.logger.Debug() { + wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp) + if ackFrame != nil { + wire.LogFrame(s.logger, ackFrame, true) + } + for _, f := range frames { + wire.LogFrame(s.logger, f.Frame, true) + } + for _, f := range streamFrames { + wire.LogFrame(s.logger, f.Frame, true) + } + } + + // tracing + if s.tracer != nil && s.tracer.SentShortHeaderPacket != nil { + fs := make([]logging.Frame, 0, len(frames)+len(streamFrames)) + for _, f := range frames { + fs = append(fs, toLoggingFrame(f.Frame)) + } + for _, f := range streamFrames { + fs = append(fs, toLoggingFrame(f.Frame)) + } + var ack *logging.AckFrame + if ackFrame != nil { + ack = toLoggingAckFrame(ackFrame) + } + s.tracer.SentShortHeaderPacket( + &logging.ShortHeader{ + DestConnectionID: destConnID, + PacketNumber: pn, + PacketNumberLen: pnLen, + KeyPhase: kp, + }, + size, + ecn, + ack, + fs, + ) + } +} + +func (s *connection) logCoalescedPacket(packet *coalescedPacket, ecn protocol.ECN) { + if s.logger.Debug() { + // There's a short period between dropping both Initial and Handshake keys and completion of the handshake, + // during which we might call PackCoalescedPacket but just pack a short header packet. 
+ if len(packet.longHdrPackets) == 0 && packet.shortHdrPacket != nil { + s.logShortHeaderPacket( + packet.shortHdrPacket.DestConnID, + packet.shortHdrPacket.Ack, + packet.shortHdrPacket.Frames, + packet.shortHdrPacket.StreamFrames, + packet.shortHdrPacket.PacketNumber, + packet.shortHdrPacket.PacketNumberLen, + packet.shortHdrPacket.KeyPhase, + ecn, + packet.shortHdrPacket.Length, + false, + ) + return + } + if len(packet.longHdrPackets) > 1 { + s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID) + } else { + s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel()) + } + } + for _, p := range packet.longHdrPackets { + s.logLongHeaderPacket(p, ecn) + } + if p := packet.shortHdrPacket; p != nil { + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.StreamFrames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, ecn, p.Length, true) + } +} diff --git a/vendor/github.com/quic-go/quic-go/crypto_stream.go b/vendor/github.com/quic-go/quic-go/crypto_stream.go index abc7ddcf86c..9a387baa422 100644 --- a/vendor/github.com/quic-go/quic-go/crypto_stream.go +++ b/vendor/github.com/quic-go/quic-go/crypto_stream.go @@ -2,27 +2,14 @@ package quic import ( "fmt" - "io" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/qerr" "github.com/quic-go/quic-go/internal/wire" ) -type cryptoStream interface { - // for receiving data - HandleCryptoFrame(*wire.CryptoFrame) error - GetCryptoData() []byte - Finish() error - // for sending data - io.Writer - HasData() bool - PopCryptoFrame(protocol.ByteCount) *wire.CryptoFrame -} - -type cryptoStreamImpl struct { - queue *frameSorter - msgBuf []byte +type cryptoStream struct { + queue frameSorter highestOffset protocol.ByteCount finished bool @@ -31,11 +18,11 @@ type cryptoStreamImpl struct { writeBuf []byte } -func newCryptoStream() cryptoStream { - return &cryptoStreamImpl{queue: newFrameSorter()} +func newCryptoStream() *cryptoStream { + return &cryptoStream{queue: *newFrameSorter()} } -func (s *cryptoStreamImpl) HandleCryptoFrame(f *wire.CryptoFrame) error { +func (s *cryptoStream) HandleCryptoFrame(f *wire.CryptoFrame) error { highestOffset := f.Offset + protocol.ByteCount(len(f.Data)) if maxOffset := highestOffset; maxOffset > protocol.MaxCryptoStreamOffset { return &qerr.TransportError{ @@ -56,26 +43,16 @@ func (s *cryptoStreamImpl) HandleCryptoFrame(f *wire.CryptoFrame) error { return nil } s.highestOffset = max(s.highestOffset, highestOffset) - if err := s.queue.Push(f.Data, f.Offset, nil); err != nil { - return err - } - for { - _, data, _ := s.queue.Pop() - if data == nil { - return nil - } - s.msgBuf = append(s.msgBuf, data...) 
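The buffering loop being removed from the crypto stream here (draining the frame sorter into msgBuf) does not disappear: it moves up into connection.handleCryptoFrame, which now repeatedly asks the crypto stream manager for contiguous data via GetCryptoData (shown just below) and feeds it to the TLS handler's HandleMessage. A toy sketch of that pump under invented interface names, since the real types are internal to the package:

package quicexample

// cryptoSource and messageHandler are invented stand-ins for the internal
// cryptoStreamManager and TLS crypto handler; they exist only to show the
// shape of the pump that handleCryptoFrame now runs.
type cryptoSource interface {
	// GetCryptoData returns the next chunk of contiguous reassembled CRYPTO
	// data, or nil if nothing more is available yet.
	GetCryptoData() []byte
}

type messageHandler interface {
	HandleMessage(data []byte) error
}

// pumpCryptoData drains the source into the handler, mirroring the new loop
// in connection.handleCryptoFrame.
func pumpCryptoData(src cryptoSource, h messageHandler) error {
	for {
		data := src.GetCryptoData()
		if data == nil {
			return nil
		}
		if err := h.HandleMessage(data); err != nil {
			return err
		}
	}
}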
- } + return s.queue.Push(f.Data, f.Offset, nil) } // GetCryptoData retrieves data that was received in CRYPTO frames -func (s *cryptoStreamImpl) GetCryptoData() []byte { - b := s.msgBuf - s.msgBuf = nil - return b +func (s *cryptoStream) GetCryptoData() []byte { + _, data, _ := s.queue.Pop() + return data } -func (s *cryptoStreamImpl) Finish() error { +func (s *cryptoStream) Finish() error { if s.queue.HasMoreData() { return &qerr.TransportError{ ErrorCode: qerr.ProtocolViolation, @@ -87,16 +64,16 @@ func (s *cryptoStreamImpl) Finish() error { } // Writes writes data that should be sent out in CRYPTO frames -func (s *cryptoStreamImpl) Write(p []byte) (int, error) { +func (s *cryptoStream) Write(p []byte) (int, error) { s.writeBuf = append(s.writeBuf, p...) return len(p), nil } -func (s *cryptoStreamImpl) HasData() bool { +func (s *cryptoStream) HasData() bool { return len(s.writeBuf) > 0 } -func (s *cryptoStreamImpl) PopCryptoFrame(maxLen protocol.ByteCount) *wire.CryptoFrame { +func (s *cryptoStream) PopCryptoFrame(maxLen protocol.ByteCount) *wire.CryptoFrame { f := &wire.CryptoFrame{Offset: s.writeOffset} n := min(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf))) f.Data = s.writeBuf[:n] diff --git a/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go b/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go index c48e238a78a..d70b9b00770 100644 --- a/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go +++ b/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go @@ -3,32 +3,22 @@ package quic import ( "fmt" - "github.com/quic-go/quic-go/internal/handshake" "github.com/quic-go/quic-go/internal/protocol" "github.com/quic-go/quic-go/internal/wire" ) -type cryptoDataHandler interface { - HandleMessage([]byte, protocol.EncryptionLevel) error - NextEvent() handshake.Event -} - type cryptoStreamManager struct { - cryptoHandler cryptoDataHandler - - initialStream cryptoStream - handshakeStream cryptoStream - oneRTTStream cryptoStream + initialStream *cryptoStream + handshakeStream *cryptoStream + oneRTTStream *cryptoStream } func newCryptoStreamManager( - cryptoHandler cryptoDataHandler, - initialStream cryptoStream, - handshakeStream cryptoStream, - oneRTTStream cryptoStream, + initialStream *cryptoStream, + handshakeStream *cryptoStream, + oneRTTStream *cryptoStream, ) *cryptoStreamManager { return &cryptoStreamManager{ - cryptoHandler: cryptoHandler, initialStream: initialStream, handshakeStream: handshakeStream, oneRTTStream: oneRTTStream, @@ -36,7 +26,7 @@ func newCryptoStreamManager( } func (m *cryptoStreamManager) HandleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error { - var str cryptoStream + var str *cryptoStream //nolint:exhaustive // CRYPTO frames cannot be sent in 0-RTT packets. switch encLevel { case protocol.EncryptionInitial: @@ -48,18 +38,23 @@ func (m *cryptoStreamManager) HandleCryptoFrame(frame *wire.CryptoFrame, encLeve default: return fmt.Errorf("received CRYPTO frame with unexpected encryption level: %s", encLevel) } - if err := str.HandleCryptoFrame(frame); err != nil { - return err - } - for { - data := str.GetCryptoData() - if data == nil { - return nil - } - if err := m.cryptoHandler.HandleMessage(data, encLevel); err != nil { - return err - } + return str.HandleCryptoFrame(frame) +} + +func (m *cryptoStreamManager) GetCryptoData(encLevel protocol.EncryptionLevel) []byte { + var str *cryptoStream + //nolint:exhaustive // CRYPTO frames cannot be sent in 0-RTT packets. 
+ switch encLevel { + case protocol.EncryptionInitial: + str = m.initialStream + case protocol.EncryptionHandshake: + str = m.handshakeStream + case protocol.Encryption1RTT: + str = m.oneRTTStream + default: + panic(fmt.Sprintf("received CRYPTO frame with unexpected encryption level: %s", encLevel)) } + return str.GetCryptoData() } func (m *cryptoStreamManager) GetPostHandshakeData(maxSize protocol.ByteCount) *wire.CryptoFrame { diff --git a/vendor/github.com/quic-go/quic-go/framer.go b/vendor/github.com/quic-go/quic-go/framer.go index 5eef65372e8..e162f6b8f24 100644 --- a/vendor/github.com/quic-go/quic-go/framer.go +++ b/vendor/github.com/quic-go/quic-go/framer.go @@ -1,7 +1,7 @@ package quic import ( - "errors" + "slices" "sync" "github.com/quic-go/quic-go/internal/ackhandler" @@ -11,37 +11,25 @@ import ( "github.com/quic-go/quic-go/quicvarint" ) -type framer interface { - HasData() bool - - QueueControlFrame(wire.Frame) - AppendControlFrames([]ackhandler.Frame, protocol.ByteCount, protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) - - AddActiveStream(protocol.StreamID) - AppendStreamFrames([]ackhandler.StreamFrame, protocol.ByteCount, protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) - - Handle0RTTRejection() error - - // QueuedTooManyControlFrames says if the control frame queue exceeded its maximum queue length. - // This is a hack. - // It is easier to implement than propagating an error return value in QueueControlFrame. - // The correct solution would be to queue frames with their respective structs. - // See https://github.com/quic-go/quic-go/issues/4271 for the queueing of stream-related control frames. - QueuedTooManyControlFrames() bool -} - const ( maxPathResponses = 256 maxControlFrames = 16 << 10 ) -type framerI struct { - mutex sync.Mutex +// This is the largest possible size of a stream-related control frame +// (which is the RESET_STREAM frame). 
+const maxStreamControlFrameSize = 25 - streamGetter streamGetter +type streamControlFrameGetter interface { + getControlFrame() (_ ackhandler.Frame, ok, hasMore bool) +} - activeStreams map[protocol.StreamID]struct{} - streamQueue ringbuffer.RingBuffer[protocol.StreamID] +type framer struct { + mutex sync.Mutex + + activeStreams map[protocol.StreamID]sendStreamI + streamQueue ringbuffer.RingBuffer[protocol.StreamID] + streamsWithControlFrames map[protocol.StreamID]streamControlFrameGetter controlFrameMutex sync.Mutex controlFrames []wire.Frame @@ -49,16 +37,14 @@ type framerI struct { queuedTooManyControlFrames bool } -var _ framer = &framerI{} - -func newFramer(streamGetter streamGetter) framer { - return &framerI{ - streamGetter: streamGetter, - activeStreams: make(map[protocol.StreamID]struct{}), +func newFramer() *framer { + return &framer{ + activeStreams: make(map[protocol.StreamID]sendStreamI), + streamsWithControlFrames: make(map[protocol.StreamID]streamControlFrameGetter), } } -func (f *framerI) HasData() bool { +func (f *framer) HasData() bool { f.mutex.Lock() hasData := !f.streamQueue.Empty() f.mutex.Unlock() @@ -67,10 +53,10 @@ func (f *framerI) HasData() bool { } f.controlFrameMutex.Lock() defer f.controlFrameMutex.Unlock() - return len(f.controlFrames) > 0 || len(f.pathResponses) > 0 + return len(f.streamsWithControlFrames) > 0 || len(f.controlFrames) > 0 || len(f.pathResponses) > 0 } -func (f *framerI) QueueControlFrame(frame wire.Frame) { +func (f *framer) QueueControlFrame(frame wire.Frame) { f.controlFrameMutex.Lock() defer f.controlFrameMutex.Unlock() @@ -92,7 +78,7 @@ func (f *framerI) QueueControlFrame(frame wire.Frame) { f.controlFrames = append(f.controlFrames, frame) } -func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) { +func (f *framer) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.Frame, protocol.ByteCount) { f.controlFrameMutex.Lock() defer f.controlFrameMutex.Unlock() @@ -108,6 +94,29 @@ func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol } } + // add stream-related control frames + for id, str := range f.streamsWithControlFrames { + start: + remainingLen := maxLen - length + if remainingLen <= maxStreamControlFrameSize { + break + } + fr, ok, hasMore := str.getControlFrame() + if !hasMore { + delete(f.streamsWithControlFrames, id) + } + if !ok { + continue + } + frames = append(frames, fr) + length += fr.Frame.Length(v) + if hasMore { + // It is rare that a stream has more than one control frame to queue. + // We don't want to spawn another loop for just to cover that case. + goto start + } + } + for len(f.controlFrames) > 0 { frame := f.controlFrames[len(f.controlFrames)-1] frameLen := frame.Length(v) @@ -118,27 +127,51 @@ func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol length += frameLen f.controlFrames = f.controlFrames[:len(f.controlFrames)-1] } + return frames, length } -func (f *framerI) QueuedTooManyControlFrames() bool { +// QueuedTooManyControlFrames says if the control frame queue exceeded its maximum queue length. +// This is a hack. +// It is easier to implement than propagating an error return value in QueueControlFrame. +// The correct solution would be to queue frames with their respective structs. +// See https://github.com/quic-go/quic-go/issues/4271 for the queueing of stream-related control frames. 
+func (f *framer) QueuedTooManyControlFrames() bool { return f.queuedTooManyControlFrames } -func (f *framerI) AddActiveStream(id protocol.StreamID) { +func (f *framer) AddActiveStream(id protocol.StreamID, str sendStreamI) { f.mutex.Lock() if _, ok := f.activeStreams[id]; !ok { f.streamQueue.PushBack(id) - f.activeStreams[id] = struct{}{} + f.activeStreams[id] = str } f.mutex.Unlock() } -func (f *framerI) AppendStreamFrames(frames []ackhandler.StreamFrame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) { +func (f *framer) AddStreamWithControlFrames(id protocol.StreamID, str streamControlFrameGetter) { + f.controlFrameMutex.Lock() + if _, ok := f.streamsWithControlFrames[id]; !ok { + f.streamsWithControlFrames[id] = str + } + f.controlFrameMutex.Unlock() +} + +// RemoveActiveStream is called when a stream completes. +func (f *framer) RemoveActiveStream(id protocol.StreamID) { + f.mutex.Lock() + delete(f.activeStreams, id) + // We don't delete the stream from the streamQueue, + // since we'd have to iterate over the ringbuffer. + // Instead, we check if the stream is still in activeStreams in AppendStreamFrames. + f.mutex.Unlock() +} + +func (f *framer) AppendStreamFrames(frames []ackhandler.StreamFrame, maxLen protocol.ByteCount, v protocol.Version) ([]ackhandler.StreamFrame, protocol.ByteCount) { startLen := len(frames) var length protocol.ByteCount f.mutex.Lock() - // pop STREAM frames, until less than MinStreamFrameSize bytes are left in the packet + // pop STREAM frames, until less than 128 bytes are left in the packet numActiveStreams := f.streamQueue.Len() for i := 0; i < numActiveStreams; i++ { if protocol.MinStreamFrameSize+length > maxLen { @@ -147,10 +180,9 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.StreamFrame, maxLen pro id := f.streamQueue.PopFront() // This should never return an error. Better check it anyway. // The stream will only be in the streamQueue, if it enqueued itself there. - str, err := f.streamGetter.GetOrOpenSendStream(id) - // The stream can be nil if it completed after it said it had data. - if str == nil || err != nil { - delete(f.activeStreams, id) + str, ok := f.activeStreams[id] + // The stream might have been removed after being enqueued. 
+ if !ok { continue } remainingLen := maxLen - length @@ -165,7 +197,7 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.StreamFrame, maxLen pro delete(f.activeStreams, id) } // The frame can be "nil" - // * if the receiveStream was canceled after it said it had data + // * if the stream was canceled after it said it had data // * the remaining size doesn't allow us to add another STREAM frame if !ok { continue @@ -183,11 +215,12 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.StreamFrame, maxLen pro return frames, length } -func (f *framerI) Handle0RTTRejection() error { +func (f *framer) Handle0RTTRejection() { f.mutex.Lock() defer f.mutex.Unlock() - f.controlFrameMutex.Lock() + defer f.controlFrameMutex.Unlock() + f.streamQueue.Clear() for id := range f.activeStreams { delete(f.activeStreams, id) @@ -195,16 +228,13 @@ func (f *framerI) Handle0RTTRejection() error { var j int for i, frame := range f.controlFrames { switch frame.(type) { - case *wire.MaxDataFrame, *wire.MaxStreamDataFrame, *wire.MaxStreamsFrame: - return errors.New("didn't expect MAX_DATA / MAX_STREAM_DATA / MAX_STREAMS frame to be sent in 0-RTT") - case *wire.DataBlockedFrame, *wire.StreamDataBlockedFrame, *wire.StreamsBlockedFrame: + case *wire.MaxDataFrame, *wire.MaxStreamDataFrame, *wire.MaxStreamsFrame, + *wire.DataBlockedFrame, *wire.StreamDataBlockedFrame, *wire.StreamsBlockedFrame: continue default: f.controlFrames[j] = f.controlFrames[i] j++ } } - f.controlFrames = f.controlFrames[:j] - f.controlFrameMutex.Unlock() - return nil + f.controlFrames = slices.Delete(f.controlFrames, j, len(f.controlFrames)) } diff --git a/vendor/github.com/quic-go/quic-go/http3/client.go b/vendor/github.com/quic-go/quic-go/http3/client.go index f5370549bab..e60acffeed1 100644 --- a/vendor/github.com/quic-go/quic-go/http3/client.go +++ b/vendor/github.com/quic-go/quic-go/http3/client.go @@ -82,7 +82,14 @@ func (c *SingleDestinationRoundTripper) Start() Connection { func (c *SingleDestinationRoundTripper) init() { c.decoder = qpack.NewDecoder(func(hf qpack.HeaderField) {}) c.requestWriter = newRequestWriter() - c.hconn = newConnection(c.Connection, c.EnableDatagrams, protocol.PerspectiveClient, c.Logger) + c.hconn = newConnection( + c.Connection.Context(), + c.Connection, + c.EnableDatagrams, + protocol.PerspectiveClient, + c.Logger, + 0, + ) // send the SETTINGs frame, using 0-RTT data, if possible go func() { if err := c.setupConn(c.hconn); err != nil { diff --git a/vendor/github.com/quic-go/quic-go/http3/conn.go b/vendor/github.com/quic-go/quic-go/http3/conn.go index 7ea4b292918..0fd9412f86c 100644 --- a/vendor/github.com/quic-go/quic-go/http3/conn.go +++ b/vendor/github.com/quic-go/quic-go/http3/conn.go @@ -7,6 +7,7 @@ import ( "net" "sync" "sync/atomic" + "time" "github.com/quic-go/quic-go" "github.com/quic-go/quic-go/internal/protocol" @@ -37,6 +38,7 @@ type Connection interface { type connection struct { quic.Connection + ctx context.Context perspective protocol.Perspective logger *slog.Logger @@ -50,31 +52,48 @@ type connection struct { settings *Settings receivedSettings chan struct{} + + idleTimeout time.Duration + idleTimer *time.Timer } func newConnection( + ctx context.Context, quicConn quic.Connection, enableDatagrams bool, perspective protocol.Perspective, logger *slog.Logger, + idleTimeout time.Duration, ) *connection { c := &connection{ + ctx: ctx, Connection: quicConn, perspective: perspective, logger: logger, + idleTimeout: idleTimeout, enableDatagrams: enableDatagrams, decoder: 
qpack.NewDecoder(func(hf qpack.HeaderField) {}), receivedSettings: make(chan struct{}), streams: make(map[protocol.StreamID]*datagrammer), } + if idleTimeout > 0 { + c.idleTimer = time.AfterFunc(idleTimeout, c.onIdleTimer) + } return c } +func (c *connection) onIdleTimer() { + c.CloseWithError(quic.ApplicationErrorCode(ErrCodeNoError), "idle timeout") +} + func (c *connection) clearStream(id quic.StreamID) { c.streamMx.Lock() defer c.streamMx.Unlock() delete(c.streams, id) + if c.idleTimeout > 0 && len(c.streams) == 0 { + c.idleTimer.Reset(c.idleTimeout) + } } func (c *connection) openRequestStream( @@ -107,12 +126,24 @@ func (c *connection) acceptStream(ctx context.Context) (quic.Stream, *datagramme strID := str.StreamID() c.streamMx.Lock() c.streams[strID] = datagrams + if c.idleTimeout > 0 { + if len(c.streams) == 1 { + c.idleTimer.Stop() + } + } c.streamMx.Unlock() str = newStateTrackingStream(str, c, datagrams) } return str, datagrams, nil } +func (c *connection) CloseWithError(code quic.ApplicationErrorCode, msg string) error { + if c.idleTimer != nil { + c.idleTimer.Stop() + } + return c.Connection.CloseWithError(code, msg) +} + func (c *connection) HandleUnidirectionalStreams(hijack func(StreamType, quic.ConnectionTracingID, quic.ReceiveStream, error) (hijacked bool)) { var ( rcvdControlStr atomic.Bool @@ -264,3 +295,5 @@ func (c *connection) ReceivedSettings() <-chan struct{} { return c.receivedSetti // Settings returns the settings received on this connection. // It is only valid to call this function after the channel returned by ReceivedSettings was closed. func (c *connection) Settings() *Settings { return c.settings } + +func (c *connection) Context() context.Context { return c.ctx } diff --git a/vendor/github.com/quic-go/quic-go/http3/roundtrip.go b/vendor/github.com/quic-go/quic-go/http3/roundtrip.go index 148e337350a..a9b169ee1dc 100644 --- a/vendor/github.com/quic-go/quic-go/http3/roundtrip.go +++ b/vendor/github.com/quic-go/quic-go/http3/roundtrip.go @@ -166,6 +166,7 @@ func (r *RoundTripper) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http. } if cl.dialErr != nil { + r.removeClient(hostname) return nil, cl.dialErr } defer cl.useCount.Add(-1) @@ -258,6 +259,7 @@ func (r *RoundTripper) getClient(ctx context.Context, hostname string, onlyCache select { case <-cl.dialing: if cl.dialErr != nil { + delete(r.clients, hostname) return nil, false, cl.dialErr } select { diff --git a/vendor/github.com/quic-go/quic-go/http3/server.go b/vendor/github.com/quic-go/quic-go/http3/server.go index 5d7aec8a558..9e7cd644fc3 100644 --- a/vendor/github.com/quic-go/quic-go/http3/server.go +++ b/vendor/github.com/quic-go/quic-go/http3/server.go @@ -94,6 +94,10 @@ func ConfigureTLSConfig(tlsConf *tls.Config) *tls.Config { if config == nil { return nil, nil } + // Workaround for https://github.com/golang/go/issues/60506. + // This initializes the session tickets _before_ cloning the config. + _, _ = config.DecryptTicket(nil, tls.ConnectionState{}) + config = config.Clone() config.NextProtos = []string{proto} return config, nil @@ -194,9 +198,14 @@ type Server struct { // In that case, the stream type will not be set. UniStreamHijacker func(StreamType, quic.ConnectionTracingID, quic.ReceiveStream, error) (hijacked bool) - // ConnContext optionally specifies a function that modifies - // the context used for a new connection c. The provided ctx - // has a ServerContextKey value. + // IdleTimeout specifies how long until idle clients connection should be + // closed. 
Idle refers only to the HTTP/3 layer, activity at the QUIC layer + // like PING frames are not considered. + // If zero or negative, there is no timeout. + IdleTimeout time.Duration + + // ConnContext optionally specifies a function that modifies the context used for a new connection c. + // The provided ctx has a ServerContextKey value. ConnContext func(ctx context.Context, c quic.Connection) context.Context Logger *slog.Logger @@ -213,7 +222,13 @@ type Server struct { // // If s.Addr is blank, ":https" is used. func (s *Server) ListenAndServe() error { - return s.serveConn(s.TLSConfig, nil) + ln, err := s.setupListenerForConn(s.TLSConfig, nil) + if err != nil { + return err + } + defer s.removeListener(&ln) + + return s.serveListener(ln) } // ListenAndServeTLS listens on the UDP address s.Addr and calls s.Handler to handle HTTP/3 requests on incoming connections. @@ -228,17 +243,26 @@ func (s *Server) ListenAndServeTLS(certFile, keyFile string) error { } // We currently only use the cert-related stuff from tls.Config, // so we don't need to make a full copy. - config := &tls.Config{ - Certificates: certs, + ln, err := s.setupListenerForConn(&tls.Config{Certificates: certs}, nil) + if err != nil { + return err } - return s.serveConn(config, nil) + defer s.removeListener(&ln) + + return s.serveListener(ln) } // Serve an existing UDP connection. // It is possible to reuse the same connection for outgoing connections. // Closing the server does not close the connection. func (s *Server) Serve(conn net.PacketConn) error { - return s.serveConn(s.TLSConfig, conn) + ln, err := s.setupListenerForConn(s.TLSConfig, conn) + if err != nil { + return err + } + defer s.removeListener(&ln) + + return s.serveListener(ln) } // ServeQUICConn serves a single QUIC connection. @@ -252,10 +276,18 @@ func (s *Server) ServeQUICConn(conn quic.Connection) error { // Closing the server does close the listener. // ServeListener always returns a non-nil error. After Shutdown or Close, the returned error is http.ErrServerClosed. 
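The IdleTimeout field documented above is driven by a plain time.AfterFunc timer in the HTTP/3 connection (see the conn.go hunks earlier in this patch): the timer is stopped when the first request stream is accepted and re-armed once the last stream is cleared. A minimal standalone sketch of that pattern; all names here are illustrative, not part of http3.

package main

import (
	"fmt"
	"sync"
	"time"
)

// idleCloser re-creates the idle-timeout pattern: a timer armed with
// time.AfterFunc fires a close callback, is stopped while at least one stream
// is open, and is re-armed when the stream count drops back to zero.
type idleCloser struct {
	mu      sync.Mutex
	streams int
	timeout time.Duration
	timer   *time.Timer
}

func newIdleCloser(timeout time.Duration, onIdle func()) *idleCloser {
	c := &idleCloser{timeout: timeout}
	if timeout > 0 {
		c.timer = time.AfterFunc(timeout, onIdle)
	}
	return c
}

// addStream is called when a new stream is accepted.
func (c *idleCloser) addStream() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.streams++
	if c.timer != nil && c.streams == 1 {
		c.timer.Stop() // the connection is no longer idle
	}
}

// removeStream is called when a stream completes.
func (c *idleCloser) removeStream() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.streams--
	if c.timer != nil && c.streams == 0 {
		c.timer.Reset(c.timeout) // idle again: restart the countdown
	}
}

func main() {
	c := newIdleCloser(50*time.Millisecond, func() { fmt.Println("idle: closing connection") })
	c.addStream()
	time.Sleep(100 * time.Millisecond) // timer is stopped while a stream is open
	c.removeStream()
	time.Sleep(100 * time.Millisecond) // now the idle callback fires
}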
func (s *Server) ServeListener(ln QUICEarlyListener) error { + s.mutex.Lock() if err := s.addListener(&ln); err != nil { + s.mutex.Unlock() return err } + s.mutex.Unlock() defer s.removeListener(&ln) + + return s.serveListener(ln) +} + +func (s *Server) serveListener(ln QUICEarlyListener) error { for { conn, err := ln.Accept(context.Background()) if err == quic.ErrServerClosed { @@ -276,16 +308,9 @@ func (s *Server) ServeListener(ln QUICEarlyListener) error { var errServerWithoutTLSConfig = errors.New("use of http3.Server without TLSConfig") -func (s *Server) serveConn(tlsConf *tls.Config, conn net.PacketConn) error { +func (s *Server) setupListenerForConn(tlsConf *tls.Config, conn net.PacketConn) (QUICEarlyListener, error) { if tlsConf == nil { - return errServerWithoutTLSConfig - } - - s.mutex.Lock() - closed := s.closed - s.mutex.Unlock() - if closed { - return http.ErrServerClosed + return nil, errServerWithoutTLSConfig } baseConf := ConfigureTLSConfig(tlsConf) @@ -299,6 +324,13 @@ func (s *Server) serveConn(tlsConf *tls.Config, conn net.PacketConn) error { quicConf.EnableDatagrams = true } + s.mutex.Lock() + defer s.mutex.Unlock() + closed := s.closed + if closed { + return nil, http.ErrServerClosed + } + var ln QUICEarlyListener var err error if conn == nil { @@ -311,9 +343,12 @@ func (s *Server) serveConn(tlsConf *tls.Config, conn net.PacketConn) error { ln, err = quicListen(conn, baseConf, quicConf) } if err != nil { - return err + return nil, err + } + if err := s.addListener(&ln); err != nil { + return nil, err } - return s.ServeListener(ln) + return ln, nil } func extractPort(addr string) (int, error) { @@ -389,9 +424,6 @@ func (s *Server) generateAltSvcHeader() { // call trackListener via Serve and can track+defer untrack the same pointer to // local variable there. We never need to compare a Listener from another caller. func (s *Server) addListener(l *QUICEarlyListener) error { - s.mutex.Lock() - defer s.mutex.Unlock() - if s.closed { return http.ErrServerClosed } @@ -436,13 +468,27 @@ func (s *Server) handleConn(conn quic.Connection) error { }).Append(b) str.Write(b) + ctx := conn.Context() + ctx = context.WithValue(ctx, ServerContextKey, s) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, conn.LocalAddr()) + ctx = context.WithValue(ctx, RemoteAddrContextKey, conn.RemoteAddr()) + if s.ConnContext != nil { + ctx = s.ConnContext(ctx, conn) + if ctx == nil { + panic("http3: ConnContext returned nil") + } + } + hconn := newConnection( + ctx, conn, s.EnableDatagrams, protocol.PerspectiveServer, s.Logger, + s.IdleTimeout, ) go hconn.HandleUnidirectionalStreams(s.UniStreamHijacker) + // Process all requests immediately. // It's the client's responsibility to decide which requests are eligible for 0-RTT. 
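Because the server now builds the connection-scoped context once in handleConn and invokes ConnContext there (rather than per request, as before), values attached at connection setup become visible to every request on that connection. A hedged usage sketch; the connIDKey type and the certificate file names are illustrative, not part of the API.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/quic-go/quic-go"
	"github.com/quic-go/quic-go/http3"
)

// connIDKey is an application-defined context key, purely illustrative.
type connIDKey struct{}

func main() {
	srv := &http3.Server{
		Addr: ":443",
		// ConnContext runs once per connection, so the value stored below is
		// shared by all requests handled on that connection.
		ConnContext: func(ctx context.Context, c quic.Connection) context.Context {
			return context.WithValue(ctx, connIDKey{}, c.RemoteAddr().String())
		},
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// The request context is derived from the connection context,
			// so the value attached above is visible here.
			id, _ := r.Context().Value(connIDKey{}).(string)
			w.Write([]byte("hello, " + id))
		}),
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}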
for { @@ -533,17 +579,10 @@ func (s *Server) handleRequest(conn *connection, str quic.Stream, datagrams *dat s.Logger.Debug("handling request", "method", req.Method, "host", req.Host, "uri", req.RequestURI) } - ctx := str.Context() - ctx = context.WithValue(ctx, ServerContextKey, s) - ctx = context.WithValue(ctx, http.LocalAddrContextKey, conn.LocalAddr()) - ctx = context.WithValue(ctx, RemoteAddrContextKey, conn.RemoteAddr()) - if s.ConnContext != nil { - ctx = s.ConnContext(ctx, conn.Connection) - if ctx == nil { - panic("http3: ConnContext returned nil") - } - } + ctx, cancel := context.WithCancel(conn.Context()) req = req.WithContext(ctx) + context.AfterFunc(str.Context(), cancel) + r := newResponseWriter(hstr, conn, req.Method == http.MethodHead, s.Logger) handler := s.Handler if handler == nil { diff --git a/vendor/github.com/quic-go/quic-go/interface.go b/vendor/github.com/quic-go/quic-go/interface.go index 5837cb40510..cec92d6de6a 100644 --- a/vendor/github.com/quic-go/quic-go/interface.go +++ b/vendor/github.com/quic-go/quic-go/interface.go @@ -57,9 +57,11 @@ var Err0RTTRejected = errors.New("0-RTT rejected") // ConnectionTracingKey can be used to associate a ConnectionTracer with a Connection. // It is set on the Connection.Context() context, // as well as on the context passed to logging.Tracer.NewConnectionTracer. +// Deprecated: Applications can set their own tracing key using Transport.ConnContext. var ConnectionTracingKey = connTracingCtxKey{} // ConnectionTracingID is the type of the context value saved under the ConnectionTracingKey. +// Deprecated: Applications can set their own tracing key using Transport.ConnContext. type ConnectionTracingID uint64 type connTracingCtxKey struct{} @@ -87,8 +89,8 @@ type ReceiveStream interface { // Read reads data from the stream. // Read can be made to time out and return a net.Error with Timeout() == true // after a fixed time limit; see SetDeadline and SetReadDeadline. - // If the stream was canceled by the peer, the error implements the StreamError - // interface, and Canceled() == true. + // If the stream was canceled by the peer, the error is a StreamError and + // Remote == true. // If the connection was closed due to a timeout, the error satisfies // the net.Error interface, and Timeout() will be true. io.Reader @@ -111,8 +113,8 @@ type SendStream interface { // Write writes data to the stream. // Write can be made to time out and return a net.Error with Timeout() == true // after a fixed time limit; see SetDeadline and SetWriteDeadline. - // If the stream was canceled by the peer, the error implements the StreamError - // interface, and Canceled() == true. + // If the stream was canceled by the peer, the error is a StreamError and + // Remote == true. // If the connection was closed due to a timeout, the error satisfies // the net.Error interface, and Timeout() will be true. 
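The request-context rewiring above (deriving the request context from conn.Context() and cancelling it via context.AfterFunc once the stream context ends) uses only the standard library. A small stand-alone sketch of the same wiring, with stand-in contexts in place of the connection and stream:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// streamCtx stands in for str.Context(): it ends when the stream is closed.
	streamCtx, closeStream := context.WithCancel(context.Background())

	// connCtx stands in for conn.Context(): the request context is derived
	// from the connection context instead of the stream context ...
	connCtx := context.Background()
	reqCtx, cancel := context.WithCancel(connCtx)

	// ... but context.AfterFunc still cancels the request context as soon as
	// the stream context is done, so handlers keep observing stream cancellation.
	stop := context.AfterFunc(streamCtx, cancel)
	defer stop()

	closeStream()
	select {
	case <-reqCtx.Done():
		fmt.Println("request context canceled:", context.Cause(reqCtx))
	case <-time.After(time.Second):
		fmt.Println("request context still alive")
	}
}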
io.Writer @@ -148,7 +150,7 @@ type SendStream interface { // * TransportError: for errors triggered by the QUIC transport (in many cases a misbehaving peer) // * IdleTimeoutError: when the peer goes away unexpectedly (this is a net.Error timeout error) // * HandshakeTimeoutError: when the cryptographic handshake takes too long (this is a net.Error timeout error) -// * StatelessResetError: when we receive a stateless reset (this is a net.Error temporary error) +// * StatelessResetError: when we receive a stateless reset // * VersionNegotiationError: returned by the client, when there's no version overlap between the peers type Connection interface { // AcceptStream returns the next stream opened by the peer, blocking until one is available. @@ -161,28 +163,29 @@ type Connection interface { AcceptUniStream(context.Context) (ReceiveStream, error) // OpenStream opens a new bidirectional QUIC stream. // There is no signaling to the peer about new streams: - // The peer can only accept the stream after data has been sent on the stream. - // If the error is non-nil, it satisfies the net.Error interface. - // When reaching the peer's stream limit, err.Temporary() will be true. - // If the connection was closed due to a timeout, Timeout() will be true. + // The peer can only accept the stream after data has been sent on the stream, + // or the stream has been reset or closed. + // When reaching the peer's stream limit, it is not possible to open a new stream until the + // peer raises the stream limit. In that case, a StreamLimitReachedError is returned. OpenStream() (Stream, error) // OpenStreamSync opens a new bidirectional QUIC stream. // It blocks until a new stream can be opened. // There is no signaling to the peer about new streams: // The peer can only accept the stream after data has been sent on the stream, // or the stream has been reset or closed. - // If the error is non-nil, it satisfies the net.Error interface. - // If the connection was closed due to a timeout, Timeout() will be true. OpenStreamSync(context.Context) (Stream, error) // OpenUniStream opens a new outgoing unidirectional QUIC stream. - // If the error is non-nil, it satisfies the net.Error interface. - // When reaching the peer's stream limit, Temporary() will be true. - // If the connection was closed due to a timeout, Timeout() will be true. + // There is no signaling to the peer about new streams: + // The peer can only accept the stream after data has been sent on the stream, + // or the stream has been reset or closed. + // When reaching the peer's stream limit, it is not possible to open a new stream until the + // peer raises the stream limit. In that case, a StreamLimitReachedError is returned. OpenUniStream() (SendStream, error) // OpenUniStreamSync opens a new outgoing unidirectional QUIC stream. // It blocks until a new stream can be opened. - // If the error is non-nil, it satisfies the net.Error interface. - // If the connection was closed due to a timeout, Timeout() will be true. + // There is no signaling to the peer about new streams: + // The peer can only accept the stream after data has been sent on the stream, + // or the stream has been reset or closed. OpenUniStreamSync(context.Context) (SendStream, error) // LocalAddr returns the local address. LocalAddr() net.Addr @@ -222,7 +225,7 @@ type EarlyConnection interface { // however the client's identity is only verified once the handshake completes. 
HandshakeComplete() <-chan struct{} - NextConnection() Connection + NextConnection(context.Context) (Connection, error) } // StatelessResetKey is a key used to derive stateless reset tokens. @@ -334,7 +337,6 @@ type Config struct { // DisablePathMTUDiscovery disables Path MTU Discovery (RFC 8899). // This allows the sending of QUIC packets that fully utilize the available MTU of the path. // Path MTU discovery is only available on systems that allow setting of the Don't Fragment (DF) bit. - // If unavailable or disabled, packets will be at most 1280 bytes in size. DisablePathMTUDiscovery bool // Allow0RTT allows the application to decide if a 0-RTT connection attempt should be accepted. // Only valid for the server. diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go index 3143bfe1204..f9feae1dbc6 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go @@ -1,10 +1,9 @@ package ackhandler import ( - "sync" + "slices" "github.com/quic-go/quic-go/internal/protocol" - list "github.com/quic-go/quic-go/internal/utils/linkedlist" "github.com/quic-go/quic-go/internal/wire" ) @@ -14,25 +13,17 @@ type interval struct { End protocol.PacketNumber } -var intervalElementPool sync.Pool - -func init() { - intervalElementPool = *list.NewPool[interval]() -} - // The receivedPacketHistory stores if a packet number has already been received. // It generates ACK ranges which can be used to assemble an ACK frame. // It does not store packet contents. type receivedPacketHistory struct { - ranges *list.List[interval] + ranges []interval // maximum length: protocol.MaxNumAckRanges deletedBelow protocol.PacketNumber } func newReceivedPacketHistory() *receivedPacketHistory { - return &receivedPacketHistory{ - ranges: list.NewWithPool[interval](&intervalElementPool), - } + return &receivedPacketHistory{} } // ReceivedPacket registers a packet with PacketNumber p and updates the ranges @@ -41,58 +32,54 @@ func (h *receivedPacketHistory) ReceivedPacket(p protocol.PacketNumber) bool /* if p < h.deletedBelow { return false } + isNew := h.addToRanges(p) - h.maybeDeleteOldRanges() + // Delete old ranges, if we're tracking too many of them. + // This is a DoS defense against a peer that sends us too many gaps. + if len(h.ranges) > protocol.MaxNumAckRanges { + h.ranges = slices.Delete(h.ranges, 0, len(h.ranges)-protocol.MaxNumAckRanges) + } return isNew } func (h *receivedPacketHistory) addToRanges(p protocol.PacketNumber) bool /* is a new packet (and not a duplicate / delayed packet) */ { - if h.ranges.Len() == 0 { - h.ranges.PushBack(interval{Start: p, End: p}) + if len(h.ranges) == 0 { + h.ranges = append(h.ranges, interval{Start: p, End: p}) return true } - for el := h.ranges.Back(); el != nil; el = el.Prev() { + for i := len(h.ranges) - 1; i >= 0; i-- { // p already included in an existing range. 
Nothing to do here - if p >= el.Value.Start && p <= el.Value.End { + if p >= h.ranges[i].Start && p <= h.ranges[i].End { return false } - if el.Value.End == p-1 { // extend a range at the end - el.Value.End = p + if h.ranges[i].End == p-1 { // extend a range at the end + h.ranges[i].End = p return true } - if el.Value.Start == p+1 { // extend a range at the beginning - el.Value.Start = p + if h.ranges[i].Start == p+1 { // extend a range at the beginning + h.ranges[i].Start = p - prev := el.Prev() - if prev != nil && prev.Value.End+1 == el.Value.Start { // merge two ranges - prev.Value.End = el.Value.End - h.ranges.Remove(el) + if i > 0 && h.ranges[i-1].End+1 == h.ranges[i].Start { // merge two ranges + h.ranges[i-1].End = h.ranges[i].End + h.ranges = slices.Delete(h.ranges, i, i+1) } return true } - // create a new range at the end - if p > el.Value.End { - h.ranges.InsertAfter(interval{Start: p, End: p}, el) + // create a new range after the current one + if p > h.ranges[i].End { + h.ranges = slices.Insert(h.ranges, i+1, interval{Start: p, End: p}) return true } } // create a new range at the beginning - h.ranges.InsertBefore(interval{Start: p, End: p}, h.ranges.Front()) + h.ranges = slices.Insert(h.ranges, 0, interval{Start: p, End: p}) return true } -// Delete old ranges, if we're tracking more than 500 of them. -// This is a DoS defense against a peer that sends us too many gaps. -func (h *receivedPacketHistory) maybeDeleteOldRanges() { - for h.ranges.Len() > protocol.MaxNumAckRanges { - h.ranges.Remove(h.ranges.Front()) - } -} - // DeleteBelow deletes all entries below (but not including) p func (h *receivedPacketHistory) DeleteBelow(p protocol.PacketNumber) { if p < h.deletedBelow { @@ -100,37 +87,39 @@ func (h *receivedPacketHistory) DeleteBelow(p protocol.PacketNumber) { } h.deletedBelow = p - nextEl := h.ranges.Front() - for el := h.ranges.Front(); nextEl != nil; el = nextEl { - nextEl = el.Next() + if len(h.ranges) == 0 { + return + } - if el.Value.End < p { // delete a whole range - h.ranges.Remove(el) - } else if p > el.Value.Start && p <= el.Value.End { - el.Value.Start = p - return + idx := -1 + for i := 0; i < len(h.ranges); i++ { + if h.ranges[i].End < p { // delete a whole range + idx = i + } else if p > h.ranges[i].Start && p <= h.ranges[i].End { + h.ranges[i].Start = p + break } else { // no ranges affected. 
Nothing to do - return + break } } + if idx >= 0 { + h.ranges = slices.Delete(h.ranges, 0, idx+1) + } } // AppendAckRanges appends to a slice of all AckRanges that can be used in an AckFrame func (h *receivedPacketHistory) AppendAckRanges(ackRanges []wire.AckRange) []wire.AckRange { - if h.ranges.Len() > 0 { - for el := h.ranges.Back(); el != nil; el = el.Prev() { - ackRanges = append(ackRanges, wire.AckRange{Smallest: el.Value.Start, Largest: el.Value.End}) - } + for i := len(h.ranges) - 1; i >= 0; i-- { + ackRanges = append(ackRanges, wire.AckRange{Smallest: h.ranges[i].Start, Largest: h.ranges[i].End}) } return ackRanges } func (h *receivedPacketHistory) GetHighestAckRange() wire.AckRange { ackRange := wire.AckRange{} - if h.ranges.Len() > 0 { - r := h.ranges.Back().Value - ackRange.Smallest = r.Start - ackRange.Largest = r.End + if len(h.ranges) > 0 { + ackRange.Smallest = h.ranges[len(h.ranges)-1].Start + ackRange.Largest = h.ranges[len(h.ranges)-1].End } return ackRange } @@ -139,11 +128,12 @@ func (h *receivedPacketHistory) IsPotentiallyDuplicate(p protocol.PacketNumber) if p < h.deletedBelow { return true } - for el := h.ranges.Back(); el != nil; el = el.Prev() { - if p > el.Value.End { + // Iterating over the slices is faster than using a binary search (using slices.BinarySearchFunc). + for i := len(h.ranges) - 1; i >= 0; i-- { + if p > h.ranges[i].End { return false } - if p <= el.Value.End && p >= el.Value.Start { + if p <= h.ranges[i].End && p >= h.ranges[i].Start { return true } } diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go index 3cef89239c9..7a30f7ed749 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go @@ -28,7 +28,7 @@ const ( ) type packetNumberSpace struct { - history *sentPacketHistory + history sentPacketHistory pns packetNumberGenerator lossTime time.Time @@ -38,15 +38,15 @@ type packetNumberSpace struct { largestSent protocol.PacketNumber } -func newPacketNumberSpace(initialPN protocol.PacketNumber, skipPNs bool) *packetNumberSpace { +func newPacketNumberSpace(initialPN protocol.PacketNumber, isAppData bool) *packetNumberSpace { var pns packetNumberGenerator - if skipPNs { + if isAppData { pns = newSkippingPacketNumberGenerator(initialPN, protocol.SkipPacketInitialPeriod, protocol.SkipPacketMaxPeriod) } else { pns = newSequentialPacketNumberGenerator(initialPN) } return &packetNumberSpace{ - history: newSentPacketHistory(), + history: *newSentPacketHistory(isAppData), pns: pns, largestSent: protocol.InvalidPacketNumber, largestAcked: protocol.InvalidPacketNumber, diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go index c14c0f49bae..9968df6ac54 100644 --- a/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go @@ -14,11 +14,16 @@ type sentPacketHistory struct { highestPacketNumber protocol.PacketNumber } -func newSentPacketHistory() *sentPacketHistory { - return &sentPacketHistory{ - packets: make([]*packet, 0, 32), +func newSentPacketHistory(isAppData bool) *sentPacketHistory { + h := &sentPacketHistory{ highestPacketNumber: protocol.InvalidPacketNumber, } + if isAppData { + h.packets = make([]*packet, 0, 
32) + } else { + h.packets = make([]*packet, 0, 6) + } + return h } func (h *sentPacketHistory) checkSequentialPacketNumberUse(pn protocol.PacketNumber) { diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go index 8504cdcf529..2efcad747ea 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go @@ -12,8 +12,6 @@ import ( type connectionFlowController struct { baseFlowController - - queueWindowUpdate func() } var _ ConnectionFlowController = &connectionFlowController{} @@ -23,7 +21,6 @@ var _ ConnectionFlowController = &connectionFlowController{} func NewConnectionFlowController( receiveWindow protocol.ByteCount, maxReceiveWindow protocol.ByteCount, - queueWindowUpdate func(), allowWindowIncrease func(size protocol.ByteCount) bool, rttStats *utils.RTTStats, logger utils.Logger, @@ -37,7 +34,6 @@ func NewConnectionFlowController( allowWindowIncrease: allowWindowIncrease, logger: logger, }, - queueWindowUpdate: queueWindowUpdate, } } @@ -63,18 +59,14 @@ func (c *connectionFlowController) IncrementHighestReceived(increment protocol.B func (c *connectionFlowController) AddBytesRead(n protocol.ByteCount) { c.mutex.Lock() c.baseFlowController.addBytesRead(n) - shouldQueueWindowUpdate := c.hasWindowUpdate() c.mutex.Unlock() - if shouldQueueWindowUpdate { - c.queueWindowUpdate() - } } func (c *connectionFlowController) GetWindowUpdate() protocol.ByteCount { c.mutex.Lock() oldWindowSize := c.receiveWindowSize offset := c.baseFlowController.getWindowUpdate() - if oldWindowSize < c.receiveWindowSize { + if c.logger.Debug() && oldWindowSize < c.receiveWindowSize { c.logger.Debugf("Increasing receive flow control window for the connection to %d kB", c.receiveWindowSize/(1<<10)) } c.mutex.Unlock() diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go index fc5f9de0a5e..57d12a95e7a 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go @@ -8,14 +8,13 @@ type flowController interface { UpdateSendWindow(protocol.ByteCount) (updated bool) AddBytesSent(protocol.ByteCount) // for receiving - AddBytesRead(protocol.ByteCount) GetWindowUpdate() protocol.ByteCount // returns 0 if no update is necessary - IsNewlyBlocked() (bool, protocol.ByteCount) } // A StreamFlowController is a flow controller for a QUIC stream. type StreamFlowController interface { flowController + AddBytesRead(protocol.ByteCount) (shouldQueueWindowUpdate bool) // UpdateHighestReceived is called when a new highest offset is received // final has to be to true if this is the final offset of the stream, // as contained in a STREAM frame with FIN bit, and the RESET_STREAM frame @@ -23,12 +22,15 @@ type StreamFlowController interface { // Abandon is called when reading from the stream is aborted early, // and there won't be any further calls to AddBytesRead. Abandon() + IsNewlyBlocked() bool } // The ConnectionFlowController is the flow controller for the connection. 
type ConnectionFlowController interface { flowController + AddBytesRead(protocol.ByteCount) Reset() error + IsNewlyBlocked() (bool, protocol.ByteCount) } type connectionFlowControllerI interface { diff --git a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go index ccee287dc17..2d58351cb9b 100644 --- a/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go @@ -13,8 +13,6 @@ type streamFlowController struct { streamID protocol.StreamID - queueWindowUpdate func() - connection connectionFlowControllerI receivedFinalOffset bool @@ -29,14 +27,12 @@ func NewStreamFlowController( receiveWindow protocol.ByteCount, maxReceiveWindow protocol.ByteCount, initialSendWindow protocol.ByteCount, - queueWindowUpdate func(protocol.StreamID), rttStats *utils.RTTStats, logger utils.Logger, ) StreamFlowController { return &streamFlowController{ - streamID: streamID, - connection: cfc.(connectionFlowControllerI), - queueWindowUpdate: func() { queueWindowUpdate(streamID) }, + streamID: streamID, + connection: cfc.(connectionFlowControllerI), baseFlowController: baseFlowController{ rttStats: rttStats, receiveWindow: receiveWindow, @@ -97,15 +93,13 @@ func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, return c.connection.IncrementHighestReceived(increment) } -func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) { +func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) (shouldQueueWindowUpdate bool) { c.mutex.Lock() c.baseFlowController.addBytesRead(n) - shouldQueueWindowUpdate := c.shouldQueueWindowUpdate() + shouldQueueWindowUpdate = c.shouldQueueWindowUpdate() c.mutex.Unlock() - if shouldQueueWindowUpdate { - c.queueWindowUpdate() - } c.connection.AddBytesRead(n) + return } func (c *streamFlowController) Abandon() { @@ -127,6 +121,11 @@ func (c *streamFlowController) SendWindowSize() protocol.ByteCount { return min(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize()) } +func (c *streamFlowController) IsNewlyBlocked() bool { + blocked, _ := c.baseFlowController.IsNewlyBlocked() + return blocked +} + func (c *streamFlowController) shouldQueueWindowUpdate() bool { return !c.receivedFinalOffset && c.hasWindowUpdate() } diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go index 4273a4a6ad3..0fb75dc8a8c 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go @@ -123,44 +123,12 @@ func NewCryptoSetupServer( ) cs.allow0RTT = allow0RTT - quicConf := &tls.QUICConfig{TLSConfig: tlsConf} - qtls.SetupConfigForServer(quicConf, cs.allow0RTT, cs.getDataForSessionTicket, cs.handleSessionTicket) - addConnToClientHelloInfo(quicConf.TLSConfig, localAddr, remoteAddr) - - cs.tlsConf = quicConf.TLSConfig - cs.conn = tls.QUICServer(quicConf) - + tlsConf = qtls.SetupConfigForServer(tlsConf, localAddr, remoteAddr, cs.getDataForSessionTicket, cs.handleSessionTicket) + cs.tlsConf = tlsConf + cs.conn = tls.QUICServer(&tls.QUICConfig{TLSConfig: tlsConf}) return cs } -// The tls.Config contains two callbacks that pass in a tls.ClientHelloInfo. 
-// Since crypto/tls doesn't do it, we need to make sure to set the Conn field with a fake net.Conn -// that allows the caller to get the local and the remote address. -func addConnToClientHelloInfo(conf *tls.Config, localAddr, remoteAddr net.Addr) { - if conf.GetConfigForClient != nil { - gcfc := conf.GetConfigForClient - conf.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) { - info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr} - c, err := gcfc(info) - if c != nil { - c = c.Clone() - // This won't be necessary anymore once https://github.com/golang/go/issues/63722 is accepted. - c.MinVersion = tls.VersionTLS13 - // We're returning a tls.Config here, so we need to apply this recursively. - addConnToClientHelloInfo(c, localAddr, remoteAddr) - } - return c, err - } - } - if conf.GetCertificate != nil { - gc := conf.GetCertificate - conf.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { - info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr} - return gc(info) - } - } -} - func newCryptoSetup( connID protocol.ConnectionID, tp *wire.TransportParameters, @@ -203,8 +171,8 @@ func (h *cryptoSetup) SetLargest1RTTAcked(pn protocol.PacketNumber) error { return h.aead.SetLargestAcked(pn) } -func (h *cryptoSetup) StartHandshake() error { - err := h.conn.Start(context.WithValue(context.Background(), QUICVersionContextKey, h.version)) +func (h *cryptoSetup) StartHandshake(ctx context.Context) error { + err := h.conn.Start(context.WithValue(ctx, QUICVersionContextKey, h.version)) if err != nil { return wrapError(err) } @@ -285,7 +253,10 @@ func (h *cryptoSetup) handleEvent(ev tls.QUICEvent) (done bool, err error) { h.handshakeComplete() return false, nil default: - return false, fmt.Errorf("unexpected event: %d", ev.Kind) + // Unknown events should be ignored. + // crypto/tls will ensure that this is safe to do. + // See the discussion following https://github.com/golang/go/issues/68124#issuecomment-2187042510 for details. + return false, nil } } @@ -376,9 +347,7 @@ func (h *cryptoSetup) getDataForSessionTicket() []byte { // Due to limitations in crypto/tls, it's only possible to generate a single session ticket per connection. // It is only valid for the server. func (h *cryptoSetup) GetSessionTicket() ([]byte, error) { - if err := h.conn.SendSessionTicket(tls.QUICSessionTicketOptions{ - EarlyData: h.allow0RTT, - }); err != nil { + if err := h.conn.SendSessionTicket(tls.QUICSessionTicketOptions{EarlyData: h.allow0RTT}); err != nil { // Session tickets might be disabled by tls.Config.SessionTicketsDisabled. // We can't check h.tlsConfig here, since the actual config might have been obtained from // the GetConfigForClient callback. 
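The handleEvent change above now ignores event kinds it does not recognize, since crypto/tls guarantees this is safe (golang/go#68124). A simplified sketch of such an event-drain loop over the crypto/tls QUIC API; the remaining event kinds and all error handling are omitted.

package quicsketch

import "crypto/tls"

// drainEvents processes the events crypto/tls reports after each call into the
// QUICConn and silently ignores kinds introduced by newer Go versions.
func drainEvents(conn *tls.QUICConn) {
	for {
		ev := conn.NextEvent()
		switch ev.Kind {
		case tls.QUICNoEvent:
			return // nothing left to process
		case tls.QUICWriteData:
			// ev.Data would be queued as CRYPTO frame payload at ev.Level here.
		case tls.QUICHandshakeDone:
			// handshake complete; 1-RTT keys are available.
		default:
			// Unknown events are ignored rather than treated as errors.
		}
	}
}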
@@ -655,8 +624,7 @@ func (h *cryptoSetup) ConnectionState() ConnectionState { } func wrapError(err error) error { - // alert 80 is an internal error - if alertErr := tls.AlertError(0); errors.As(err, &alertErr) && alertErr != 80 { + if alertErr := tls.AlertError(0); errors.As(err, &alertErr) { return qerr.NewLocalCryptoError(uint8(alertErr), err) } return &qerr.TransportError{ErrorCode: qerr.InternalError, ErrorMessage: err.Error()} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go b/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go index fab224f9bdb..6e0eec77aa3 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go @@ -1,6 +1,7 @@ package handshake import ( + "context" "crypto/tls" "errors" "io" @@ -91,7 +92,7 @@ type Event struct { // CryptoSetup handles the handshake and protecting / unprotecting packets type CryptoSetup interface { - StartHandshake() error + StartHandshake(context.Context) error io.Closer ChangeConnectionID(protocol.ConnectionID) GetSessionTicket() ([]byte, error) diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go b/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go index 2d91e6b2597..84e58cfc7b8 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go @@ -46,7 +46,7 @@ type TokenGenerator struct { // NewTokenGenerator initializes a new TokenGenerator func NewTokenGenerator(key TokenProtectorKey) *TokenGenerator { - return &TokenGenerator{tokenProtector: newTokenProtector(key)} + return &TokenGenerator{tokenProtector: *newTokenProtector(key)} } // NewRetryToken generates a new token for a Retry for a given source address diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go b/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go index f3a99e4110e..1577918953c 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go @@ -14,28 +14,20 @@ import ( // TokenProtectorKey is the key used to encrypt both Retry and session resumption tokens. type TokenProtectorKey [32]byte -// TokenProtector is used to create and verify a token -type tokenProtector interface { - // NewToken creates a new token - NewToken([]byte) ([]byte, error) - // DecodeToken decodes a token - DecodeToken([]byte) ([]byte, error) -} - const tokenNonceSize = 32 // tokenProtector is used to create and verify a token -type tokenProtectorImpl struct { +type tokenProtector struct { key TokenProtectorKey } // newTokenProtector creates a source for source address tokens -func newTokenProtector(key TokenProtectorKey) tokenProtector { - return &tokenProtectorImpl{key: key} +func newTokenProtector(key TokenProtectorKey) *tokenProtector { + return &tokenProtector{key: key} } // NewToken encodes data into a new token. -func (s *tokenProtectorImpl) NewToken(data []byte) ([]byte, error) { +func (s *tokenProtector) NewToken(data []byte) ([]byte, error) { var nonce [tokenNonceSize]byte if _, err := rand.Read(nonce[:]); err != nil { return nil, err @@ -48,7 +40,7 @@ func (s *tokenProtectorImpl) NewToken(data []byte) ([]byte, error) { } // DecodeToken decodes a token. 
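The token protector touched here (see the createAEAD hunk just below) seals tokens with an AES-256-GCM key derived per token via HKDF, prepending the random HKDF salt to the ciphertext so the token can be decoded later. A self-contained sketch of that scheme; the label string, the helper names, and drawing the AEAD nonce from the same HKDF stream are illustrative assumptions, not copied from the library.

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

const saltLen = 32 // random per-token value, used as the HKDF salt

// deriveAEAD turns the long-lived key and a per-token salt into an AES-256-GCM
// AEAD plus the nonce to use with it.
func deriveAEAD(key [32]byte, salt []byte) (cipher.AEAD, []byte, error) {
	h := hkdf.New(sha256.New, key[:], salt, []byte("example token source"))
	aesKey := make([]byte, 32) // a 32 byte key selects AES-256
	if _, err := io.ReadFull(h, aesKey); err != nil {
		return nil, nil, err
	}
	block, err := aes.NewCipher(aesKey)
	if err != nil {
		return nil, nil, err
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		return nil, nil, err
	}
	nonce := make([]byte, aead.NonceSize())
	if _, err := io.ReadFull(h, nonce); err != nil {
		return nil, nil, err
	}
	return aead, nonce, nil
}

func sealToken(key [32]byte, data []byte) ([]byte, error) {
	var salt [saltLen]byte
	if _, err := rand.Read(salt[:]); err != nil {
		return nil, err
	}
	aead, nonce, err := deriveAEAD(key, salt[:])
	if err != nil {
		return nil, err
	}
	return append(salt[:], aead.Seal(nil, nonce, data, nil)...), nil
}

func openToken(key [32]byte, token []byte) ([]byte, error) {
	if len(token) < saltLen {
		return nil, fmt.Errorf("token too short: %d", len(token))
	}
	aead, nonce, err := deriveAEAD(key, token[:saltLen])
	if err != nil {
		return nil, err
	}
	return aead.Open(nil, nonce, token[saltLen:], nil)
}

func main() {
	var key [32]byte // in practice a long-lived key generated once
	token, err := sealToken(key, []byte("client address"))
	if err != nil {
		panic(err)
	}
	data, _ := openToken(key, token)
	fmt.Printf("decoded: %s\n", data)
}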
-func (s *tokenProtectorImpl) DecodeToken(p []byte) ([]byte, error) { +func (s *tokenProtector) DecodeToken(p []byte) ([]byte, error) { if len(p) < tokenNonceSize { return nil, fmt.Errorf("token too short: %d", len(p)) } @@ -60,7 +52,7 @@ func (s *tokenProtectorImpl) DecodeToken(p []byte) ([]byte, error) { return aead.Open(nil, aeadNonce, p[tokenNonceSize:], nil) } -func (s *tokenProtectorImpl) createAEAD(nonce []byte) (cipher.AEAD, []byte, error) { +func (s *tokenProtector) createAEAD(nonce []byte) (cipher.AEAD, []byte, error) { h := hkdf.New(sha256.New, s.key[:], nonce, []byte("quic-go token source")) key := make([]byte, 32) // use a 32 byte key, in order to select AES-256 if _, err := io.ReadFull(h, key); err != nil { diff --git a/vendor/github.com/quic-go/quic-go/internal/logutils/frame.go b/vendor/github.com/quic-go/quic-go/internal/logutils/frame.go deleted file mode 100644 index a6032fc20d7..00000000000 --- a/vendor/github.com/quic-go/quic-go/internal/logutils/frame.go +++ /dev/null @@ -1,50 +0,0 @@ -package logutils - -import ( - "github.com/quic-go/quic-go/internal/protocol" - "github.com/quic-go/quic-go/internal/wire" - "github.com/quic-go/quic-go/logging" -) - -// ConvertFrame converts a wire.Frame into a logging.Frame. -// This makes it possible for external packages to access the frames. -// Furthermore, it removes the data slices from CRYPTO and STREAM frames. -func ConvertFrame(frame wire.Frame) logging.Frame { - switch f := frame.(type) { - case *wire.AckFrame: - // We use a pool for ACK frames. - // Implementations of the tracer interface may hold on to frames, so we need to make a copy here. - return ConvertAckFrame(f) - case *wire.CryptoFrame: - return &logging.CryptoFrame{ - Offset: f.Offset, - Length: protocol.ByteCount(len(f.Data)), - } - case *wire.StreamFrame: - return &logging.StreamFrame{ - StreamID: f.StreamID, - Offset: f.Offset, - Length: f.DataLen(), - Fin: f.Fin, - } - case *wire.DatagramFrame: - return &logging.DatagramFrame{ - Length: logging.ByteCount(len(f.Data)), - } - default: - return logging.Frame(frame) - } -} - -func ConvertAckFrame(f *wire.AckFrame) *logging.AckFrame { - ranges := make([]wire.AckRange, 0, len(f.AckRanges)) - ranges = append(ranges, f.AckRanges...) 
- ack := &logging.AckFrame{ - AckRanges: ranges, - DelayTime: f.DelayTime, - ECNCE: f.ECNCE, - ECT0: f.ECT0, - ECT1: f.ECT1, - } - return ack -} diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/conn.go b/vendor/github.com/quic-go/quic-go/internal/qtls/conn.go similarity index 97% rename from vendor/github.com/quic-go/quic-go/internal/handshake/conn.go rename to vendor/github.com/quic-go/quic-go/internal/qtls/conn.go index 54af823ba22..6660ac6679c 100644 --- a/vendor/github.com/quic-go/quic-go/internal/handshake/conn.go +++ b/vendor/github.com/quic-go/quic-go/internal/qtls/conn.go @@ -1,4 +1,4 @@ -package handshake +package qtls import ( "net" diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go b/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go index ebcd9d4dedf..cdfe82a2bcb 100644 --- a/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go +++ b/vendor/github.com/quic-go/quic-go/internal/qtls/qtls.go @@ -4,20 +4,23 @@ import ( "bytes" "crypto/tls" "fmt" + "net" "github.com/quic-go/quic-go/internal/protocol" ) -func SetupConfigForServer(qconf *tls.QUICConfig, _ bool, getData func() []byte, handleSessionTicket func([]byte, bool) bool) { - conf := qconf.TLSConfig - +func SetupConfigForServer( + conf *tls.Config, + localAddr, remoteAddr net.Addr, + getData func() []byte, + handleSessionTicket func([]byte, bool) bool, +) *tls.Config { // Workaround for https://github.com/golang/go/issues/60506. // This initializes the session tickets _before_ cloning the config. _, _ = conf.DecryptTicket(nil, tls.ConnectionState{}) conf = conf.Clone() conf.MinVersion = tls.VersionTLS13 - qconf.TLSConfig = conf // add callbacks to save transport parameters into the session ticket origWrapSession := conf.WrapSession @@ -58,6 +61,29 @@ func SetupConfigForServer(qconf *tls.QUICConfig, _ bool, getData func() []byte, return state, nil } + // The tls.Config contains two callbacks that pass in a tls.ClientHelloInfo. + // Since crypto/tls doesn't do it, we need to make sure to set the Conn field with a fake net.Conn + // that allows the caller to get the local and the remote address. + if conf.GetConfigForClient != nil { + gcfc := conf.GetConfigForClient + conf.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) { + info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr} + c, err := gcfc(info) + if c != nil { + // We're returning a tls.Config here, so we need to apply this recursively. + c = SetupConfigForServer(c, localAddr, remoteAddr, getData, handleSessionTicket) + } + return c, err + } + } + if conf.GetCertificate != nil { + gc := conf.GetCertificate + conf.GetCertificate = func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { + info.Conn = &conn{localAddr: localAddr, remoteAddr: remoteAddr} + return gc(info) + } + } + return conf } func SetupConfigForClient( diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go b/vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go deleted file mode 100644 index a9b715e2f11..00000000000 --- a/vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go +++ /dev/null @@ -1,21 +0,0 @@ -package utils - -import ( - "bytes" - "io" -) - -// A ByteOrder specifies how to convert byte sequences into 16-, 32-, or 64-bit unsigned integers. 
-type ByteOrder interface { - Uint32([]byte) uint32 - Uint24([]byte) uint32 - Uint16([]byte) uint16 - - ReadUint32(io.ByteReader) (uint32, error) - ReadUint24(io.ByteReader) (uint32, error) - ReadUint16(io.ByteReader) (uint16, error) - - WriteUint32(*bytes.Buffer, uint32) - WriteUint24(*bytes.Buffer, uint32) - WriteUint16(*bytes.Buffer, uint16) -} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go b/vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go deleted file mode 100644 index 834a711b9ef..00000000000 --- a/vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go +++ /dev/null @@ -1,103 +0,0 @@ -package utils - -import ( - "bytes" - "encoding/binary" - "io" -) - -// BigEndian is the big-endian implementation of ByteOrder. -var BigEndian ByteOrder = bigEndian{} - -type bigEndian struct{} - -var _ ByteOrder = &bigEndian{} - -// ReadUintN reads N bytes -func (bigEndian) ReadUintN(b io.ByteReader, length uint8) (uint64, error) { - var res uint64 - for i := uint8(0); i < length; i++ { - bt, err := b.ReadByte() - if err != nil { - return 0, err - } - res ^= uint64(bt) << ((length - 1 - i) * 8) - } - return res, nil -} - -// ReadUint32 reads a uint32 -func (bigEndian) ReadUint32(b io.ByteReader) (uint32, error) { - var b1, b2, b3, b4 uint8 - var err error - if b4, err = b.ReadByte(); err != nil { - return 0, err - } - if b3, err = b.ReadByte(); err != nil { - return 0, err - } - if b2, err = b.ReadByte(); err != nil { - return 0, err - } - if b1, err = b.ReadByte(); err != nil { - return 0, err - } - return uint32(b1) + uint32(b2)<<8 + uint32(b3)<<16 + uint32(b4)<<24, nil -} - -// ReadUint24 reads a uint24 -func (bigEndian) ReadUint24(b io.ByteReader) (uint32, error) { - var b1, b2, b3 uint8 - var err error - if b3, err = b.ReadByte(); err != nil { - return 0, err - } - if b2, err = b.ReadByte(); err != nil { - return 0, err - } - if b1, err = b.ReadByte(); err != nil { - return 0, err - } - return uint32(b1) + uint32(b2)<<8 + uint32(b3)<<16, nil -} - -// ReadUint16 reads a uint16 -func (bigEndian) ReadUint16(b io.ByteReader) (uint16, error) { - var b1, b2 uint8 - var err error - if b2, err = b.ReadByte(); err != nil { - return 0, err - } - if b1, err = b.ReadByte(); err != nil { - return 0, err - } - return uint16(b1) + uint16(b2)<<8, nil -} - -func (bigEndian) Uint32(b []byte) uint32 { - return binary.BigEndian.Uint32(b) -} - -func (bigEndian) Uint24(b []byte) uint32 { - _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 - return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 -} - -func (bigEndian) Uint16(b []byte) uint16 { - return binary.BigEndian.Uint16(b) -} - -// WriteUint32 writes a uint32 -func (bigEndian) WriteUint32(b *bytes.Buffer, i uint32) { - b.Write([]byte{uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i)}) -} - -// WriteUint24 writes a uint24 -func (bigEndian) WriteUint24(b *bytes.Buffer, i uint32) { - b.Write([]byte{uint8(i >> 16), uint8(i >> 8), uint8(i)}) -} - -// WriteUint16 writes a uint16 -func (bigEndian) WriteUint16(b *bytes.Buffer, i uint16) { - b.Write([]byte{uint8(i >> 8), uint8(i)}) -} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/ip.go b/vendor/github.com/quic-go/quic-go/internal/utils/ip.go deleted file mode 100644 index 7ac7ffec11b..00000000000 --- a/vendor/github.com/quic-go/quic-go/internal/utils/ip.go +++ /dev/null @@ -1,10 +0,0 @@ -package utils - -import "net" - -func IsIPv4(ip net.IP) bool { - // If ip is not an IPv4 address, To4 returns 
nil. - // Note that there might be some corner cases, where this is not correct. - // See https://stackoverflow.com/questions/22751035/golang-distinguish-ipv4-ipv6. - return ip.To4() != nil -} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/minmax.go b/vendor/github.com/quic-go/quic-go/internal/utils/minmax.go deleted file mode 100644 index 03a9c9a873c..00000000000 --- a/vendor/github.com/quic-go/quic-go/internal/utils/minmax.go +++ /dev/null @@ -1,36 +0,0 @@ -package utils - -import ( - "math" - "time" -) - -// InfDuration is a duration of infinite length -const InfDuration = time.Duration(math.MaxInt64) - -// MinNonZeroDuration return the minimum duration that's not zero. -func MinNonZeroDuration(a, b time.Duration) time.Duration { - if a == 0 { - return b - } - if b == 0 { - return a - } - return min(a, b) -} - -// MinTime returns the earlier time -func MinTime(a, b time.Time) time.Time { - if a.After(b) { - return b - } - return a -} - -// MaxTime returns the later time -func MaxTime(a, b time.Time) time.Time { - if a.After(b) { - return a - } - return b -} diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go index 463b95424f5..24e1f64c9c6 100644 --- a/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go +++ b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go @@ -64,7 +64,7 @@ func (r *RTTStats) PTO(includeMaxAckDelay bool) time.Duration { // UpdateRTT updates the RTT based on a new sample. func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration, now time.Time) { - if sendDelta == InfDuration || sendDelta <= 0 { + if sendDelta <= 0 { return } diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go index e0f2db3c71d..8befef4f2de 100644 --- a/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go @@ -2,11 +2,11 @@ package wire import ( "errors" + "math" "sort" "time" "github.com/quic-go/quic-go/internal/protocol" - "github.com/quic-go/quic-go/internal/utils" "github.com/quic-go/quic-go/quicvarint" ) @@ -40,7 +40,7 @@ func parseAckFrame(frame *AckFrame, b []byte, typ uint64, ackDelayExponent uint8 delayTime := time.Duration(delay*1< 0 { kp = protocol.KeyPhaseOne } - var err error if data[0]&0x18 != 0 { err = ErrInvalidReservedBits } diff --git a/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go b/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go index a354bbd9466..96bf4617dd9 100644 --- a/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go +++ b/vendor/github.com/quic-go/quic-go/logging/connection_tracer.go @@ -8,14 +8,14 @@ import ( // A ConnectionTracer records events. 
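Several internal/utils helpers are deleted above because the standard library, plus Go 1.21's built-in min, now covers them: encoding/binary for the big-endian readers and writers, ip.To4() != nil for IsIPv4, and math.MaxInt64 for InfDuration. A small sketch of those stdlib equivalents, assuming Go 1.21 or newer; the names below are illustrative only.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
	"net"
	"time"
)

// infDuration stands in for the deleted utils.InfDuration.
const infDuration = time.Duration(math.MaxInt64)

// readUint24 reads a big-endian 24-bit value, like the removed bigEndian.Uint24.
func readUint24(b []byte) uint32 {
	_ = b[2] // bounds check hint to the compiler
	return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
}

// minNonZeroDuration mirrors the deleted helper, using Go 1.21's built-in min.
func minNonZeroDuration(a, b time.Duration) time.Duration {
	if a == 0 {
		return b
	}
	if b == 0 {
		return a
	}
	return min(a, b)
}

func main() {
	// encoding/binary replaces the hand-rolled big-endian writers.
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], 0x01020304)
	fmt.Println(buf, readUint24(buf[:3]))

	// ip.To4() != nil replaces utils.IsIPv4, as the qlog/event.go hunk shows.
	ip := net.ParseIP("192.0.2.1")
	fmt.Println("ipv4:", ip.To4() != nil)

	fmt.Println(minNonZeroDuration(0, time.Second), infDuration)
}
```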
type ConnectionTracer struct { StartedConnection func(local, remote net.Addr, srcConnID, destConnID ConnectionID) - NegotiatedVersion func(chosen VersionNumber, clientVersions, serverVersions []VersionNumber) + NegotiatedVersion func(chosen Version, clientVersions, serverVersions []Version) ClosedConnection func(error) SentTransportParameters func(*TransportParameters) ReceivedTransportParameters func(*TransportParameters) RestoredTransportParameters func(parameters *TransportParameters) // for 0-RTT SentLongHeaderPacket func(*ExtendedHeader, ByteCount, ECN, *AckFrame, []Frame) SentShortHeaderPacket func(*ShortHeader, ByteCount, ECN, *AckFrame, []Frame) - ReceivedVersionNegotiationPacket func(dest, src ArbitraryLenConnectionID, _ []VersionNumber) + ReceivedVersionNegotiationPacket func(dest, src ArbitraryLenConnectionID, _ []Version) ReceivedRetry func(*Header) ReceivedLongHeaderPacket func(*ExtendedHeader, ByteCount, ECN, []Frame) ReceivedShortHeaderPacket func(*ShortHeader, ByteCount, ECN, []Frame) @@ -57,7 +57,7 @@ func NewMultiplexedConnectionTracer(tracers ...*ConnectionTracer) *ConnectionTra } } }, - NegotiatedVersion: func(chosen VersionNumber, clientVersions, serverVersions []VersionNumber) { + NegotiatedVersion: func(chosen Version, clientVersions, serverVersions []Version) { for _, t := range tracers { if t.NegotiatedVersion != nil { t.NegotiatedVersion(chosen, clientVersions, serverVersions) @@ -106,7 +106,7 @@ func NewMultiplexedConnectionTracer(tracers ...*ConnectionTracer) *ConnectionTra } } }, - ReceivedVersionNegotiationPacket: func(dest, src ArbitraryLenConnectionID, versions []VersionNumber) { + ReceivedVersionNegotiationPacket: func(dest, src ArbitraryLenConnectionID, versions []Version) { for _, t := range tracers { if t.ReceivedVersionNegotiationPacket != nil { t.ReceivedVersionNegotiationPacket(dest, src, versions) diff --git a/vendor/github.com/quic-go/quic-go/logging/interface.go b/vendor/github.com/quic-go/quic-go/logging/interface.go index a618a1893d3..254911bd784 100644 --- a/vendor/github.com/quic-go/quic-go/logging/interface.go +++ b/vendor/github.com/quic-go/quic-go/logging/interface.go @@ -37,7 +37,10 @@ type ( // The StreamType is the type of the stream (unidirectional or bidirectional). StreamType = protocol.StreamType // The VersionNumber is the QUIC version. + // Deprecated: use Version instead. VersionNumber = protocol.Version + // The Version is the QUIC version. + Version = protocol.Version // The Header is the QUIC packet header, before removing header protection. 
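The logging package above renames VersionNumber to Version while keeping the old identifier as a deprecated type alias, so downstream code keeps compiling. A tiny self-contained sketch of that alias-plus-deprecation pattern; the version type here is a stand-in, not the real protocol.Version.

```go
package main

import "fmt"

// version stands in for the underlying protocol.Version type.
type version uint32

// Version is the QUIC version.
type Version = version

// VersionNumber is the QUIC version.
//
// Deprecated: use Version instead.
type VersionNumber = Version

func main() {
	var old VersionNumber = 1
	var cur Version = old // aliases are identical types, so old call sites keep compiling
	fmt.Println(cur)
}
```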
Header = wire.Header @@ -72,27 +75,27 @@ const ( const ( // KeyPhaseZero is key phase bit 0 - KeyPhaseZero KeyPhaseBit = protocol.KeyPhaseZero + KeyPhaseZero = protocol.KeyPhaseZero // KeyPhaseOne is key phase bit 1 - KeyPhaseOne KeyPhaseBit = protocol.KeyPhaseOne + KeyPhaseOne = protocol.KeyPhaseOne ) const ( // PerspectiveServer is used for a QUIC server - PerspectiveServer Perspective = protocol.PerspectiveServer + PerspectiveServer = protocol.PerspectiveServer // PerspectiveClient is used for a QUIC client - PerspectiveClient Perspective = protocol.PerspectiveClient + PerspectiveClient = protocol.PerspectiveClient ) const ( // EncryptionInitial is the Initial encryption level - EncryptionInitial EncryptionLevel = protocol.EncryptionInitial + EncryptionInitial = protocol.EncryptionInitial // EncryptionHandshake is the Handshake encryption level - EncryptionHandshake EncryptionLevel = protocol.EncryptionHandshake + EncryptionHandshake = protocol.EncryptionHandshake // Encryption1RTT is the 1-RTT encryption level - Encryption1RTT EncryptionLevel = protocol.Encryption1RTT + Encryption1RTT = protocol.Encryption1RTT // Encryption0RTT is the 0-RTT encryption level - Encryption0RTT EncryptionLevel = protocol.Encryption0RTT + Encryption0RTT = protocol.Encryption0RTT ) const ( diff --git a/vendor/github.com/quic-go/quic-go/logging/tracer.go b/vendor/github.com/quic-go/quic-go/logging/tracer.go index edd85dbaa0f..625a809eeac 100644 --- a/vendor/github.com/quic-go/quic-go/logging/tracer.go +++ b/vendor/github.com/quic-go/quic-go/logging/tracer.go @@ -5,7 +5,7 @@ import "net" // A Tracer traces events. type Tracer struct { SentPacket func(net.Addr, *Header, ByteCount, []Frame) - SentVersionNegotiationPacket func(_ net.Addr, dest, src ArbitraryLenConnectionID, _ []VersionNumber) + SentVersionNegotiationPacket func(_ net.Addr, dest, src ArbitraryLenConnectionID, _ []Version) DroppedPacket func(net.Addr, PacketType, ByteCount, PacketDropReason) Debug func(name, msg string) Close func() @@ -27,7 +27,7 @@ func NewMultiplexedTracer(tracers ...*Tracer) *Tracer { } } }, - SentVersionNegotiationPacket: func(remote net.Addr, dest, src ArbitraryLenConnectionID, versions []VersionNumber) { + SentVersionNegotiationPacket: func(remote net.Addr, dest, src ArbitraryLenConnectionID, versions []Version) { for _, t := range tracers { if t.SentVersionNegotiationPacket != nil { t.SentVersionNegotiationPacket(remote, dest, src, versions) diff --git a/vendor/github.com/quic-go/quic-go/metrics/connection_tracer.go b/vendor/github.com/quic-go/quic-go/metrics/connection_tracer.go new file mode 100644 index 00000000000..431a94df15f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/metrics/connection_tracer.go @@ -0,0 +1,192 @@ +package metrics + +import ( + "context" + "errors" + "fmt" + "net" + "time" + + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/logging" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + connStarted = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "connections_started_total", + Help: "Connections Started", + }, + []string{"dir"}, + ) + connClosed = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "connections_closed_total", + Help: "Connections Closed", + }, + []string{"dir", "reason"}, + ) + connDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: metricNamespace, + Name: "connection_duration_seconds", + Help: 
"Duration of a Connection", + Buckets: prometheus.ExponentialBuckets(1.0/16, 2, 25), // up to 24 days + }, + []string{"dir"}, + ) + connHandshakeDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: metricNamespace, + Name: "handshake_duration_seconds", + Help: "Duration of the QUIC Handshake", + Buckets: prometheus.ExponentialBuckets(0.001, 1.3, 35), + }, + []string{"dir"}, + ) +) + +// DefaultConnectionTracer returns a callback that creates a metrics ConnectionTracer. +// The ConnectionTracer returned can be set on the quic.Config for a new connection. +func DefaultConnectionTracer(_ context.Context, p logging.Perspective, _ logging.ConnectionID) *logging.ConnectionTracer { + switch p { + case logging.PerspectiveClient: + return NewClientConnectionTracerWithRegisterer(prometheus.DefaultRegisterer) + case logging.PerspectiveServer: + return NewServerConnectionTracerWithRegisterer(prometheus.DefaultRegisterer) + default: + panic("invalid perspective") + } +} + +// NewClientConnectionTracerWithRegisterer creates a new connection tracer for a connection +// dialed on the client side with a given Prometheus registerer. +func NewClientConnectionTracerWithRegisterer(registerer prometheus.Registerer) *logging.ConnectionTracer { + return newConnectionTracerWithRegisterer(registerer, true) +} + +// NewServerConnectionTracerWithRegisterer creates a new connection tracer for a connection +// accepted on the server side with a given Prometheus registerer. +func NewServerConnectionTracerWithRegisterer(registerer prometheus.Registerer) *logging.ConnectionTracer { + return newConnectionTracerWithRegisterer(registerer, false) +} + +func newConnectionTracerWithRegisterer(registerer prometheus.Registerer, isClient bool) *logging.ConnectionTracer { + for _, c := range [...]prometheus.Collector{ + connStarted, + connHandshakeDuration, + connClosed, + connDuration, + } { + if err := registerer.Register(c); err != nil { + if ok := errors.As(err, &prometheus.AlreadyRegisteredError{}); !ok { + panic(err) + } + } + } + + direction := "incoming" + if isClient { + direction = "outgoing" + } + + var ( + startTime time.Time + handshakeComplete bool + ) + return &logging.ConnectionTracer{ + StartedConnection: func(_, _ net.Addr, _, _ logging.ConnectionID) { + tags := getStringSlice() + defer putStringSlice(tags) + + startTime = time.Now() + + *tags = append(*tags, direction) + connStarted.WithLabelValues(*tags...).Inc() + }, + ClosedConnection: func(e error) { + tags := getStringSlice() + defer putStringSlice(tags) + + *tags = append(*tags, direction) + // call connDuration.Observe before adding any more labels + if handshakeComplete { + connDuration.WithLabelValues(*tags...).Observe(time.Since(startTime).Seconds()) + } + + var ( + statelessResetErr *quic.StatelessResetError + handshakeTimeoutErr *quic.HandshakeTimeoutError + idleTimeoutErr *quic.IdleTimeoutError + applicationErr *quic.ApplicationError + transportErr *quic.TransportError + versionNegotiationErr *quic.VersionNegotiationError + ) + var reason string + switch { + case errors.As(e, &statelessResetErr): + reason = "stateless_reset" + case errors.As(e, &handshakeTimeoutErr): + reason = "handshake_timeout" + case errors.As(e, &idleTimeoutErr): + if handshakeComplete { + reason = "idle_timeout" + } else { + reason = "handshake_timeout" + } + case errors.As(e, &applicationErr): + if applicationErr.Remote { + reason = "application_error (remote)" + } else { + reason = "application_error (local)" + } + case errors.As(e, &transportErr): 
+ switch { + case transportErr.ErrorCode == qerr.ApplicationErrorErrorCode: + if transportErr.Remote { + reason = "application_error (remote)" + } else { + reason = "application_error (local)" + } + case transportErr.ErrorCode.IsCryptoError(): + if transportErr.Remote { + reason = "crypto_error (remote)" + } else { + reason = "crypto_error (local)" + } + default: + if transportErr.Remote { + reason = "transport_error (remote)" + } else { + reason = fmt.Sprintf("transport_error (local): %s", transportErr.ErrorCode) + } + } + case errors.As(e, &versionNegotiationErr): + reason = "version_mismatch" + default: + reason = "unknown" + } + *tags = append(*tags, reason) + connClosed.WithLabelValues(*tags...).Inc() + }, + UpdatedKeyFromTLS: func(l logging.EncryptionLevel, p logging.Perspective) { + // The client derives both 1-RTT keys when the handshake completes. + // The server derives the 1-RTT read key when the handshake completes. + if l != logging.Encryption1RTT || p != logging.PerspectiveClient { + return + } + handshakeComplete = true + + tags := getStringSlice() + defer putStringSlice(tags) + + *tags = append(*tags, direction) + connHandshakeDuration.WithLabelValues(*tags...).Observe(time.Since(startTime).Seconds()) + }, + } +} diff --git a/vendor/github.com/quic-go/quic-go/metrics/pool.go b/vendor/github.com/quic-go/quic-go/metrics/pool.go new file mode 100644 index 00000000000..75868beaeca --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/metrics/pool.go @@ -0,0 +1,27 @@ +package metrics + +import ( + "fmt" + "sync" +) + +const capacity = 4 + +// The stringPool is used to avoid allocations when passing labels to Prometheus. +var stringPool = sync.Pool{New: func() any { + s := make([]string, 0, capacity) + return &s +}} + +func getStringSlice() *[]string { + s := stringPool.Get().(*[]string) + *s = (*s)[:0] + return s +} + +func putStringSlice(s *[]string) { + if c := cap(*s); c < capacity { + panic(fmt.Sprintf("unexpected slice cap: %d", c)) + } + stringPool.Put(s) +} diff --git a/vendor/github.com/quic-go/quic-go/metrics/tracer.go b/vendor/github.com/quic-go/quic-go/metrics/tracer.go new file mode 100644 index 00000000000..293291dbde1 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/metrics/tracer.go @@ -0,0 +1,147 @@ +package metrics + +import ( + "errors" + "fmt" + "net" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/logging" + + "github.com/prometheus/client_golang/prometheus" +) + +const metricNamespace = "quicgo" + +func getIPVersion(addr net.Addr) string { + udpAddr, ok := addr.(*net.UDPAddr) + if !ok { + return "" + } + if udpAddr.IP.To4() != nil { + return "ipv4" + } + return "ipv6" +} + +var ( + connsRejected = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "server_connections_rejected_total", + Help: "Connections Rejected", + }, + []string{"ip_version", "reason"}, + ) + packetDropped = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "server_received_packets_dropped_total", + Help: "packets dropped", + }, + []string{"ip_version", "reason"}, + ) +) + +// NewTracer creates a new tracer using the default Prometheus registerer. +// The Tracer returned from this function can be used to collect metrics for +// events happening before the establishment of a QUIC connection. +// It can be set on the Tracer field of quic.Transport. 
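The new metrics files register their Prometheus collectors idempotently, tolerating AlreadyRegisteredError, and recycle label slices through a sync.Pool. A standalone sketch of both patterns, using the prometheus/client_golang API that this patch vendors; the metric namespace and name below are made up for illustration.

```go
package main

import (
	"errors"
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

var connsStarted = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "demo", // illustrative namespace, not the one used above
		Name:      "connections_started_total",
		Help:      "Connections Started",
	},
	[]string{"dir"},
)

// register tolerates double registration, mirroring the
// errors.As(err, &prometheus.AlreadyRegisteredError{}) check in the new files.
func register(reg prometheus.Registerer, cs ...prometheus.Collector) {
	for _, c := range cs {
		if err := reg.Register(c); err != nil {
			if ok := errors.As(err, &prometheus.AlreadyRegisteredError{}); !ok {
				panic(err)
			}
		}
	}
}

// labelPool recycles label slices between observations, like metrics/pool.go.
var labelPool = sync.Pool{New: func() any {
	s := make([]string, 0, 4)
	return &s
}}

func main() {
	register(prometheus.DefaultRegisterer, connsStarted)
	register(prometheus.DefaultRegisterer, connsStarted) // second call is a no-op

	labels := labelPool.Get().(*[]string)
	*labels = append((*labels)[:0], "outgoing")
	connsStarted.WithLabelValues(*labels...).Inc()
	labelPool.Put(labels)

	fmt.Println("metric registered and incremented")
}
```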
+func NewTracer() *logging.Tracer { + return NewTracerWithRegisterer(prometheus.DefaultRegisterer) +} + +// NewTracerWithRegisterer creates a new tracer using a given Prometheus registerer. +func NewTracerWithRegisterer(registerer prometheus.Registerer) *logging.Tracer { + for _, c := range [...]prometheus.Collector{ + connsRejected, + packetDropped, + } { + if err := registerer.Register(c); err != nil { + if ok := errors.As(err, &prometheus.AlreadyRegisteredError{}); !ok { + panic(err) + } + } + } + + return &logging.Tracer{ + SentPacket: func(addr net.Addr, hdr *logging.Header, _ logging.ByteCount, frames []logging.Frame) { + tags := getStringSlice() + defer putStringSlice(tags) + + var reason string + switch { + case hdr.Type == protocol.PacketTypeRetry: + reason = "retry" + case hdr.Type == protocol.PacketTypeInitial: + var ccf *logging.ConnectionCloseFrame + for _, f := range frames { + cc, ok := f.(*logging.ConnectionCloseFrame) + if ok { + ccf = cc + break + } + } + // This should never happen. We only send Initials before creating the connection in order to + // reject a connection attempt. + if ccf == nil { + return + } + if ccf.IsApplicationError { + //nolint:exhaustive // Only a few error codes applicable. + switch qerr.TransportErrorCode(ccf.ErrorCode) { + case qerr.ConnectionRefused: + reason = "connection_refused" + case qerr.InvalidToken: + reason = "invalid_token" + default: + // This shouldn't happen, the server doesn't send CONNECTION_CLOSE frames with different errors. + reason = fmt.Sprintf("transport_error: %d", ccf.ErrorCode) + } + } else { + // This shouldn't happen, the server doesn't send application-level CONNECTION_CLOSE frames. + reason = "application_error" + } + } + *tags = append(*tags, getIPVersion(addr)) + *tags = append(*tags, reason) + connsRejected.WithLabelValues(*tags...).Inc() + }, + SentVersionNegotiationPacket: func(addr net.Addr, _, _ logging.ArbitraryLenConnectionID, _ []logging.Version) { + tags := getStringSlice() + defer putStringSlice(tags) + + *tags = append(*tags, getIPVersion(addr)) + *tags = append(*tags, "version_negotiation") + connsRejected.WithLabelValues(*tags...).Inc() + }, + DroppedPacket: func(addr net.Addr, pt logging.PacketType, _ logging.ByteCount, reason logging.PacketDropReason) { + tags := getStringSlice() + defer putStringSlice(tags) + + var dropReason string + //nolint:exhaustive // Only a few drop reasons applicable. 
+ switch reason { + case logging.PacketDropDOSPrevention: + if pt == logging.PacketType0RTT { + dropReason = "0rtt_dos_prevention" + } else { + dropReason = "dos_prevention" + } + case logging.PacketDropHeaderParseError: + dropReason = "header_parsing" + case logging.PacketDropPayloadDecryptError: + dropReason = "payload_decrypt" + case logging.PacketDropUnexpectedPacket: + dropReason = "unexpected_packet" + default: + dropReason = "unknown" + } + + *tags = append(*tags, getIPVersion(addr)) + *tags = append(*tags, dropReason) + packetDropped.WithLabelValues(*tags...).Inc() + }, + } +} diff --git a/vendor/github.com/quic-go/quic-go/mockgen.go b/vendor/github.com/quic-go/quic-go/mockgen.go index 81cc4a5ef57..65ec465aa33 100644 --- a/vendor/github.com/quic-go/quic-go/mockgen.go +++ b/vendor/github.com/quic-go/quic-go/mockgen.go @@ -14,23 +14,17 @@ type Sender = sender //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_internal_test.go github.com/quic-go/quic-go StreamI" type StreamI = streamI -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_crypto_stream_test.go github.com/quic-go/quic-go CryptoStream" -type CryptoStream = cryptoStream - //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_receive_stream_internal_test.go github.com/quic-go/quic-go ReceiveStreamI" type ReceiveStreamI = receiveStreamI //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_send_stream_internal_test.go github.com/quic-go/quic-go SendStreamI" type SendStreamI = sendStreamI -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_getter_test.go github.com/quic-go/quic-go StreamGetter" -type StreamGetter = streamGetter - //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_sender_test.go github.com/quic-go/quic-go StreamSender" type StreamSender = streamSender -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_crypto_data_handler_test.go github.com/quic-go/quic-go CryptoDataHandler" -type CryptoDataHandler = cryptoDataHandler +//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_stream_control_frame_getter_test.go github.com/quic-go/quic-go StreamControlFrameGetter" +type StreamControlFrameGetter = streamControlFrameGetter //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -build_flags=\"-tags=gomock\" -package quic -self_package github.com/quic-go/quic-go -destination mock_frame_source_test.go github.com/quic-go/quic-go FrameSource" type FrameSource = frameSource @@ -72,5 +66,4 @@ type PacketHandlerManager = packetHandlerManager // //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package quic -self_package github.com/quic-go/quic-go -source sys_conn_oob.go -destination mock_batch_conn_test.go -mock_names 
batchConn=MockBatchConn" -//go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package quic -self_package github.com/quic-go/quic-go -self_package github.com/quic-go/quic-go -destination mock_token_store_test.go github.com/quic-go/quic-go TokenStore" //go:generate sh -c "go run go.uber.org/mock/mockgen -typed -package quic -self_package github.com/quic-go/quic-go -self_package github.com/quic-go/quic-go -destination mock_packetconn_test.go net PacketConn" diff --git a/vendor/github.com/quic-go/quic-go/mtu_discoverer.go b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go index fb7c09a925d..3f3a640ae2f 100644 --- a/vendor/github.com/quic-go/quic-go/mtu_discoverer.go +++ b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go @@ -25,16 +25,80 @@ const ( maxMTUDiff = 20 // send a probe packet every mtuProbeDelay RTTs mtuProbeDelay = 5 + // Once maxLostMTUProbes MTU probe packets larger than a certain size are lost, + // MTU discovery won't probe for larger MTUs than this size. + // The algorithm used here is resilient to packet loss of (maxLostMTUProbes - 1) packets. + maxLostMTUProbes = 3 ) +// The Path MTU is found by sending a larger packet every now and then. +// If the packet is acknowledged, we conclude that the path supports this larger packet size. +// If the packet is lost, this can mean one of two things: +// 1. The path doesn't support this larger packet size, or +// 2. The packet was lost due to packet loss, independent of its size. +// The algorithm used here is resilient to packet loss of (maxLostMTUProbes - 1) packets. +// For simplicty, the following example use maxLostMTUProbes = 2. +// +// Initialization: +// |------------------------------------------------------------------------------| +// min max +// +// The first MTU probe packet will have size (min+max)/2. +// Assume that this packet is acknowledged. We can now move the min marker, +// and continue the search in the resulting interval. +// +// If 1st probe packet acknowledged: +// |---------------------------------------|--------------------------------------| +// min max +// +// If 1st probe packet lost: +// |---------------------------------------|--------------------------------------| +// min lost[0] max +// +// We can't conclude that the path doesn't support this packet size, since the loss of the probe +// packet could have been unrelated to the packet size. A larger probe packet will be sent later on. +// After a loss, the next probe packet has size (min+lost[0])/2. +// Now assume this probe packet is acknowledged: +// +// 2nd probe packet acknowledged: +// |------------------|--------------------|--------------------------------------| +// min lost[0] max +// +// First of all, we conclude that the path supports at least this MTU. That's progress! +// Second, we probe a bit more aggressively with the next probe packet: +// After an acknowledgement, the next probe packet has size (min+max)/2. +// This means we'll send a packet larger than the first probe packet (which was lost). +// +// If 3rd probe packet acknowledged: +// |-------------------------------------------------|----------------------------| +// min max +// +// We can conclude that the loss of the 1st probe packet was not due to its size, and +// continue searching in a much smaller interval now. 
+// +// If 3rd probe packet lost: +// |------------------|--------------------|---------|----------------------------| +// min lost[0] max +// +// Since in our example numPTOProbes = 2, and we lost 2 packets smaller than max, we +// conclude that this packet size is not supported on the path, and reduce the maximum +// value of the search interval. +// +// MTU discovery concludes once the interval min and max has been narrowed down to maxMTUDiff. + type mtuFinder struct { lastProbeTime time.Time mtuIncreased func(protocol.ByteCount) rttStats *utils.RTTStats + inFlight protocol.ByteCount // the size of the probe packet currently in flight. InvalidByteCount if none is in flight - current protocol.ByteCount - max protocol.ByteCount // the maximum value, as advertised by the peer (or our maximum size buffer) + min protocol.ByteCount + limit protocol.ByteCount + + // on initialization, we treat the maximum size as the first "lost" packet + lost [maxLostMTUProbes]protocol.ByteCount + lastProbeWasLost bool tracer *logging.ConnectionTracer } @@ -47,33 +111,43 @@ func newMTUDiscoverer( mtuIncreased func(protocol.ByteCount), tracer *logging.ConnectionTracer, ) *mtuFinder { - return &mtuFinder{ + f := &mtuFinder{ inFlight: protocol.InvalidByteCount, - current: start, - max: max, + min: start, + limit: max, rttStats: rttStats, mtuIncreased: mtuIncreased, tracer: tracer, } + for i := range f.lost { + if i == 0 { + f.lost[i] = max + continue + } + f.lost[i] = protocol.InvalidByteCount + } + return f } func (f *mtuFinder) done() bool { - return f.max-f.current <= maxMTUDiff+1 + return f.max()-f.min <= maxMTUDiff+1 } -func (f *mtuFinder) SetMax(max protocol.ByteCount) { - f.max = max +func (f *mtuFinder) max() protocol.ByteCount { + for i, v := range f.lost { + if v == protocol.InvalidByteCount { + return f.lost[i-1] + } + } + return f.lost[len(f.lost)-1] } func (f *mtuFinder) Start() { - if f.max == protocol.InvalidByteCount { - panic("invalid") - } f.lastProbeTime = time.Now() // makes sure the first probe packet is not sent immediately } func (f *mtuFinder) ShouldSendProbe(now time.Time) bool { - if f.max == 0 || f.lastProbeTime.IsZero() { + if f.lastProbeTime.IsZero() { return false } if f.inFlight != protocol.InvalidByteCount || f.done() { @@ -83,7 +157,12 @@ func (f *mtuFinder) ShouldSendProbe(now time.Time) bool { } func (f *mtuFinder) GetPing() (ackhandler.Frame, protocol.ByteCount) { - size := (f.max + f.current) / 2 + var size protocol.ByteCount + if f.lastProbeWasLost { + size = (f.min + f.lost[0]) / 2 + } else { + size = (f.min + f.max()) / 2 + } f.lastProbeTime = time.Now() f.inFlight = size return ackhandler.Frame{ @@ -93,7 +172,7 @@ func (f *mtuFinder) GetPing() (ackhandler.Frame, protocol.ByteCount) { } func (f *mtuFinder) CurrentSize() protocol.ByteCount { - return f.current + return f.min } type mtuFinderAckHandler struct { @@ -108,7 +187,25 @@ func (h *mtuFinderAckHandler) OnAcked(wire.Frame) { panic("OnAcked callback called although there's no MTU probe packet in flight") } h.inFlight = protocol.InvalidByteCount - h.current = size + h.min = size + h.lastProbeWasLost = false + // remove all values smaller than size from the lost array + var j int + for i, v := range h.lost { + if size < v { + j = i + break + } + } + if j > 0 { + for i := 0; i < len(h.lost); i++ { + if i+j < len(h.lost) { + h.lost[i] = h.lost[i+j] + } else { + h.lost[i] = protocol.InvalidByteCount + } + } + } if h.tracer != nil && h.tracer.UpdatedMTU != nil { h.tracer.UpdatedMTU(size, h.done()) } @@ -120,6 +217,13 @@ 
func (h *mtuFinderAckHandler) OnLost(wire.Frame) { if size == protocol.InvalidByteCount { panic("OnLost callback called although there's no MTU probe packet in flight") } - h.max = size + h.lastProbeWasLost = true h.inFlight = protocol.InvalidByteCount + for i, v := range h.lost { + if size < v { + copy(h.lost[i+1:], h.lost[i:]) + h.lost[i] = size + break + } + } } diff --git a/vendor/github.com/quic-go/quic-go/packet_packer.go b/vendor/github.com/quic-go/quic-go/packet_packer.go index e707734fb46..8b8a03d44fd 100644 --- a/vendor/github.com/quic-go/quic-go/packet_packer.go +++ b/vendor/github.com/quic-go/quic-go/packet_packer.go @@ -121,8 +121,8 @@ type packetPacker struct { perspective protocol.Perspective cryptoSetup sealingManager - initialStream cryptoStream - handshakeStream cryptoStream + initialStream *cryptoStream + handshakeStream *cryptoStream token []byte @@ -141,7 +141,7 @@ var _ packer = &packetPacker{} func newPacketPacker( srcConnID protocol.ConnectionID, getDestConnID func() protocol.ConnectionID, - initialStream, handshakeStream cryptoStream, + initialStream, handshakeStream *cryptoStream, packetNumberManager packetNumberManager, retransmissionQueue *retransmissionQueue, cryptoSetup sealingManager, @@ -482,7 +482,7 @@ func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, en return nil, payload{} } - var s cryptoStream + var s *cryptoStream var handler ackhandler.FrameHandler var hasRetransmission bool //nolint:exhaustive // Initial and Handshake are the only two encryption levels here. @@ -645,6 +645,9 @@ func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAc pl.length += lengthAdded // add handlers for the control frames that were added for i := startLen; i < len(pl.frames); i++ { + if pl.frames[i].Handler != nil { + continue + } switch pl.frames[i].Frame.(type) { case *wire.PathChallengeFrame, *wire.PathResponseFrame: // Path probing is currently not supported, therefore we don't need to set the OnAcked callback yet. diff --git a/vendor/github.com/quic-go/quic-go/packet_unpacker.go b/vendor/github.com/quic-go/quic-go/packet_unpacker.go index 1034aab1dbb..9e0fa9d90e7 100644 --- a/vendor/github.com/quic-go/quic-go/packet_unpacker.go +++ b/vendor/github.com/quic-go/quic-go/packet_unpacker.go @@ -1,7 +1,6 @@ package quic import ( - "bytes" "fmt" "time" @@ -53,7 +52,7 @@ func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int) *packetU // If the reserved bits are invalid, the error is wire.ErrInvalidReservedBits. // If any other error occurred when parsing the header, the error is of type headerParseError. // If decrypting the payload fails for any reason, the error is the error returned by the AEAD. 
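The long comment added to mtu_discoverer.go describes a loss-tolerant interval search for the path MTU. The sketch below keeps only the interval-halving core of that search and deliberately assumes a probe is lost exactly when it exceeds the real path MTU, so the lost[] bookkeeping that makes the vendored algorithm resilient to unrelated packet loss is not modeled; the start and limit values are illustrative.

```go
package main

import "fmt"

const maxMTUDiff = 20

// probeSearch is the interval-halving core of the search described in the
// mtu_discoverer.go comment. It assumes a probe is lost exactly when it
// exceeds the real path MTU, so unrelated packet loss is ignored here.
func probeSearch(lo, hi, pathMTU int) int {
	for hi-lo > maxMTUDiff {
		probe := (lo + hi) / 2
		if probe <= pathMTU {
			lo = probe // "acknowledged": the path supports this size
		} else {
			hi = probe // "lost": treated as unsupported in this simplified model
		}
	}
	return lo
}

func main() {
	// Illustrative numbers: start near QUIC's initial packet size and probe
	// towards the maximum UDP payload on a path whose real MTU allows 1452 bytes.
	fmt.Println(probeSearch(1252, 65527, 1452))
}
```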
-func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.Version) (*unpackedPacket, error) { +func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, data []byte) (*unpackedPacket, error) { var encLevel protocol.EncryptionLevel var extHdr *wire.ExtendedHeader var decrypted []byte @@ -65,7 +64,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) if err != nil { return nil, err } @@ -75,7 +74,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) if err != nil { return nil, err } @@ -85,7 +84,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) if err != nil { return nil, err } @@ -125,8 +124,8 @@ func (u *packetUnpacker) UnpackShortHeader(rcvTime time.Time, data []byte) (prot return pn, pnLen, kp, decrypted, nil } -func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte, v protocol.Version) (*wire.ExtendedHeader, []byte, error) { - extHdr, parseErr := u.unpackLongHeader(opener, hdr, data, v) +func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, []byte, error) { + extHdr, parseErr := u.unpackLongHeader(opener, hdr, data) // If the reserved bits are set incorrectly, we still need to continue unpacking. // This avoids a timing side-channel, which otherwise might allow an attacker // to gain information about the header encryption. @@ -187,17 +186,15 @@ func (u *packetUnpacker) unpackShortHeader(hd headerDecryptor, data []byte) (int } // The error is either nil, a wire.ErrInvalidReservedBits or of type headerParseError. -func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.Version) (*wire.ExtendedHeader, error) { - extHdr, err := unpackLongHeader(hd, hdr, data, v) +func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) { + extHdr, err := unpackLongHeader(hd, hdr, data) if err != nil && err != wire.ErrInvalidReservedBits { return nil, &headerParseError{err: err} } return extHdr, err } -func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.Version) (*wire.ExtendedHeader, error) { - r := bytes.NewReader(data) - +func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) { hdrLen := hdr.ParsedLen() if protocol.ByteCount(len(data)) < hdrLen+4+16 { //nolint:stylecheck @@ -214,7 +211,7 @@ func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v proto data[hdrLen:hdrLen+4], ) // 3. 
parse the header (and learn the actual length of the packet number) - extHdr, parseErr := hdr.ParseExtended(r, v) + extHdr, parseErr := hdr.ParseExtended(data) if parseErr != nil && parseErr != wire.ErrInvalidReservedBits { return nil, parseErr } diff --git a/vendor/github.com/quic-go/quic-go/qlog/connection_tracer.go b/vendor/github.com/quic-go/quic-go/qlog/connection_tracer.go index 3ea0ce11ca8..b696c2c7899 100644 --- a/vendor/github.com/quic-go/quic-go/qlog/connection_tracer.go +++ b/vendor/github.com/quic-go/quic-go/qlog/connection_tracer.go @@ -39,7 +39,7 @@ func NewConnectionTracer(w io.WriteCloser, p logging.Perspective, odcid protocol StartedConnection: func(local, remote net.Addr, srcConnID, destConnID logging.ConnectionID) { t.StartedConnection(local, remote, srcConnID, destConnID) }, - NegotiatedVersion: func(chosen logging.VersionNumber, clientVersions, serverVersions []logging.VersionNumber) { + NegotiatedVersion: func(chosen logging.Version, clientVersions, serverVersions []logging.Version) { t.NegotiatedVersion(chosen, clientVersions, serverVersions) }, ClosedConnection: func(e error) { t.ClosedConnection(e) }, @@ -61,7 +61,7 @@ func NewConnectionTracer(w io.WriteCloser, p logging.Perspective, odcid protocol ReceivedRetry: func(hdr *wire.Header) { t.ReceivedRetry(hdr) }, - ReceivedVersionNegotiationPacket: func(dest, src logging.ArbitraryLenConnectionID, versions []logging.VersionNumber) { + ReceivedVersionNegotiationPacket: func(dest, src logging.ArbitraryLenConnectionID, versions []logging.Version) { t.ReceivedVersionNegotiationPacket(dest, src, versions) }, BufferedPacket: func(pt logging.PacketType, size protocol.ByteCount) { @@ -147,24 +147,24 @@ func (t *connectionTracer) StartedConnection(local, remote net.Addr, srcConnID, }) } -func (t *connectionTracer) NegotiatedVersion(chosen logging.VersionNumber, client, server []logging.VersionNumber) { - var clientVersions, serverVersions []versionNumber +func (t *connectionTracer) NegotiatedVersion(chosen logging.Version, client, server []logging.Version) { + var clientVersions, serverVersions []version if len(client) > 0 { - clientVersions = make([]versionNumber, len(client)) + clientVersions = make([]version, len(client)) for i, v := range client { - clientVersions[i] = versionNumber(v) + clientVersions[i] = version(v) } } if len(server) > 0 { - serverVersions = make([]versionNumber, len(server)) + serverVersions = make([]version, len(server)) for i, v := range server { - serverVersions[i] = versionNumber(v) + serverVersions[i] = version(v) } } t.recordEvent(time.Now(), &eventVersionNegotiated{ clientVersions: clientVersions, serverVersions: serverVersions, - chosenVersion: versionNumber(chosen), + chosenVersion: version(chosen), }) } @@ -313,10 +313,10 @@ func (t *connectionTracer) ReceivedRetry(hdr *wire.Header) { }) } -func (t *connectionTracer) ReceivedVersionNegotiationPacket(dest, src logging.ArbitraryLenConnectionID, versions []logging.VersionNumber) { - ver := make([]versionNumber, len(versions)) +func (t *connectionTracer) ReceivedVersionNegotiationPacket(dest, src logging.ArbitraryLenConnectionID, versions []logging.Version) { + ver := make([]version, len(versions)) for i, v := range versions { - ver[i] = versionNumber(v) + ver[i] = version(v) } t.recordEvent(time.Now(), &eventVersionNegotiationReceived{ Header: packetHeaderVersionNegotiation{ diff --git a/vendor/github.com/quic-go/quic-go/qlog/event.go b/vendor/github.com/quic-go/quic-go/qlog/event.go index 7602b55cec4..54134699ddc 100644 --- 
a/vendor/github.com/quic-go/quic-go/qlog/event.go +++ b/vendor/github.com/quic-go/quic-go/qlog/event.go @@ -9,7 +9,6 @@ import ( "github.com/quic-go/quic-go" "github.com/quic-go/quic-go/internal/protocol" - "github.com/quic-go/quic-go/internal/utils" "github.com/quic-go/quic-go/logging" "github.com/francoispqt/gojay" @@ -37,7 +36,7 @@ func (e event) MarshalJSONObject(enc *gojay.Encoder) { enc.ObjectKey("data", e.eventDetails) } -type versions []versionNumber +type versions []version func (v versions) IsNil() bool { return false } func (v versions) MarshalJSONArray(enc *gojay.Encoder) { @@ -72,7 +71,7 @@ func (e eventConnectionStarted) Name() string { return "connection_started func (e eventConnectionStarted) IsNil() bool { return false } func (e eventConnectionStarted) MarshalJSONObject(enc *gojay.Encoder) { - if utils.IsIPv4(e.SrcAddr.IP) { + if e.SrcAddr.IP.To4() != nil { enc.StringKey("ip_version", "ipv4") } else { enc.StringKey("ip_version", "ipv6") @@ -86,8 +85,8 @@ func (e eventConnectionStarted) MarshalJSONObject(enc *gojay.Encoder) { } type eventVersionNegotiated struct { - clientVersions, serverVersions []versionNumber - chosenVersion versionNumber + clientVersions, serverVersions []version + chosenVersion version } func (e eventVersionNegotiated) Category() category { return categoryTransport } @@ -221,7 +220,7 @@ func (e eventRetryReceived) MarshalJSONObject(enc *gojay.Encoder) { type eventVersionNegotiationReceived struct { Header packetHeaderVersionNegotiation - SupportedVersions []versionNumber + SupportedVersions []version } func (e eventVersionNegotiationReceived) Category() category { return categoryTransport } @@ -235,7 +234,7 @@ func (e eventVersionNegotiationReceived) MarshalJSONObject(enc *gojay.Encoder) { type eventVersionNegotiationSent struct { Header packetHeaderVersionNegotiation - SupportedVersions []versionNumber + SupportedVersions []version } func (e eventVersionNegotiationSent) Category() category { return categoryTransport } diff --git a/vendor/github.com/quic-go/quic-go/qlog/packet_header.go b/vendor/github.com/quic-go/quic-go/qlog/packet_header.go index 53c9f2b0da7..d2c57ba605a 100644 --- a/vendor/github.com/quic-go/quic-go/qlog/packet_header.go +++ b/vendor/github.com/quic-go/quic-go/qlog/packet_header.go @@ -43,7 +43,7 @@ type packetHeader struct { KeyPhaseBit logging.KeyPhaseBit PacketNumber logging.PacketNumber - Version logging.VersionNumber + Version logging.Version SrcConnectionID logging.ConnectionID DestConnectionID logging.ConnectionID @@ -76,7 +76,7 @@ func (h packetHeader) MarshalJSONObject(enc *gojay.Encoder) { enc.Int64Key("packet_number", int64(h.PacketNumber)) } if h.Version != 0 { - enc.StringKey("version", versionNumber(h.Version).String()) + enc.StringKey("version", version(h.Version).String()) } if h.PacketType != logging.PacketType1RTT { enc.IntKey("scil", h.SrcConnectionID.Len()) diff --git a/vendor/github.com/quic-go/quic-go/qlog/qlog_dir.go b/vendor/github.com/quic-go/quic-go/qlog/qlog_dir.go index e7acef4f035..1c7e97fa628 100644 --- a/vendor/github.com/quic-go/quic-go/qlog/qlog_dir.go +++ b/vendor/github.com/quic-go/quic-go/qlog/qlog_dir.go @@ -13,9 +13,15 @@ import ( ) // DefaultTracer creates a qlog file in the qlog directory specified by the QLOGDIR environment variable. -// File names are _.qlog. +// Deprecated: use DefaultConnectionTracer instead. 
+func DefaultTracer(ctx context.Context, p logging.Perspective, connID logging.ConnectionID) *logging.ConnectionTracer { + return DefaultConnectionTracer(ctx, p, connID) +} + +// DefaultConnectionTracer creates a qlog file in the qlog directory specified by the QLOGDIR environment variable. +// File names are _.sqlog. // Returns nil if QLOGDIR is not set. -func DefaultTracer(_ context.Context, p logging.Perspective, connID logging.ConnectionID) *logging.ConnectionTracer { +func DefaultConnectionTracer(_ context.Context, p logging.Perspective, connID logging.ConnectionID) *logging.ConnectionTracer { var label string switch p { case logging.PerspectiveClient: @@ -27,7 +33,7 @@ func DefaultTracer(_ context.Context, p logging.Perspective, connID logging.Conn } // qlogDirTracer creates a qlog file in the qlog directory specified by the QLOGDIR environment variable. -// File names are _
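DefaultTracer is kept above only as a deprecated wrapper around DefaultConnectionTracer, which derives a per-connection qlog file name from the QLOGDIR environment variable and returns nil when it is unset. A rough sketch of that directory handling; the "<odcid>_<label>.sqlog" naming is an assumption for illustration, since the exact pattern is elided in the hunk.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// qlogPath sketches the QLOGDIR handling: return "" when the variable is unset,
// otherwise a per-connection file inside that directory. The
// "<odcid>_<label>.sqlog" layout is assumed for illustration only.
func qlogPath(label, odcid string) string {
	dir := os.Getenv("QLOGDIR")
	if dir == "" {
		return ""
	}
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return ""
	}
	return filepath.Join(dir, fmt.Sprintf("%s_%s.sqlog", odcid, label))
}

func main() {
	os.Setenv("QLOGDIR", os.TempDir())
	fmt.Println(qlogPath("client", "deadbeef"))
}
```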